Commit

Merge pull request #91 from OSINT-TECHNOLOGIES/rolling
Stabilized v1.1.3
OSINT-TECHNOLOGIES authored Nov 2, 2024
2 parents a22acbb + 2eaf834 commit 84ac817
Showing 14 changed files with 431 additions and 250 deletions.
Binary file added apis/api_keys.db
Binary file added apis/api_keys_reference.db
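The two SQLite key stores are committed as binaries, so their contents are not visible in this diff. Judging from the SELECT api_name, api_key FROM api_keys query used by the new API modules below, the expected layout is presumably a single two-column table. A minimal sketch of how such a database could be prepared (column names inferred from that query, key values are placeholders):

import sqlite3

# Hypothetical reconstruction of apis/api_keys.db; the real schema is not shown in this commit.
conn = sqlite3.connect('apis//api_keys.db')
conn.execute("CREATE TABLE IF NOT EXISTS api_keys (api_name TEXT, api_key TEXT)")
conn.execute("INSERT INTO api_keys (api_name, api_key) VALUES (?, ?)", ('VirusTotal', 'YOUR_VT_KEY'))        # placeholder key
conn.execute("INSERT INTO api_keys (api_name, api_key) VALUES (?, ?)", ('SecurityTrails', 'YOUR_ST_KEY'))    # placeholder key
conn.commit()
conn.close()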
59 changes: 59 additions & 0 deletions apis/api_securitytrails.py
@@ -0,0 +1,59 @@
import requests
import sqlite3
from colorama import Fore, Style

def api_securitytrails_check(domain):
    conn = sqlite3.connect('apis//api_keys.db')
    cursor = conn.cursor()
    cursor.execute("SELECT api_name, api_key FROM api_keys")
    rows = cursor.fetchall()
    for row in rows:
        api_name, api_key = row
        if api_name == 'SecurityTrails':
            api_key = str(row[1])
            print(Fore.GREEN + 'Got SecurityTrails API key. Starting SecurityTrails scan...\n')

    subdomains_url = f"https://api.securitytrails.com/v1/domain/{domain}/subdomains?apikey={api_key}"
    response = requests.get(subdomains_url)

    url = f"https://api.securitytrails.com/v1/domain/{domain}?apikey={api_key}"
    general_response = requests.get(url)
    general_data = general_response.json()

    print(Fore.GREEN + "[DOMAIN GENERAL INFORMATION]\n")
    print(Fore.GREEN + "Alexa Rank: " + Fore.LIGHTCYAN_EX + f"{general_data['alexa_rank']}")
    print(Fore.GREEN + "Apex Domain: " + Fore.LIGHTCYAN_EX + f"{general_data['apex_domain']}")
    print(Fore.GREEN + "Hostname: " + Fore.LIGHTCYAN_EX + f"{general_data['hostname']}" + Style.RESET_ALL)

    print(Fore.GREEN + "\n[DNS RECORDS]" + Style.RESET_ALL)
    for record_type, record_data in general_data['current_dns'].items():
        print(Fore.GREEN + f"\n[+] {record_type.upper()} RECORDS:" + Style.RESET_ALL)
        for value in record_data.get('values', []):
            if record_type == 'a':
                print(Fore.GREEN + "IP: " + Fore.LIGHTCYAN_EX + f"{value['ip']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['ip_organization']}")
            elif record_type == 'mx':
                print(Fore.GREEN + "Hostname: " + Fore.LIGHTCYAN_EX + f"{value['hostname']} " + Fore.GREEN + "| Priority: " + Fore.LIGHTCYAN_EX + f"{value['priority']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['hostname_organization']}")
            elif record_type == 'ns':
                print(Fore.GREEN + "Nameserver: " + Fore.LIGHTCYAN_EX + f"{value['nameserver']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['nameserver_organization']}")
            elif record_type == 'soa':
                print(Fore.GREEN + "Email: " + Fore.LIGHTCYAN_EX + f"{value['email']} " + Fore.GREEN + "| TTL: " + Fore.LIGHTCYAN_EX + f"{value['ttl']}")
            elif record_type == 'txt':
                print(Fore.GREEN + "Value: " + Fore.LIGHTCYAN_EX + f"{value['value']}")

    if response.status_code == 200:
        data = response.json()
        print(Fore.GREEN + "\n[SUBDOMAINS DEEP ENUMERATION]\n")
        print(Fore.GREEN + f"Found " + Fore.LIGHTCYAN_EX + f"{data['subdomain_count']} " + Fore.GREEN + "subdomains")
        print(Fore.GREEN + "Subdomains list: ")
        for i, subdomain in enumerate(data['subdomains'], start=1):
            subdomain_url = f"http://{subdomain}.{domain}"
            try:
                response = requests.get(subdomain_url, timeout=5)
                if response.status_code == 200:
                    print(Fore.GREEN + f"{i}. " + Fore.LIGHTCYAN_EX + f"{subdomain_url} " + Fore.GREEN + "is alive")
                else:
                    pass
            except Exception:
                pass
    else:
        pass
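For reference, a standalone invocation of the new module would look roughly like the sketch below; the relative 'apis//api_keys.db' path means it assumes the current working directory is the project root, the import assumes apis/ is on sys.path (as in data_assembler.py), and example.com is only a placeholder target.

# Hypothetical standalone usage; within DPULSE the function is called from
# data_assembler.data_gathering() when SecurityTrails is selected via used_api_flag.
from api_securitytrails import api_securitytrails_check   # assumes apis/ is importable

api_securitytrails_check('example.com')   # placeholder domain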
49 changes: 49 additions & 0 deletions apis/api_virustotal.py
@@ -0,0 +1,49 @@
import requests
import sqlite3
from colorama import Fore, Style

def check_domain(domain, api_key):
    url = "https://www.virustotal.com/vtapi/v2/domain/report"
    params = {
        'domain': domain,
        'apikey': api_key
    }

    response = requests.get(url, params=params)

    if response.status_code == 200:
        return response.json()
    else:
        print(f"Error: {response.status_code}")
        return None


def api_virustotal_check(domain):
    conn = sqlite3.connect('apis//api_keys.db')
    cursor = conn.cursor()
    cursor.execute("SELECT api_name, api_key FROM api_keys")
    rows = cursor.fetchall()
    for row in rows:
        api_name, api_key = row
        if api_name == 'VirusTotal':
            api_key = str(row[1])
            print(Fore.GREEN + 'Got VirusTotal API key. Starting VirusTotal scan...\n')

    result = check_domain(domain, api_key)

    if result:
        print(Fore.GREEN + "[VIRUSTOTAL DOMAIN REPORT]")
        print(Fore.GREEN + f"Domain: {result.get('domain')}")
        print(Fore.GREEN + f"Categories: {result.get('categories')}")
        print(Fore.GREEN + f"Detected URLs: {len(result.get('detected_urls', []))}")
        print(Fore.GREEN + f"Detected Samples: {len(result.get('detected_samples', []))}")
        print(Fore.GREEN + f"Undetected Samples: {len(result.get('undetected_samples', []))}\n")
        print(Fore.LIGHTGREEN_EX + "-------------------------------------------------\n" + Style.RESET_ALL)
        conn.close()
    else:
        print(Fore.RED + "Failed to get domain report\n")
        print(Fore.LIGHTGREEN_EX + "-------------------------------------------------\n" + Style.RESET_ALL)
        conn.close()
        pass
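The printer in api_virustotal_check() only reads a handful of fields from the v2 domain report, so the minimum response shape it relies on is roughly the dictionary below (illustrative, empty values; the live API returns far more data and may omit some of these keys, which .get() tolerates):

# Illustrative shape of the fields api_virustotal_check() consumes; values are made up.
sample_result = {
    'domain': 'example.com',
    'categories': [],            # printed as-is
    'detected_urls': [],         # only the count (len) is printed
    'detected_samples': [],      # only the count (len) is printed
    'undetected_samples': [],    # only the count (len) is printed
}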


40 changes: 20 additions & 20 deletions datagather_modules/crawl_processor.py
@@ -115,25 +115,25 @@ def sm_gather(url):
     for link in links:
         parsed_url = urlparse(link)
         hostname = parsed_url.hostname
-        if hostname and hostname.endswith('facebook.com'):
+        if hostname and (hostname == 'facebook.com' or hostname.endswith('.facebook.com')):
             categorized_links['Facebook'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('twitter.com'):
+        elif hostname and (hostname == 'twitter.com' or hostname.endswith('.twitter.com')):
             categorized_links['Twitter'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('instagram.com'):
+        elif hostname and (hostname == 'instagram.com' or hostname.endswith('.instagram.com')):
             categorized_links['Instagram'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('t.me'):
+        elif hostname and (hostname == 't.me' or hostname.endswith('.t.me')):
             categorized_links['Telegram'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('tiktok.com'):
+        elif hostname and (hostname == 'tiktok.com' or hostname.endswith('.tiktok.com')):
             categorized_links['TikTok'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('linkedin.com'):
+        elif hostname and (hostname == 'linkedin.com' or hostname.endswith('.linkedin.com')):
             categorized_links['LinkedIn'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('vk.com'):
+        elif hostname and (hostname == 'vk.com' or hostname.endswith('.vk.com')):
             categorized_links['VKontakte'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('youtube.com'):
+        elif hostname and (hostname == 'youtube.com' or hostname.endswith('.youtube.com')):
             categorized_links['YouTube'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('wechat.com'):
+        elif hostname and (hostname == 'wechat.com' or hostname.endswith('.wechat.com')):
             categorized_links['WeChat'].append(urllib.parse.unquote(link))
-        elif hostname and hostname.endswith('ok.ru'):
+        elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
             categorized_links['Odnoklassniki'].append(urllib.parse.unquote(link))

     if not categorized_links['Odnoklassniki']:
@@ -214,25 +214,25 @@ def domains_reverse_research(subdomains, report_file_type):
     for inner_list in subdomain_socials_grouped:
         for link in inner_list:
             hostname = urlparse(link).hostname
-            if hostname and hostname.endswith('facebook.com'):
+            if hostname and (hostname == 'facebook.com' or hostname.endswith('.facebook.com')):
                 sd_socials['Facebook'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('twitter.com'):
+            elif hostname and (hostname == 'twitter.com' or hostname.endswith('.twitter.com')):
                 sd_socials['Twitter'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('instagram.com'):
+            elif hostname and (hostname == 'instagram.com' or hostname.endswith('.instagram.com')):
                 sd_socials['Instagram'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('t.me'):
+            elif hostname and (hostname == 't.me' or hostname.endswith('.t.me')):
                 sd_socials['Telegram'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('tiktok.com'):
+            elif hostname and (hostname == 'tiktok.com' or hostname.endswith('.tiktok.com')):
                 sd_socials['TikTok'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('linkedin.com'):
+            elif hostname and (hostname == 'linkedin.com' or hostname.endswith('.linkedin.com')):
                 sd_socials['LinkedIn'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('vk.com'):
+            elif hostname and (hostname == 'vk.com' or hostname.endswith('.vk.com')):
                 sd_socials['VKontakte'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('youtube.com'):
+            elif hostname and (hostname == 'youtube.com' or hostname.endswith('.youtube.com')):
                 sd_socials['YouTube'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('wechat.com'):
+            elif hostname and (hostname == 'wechat.com' or hostname.endswith('.wechat.com')):
                 sd_socials['WeChat'].append(urllib.parse.unquote(link))
-            elif hostname and hostname.endswith('ok.ru'):
+            elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
                 sd_socials['Odnoklassniki'].append(urllib.parse.unquote(link))

     sd_socials = {k: list(set(v)) for k, v in sd_socials.items()}
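The crawl_processor.py change is the same in both hunks: the old endswith('facebook.com') test also matched unrelated hosts such as notfacebook.com, while the new condition accepts only the apex domain itself or a genuine subdomain of it. The repeated expression boils down to the small helper sketched here (not part of this commit, shown only to illustrate the pattern):

def belongs_to(hostname, domain):
    # True for the apex domain ('facebook.com') and real subdomains
    # ('www.facebook.com'), False for look-alikes ('notfacebook.com').
    return hostname == domain or hostname.endswith('.' + domain)

# e.g. the Facebook branch is equivalent to:
# if hostname and belongs_to(hostname, 'facebook.com'):
#     categorized_links['Facebook'].append(urllib.parse.unquote(link))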
38 changes: 35 additions & 3 deletions datagather_modules/data_assembler.py
@@ -8,6 +8,8 @@
 import networking_processor as np
 from pagesearch_main import normal_search, sitemap_inspection_search
 from logs_processing import logging
+from api_virustotal import api_virustotal_check
+from api_securitytrails import api_securitytrails_check

 try:
     import requests
@@ -65,7 +67,7 @@ def report_preprocessing(self, short_domain, report_file_type):
         os.makedirs(report_folder, exist_ok=True)
         return casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, files_ctime, report_ctime

-    def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag, dorking_flag):
+    def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag, dorking_flag, used_api_flag):
         casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, ctime, report_ctime = self.report_preprocessing(short_domain, report_file_type)
         logging.info(f'### THIS LOG PART FOR {casename} CASE, TIME: {ctime} STARTS HERE')
         print(Fore.GREEN + "Started scanning domain" + Style.RESET_ALL)
@@ -147,6 +149,16 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
                 dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table))
                 print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)

+            if used_api_flag != ['Empty']:
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL)
+                if 1 in used_api_flag:
+                    api_virustotal_check(short_domain)
+                if 2 in used_api_flag:
+                    api_securitytrails_check(short_domain)
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL)
+            else:
+                pass

             data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
                           subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
                           robots_txt_result, sitemap_xml_result, sitemap_links_status,
@@ -180,13 +192,23 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k

             if dorking_flag == 'none':
                 dorking_status = 'Google Dorking mode was not selected for this scan'
-                dorking_file_path = 'Google Dorking mode was not selected for this scan'
+                dorking_results = ['Google Dorking mode was not selected for this scan']
             else:
                 dorking_db_path, table = establishing_dork_db_connection(dorking_flag.lower())
                 print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
-                dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table))
+                dorking_status, dorking_results = dp.transfer_results_to_xlsx(table, dp.get_dorking_query(short_domain, dorking_db_path, table))
                 print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)

+            if used_api_flag != ['Empty']:
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL)
+                if 1 in used_api_flag:
+                    api_virustotal_check(short_domain)
+                if 2 in used_api_flag:
+                    api_securitytrails_check(short_domain)
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL)
+            else:
+                pass
+
             data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
                           subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
                           robots_txt_result, sitemap_xml_result, sitemap_links_status,
@@ -230,6 +252,16 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
                 dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table))
                 print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)

+            if used_api_flag != ['Empty']:
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL)
+                if 1 in used_api_flag:
+                    api_virustotal_check(short_domain)
+                if 2 in used_api_flag:
+                    api_securitytrails_check(short_domain)
+                print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL)
+            else:
+                pass

             data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
                           subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
                           robots_txt_result, sitemap_xml_result, sitemap_links_status,
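All three report-type branches gain the same block, and data_gathering() now takes an extra used_api_flag argument: a list holding the sentinel string 'Empty' when no API scan was requested, or integer identifiers (1 for VirusTotal, 2 for SecurityTrails) when it was. A hypothetical caller-side sketch, with every argument except used_api_flag being a placeholder and the class name assumed rather than shown in this diff:

# Hypothetical call site; only the used_api_flag convention (['Empty'] vs. a list
# of integers) comes from this commit, the rest is illustrative.
processor = DataProcessing()   # assumed name of the class that owns data_gathering()
processor.data_gathering('example.com', 'http://example.com/', 'xlsx',
                         'n', [], 'n', 'none',
                         used_api_flag=[1, 2])   # run both VirusTotal (1) and SecurityTrails (2)
# used_api_flag=['Empty'] would skip the API scanning stage entirely.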
