diff --git a/app.py b/app.py
index 5bc7fec..09bb8a8 100644
--- a/app.py
+++ b/app.py
@@ -8,21 +8,24 @@ import click
from libs.core import Bootstrapper
from libs.task.base_task import BaseTask
+
@click.group(help="Python script for automatically retrieving key information in app.")
def cli():
pass
# Create the Android task
+
+
@cli.command(help="Get the key information of Android system.")
@click.option("-i", "--inputs", required=True, type=str, help="Please enter the APK file or DEX file to be scanned or the corresponding APK download address.")
@click.option("-r", "--rules", required=False, type=str, default="", help="Please enter a rule for temporary scanning of file contents.")
@click.option("-s", "--sniffer", is_flag=True, default=False, help="Enable the network sniffer function. It is on by default.")
-@click.option("-n", '--no-resource', is_flag=True, default=False,help="Ignore all resource files, including network sniffing. It is not enabled by default.")
-@click.option("-a", '--all',is_flag=True, default=False,help="Output the string content that conforms to the scan rules.It is on by default.")
-@click.option("-t", '--threads',required=False, type=int,default=10,help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
-@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
-@click.option("-p", '--package',required=False,type=str,default="",help="Specifies the package name information that needs to be scanned.")
-def android(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output, package:str) -> None:
+@click.option("-n", '--no-resource', is_flag=True, default=False, help="Ignore all resource files, including network sniffing. It is not enabled by default.")
+@click.option("-a", '--all', is_flag=True, default=False, help="Output the string content that conforms to the scan rules.It is on by default.")
+@click.option("-t", '--threads', required=False, type=int, default=10, help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
+@click.option("-o", '--output', required=False, type=str, default=None, help="Specify the result set output directory.")
+@click.option("-p", '--package', required=False, type=str, default="", help="Specifies the package name information that needs to be scanned.")
+def android(inputs: str, rules: str, sniffer: bool, no_resource: bool, all: bool, threads: int, output, package: str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
@@ -31,15 +34,16 @@ def android(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool,
except Exception as e:
raise e
+
@cli.command(help="Get the key information of iOS system.")
@click.option("-i", "--inputs", required=True, type=str, help="Please enter IPA file or ELF file to scan or corresponding IPA download address. App store is not supported at present.")
@click.option("-r", "--rules", required=False, type=str, default="", help="Please enter a rule for temporary scanning of file contents.")
@click.option("-s", "--sniffer", is_flag=True, default=False, help="Enable the network sniffer function. It is on by default.")
-@click.option("-n", '--no-resource', is_flag=True, default=False,help="Ignore all resource files, including network sniffing. It is not enabled by default.")
-@click.option("-a", '--all',is_flag=True, default=False,help="Output the string content that conforms to the scan rules.It is on by default.")
-@click.option("-t", '--threads',required=False, type=int,default=10,help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
-@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
-def ios(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output:str) -> None:
+@click.option("-n", '--no-resource', is_flag=True, default=False, help="Ignore all resource files, including network sniffing. It is not enabled by default.")
+@click.option("-a", '--all', is_flag=True, default=False, help="Output the string content that conforms to the scan rules.It is on by default.")
+@click.option("-t", '--threads', required=False, type=int, default=10, help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
+@click.option("-o", '--output', required=False, type=str, default=None, help="Specify the result set output directory.")
+def ios(inputs: str, rules: str, sniffer: bool, no_resource: bool, all: bool, threads: int, output: str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
@@ -48,15 +52,16 @@ def ios(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, thre
except Exception as e:
raise e
+
@cli.command(help="Get the key information of Web system.")
@click.option("-i", "--inputs", required=True, type=str, help="Please enter the site directory or site file to scan or the corresponding site download address.")
@click.option("-r", "--rules", required=False, type=str, default="", help="Please enter a rule for temporary scanning of file contents.")
@click.option("-s", "--sniffer", is_flag=True, default=False, help="Enable the network sniffer function. It is on by default.")
-@click.option("-n", '--no-resource', is_flag=True, default=False,help="Ignore all resource files, including network sniffing. It is not enabled by default.")
-@click.option("-a", '--all',is_flag=True, default=False,help="Output the string content that conforms to the scan rules.It is on by default.")
-@click.option("-t", '--threads',required=False, type=int,default=10,help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
-@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
-def web(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output:str) -> None:
+@click.option("-n", '--no-resource', is_flag=True, default=False, help="Ignore all resource files, including network sniffing. It is not enabled by default.")
+@click.option("-a", '--all', is_flag=True, default=False, help="Output the string content that conforms to the scan rules.It is on by default.")
+@click.option("-t", '--threads', required=False, type=int, default=10, help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
+@click.option("-o", '--output', required=False, type=str, default=None, help="Specify the result set output directory.")
+def web(inputs: str, rules: str, sniffer: bool, no_resource: bool, all: bool, threads: int, output: str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
@@ -65,9 +70,10 @@ def web(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, thre
except Exception as e:
raise e
+
def main():
cli()
+
if __name__ == "__main__":
main()
-
diff --git a/config.py b/config.py
index ff6ad97..f06ac80 100644
--- a/config.py
+++ b/config.py
@@ -8,7 +8,7 @@
# com.alibaba.fastjson -> fastjson
# com.google.gson -> gson
# com.fasterxml.jackson -> jackson
-# net.sf.json ->
+# net.sf.json ->
# javax.xml.parsers.DocumentBuilder -> DOM approach
# javax.xml.parsers.SAXParser -> SAX approach
# org.jdom.input.SAXBuilder -> jdom
@@ -28,7 +28,7 @@ filter_components = [
# 1. Strings starting with https:// or http://
# 2. IPv4 addresses
# 3. URIs; they cannot be concatenated reliably, so they are ignored here
-filter_strs =[
+filter_strs = [
r'https://.*|http://.*',
# r'.*://([[0-9]{1,3}\.]{3}[0-9]{1,3}).*',
r'.*://([\d{1,3}\.]{3}\d{1,3}).*',
@@ -50,79 +50,80 @@ filter_no = [
r'.*w3school.com.cn',
r'.*apple.com',
r'.*.amap.com',
+ r'.*slf4j.org',
]
# Collection of AK (access key) patterns
filter_ak_map = {
"Aliyun_OSS": [
- r'.*accessKeyId.*".*"',
- r'.*accessKeySecret.*".*"',
- r'.*secret.*".*"'
+ r'.*accessKeyId.*".*?"',
+ r'.*accessKeySecret.*".*?"',
+ r'.*secret.*".*?"'
],
- #"Amazon_AWS_Access_Key_ID": r"([^A-Z0-9]|^)(AKIA|A3T|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{12,}",
- #"Amazon_AWS_S3_Bucket": [
- # r"//s3-[a-z0-9-]+\\.amazonaws\\.com/[a-z0-9._-]+",
- # r"//s3\\.amazonaws\\.com/[a-z0-9._-]+",
- # r"[a-z0-9.-]+\\.s3-[a-z0-9-]\\.amazonaws\\.com",
- # r"[a-z0-9.-]+\\.s3-website[.-](eu|ap|us|ca|sa|cn)",
- # r"[a-z0-9.-]+\\.s3\\.amazonaws\\.com",
- # r"amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
- #],
- #"Artifactory_API_Token": r"(?:\\s|=|:|\"|^)AKC[a-zA-Z0-9]{10,}",
- #"Artifactory_Password": r"(?:\\s|=|:|\"|^)AP[\\dABCDEF][a-zA-Z0-9]{8,}",
- # "Authorization_Basic": r"basic\\s[a-zA-Z0-9_\\-:\\.=]+",
- # "Authorization_Bearer": r"bearer\\s[a-zA-Z0-9_\\-:\\.=]+",
- #"AWS_API_Key": r"AKIA[0-9A-Z]{16}",
- #"Basic_Auth_Credentials": r"(?<=:\/\/)[a-zA-Z0-9]+:[a-zA-Z0-9]+@[a-zA-Z0-9]+\\.[a-zA-Z]+",
- #"Cloudinary_Basic_Auth": r"cloudinary:\/\/[0-9]{15}:[0-9A-Za-z]+@[a-z]+",
- #"DEFCON_CTF_Flag": r"O{3}\\{.*\\}",
- #"Discord_BOT_Token": r"((?:N|M|O)[a-zA-Z0-9]{23}\\.[a-zA-Z0-9-_]{6}\\.[a-zA-Z0-9-_]{27})$",
- #"Facebook_Access_Token": r"EAACEdEose0cBA[0-9A-Za-z]+",
- #"Facebook_ClientID": r"[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K](.{0,20})?['\"][0-9]{13,17}",
- #"Facebook_OAuth": r"[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K].*['|\"][0-9a-f]{32}['|\"]",
- #"Facebook_Secret_Key": r"([f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K]|[f|F][b|B])(.{0,20})?['\"][0-9a-f]{32}",
- #"Firebase": r"[a-z0-9.-]+\\.firebaseio\\.com",
- #"Generic_API_Key": r"[a|A][p|P][i|I][_]?[k|K][e|E][y|Y].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
- #"Generic_Secret": r"[s|S][e|E][c|C][r|R][e|E][t|T].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
- #"GitHub": r"[g|G][i|I][t|T][h|H][u|U][b|B].*['|\"][0-9a-zA-Z]{35,40}['|\"]",
- #"GitHub_Access_Token": r"([a-zA-Z0-9_-]*:[a-zA-Z0-9_-]+@github.com*)$",
- #"Google_API_Key": r"AIza[0-9A-Za-z\\-_]{35}",
- #"Google_Cloud_Platform_OAuth": r"[0-9]+-[0-9A-Za-z_]{32}\\.apps\\.googleusercontent\\.com",
- #"Google_Cloud_Platform_Service_Account": r"\"type\": \"service_account\"",
- #"Google_OAuth_Access_Token": r"ya29\\.[0-9A-Za-z\\-_]+",
- #"HackerOne_CTF_Flag": r"[h|H]1(?:[c|C][t|T][f|F])?\\{.*\\}",
- #"HackTheBox_CTF_Flag": r"[h|H](?:[a|A][c|C][k|K][t|T][h|H][e|E][b|B][o|O][x|X]|[t|T][b|B])\\{.*\\}$",
- #"Heroku_API_Key": r"[h|H][e|E][r|R][o|O][k|K][u|U].*[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}",
- # "IP_Address": r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])",
- #"JSON_Web_Token": r"(?i)^((?=.*[a-z])(?=.*[0-9])(?:[a-z0-9_=]+\\.){2}(?:[a-z0-9_\\-\\+\/=]*))$",
- # "LinkFinder": r"(?:\"|')(((?:[a-zA-Z]{1,10}:\/\/|\/\/)[^\"'\/]{1,}\\.[a-zA-Z]{2,}[^\"']{0,})|((?:\/|\\.\\.\/|\\.\/)[^\"'><,;| *()(%%$^\/\\\\\\[\\]][^\"'><,;|()]{1,})|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{1,}\\.(?:[a-zA-Z]{1,4}|action)(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{3,}(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-]{1,}\\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\\?|#][^\"|']{0,}|)))(?:\"|')",
- #"Mac_Address": r"(([0-9A-Fa-f]{2}[:]){5}[0-9A-Fa-f]{2}|([0-9A-Fa-f]{2}[-]){5}[0-9A-Fa-f]{2}|([0-9A-Fa-f]{4}[\\.]){2}[0-9A-Fa-f]{4})$",
- #"MailChimp_API_Key": r"[0-9a-f]{32}-us[0-9]{1,2}",
- #"Mailgun_API_Key": r"key-[0-9a-zA-Z]{32}",
- #"Mailto": r"(?<=mailto:)[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9.-]+",
- #"Password_in_URL": r"[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}[\"'\\s]",
- #"PayPal_Braintree_Access_Token": r"access_token\\$production\\$[0-9a-z]{16}\\$[0-9a-f]{32}",
- #"PGP_private_key_block": r"-----BEGIN PGP PRIVATE KEY BLOCK-----",
- #"Picatic_API_Key": r"sk_live_[0-9a-z]{32}",
- #"RSA_Private_Key": r"-----BEGIN RSA PRIVATE KEY-----",
- #"Slack_Token": r"(xox[p|b|o|a]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32})",
- #"Slack_Webhook": r"https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}",
- #"Square_Access_Token": r"sq0atp-[0-9A-Za-z\\-_]{22}",
- #"Square_OAuth_Secret": r"sq0csp-[0-9A-Za-z\\-_]{43}",
- #"SSH_DSA_Private_Key": r"-----BEGIN DSA PRIVATE KEY-----",
- #"SSH_EC_Private_Key": r"-----BEGIN EC PRIVATE KEY-----",
- #"Stripe_API_Key": r"sk_live_[0-9a-zA-Z]{24}",
- #"Stripe_Restricted_API_Key": r"rk_live_[0-9a-zA-Z]{24}",
- #"TryHackMe_CTF_Flag": r"[t|T](?:[r|R][y|Y][h|H][a|A][c|C][k|K][m|M][e|E]|[h|H][m|M])\\{.*\\}$",
- #"Twilio_API_Key": r"SK[0-9a-fA-F]{32}",
- #"Twitter_Access_Token": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*[1-9][0-9]+-[0-9a-zA-Z]{40}",
- #"Twitter_ClientID": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R](.{0,20})?['\"][0-9a-z]{18,25}",
- #"Twitter_OAuth": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*['|\"][0-9a-zA-Z]{35,44}['|\"]",
- #"Twitter_Secret_Key": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R](.{0,20})?['\"][0-9a-z]{35,44}"
+ # "Amazon_AWS_Access_Key_ID": r"([^A-Z0-9]|^)(AKIA|A3T|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{12,}",
+ # "Amazon_AWS_S3_Bucket": [
+ # r"//s3-[a-z0-9-]+\\.amazonaws\\.com/[a-z0-9._-]+",
+ # r"//s3\\.amazonaws\\.com/[a-z0-9._-]+",
+ # r"[a-z0-9.-]+\\.s3-[a-z0-9-]\\.amazonaws\\.com",
+ # r"[a-z0-9.-]+\\.s3-website[.-](eu|ap|us|ca|sa|cn)",
+ # r"[a-z0-9.-]+\\.s3\\.amazonaws\\.com",
+ # r"amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+ # ],
+ # "Artifactory_API_Token": r"(?:\\s|=|:|\"|^)AKC[a-zA-Z0-9]{10,}",
+ # "Artifactory_Password": r"(?:\\s|=|:|\"|^)AP[\\dABCDEF][a-zA-Z0-9]{8,}",
+ # "Authorization_Basic": r"basic\\s[a-zA-Z0-9_\\-:\\.=]+",
+ # "Authorization_Bearer": r"bearer\\s[a-zA-Z0-9_\\-:\\.=]+",
+ # "AWS_API_Key": r"AKIA[0-9A-Z]{16}",
+ # "Basic_Auth_Credentials": r"(?<=:\/\/)[a-zA-Z0-9]+:[a-zA-Z0-9]+@[a-zA-Z0-9]+\\.[a-zA-Z]+",
+ # "Cloudinary_Basic_Auth": r"cloudinary:\/\/[0-9]{15}:[0-9A-Za-z]+@[a-z]+",
+ # "DEFCON_CTF_Flag": r"O{3}\\{.*\\}",
+ # "Discord_BOT_Token": r"((?:N|M|O)[a-zA-Z0-9]{23}\\.[a-zA-Z0-9-_]{6}\\.[a-zA-Z0-9-_]{27})$",
+ # "Facebook_Access_Token": r"EAACEdEose0cBA[0-9A-Za-z]+",
+ # "Facebook_ClientID": r"[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K](.{0,20})?['\"][0-9]{13,17}",
+ # "Facebook_OAuth": r"[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K].*['|\"][0-9a-f]{32}['|\"]",
+ # "Facebook_Secret_Key": r"([f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K]|[f|F][b|B])(.{0,20})?['\"][0-9a-f]{32}",
+ # "Firebase": r"[a-z0-9.-]+\\.firebaseio\\.com",
+ # "Generic_API_Key": r"[a|A][p|P][i|I][_]?[k|K][e|E][y|Y].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
+ # "Generic_Secret": r"[s|S][e|E][c|C][r|R][e|E][t|T].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
+ # "GitHub": r"[g|G][i|I][t|T][h|H][u|U][b|B].*['|\"][0-9a-zA-Z]{35,40}['|\"]",
+ # "GitHub_Access_Token": r"([a-zA-Z0-9_-]*:[a-zA-Z0-9_-]+@github.com*)$",
+ # "Google_API_Key": r"AIza[0-9A-Za-z\\-_]{35}",
+ # "Google_Cloud_Platform_OAuth": r"[0-9]+-[0-9A-Za-z_]{32}\\.apps\\.googleusercontent\\.com",
+ # "Google_Cloud_Platform_Service_Account": r"\"type\": \"service_account\"",
+ # "Google_OAuth_Access_Token": r"ya29\\.[0-9A-Za-z\\-_]+",
+ # "HackerOne_CTF_Flag": r"[h|H]1(?:[c|C][t|T][f|F])?\\{.*\\}",
+ # "HackTheBox_CTF_Flag": r"[h|H](?:[a|A][c|C][k|K][t|T][h|H][e|E][b|B][o|O][x|X]|[t|T][b|B])\\{.*\\}$",
+ # "Heroku_API_Key": r"[h|H][e|E][r|R][o|O][k|K][u|U].*[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}",
+ # "IP_Address": r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])",
+ # "JSON_Web_Token": r"(?i)^((?=.*[a-z])(?=.*[0-9])(?:[a-z0-9_=]+\\.){2}(?:[a-z0-9_\\-\\+\/=]*))$",
+ # "LinkFinder": r"(?:\"|')(((?:[a-zA-Z]{1,10}:\/\/|\/\/)[^\"'\/]{1,}\\.[a-zA-Z]{2,}[^\"']{0,})|((?:\/|\\.\\.\/|\\.\/)[^\"'><,;| *()(%%$^\/\\\\\\[\\]][^\"'><,;|()]{1,})|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{1,}\\.(?:[a-zA-Z]{1,4}|action)(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{3,}(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-]{1,}\\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\\?|#][^\"|']{0,}|)))(?:\"|')",
+ # "Mac_Address": r"(([0-9A-Fa-f]{2}[:]){5}[0-9A-Fa-f]{2}|([0-9A-Fa-f]{2}[-]){5}[0-9A-Fa-f]{2}|([0-9A-Fa-f]{4}[\\.]){2}[0-9A-Fa-f]{4})$",
+ # "MailChimp_API_Key": r"[0-9a-f]{32}-us[0-9]{1,2}",
+ # "Mailgun_API_Key": r"key-[0-9a-zA-Z]{32}",
+ # "Mailto": r"(?<=mailto:)[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9.-]+",
+ # "Password_in_URL": r"[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}[\"'\\s]",
+ # "PayPal_Braintree_Access_Token": r"access_token\\$production\\$[0-9a-z]{16}\\$[0-9a-f]{32}",
+ # "PGP_private_key_block": r"-----BEGIN PGP PRIVATE KEY BLOCK-----",
+ # "Picatic_API_Key": r"sk_live_[0-9a-z]{32}",
+ # "RSA_Private_Key": r"-----BEGIN RSA PRIVATE KEY-----",
+ # "Slack_Token": r"(xox[p|b|o|a]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32})",
+ # "Slack_Webhook": r"https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}",
+ # "Square_Access_Token": r"sq0atp-[0-9A-Za-z\\-_]{22}",
+ # "Square_OAuth_Secret": r"sq0csp-[0-9A-Za-z\\-_]{43}",
+ # "SSH_DSA_Private_Key": r"-----BEGIN DSA PRIVATE KEY-----",
+ # "SSH_EC_Private_Key": r"-----BEGIN EC PRIVATE KEY-----",
+ # "Stripe_API_Key": r"sk_live_[0-9a-zA-Z]{24}",
+ # "Stripe_Restricted_API_Key": r"rk_live_[0-9a-zA-Z]{24}",
+ # "TryHackMe_CTF_Flag": r"[t|T](?:[r|R][y|Y][h|H][a|A][c|C][k|K][m|M][e|E]|[h|H][m|M])\\{.*\\}$",
+ # "Twilio_API_Key": r"SK[0-9a-fA-F]{32}",
+ # "Twitter_Access_Token": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*[1-9][0-9]+-[0-9a-zA-Z]{40}",
+ # "Twitter_ClientID": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R](.{0,20})?['\"][0-9a-z]{18,25}",
+ # "Twitter_OAuth": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*['|\"][0-9a-zA-Z]{35,44}['|\"]",
+ # "Twitter_Secret_Key": r"[t|T][w|W][i|I][t|T][t|T][e|E][r|R](.{0,20})?['\"][0-9a-z]{35,44}"
}
# Configure packer (shell) information here
-shell_list =[
+shell_list = [
'com.stub.StubApp',
's.h.e.l.l.S',
'com.Kiwisec.KiwiSecApplication',
@@ -150,7 +151,7 @@ apk_permissions = [
]
# Configure the web file suffixes to be scanned here
-web_file_suffix =[
+web_file_suffix = [
"html",
"js",
"xml",
@@ -163,7 +164,7 @@ web_file_suffix =[
]
# Configure file suffixes to exclude from network sniffing; adjust as needed, nothing is filtered by default
-sniffer_filter=[
+sniffer_filter = [
"jpg",
"png",
"jpeg",
@@ -172,8 +173,8 @@ sniffer_filter=[
# Request headers used when automatically downloading APK files or caching HTML
headers = {
- "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0",
- "Connection":"close"
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0",
+ "Connection": "close"
}
# Request body used when automatically downloading APK files or caching HTML
@@ -183,4 +184,3 @@ data = {
# Request method used when automatically downloading APK files or caching HTML; only GET and POST are currently supported
method = "GET"
-
diff --git a/libs/core/__init__.py b/libs/core/__init__.py
index 231ee88..febe6b3 100644
--- a/libs/core/__init__.py
+++ b/libs/core/__init__.py
@@ -26,7 +26,7 @@ output_path = ""
download_flag = False
# Excel starting row number
-excel_row = 0
+excel_row = 1
class Bootstrapper(object):
@@ -78,7 +78,7 @@ class Bootstrapper(object):
apktool_path = os.path.join(tools_dir, "apktool.jar")
download_path = os.path.join(out_dir,"download")
txt_result_path = os.path.join(out_dir,"result_"+str(create_time)+".txt")
- xls_result_path = os.path.join(out_dir,"result_"+str(create_time)+".xls")
+ xls_result_path = os.path.join(out_dir,"result_"+str(create_time)+".xlsx")
app_history_path = os.path.join(history_path,"app_history.txt")
domain_history_path = os.path.join(history_path,"domain_history.txt")
@@ -100,7 +100,6 @@ class Bootstrapper(object):
print("[*] Create directory {}".format(output_path))
if not os.path.exists(download_path):
- # shutil.rmtree(download_path)
os.makedirs(download_path)
print("[*] Create directory {}".format(download_path))
diff --git a/libs/core/download.py b/libs/core/download.py
index 9c61d1d..cd8ded1 100644
--- a/libs/core/download.py
+++ b/libs/core/download.py
@@ -2,22 +2,20 @@
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
-import re
-import os
import sys
-import time
import config
import requests
import threading
import libs.core as cores
from requests.packages import urllib3
-from requests.adapters import HTTPAdapter
+from requests.adapters import HTTPAdapter
-class DownloadThreads(threading.Thread):
- def __init__(self,input_path,file_name,cache_path,types):
- threading.Thread.__init__(self)
- self.url = input_path
+class DownloadThreads(threading.Thread):
+
+ def __init__(self, input_path, file_name, cache_path, types):
+ threading.Thread.__init__(self)
+ self.url = input_path
self.types = types
self.cache_path = cache_path
self.file_name = file_name
@@ -27,23 +25,24 @@ class DownloadThreads(threading.Thread):
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=3))
session.mount('https://', HTTPAdapter(max_retries=3))
- session.keep_alive =False
+ session.keep_alive = False
session.adapters.DEFAULT_RETRIES = 5
urllib3.disable_warnings()
if config.method.upper() == "POST":
- resp = session.post(url=self.url,params=config.data ,headers=config.headers,timeout=30)
+ resp = session.post(
+ url=self.url, params=config.data, headers=config.headers, timeout=30)
else:
- resp = session.get(url=self.url,data=config.data ,headers=config.headers,timeout=30)
-
+ resp = session.get(url=self.url, data=config.data,
+ headers=config.headers, timeout=30)
+
if resp.status_code == requests.codes.ok:
if self.types == "Android" or self.types == "iOS":
count = 0
progress_tmp = 0
- time1 = time.time()
length = float(resp.headers['content-length'])
with open(self.cache_path, "wb") as f:
- for chunk in resp.iter_content(chunk_size = 512):
+ for chunk in resp.iter_content(chunk_size=512):
if chunk:
f.write(chunk)
count += len(chunk)
@@ -51,19 +50,19 @@ class DownloadThreads(threading.Thread):
if progress != progress_tmp:
progress_tmp = progress
print("\r", end="")
- print("[*] Download progress: {}%: ".format(progress), "▋" * (progress // 2), end="")
+ print(
+ "[*] Download progress: {}%: ".format(progress), "▋" * (progress // 2), end="")
sys.stdout.flush()
f.close()
else:
html = resp.text
- with open(self.cache_path,"w",encoding='utf-8',errors='ignore') as f:
+ with open(self.cache_path, "w", encoding='utf-8', errors='ignore') as f:
f.write(html)
f.close()
cores.download_flag = True
except Exception as e:
raise Exception(e)
- return
def run(self):
threadLock = threading.Lock()
- self.__requset__()
\ No newline at end of file
+ self.__requset__()
diff --git a/libs/core/net.py b/libs/core/net.py
index 434515a..52b3a2d 100644
--- a/libs/core/net.py
+++ b/libs/core/net.py
@@ -9,21 +9,21 @@ import requests
import libs.core as cores
class NetThreads(threading.Thread):
-
- def __init__(self,threadID,name,domain_queue,worksheet):
- threading.Thread.__init__(self)
+
+ def __init__(self, threadID, name, domain_queue, worksheet):
+ threading.Thread.__init__(self)
self.name = name
self.threadID = threadID
- self.lock = threading.Lock()
- self.domain_queue = domain_queue
+ self.lock = threading.Lock()
+ self.domain_queue = domain_queue
self.worksheet = worksheet
- def __get_Http_info__(self,threadLock):
+ def __get_Http_info__(self, threadLock):
while True:
if self.domain_queue.empty():
break
domains = self.domain_queue.get(timeout=5)
- domain = domains["domain"]
+ domain = domains["domain"]
url_ip = domains["url_ip"]
time.sleep(2)
result = self.__get_request_result__(url_ip)
@@ -31,23 +31,33 @@ class NetThreads(threading.Thread):
if result != "error":
if self.lock.acquire(True):
cores.excel_row = cores.excel_row + 1
- self.worksheet.write(cores.excel_row, 0, label = cores.excel_row)
- self.worksheet.write(cores.excel_row, 1, label = url_ip)
- self.worksheet.write(cores.excel_row, 2, label = domain)
+ self.worksheet.cell(row=cores.excel_row,
+ column=1, value=cores.excel_row-1)
+ self.worksheet.cell(row=cores.excel_row,
+ column=2, value=url_ip)
+ self.worksheet.cell(row=cores.excel_row,
+ column=3, value=domain)
+
if result != "timeout":
- self.worksheet.write(cores.excel_row, 3, label = result["status"])
- self.worksheet.write(cores.excel_row, 4, label = result["des_ip"])
- self.worksheet.write(cores.excel_row, 5, label = result["server"])
- self.worksheet.write(cores.excel_row, 6, label = result["title"])
- self.worksheet.write(cores.excel_row, 7, label = result["cdn"])
- # self.worksheet.write(cores.excel_row, 8, label = "")
+ self.worksheet.cell(
+ row=cores.excel_row, column=4, value=result["status"])
+ self.worksheet.cell(
+ row=cores.excel_row, column=5, value=result["des_ip"])
+ self.worksheet.cell(
+ row=cores.excel_row, column=6, value=result["server"])
+ self.worksheet.cell(
+ row=cores.excel_row, column=7, value=result["title"])
+ self.worksheet.cell(
+ row=cores.excel_row, column=8, value=result["cdn"])
+
self.lock.release()
-
- def __get_request_result__(self,url):
- result={"status":"","server":"","cookie":"","cdn":"","des_ip":"","sou_ip":"","title":""}
+
+ def __get_request_result__(self, url):
+ result = {"status": "", "server": "", "cookie": "",
+ "cdn": "", "des_ip": "", "sou_ip": "", "title": ""}
cdn = ""
try:
- with requests.get(url, timeout=5,stream=True) as rsp:
+ with requests.get(url, timeout=5, stream=True) as rsp:
status_code = rsp.status_code
result["status"] = status_code
headers = rsp.headers
@@ -59,21 +69,21 @@ class NetThreads(threading.Thread):
cdn = cdn + headers['X-Via']
if "Via" in headers:
cdn = cdn + headers['Via']
- result["cdn"] = cdn
+ result["cdn"] = cdn
sock = rsp.raw._connection.sock
-
+
if sock:
des_ip = sock.getpeername()[0]
sou_ip = sock.getsockname()[0]
if des_ip:
- result["des_ip"] = des_ip
+ result["des_ip"] = des_ip
if sou_ip:
- result["sou_ip"] = sou_ip
+ result["sou_ip"] = sou_ip
sock.close()
html = rsp.text
-            title = re.findall('<title>(.+)</title>',html)
+            title = re.findall('<title>(.+)</title>', html)
if title:
- result["title"] = title[0]
+ result["title"] = title[0]
rsp.close()
return result
except requests.exceptions.InvalidURL as e:
diff --git a/libs/core/parses.py b/libs/core/parses.py
index 3bfcbdd..f6f1f0b 100644
--- a/libs/core/parses.py
+++ b/libs/core/parses.py
@@ -11,76 +11,81 @@ import libs.core as cores
class ParsesThreads(threading.Thread):
- def __init__(self,threadID,name,file_queue,result_dict,types):
- threading.Thread.__init__(self)
+ def __init__(self, threadID, name, file_queue, result_dict, types):
+ threading.Thread.__init__(self)
self.file_queue = file_queue
self.name = name
self.threadID = threadID
self.result_list = []
- self.result_dict=result_dict
+ self.result_dict = result_dict
self.types = types
-
+
def __regular_parse__(self):
while True:
if self.file_queue.empty():
break
-
- file_path = self.file_queue.get(timeout = 5)
+
+ file_path = self.file_queue.get(timeout=5)
scan_str = ("[+] Scan file : %s" % file_path)
if self.types == "iOS":
self.__get_string_by_iOS__(file_path)
else:
self.__get_string_by_file__(file_path)
-
+
result_set = set(self.result_list)
if len(result_set) != 0:
self.result_dict[file_path] = result_set
- def __get_string_by_iOS__(self,file_path):
+ def __get_string_by_iOS__(self, file_path):
output_path = cores.output_path
strings_path = cores.strings_path
- temp = os.path.join(output_path,"temp.txt")
- cmd_str = ('"%s" "%s" > "%s"') % (str(strings_path),str(file_path),str(temp))
+ temp = os.path.join(output_path, "temp.txt")
+ cmd_str = ('"%s" "%s" > "%s"') % (
+ str(strings_path), str(file_path), str(temp))
if os.system(cmd_str) == 0:
- with open(temp,"r",encoding='utf-8',errors='ignore') as f:
+ with open(temp, "r", encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
for line in lines:
self.__parse_string__(line)
- def __get_string_by_file__(self,file_path):
- with open(file_path,"r",encoding="utf8",errors='ignore') as f :
- file_content = f.read()
+ def __get_string_by_file__(self, file_path):
+ with open(file_path, "r", encoding="utf8", errors='ignore') as f:
+ file_content = f.read()
        # Extract all the strings
- pattern = re.compile(r'\"(.*?)\"')
+ pattern = re.compile(r'\"(.*?)\"')
results = pattern.findall(file_content)
        # Search for AK and SK information; AK detection for iOS is skipped for now because the iOS processing logic is too slow
if not (".js" == file_path[-3:] and self.types == "iOS"):
- for key,values in config.filter_ak_map.items():
- if isinstance(values,list):
- for value in values:
- self.__ak_and_sk__(key, value, file_content)
- else:
- self.__ak_and_sk__(key, values, file_content)
+            # Skip AK/SK collection when the relevant fields are not present
+ if "access" in file_content or "secret" in file_content:
+ for key, values in config.filter_ak_map.items():
+ if isinstance(values, list):
+ for value in values:
+ self.__ak_and_sk__(key, value, file_content)
+ else:
+ self.__ak_and_sk__(key, values, file_content)
        # Iterate over all the strings
- for result in set(results):
- self.__parse_string__(result)
-
- def __ak_and_sk__(self, name, ak_rule,content):
+ for result in set(results):
+ if ("http://" == result) or ("https://" == result) or result.startswith("https://.") or result.startswith("http://.") :
+ continue
+ self.__parse_string__(result)
+
+ def __ak_and_sk__(self, name, ak_rule, content):
akAndSkList = re.compile(ak_rule).findall(content)
for akAndSk in akAndSkList:
- ak = ("[%s]-->:%s") % (name,akAndSk.strip())
+ ak = ("[%s]-->:%s") % (name, akAndSk.strip())
self.result_list.append(ak)
print(("[+] [%s] AK or SK in %s:") % (name, akAndSk.strip()))
- def __parse_string__(self,result):
+ def __parse_string__(self, result):
        # Use regular expressions to screen the strings to be filtered
for filter_str in config.filter_strs:
- filter_str_pat = re.compile(filter_str)
+ filter_str_pat = re.compile(filter_str)
filter_resl = filter_str_pat.findall(result)
            # Skip content that produced no matches
- if len(filter_resl)!=0:
+ if len(filter_resl) != 0:
                # Take the first result
                resl_str = filter_resl[0]
                # Filter
@@ -89,29 +94,32 @@ class ParsesThreads(threading.Thread):
self.threadLock.acquire()
if cores.all_flag:
- print(("[+] The string searched for matching rule is: %s") % (resl_str))
+ print(
+ ("[+] The string searched for matching rule is: %s") % (resl_str))
self.result_list.append(resl_str)
self.threadLock.release()
continue
- def __filter__(self,resl_str):
- return_flag = 1
- resl_str = resl_str.replace("\r","").replace("\n","").replace(" ","")
-
+ def __filter__(self, resl_str):
+ return_flag = 1
+ resl_str = resl_str.replace("\r", "").replace(
+ "\n", "").replace(" ", "")
+
if len(resl_str) == 0:
return 0
-
+
for filte in set(config.filter_no):
- resl_str = resl_str.replace(filte,"")
+ resl_str = resl_str.replace(filte, "")
if len(resl_str) == 0:
- return_flag = 0
+ return_flag = 0
continue
-
- if re.match(filte,resl_str):
- return_flag = 0
+
+ if re.match(filte, resl_str):
+ return_flag = 0
continue
- return return_flag
+
+ return return_flag
def run(self):
self.threadLock = threading.Lock()
- self.__regular_parse__()
\ No newline at end of file
+ self.__regular_parse__()
diff --git a/libs/task/android_task.py b/libs/task/android_task.py
index 319e3cf..8a03a0e 100644
--- a/libs/task/android_task.py
+++ b/libs/task/android_task.py
@@ -12,141 +12,142 @@ import libs.core as cores
class AndroidTask(object):
- def __init__(self,path,package):
+ def __init__(self, path, package):
self.path = path
self.package = package
self.file_queue = Queue()
- self.shell_flag=False
- self.packagename=""
- self.comp_list=[]
- self.file_identifier=[]
+ self.shell_flag = False
+ self.packagename = ""
+ self.comp_list = []
+ self.file_identifier = []
self.permissions = []
def start(self):
        # Check whether the Java environment is available
- if os.system("java -version") !=0 :
+ if os.system("java -version") != 0:
raise Exception("Please install the Java environment!")
-
+
input_file_path = self.path
-
+
if os.path.isdir(input_file_path):
self.__decode_dir__(input_file_path)
else:
- if self.__decode_file__(input_file_path) == "error":
- raise Exception("Retrieval of this file type is not supported. Select APK file or DEX file.")
-
- return {"comp_list":self.comp_list,"shell_flag":self.shell_flag,"file_queue":self.file_queue,"packagename":self.packagename,"file_identifier":self.file_identifier,"permissions":self.permissions}
+ if self.__decode_file__(input_file_path) == "error":
+ raise Exception(
+ "Retrieval of this file type is not supported. Select APK file or DEX file.")
- def __decode_file__(self,file_path):
+ return {"comp_list": self.comp_list, "shell_flag": self.shell_flag, "file_queue": self.file_queue, "packagename": self.packagename, "file_identifier": self.file_identifier, "permissions": self.permissions}
+
+ def __decode_file__(self, file_path):
apktool_path = str(cores.apktool_path)
backsmali_path = str(cores.backsmali_path)
base_out_path = str(cores.output_path)
- filename = os.path.basename(file_path)
+ filename = os.path.basename(file_path)
suffix_name = filename.split(".")[-1]
- if suffix_name == "apk":
+ if suffix_name == "apk" or suffix_name == "hpk":
name = filename.split(".")[0]
- output_path = os.path.join(base_out_path,name)
- self.__decode_apk__(file_path,apktool_path,output_path)
+ output_path = os.path.join(base_out_path, name)
+ self.__decode_apk__(file_path, apktool_path, output_path)
elif suffix_name == "dex":
- f = open(file_path,'rb')
+ f = open(file_path, 'rb')
md5_obj = hashlib.md5()
while True:
- r = f.read(1024)
+ r = f.read(1024)
if not r:
break
md5_obj.update(r)
dex_md5 = md5_obj.hexdigest().lower()
self.file_identifier.append(dex_md5)
- output_path = os.path.join(base_out_path,dex_md5)
+ output_path = os.path.join(base_out_path, dex_md5)
if not os.path.exists(output_path):
os.makedirs(output_path)
- self.__decode_dex__(file_path,backsmali_path,output_path)
+ self.__decode_dex__(file_path, backsmali_path, output_path)
else:
return "error"
-
- def __decode_dir__(self,root_dir):
+
+ def __decode_dir__(self, root_dir):
dir_or_files = os.listdir(root_dir)
for dir_or_file in dir_or_files:
- dir_or_file_path = os.path.join(root_dir,dir_or_file)
+ dir_or_file_path = os.path.join(root_dir, dir_or_file)
if os.path.isdir(dir_or_file_path):
self.__decode_dir__(dir_or_file_path)
- else:
+ else:
if self.__decode_file__(dir_or_file_path) == "error":
continue
    # Decompile the APK
- def __decode_apk__(self,file_path,apktool_path,output_path):
- cmd_str = ('java -jar "%s" d -f "%s" -o "%s" --only-main-classe') % (str(apktool_path),str(file_path),str(output_path))
+ def __decode_apk__(self, file_path, apktool_path, output_path):
+ cmd_str = ('java -jar "%s" d -f "%s" -o "%s" --only-main-classe') % (
+ str(apktool_path), str(file_path), str(output_path))
if os.system(cmd_str) == 0:
self.__shell_test__(output_path)
self.__scanner_file_by_apktool__(output_path)
else:
print("[-] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
raise Exception(file_path + ", Decompilation failed.")
-
    # Decompile the DEX
- def __decode_dex__(self,file_path,backsmali_path,output_path):
- cmd_str = ('java -jar "%s" d "%s"') % (str(backsmali_path),str(file_path))
+ def __decode_dex__(self, file_path, backsmali_path, output_path):
+ cmd_str = ('java -jar "%s" d "%s"') % (str(backsmali_path),
+ str(file_path))
if os.system(cmd_str) == 0:
self.__get_scanner_file__(output_path)
else:
print("[-] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
raise Exception(file_path + ", Decompilation failed.")
-
    # Initialize the files to be scanned
- def __scanner_file_by_apktool__(self,output_path):
+ def __scanner_file_by_apktool__(self, output_path):
file_names = os.listdir(output_path)
for file_name in file_names:
- file_path = os.path.join(output_path,file_name)
+ file_path = os.path.join(output_path, file_name)
if not os.path.isdir(file_path):
continue
if "smali" in file_name or "assets" in file_name:
- scanner_file_suffixs = ["smali","js","xml"]
+ scanner_file_suffixs = ["smali", "js", "xml"]
if cores.resource_flag:
- scanner_file_suffixs =["smali"]
- self.__get_scanner_file__(file_path,scanner_file_suffixs)
+ scanner_file_suffixs = ["smali"]
+ self.__get_scanner_file__(file_path, scanner_file_suffixs)
- def __get_scanner_file__(self,scanner_dir,scanner_file_suffixs=["smali"]):
+ def __get_scanner_file__(self, scanner_dir, scanner_file_suffixs=["smali"]):
dir_or_files = os.listdir(scanner_dir)
for dir_or_file in dir_or_files:
- dir_file_path = os.path.join(scanner_dir,dir_or_file)
-
+ dir_file_path = os.path.join(scanner_dir, dir_or_file)
+
if os.path.isdir(dir_file_path):
- self.__get_scanner_file__(dir_file_path,scanner_file_suffixs)
+ self.__get_scanner_file__(dir_file_path, scanner_file_suffixs)
else:
if ("." not in dir_or_file) or (len(dir_or_file.split(".")) < 1) or (dir_or_file.split(".")[-1] not in scanner_file_suffixs):
continue
self.file_queue.put(dir_file_path)
for component in config.filter_components:
- comp = component.replace(".","/")
+ comp = component.replace(".", "/")
if(comp in dir_file_path):
if(component not in self.comp_list):
self.comp_list.append(component)
- def __shell_test__(self,output):
- am_path = os.path.join(output,"AndroidManifest.xml")
-
- with open(am_path,"r",encoding='utf-8',errors='ignore') as f:
+ def __shell_test__(self, output):
+ am_path = os.path.join(output, "AndroidManifest.xml")
+
+ with open(am_path, "r", encoding='utf-8', errors='ignore') as f:
am_str = f.read()
-            am_package= re.compile(r'<manifest.*package="(.*?)"')
+            am_package = re.compile(r'<manifest.*package="(.*?)"')
            apackage = am_package.findall(am_str)
-            if len(apackage)>=1:
+            if len(apackage) >= 1:
self.packagename = apackage[0]
self.file_identifier.append(apackage[0])
-            am_name = re.compile(r'<application.*android:name="(.*?)".*>')
+            am_name = re.compile(r'<application.*android:name="(.*?)".*>')
aname = am_name.findall(am_str)
- if aname and len(aname)>=1:
+ if aname and len(aname) >= 1:
if aname[0] in config.shell_list:
self.shell_flag = True
-
+
            am_permission = re.compile(r'<uses-permission android:name="(.*?)"/>')
ampermissions = am_permission.findall(am_str)
for ampermission in ampermissions:
if ampermission in config.apk_permissions:
- self.permissions.append(ampermission)
\ No newline at end of file
+ self.permissions.append(ampermission)
diff --git a/libs/task/base_task.py b/libs/task/base_task.py
index 4ff3f08..108c298 100644
--- a/libs/task/base_task.py
+++ b/libs/task/base_task.py
@@ -3,9 +3,7 @@
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
import os
-import re
import config
-import threading
from queue import Queue
import libs.core as cores
from libs.task.ios_task import iOSTask
@@ -15,12 +13,14 @@ from libs.core.parses import ParsesThreads
from libs.task.android_task import AndroidTask
from libs.task.download_task import DownloadTask
+
class BaseTask(object):
- thread_list =[]
+ thread_list = []
result_dict = {}
- app_history_list=[]
- domain_history_list=[]
+ app_history_list = []
+ domain_history_list = []
    # Unified initialization entry point
+
def __init__(self, types="Android", inputs="", rules="", sniffer=True, threads=10, package=""):
self.types = types
self.path = inputs
@@ -30,17 +30,18 @@ class BaseTask(object):
self.threads = threads
self.package = package
self.file_queue = Queue()
-
-
+
    # Unified scheduling platform
+
def start(self):
-
+
print("[*] AI is analyzing filtering rules......")
        # Load the history records
self.__history_handle__()
- print("[*] The filtering rules obtained by AI are as follows: %s" % (set(config.filter_no)) )
+ print("[*] The filtering rules obtained by AI are as follows: %s" %
+ (set(config.filter_no)))
        # Task control center
task_info = self.__tast_control__()
@@ -53,37 +54,39 @@ class BaseTask(object):
packagename = task_info["packagename"]
file_identifier = task_info["file_identifier"]
permissions = task_info["permissions"]
-
+
if shell_flag:
print('[-] \033[3;31m Error: This application has shell, the retrieval results may not be accurate, Please remove the shell and try again!')
return
        # Thread control center
- print("[*] ========= Searching for strings that match the rules ===============")
+ print(
+ "[*] ========= Searching for strings that match the rules ===============")
self.__threads_control__(file_queue)
        # Wait for the threads to finish
for thread in self.thread_list:
thread.join()
-
-        # Result output center
- self.__print_control__(packagename,comp_list,file_identifier,permissions)
+        # Result output center
+ self.__print_control__(packagename, comp_list,
+ file_identifier, permissions)
def __tast_control__(self):
task_info = {}
        # Automatically adjust the type based on the file suffix
- cache_info = DownloadTask().start(self.path,self.types)
+ cache_info = DownloadTask().start(self.path, self.types)
cacar_path = cache_info["path"]
types = cache_info["type"]
-
+
if (not os.path.exists(cacar_path) and cores.download_flag):
- print("[-] File download failed! Please download the file manually and try again.")
+ print(
+ "[-] File download failed! Please download the file manually and try again.")
return task_info
        # Invoke the Android handling logic
if types == "Android":
- task_info = AndroidTask(cacar_path,self.package).start()
+ task_info = AndroidTask(cacar_path, self.package).start()
        # Invoke the iOS handling logic
elif types == "iOS":
task_info = iOSTask(cacar_path).start()
@@ -92,23 +95,26 @@ class BaseTask(object):
task_info = WebTask(cacar_path).start()
return task_info
- def __threads_control__(self,file_queue):
- for threadID in range(1,self.threads):
+ def __threads_control__(self, file_queue):
+ for threadID in range(1, self.threads):
name = "Thread - " + str(int(threadID))
- thread = ParsesThreads(threadID,name,file_queue,self.result_dict,self.types)
+ thread = ParsesThreads(
+ threadID, name, file_queue, self.result_dict, self.types)
thread.start()
self.thread_list.append(thread)
- def __print_control__(self,packagename,comp_list,file_identifier,permissions):
+ def __print_control__(self, packagename, comp_list, file_identifier, permissions):
txt_result_path = cores.txt_result_path
xls_result_path = cores.xls_result_path
all_flag = cores.all_flag
-
+
if self.sniffer:
- print("[*] ========= Sniffing the URL address of the search ===============")
- NetTask(self.result_dict,self.app_history_list,self.domain_history_list,file_identifier,self.threads).start()
-
- if packagename:
+ print(
+ "[*] ========= Sniffing the URL address of the search ===============")
+ NetTask(self.result_dict, self.app_history_list,
+ self.domain_history_list, file_identifier, self.threads).start()
+
+ if packagename:
print("[*] ========= The package name of this APP is: ===============")
print(packagename)
@@ -116,16 +122,17 @@ class BaseTask(object):
print("[*] ========= Component information is as follows: ===============")
for json in comp_list:
print(json)
-
+
if len(permissions) != 0:
- print("[*] ========= Sensitive permission information is as follows: ===============")
+ print(
+ "[*] ========= Sensitive permission information is as follows: ===============")
for permission in permissions:
print(permission)
if all_flag:
value_list = []
- with open(txt_result_path,"a+",encoding='utf-8',errors='ignore') as f:
- for key,value in self.result_dict.items():
+ with open(txt_result_path, "a+", encoding='utf-8', errors='ignore') as f:
+ for key, value in self.result_dict.items():
f.write(key+"\r")
for result in value:
if result in value_list:
@@ -133,36 +140,37 @@ class BaseTask(object):
value_list.append(result)
f.write("\t"+result+"\r")
f.close()
- print("[*] For more information about the search, see TXT file result: %s" %(txt_result_path))
+ print("[*] For more information about the search, see TXT file result: %s" %
+ (txt_result_path))
if self.sniffer:
- print("[*] For more information about the search, see XLS file result: %s" %(xls_result_path))
+ print("[*] For more information about the search, see XLSX file result: %s" %
+ (xls_result_path))
def __history_handle__(self):
- domain_history_path = cores.domain_history_path
+ domain_history_path = cores.domain_history_path
app_history_path = cores.app_history_path
if os.path.exists(domain_history_path):
domain_counts = {}
- app_size = 0
- with open(app_history_path,"r",encoding='utf-8',errors='ignore') as f:
+ app_size = 0
+ with open(app_history_path, "r", encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
app_size = len(lines)
- for line in lines:
- self.app_history_list.append(line.replace("\r","").replace("\n",""))
+ for line in lines:
+ self.app_history_list.append(
+ line.replace("\r", "").replace("\n", ""))
f.close()
- with open(domain_history_path,"r",encoding='utf-8',errors='ignore') as f:
+ with open(domain_history_path, "r", encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
cout = 3
- if (app_size>3) and (app_size%3==0):
+ if (app_size > 3) and (app_size % 3 == 0):
cout = cout + 1
for line in lines:
- domain = line.replace("\r","").replace("\n","")
+ domain = line.replace("\r", "").replace("\n", "")
self.domain_history_list.append(domain)
domain_count = lines.count(line)
if domain_count >= cout:
config.filter_no.append(".*" + domain)
f.close()
-
-
diff --git a/libs/task/download_task.py b/libs/task/download_task.py
index f388a05..e6ae867 100644
--- a/libs/task/download_task.py
+++ b/libs/task/download_task.py
@@ -11,20 +11,21 @@ from queue import Queue
import libs.core as cores
from libs.core.download import DownloadThreads
+
class DownloadTask(object):
- def start(self,path,types):
+ def start(self, path, types):
create_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
if path.endswith("apk"):
types = "Android"
- file_name = create_time+ ".apk"
+ file_name = create_time + ".apk"
elif path.endswith("ipa"):
types = "iOS"
file_name = create_time + ".ipa"
else:
if types == "Android":
types = "Android"
- file_name = create_time+ ".apk"
+ file_name = create_time + ".apk"
elif types == "iOS":
types = "iOS"
file_name = create_time + ".ipa"
@@ -32,15 +33,16 @@ class DownloadTask(object):
types = "WEB"
file_name = create_time + ".html"
if not(path.startswith("http://") or path.startswith("https://")):
-            if not os.path.isdir(path): # Not a directory
- return {"path":path,"type":types}
-            else: # Handle the directory
- return {"path":path,"type":types}
+            if not os.path.isdir(path):  # Not a directory
+ return {"path": path, "type": types}
+            else:  # Handle the directory
+ return {"path": path, "type": types}
else:
- print("[*] Detected that the task is not local, preparing to download file......")
- cache_path = os.path.join(cores.download_path, file_name)
- thread = DownloadThreads(path,file_name,cache_path,types)
+ print(
+ "[*] Detected that the task is not local, preparing to download file......")
+ cache_path = os.path.join(cores.download_path, file_name)
+ thread = DownloadThreads(path, file_name, cache_path, types)
thread.start()
thread.join()
print()
- return {"path":cache_path,"type":types}
\ No newline at end of file
+ return {"path": cache_path, "type": types}
diff --git a/libs/task/ios_task.py b/libs/task/ios_task.py
index a9e8330..8b725db 100644
--- a/libs/task/ios_task.py
+++ b/libs/task/ios_task.py
@@ -11,125 +11,133 @@ import platform
import libs.core as cores
from queue import Queue
+
class iOSTask(object):
elf_file_name = ""
- def __init__(self,path):
+
+ def __init__(self, path):
self.path = path
self.file_queue = Queue()
self.shell_flag = False
- self.file_identifier= []
+ self.file_identifier = []
self.permissions = []
-
+
def start(self):
file_path = self.path
if file_path.split(".")[-1] == 'ipa':
self.__decode_ipa__(cores.output_path)
self.__scanner_file_by_ipa__(cores.output_path)
- elif self.__get_file_header__(file_path):
+ elif self.__get_file_header__(file_path):
self.file_queue.put(file_path)
else:
- raise Exception("Retrieval of this file type is not supported. Select IPA file or Mach-o file.")
- return {"shell_flag":self.shell_flag,"file_queue":self.file_queue,"comp_list":[],"packagename":None,"file_identifier":self.file_identifier,"permissions":self.permissions}
+ raise Exception(
+ "Retrieval of this file type is not supported. Select IPA file or Mach-o file.")
+ return {"shell_flag": self.shell_flag, "file_queue": self.file_queue, "comp_list": [], "packagename": None, "file_identifier": self.file_identifier, "permissions": self.permissions}
- def __get_file_header__(self,file_path):
+ def __get_file_header__(self, file_path):
hex_hand = 0x0
- macho_name = os.path.split(file_path)[-1]
+ macho_name = os.path.split(file_path)[-1]
self.file_identifier.append(macho_name)
- with open(file_path,"rb") as macho_file:
- macho_file.seek(hex_hand,0)
+ with open(file_path, "rb") as macho_file:
+ macho_file.seek(hex_hand, 0)
magic = binascii.hexlify(macho_file.read(4)).decode().upper()
- macho_magics = ["CFFAEDFE","CEFAEDFE","BEBAFECA","CAFEBABE"]
+ macho_magics = ["CFFAEDFE", "CEFAEDFE", "BEBAFECA", "CAFEBABE"]
if magic in macho_magics:
- self.__shell_test__(macho_file,hex_hand)
+ self.__shell_test__(macho_file, hex_hand)
macho_file.close()
return True
macho_file.close()
return False
-
-
- def __shell_test__(self,macho_file,hex_hand):
+
+ def __shell_test__(self, macho_file, hex_hand):
while True:
magic = binascii.hexlify(macho_file.read(4)).decode().upper()
if magic == "2C000000":
- macho_file.seek(hex_hand,0)
- encryption_info_command = binascii.hexlify(macho_file.read(24)).decode()
- cryptid = encryption_info_command[-8:len(encryption_info_command)]
+ macho_file.seek(hex_hand, 0)
+ encryption_info_command = binascii.hexlify(
+ macho_file.read(24)).decode()
+ cryptid = encryption_info_command[-8:len(
+ encryption_info_command)]
if cryptid == "01000000":
self.shell_flag = True
break
- hex_hand = hex_hand + 4
+ hex_hand = hex_hand + 4
- def __scanner_file_by_ipa__(self,output):
- scanner_file_suffix = ["plist","js","xml","html"]
- scanner_dir = os.path.join(output,"Payload")
- self.__get_scanner_file__(scanner_dir,scanner_file_suffix)
+ def __scanner_file_by_ipa__(self, output):
+ scanner_file_suffix = ["plist", "js", "xml", "html"]
+ scanner_dir = os.path.join(output, "Payload")
+ self.__get_scanner_file__(scanner_dir, scanner_file_suffix)
- def __get_scanner_file__(self,scanner_dir,file_suffix):
+ def __get_scanner_file__(self, scanner_dir, file_suffix):
dir_or_files = os.listdir(scanner_dir)
for dir_file in dir_or_files:
- dir_file_path = os.path.join(scanner_dir,dir_file)
+ dir_file_path = os.path.join(scanner_dir, dir_file)
if os.path.isdir(dir_file_path):
if dir_file.endswith(".app"):
- self.elf_file_name = dir_file.replace(".app","")
- self.__get_scanner_file__(dir_file_path,file_suffix)
+ self.elf_file_name = dir_file.replace(".app", "")
+ self.__get_scanner_file__(dir_file_path, file_suffix)
else:
if self.elf_file_name == dir_file:
self.__get_file_header__(dir_file_path)
self.file_queue.put(dir_file_path)
continue
- if cores.resource_flag:
- dir_file_suffix = dir_file.split(".")
+ if cores.resource_flag:
+ dir_file_suffix = dir_file.split(".")
if len(dir_file_suffix) > 1:
if dir_file_suffix[-1] in file_suffix:
self.__get_file_header__(dir_file_path)
self.file_queue.put(dir_file_path)
- def __decode_ipa__(self,output_path):
- with zipfile.ZipFile(self.path,"r") as zip_files:
+ def __decode_ipa__(self, output_path):
+ with zipfile.ZipFile(self.path, "r") as zip_files:
zip_file_names = zip_files.namelist()
- zip_files.extract(zip_file_names[0],output_path)
+ zip_files.extract(zip_file_names[0], output_path)
try:
- new_zip_file = zip_file_names[0].encode('cp437').decode('utf-8')
+ new_zip_file = zip_file_names[0].encode(
+ 'cp437').decode('utf-8')
except UnicodeEncodeError:
- new_zip_file = zip_file_names[0].encode('utf-8').decode('utf-8')
-
- old_zip_dir = self.__get_parse_dir__(output_path,zip_file_names[0])
- new_zip_dir = self.__get_parse_dir__(output_path,new_zip_file)
- os.rename(old_zip_dir,new_zip_dir)
-
+ new_zip_file = zip_file_names[0].encode(
+ 'utf-8').decode('utf-8')
+
+ old_zip_dir = self.__get_parse_dir__(
+ output_path, zip_file_names[0])
+ new_zip_dir = self.__get_parse_dir__(output_path, new_zip_file)
+ os.rename(old_zip_dir, new_zip_dir)
+
for zip_file in zip_file_names:
- old_ext_path = zip_files.extract(zip_file,output_path)
+ old_ext_path = zip_files.extract(zip_file, output_path)
+ if not "Payload" in old_ext_path:
+ continue
start = str(old_ext_path).index("Payload")
dir_path = old_ext_path[start:len(old_ext_path)]
- old_ext_path = os.path.join(output_path,dir_path)
+ old_ext_path = os.path.join(output_path, dir_path)
try:
new_zip_file = zip_file.encode('cp437').decode('utf-8')
except UnicodeEncodeError:
new_zip_file = zip_file.encode('utf-8').decode('utf-8')
-
- new_ext_path = os.path.join(output_path,new_zip_file)
+
+ new_ext_path = os.path.join(output_path, new_zip_file)
if platform.system() == "Windows":
- new_ext_path = new_ext_path.replace("/","\\")
+ new_ext_path = new_ext_path.replace("/", "\\")
if not os.path.exists(new_ext_path):
dir_path = os.path.dirname(new_ext_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
shutil.move(old_ext_path, new_ext_path)
- if os.path.exists(old_ext_path):
+ if os.path.exists(old_ext_path) and (".app" in old_ext_path):
try:
                    # Handle permission issues that occur on Mac
os.remove(old_ext_path)
except Exception:
shutil.rmtree(old_ext_path)
-
- def __get_parse_dir__(self,output_path,file_path):
+ def __get_parse_dir__(self, output_path, file_path):
start = file_path.index("Payload/")
end = file_path.index(".app")
root_dir = file_path[start:end]
if platform.system() == "Windows":
- root_dir = root_dir.replace("/","\\")
- old_root_dir = os.path.join(output_path,root_dir+".app")
- return old_root_dir
\ No newline at end of file
+ root_dir = root_dir.replace("/", "\\")
+ old_root_dir = os.path.join(output_path, root_dir+".app")
+ return old_root_dir
diff --git a/libs/task/net_task.py b/libs/task/net_task.py
index a10e4ba..e781a55 100644
--- a/libs/task/net_task.py
+++ b/libs/task/net_task.py
@@ -3,108 +3,108 @@
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
-import re
-import xlwt
-import socket
+import openpyxl
import config
from queue import Queue
import libs.core as cores
from libs.core.net import NetThreads
-import requests
+
class NetTask(object):
value_list = []
- domain_list=[]
-
- def __init__(self,result_dict,app_history_list,domain_history_list,file_identifier,threads):
+ domain_list = []
+
+ def __init__(self, result_dict, app_history_list, domain_history_list, file_identifier, threads):
self.result_dict = result_dict
self.app_history_list = app_history_list
self.file_identifier = file_identifier
self.domain_queue = Queue()
- self.threads = int(threads)
+ self.threads = int(threads)
self.thread_list = []
self.domain_history_list = domain_history_list
def start(self):
xls_result_path = cores.xls_result_path
- workbook = xlwt.Workbook(encoding = 'utf-8')
+ workbook = openpyxl.Workbook()
worksheet = self.__creating_excel_header__(workbook)
-
+
self.__write_result_to_txt__()
self.__start_threads__(worksheet)
-
+
for thread in self.thread_list:
thread.join()
workbook.save(xls_result_path)
- def __creating_excel_header__(self,workbook):
- worksheet = workbook.add_sheet("Result",cell_overwrite_ok=True)
- worksheet.write(0,0, label = "Number")
- worksheet.write(0,1, label = "IP/URL")
- worksheet.write(0,2, label = "Domain")
- worksheet.write(0,3, label = "Status")
- worksheet.write(0,4, label = "IP")
- worksheet.write(0,5, label = "Server")
- worksheet.write(0,6, label = "Title")
- worksheet.write(0,7, label = "CDN")
- # worksheet.write(0,8, label = "Finger")
- return worksheet
-
+ def __creating_excel_header__(self, workbook):
+ worksheet = workbook.create_sheet("Result", 0)
+ worksheet.cell(row=1, column=1, value="Number")
+ worksheet.cell(row=1, column=2, value="IP/URL")
+ worksheet.cell(row=1, column=3, value="Domain")
+ worksheet.cell(row=1, column=4, value="Status")
+ worksheet.cell(row=1, column=5, value="IP")
+ worksheet.cell(row=1, column=6, value="Server")
+ worksheet.cell(row=1, column=7, value="Title")
+ worksheet.cell(row=1, column=8, value="CDN")
+ worksheet.cell(row=1, column=9, value="Finger")
+ return worksheet
+
def __write_result_to_txt__(self):
- txt_result_path = cores.txt_result_path
append_file_flag = True
-
- for key,value in self.result_dict.items():
+
+ for key, value in self.result_dict.items():
for result in value:
if result in self.value_list:
continue
self.value_list.append(result)
if (("http://" in result) or ("https://" in result)) and ("." in result):
- domain = result.replace("https://","").replace("http://","")
-
if "{" in result or "}" in result or "[" in result or "]" in result or "\\" in result or "!" in result or "," in result:
continue
+ domain = result.replace(
+ "https://", "").replace("http://", "")
if "/" in domain:
domain = domain[:domain.index("/")]
-
+
if "|" in result:
result = result[:result.index("|")]
                    # The shortest domain currently in circulation is 11 characters including the protocol prefix
if len(result) <= 10:
continue
-
+
url_suffix = result[result.rindex(".")+1:].lower()
if not(cores.resource_flag and url_suffix in config.sniffer_filter):
- self.domain_queue.put({"domain":domain,"url_ip":result})
-
+ self.domain_queue.put(
+ {"domain": domain, "url_ip": result})
+
for identifier in self.file_identifier:
if identifier in self.app_history_list:
- if not(domain in self.domain_history_list):
+ if not(domain in self.domain_history_list):
self.domain_list.append(domain)
- self.__write_content_in_file__(cores.domain_history_path,domain)
+ self.__write_content_in_file__(
+ cores.domain_history_path, domain)
continue
if not(domain in self.domain_list):
self.domain_list.append(domain)
- self.__write_content_in_file__(cores.domain_history_path,domain)
+ self.__write_content_in_file__(
+ cores.domain_history_path, domain)
if append_file_flag:
- self.__write_content_in_file__(cores.app_history_path,identifier)
+ self.__write_content_in_file__(
+ cores.app_history_path, identifier)
append_file_flag = False
- def __start_threads__(self,worksheet):
- for threadID in range(0,self.threads) :
+ def __start_threads__(self, worksheet):
+ for threadID in range(0, self.threads):
name = "Thread - " + str(threadID)
- thread = NetThreads(threadID,name,self.domain_queue,worksheet)
+ thread = NetThreads(threadID, name, self.domain_queue, worksheet)
thread.start()
self.thread_list.append(thread)
- def __write_content_in_file__(self,file_path,content):
- with open(file_path,"a+",encoding='utf-8',errors='ignore') as f:
+ def __write_content_in_file__(self, file_path, content):
+ with open(file_path, "a+", encoding='utf-8', errors='ignore') as f:
f.write(content+"\r")
f.close()
-
diff --git a/libs/task/web_task.py b/libs/task/web_task.py
index 046fdb3..b47e882 100644
--- a/libs/task/web_task.py
+++ b/libs/task/web_task.py
@@ -7,8 +7,9 @@ import config
import hashlib
from queue import Queue
+
class WebTask(object):
- thread_list =[]
+ thread_list = []
value_list = []
result_dict = {}
@@ -19,29 +20,29 @@ class WebTask(object):
self.permissions = []
def start(self):
- if len(config.web_file_suffix) <=0:
- scanner_file_suffix = ["html","js","html","xml"]
-
+ if len(config.web_file_suffix) <= 0:
+ scanner_file_suffix = ["html", "js", "html", "xml"]
+
scanner_file_suffix = config.web_file_suffix
if os.path.isdir(self.path):
- self.__get_scanner_file__(self.path,scanner_file_suffix)
+ self.__get_scanner_file__(self.path, scanner_file_suffix)
else:
if not (self.path.split(".")[-1] in scanner_file_suffix):
err_info = ("Retrieval of this file type is not supported. Select a file or directory with a suffix of %s" % ",".join(scanner_file_suffix))
raise Exception(err_info)
self.file_queue.put(self.path)
- return {"comp_list":[],"shell_flag":False,"file_queue":self.file_queue,"packagename":None,"file_identifier":self.file_identifier,"permissions":self.permissions}
+ return {"comp_list": [], "shell_flag": False, "file_queue": self.file_queue, "packagename": None, "file_identifier": self.file_identifier, "permissions": self.permissions}
- def __get_scanner_file__(self,scanner_dir,file_suffix):
+ def __get_scanner_file__(self, scanner_dir, file_suffix):
dir_or_files = os.listdir(scanner_dir)
for dir_file in dir_or_files:
- dir_file_path = os.path.join(scanner_dir,dir_file)
+ dir_file_path = os.path.join(scanner_dir, dir_file)
if os.path.isdir(dir_file_path):
- self.__get_scanner_file__(dir_file_path,file_suffix)
+ self.__get_scanner_file__(dir_file_path, file_suffix)
else:
- if len(dir_file.split("."))>1:
+ if len(dir_file.split(".")) > 1:
if dir_file.split(".")[-1] in file_suffix:
with open(dir_file_path,'rb') as f:
dex_md5 = str(hashlib.md5().update(f.read()).hexdigest()).upper()
self.file_identifier.append(dex_md5)
- self.file_queue.put(dir_file_path)
\ No newline at end of file
+ self.file_queue.put(dir_file_path)
diff --git a/requirements.txt b/requirements.txt
index e0753ef..6431b45 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
requests
click
-xlwt
\ No newline at end of file
+openpyxl
\ No newline at end of file
diff --git a/tools/apktool.jar b/tools/apktool.jar
index a07b3d7..9706648 100644
Binary files a/tools/apktool.jar and b/tools/apktool.jar differ
diff --git a/update.md b/update.md
index b756e7d..ed92261 100644
--- a/update.md
+++ b/update.md
@@ -1,3 +1,10 @@
+### V1.0.9
+- Updated apktool to the latest version
+- Streamlined parts of the workflow
+- Fixed the Excel export exceeding the row limit
+- Fixed the script stalling during execution
+- Fixed insufficient permissions on the Payload files on Mac
+
### V1.0.8
- Added AK and SK detection
- Added an entry point for submitting detection rules