Compare commits

...

5 Commits

Author SHA1 Message Date
kelvinBen 479abe2e3b 1. Add download task threads 3 years ago
kelvinBen 756025d7fd Add xlsx files to the ignore list 3 years ago
kelvinBen 4b3db5aff7 - Add batch file download over the network 3 years ago
kelvinBen acc01c3303 - Optimize Excel file output 3 years ago
kelvinBen 1f0fa65606 - Optimize iOS shell detection speed 3 years ago
  1. .gitignore (5 changed lines)
  2. app.py (43 changed lines)
  3. libs/core/__init__.py (235 changed lines)
  4. libs/core/download.py (91 changed lines)
  5. libs/core/net.py (23 changed lines)
  6. libs/core/parses.py (8 changed lines)
  7. libs/task/android_task.py (33 changed lines)
  8. libs/task/base_task.py (157 changed lines)
  9. libs/task/download_task.py (68 changed lines)
  10. libs/task/ios_task.py (75 changed lines)
  11. libs/task/net_task.py (32 changed lines)
  12. libs/task/web_task.py (3 changed lines)
  13. requirements.txt (2 changed lines)
  14. update.md (14 changed lines)

.gitignore (vendored, 5 changed lines)

@@ -1,5 +1,6 @@
result_*.txt
result_*.xls
result_*.xlsx
download/
history/
out/
@@ -114,3 +115,7 @@ venv.bak/
# add
.idea/
#
1.py
.vscode/

app.py
@@ -4,13 +4,20 @@
# Github: https://github.com/kelvinBen/AppInfoScanner
import click
import logging
from libs.core import Bootstrapper
from libs.task.base_task import BaseTask
@click.group(help="Python script for automatically retrieving key information in app.")
def cli():
pass
try:
LOG_FORMAT = "%(message)s" # log output format
fp = logging.FileHandler('info.log', mode='w',encoding='utf-8')
fs = logging.StreamHandler()
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, handlers=[fp, fs]) # apply the logging configuration
except Exception as e:
logging.error("{}".format(e))
# Create the Android task
@cli.command(help="Get the key information of Android system.")
@@ -23,13 +30,13 @@ def cli():
@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
@click.option("-p", '--package',required=False,type=str,default="",help="Specifies the package name information that needs to be scanned.")
def android(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output, package:str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
BaseTask("Android", inputs, rules, sniffer, threads, package).start()
except Exception as e:
raise e
bootstrapper = Bootstrapper(rules, sniffer, threads, all ,no_resource)
bootstrapper.init_dir(__file__, output)
BaseTask().start("Android", inputs, package)
@cli.command(help="Get the key information of iOS system.")
@click.option("-i", "--inputs", required=True, type=str, help="Please enter IPA file or ELF file to scan or corresponding IPA download address. App store is not supported at present.")
@@ -40,13 +47,12 @@ def android(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool,
@click.option("-t", '--threads',required=False, type=int,default=10,help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
def ios(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output:str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
BaseTask("iOS", inputs, rules, sniffer, threads).start()
except Exception as e:
raise e
bootstrapper = Bootstrapper(rules, sniffer, threads, all ,no_resource)
bootstrapper.init_dir(__file__, output)
BaseTask().start("iOS", inputs)
@cli.command(help="Get the key information of Web system.")
@click.option("-i", "--inputs", required=True, type=str, help="Please enter the site directory or site file to scan or the corresponding site download address.")
@@ -57,15 +63,14 @@ def ios(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, thre
@click.option("-t", '--threads',required=False, type=int,default=10,help="Set the number of concurrency. The larger the concurrency, the faster the speed. The default value is 10.")
@click.option("-o", '--output',required=False, type=str,default=None,help="Specify the result set output directory.")
def web(inputs: str, rules: str, sniffer: bool, no_resource:bool, all:bool, threads:int, output:str) -> None:
try:
bootstrapper = Bootstrapper(__file__, output, all, no_resource)
bootstrapper.init()
BaseTask("Web", inputs, rules, sniffer, threads).start()
except Exception as e:
raise e
bootstrapper = Bootstrapper(rules, sniffer, threads, all ,no_resource)
bootstrapper.init_dir(__file__, output)
BaseTask().start("Web", inputs)
def main():
cli()
if __name__ == "__main__":
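
The rewritten app.py routes all output through Python's logging module, attaching both a file handler and a console handler via basicConfig so a single call reaches both sinks. A minimal standalone sketch of that pattern; the file name info.log and the bare "%(message)s" format come from the diff, the sample messages are illustrative:

import logging

# One handler writes to info.log, the other echoes to the console;
# basicConfig attaches both so every record goes to both sinks.
file_handler = logging.FileHandler("info.log", mode="w", encoding="utf-8")
console_handler = logging.StreamHandler()
logging.basicConfig(level=logging.INFO, format="%(message)s",
                    handlers=[file_handler, console_handler])

logging.info("[*] System env: Windows")
logging.error("[x] Example error message")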

libs/core/__init__.py
@@ -2,117 +2,178 @@
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
import os
import time
import shutil
import platform
import logging
backsmali_file = None
apktool_file = None
strings_file = None
app_history_file= None
domain_history_file = None
result_dir = None
download_dir = None
decode_dir = None
user_add_rules = None
# Path to smali
smali_path = ""
# Path to baksmali
backsmali_path = ""
# Path to apktool
apktool_path = ""
# OS type
os_type = ""
# Output path
output_path = ""
# Download-complete flag
download_flag = False
net_sniffer_flag = False
all_string_out = False
no_resource_flag = False
excel_row = 1
threads_num = 10
# Starting Excel row number
excel_row = 0
class Bootstrapper(object):
def __init__(self, path, out_path, all=False, no_resource= False):
global smali_path
global backsmali_path
global apktool_path
global os_type
global output_path
global script_root_dir
global txt_result_path
global xls_result_path
global strings_path
global history_path
global app_history_path
global domain_history_path
global excel_row
global download_path
def __init__(self, rules, threads, sniffer, all, no_resource):
# baksmali load path
global backsmali_file
# apktool load path
global apktool_file
# strings load path
global strings_file
# App scan history file
global app_history_file
# Domain scan history file
global domain_history_file
# Result output directory
global result_dir
# Temporary file download directory
global download_dir
# Temporary decompile directory
global decode_dir
# Download-complete flag
global download_flag
global out_dir
global all_flag
global resource_flag
# Excel row number
global excel_row
# User-defined rules
global user_add_rules
# User-specified thread count
global threads_num
# Network sniffing flag
global net_sniffer_flag
# Output all strings
global all_string_out
# Ignore-resources flag
global no_resource_flag
user_add_rules = rules
threads_num = threads
net_sniffer_flag = not sniffer
all_string_out = all
no_resource_flag = no_resource
# List of directories to create
self.__create_dir_list__= []
# List of directories to remove
self.__remove_dir_list__= []
logging.info("[*] System env: {}".format(platform.system()))
def init_dir(self, app_input_path, user_out_path):
logging.info("[*] init dir...")
# Script execution directory
script_root_dir = os.path.dirname(os.path.abspath(app_input_path))
# Load the bundled tools
self.__tools_loading__(script_root_dir)
# Build the persistent directories
self.__build_persistent_path__(script_root_dir)
# Build the result output directory
self.__build_result_out__path__(script_root_dir,user_out_path)
# Central directory builder
self.__build_dir__()
# Load the bundled tools
def __tools_loading__(self,script_root_dir):
tools_dir = os.path.join(script_root_dir,"tools")
all_flag = not all
resource_flag = no_resource
backsmali_file = os.path.join(tools_dir,"baksmali.jar")
logging.info("[*] Backsmali Path: {}".format(backsmali_file))
create_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
script_root_dir = os.path.dirname(os.path.abspath(path))
if out_path:
out_dir = out_path
else:
out_dir = script_root_dir
tools_dir = os.path.join(script_root_dir,"tools")
output_path = os.path.join(out_dir,"out")
history_path = os.path.join(script_root_dir,"history")
apktool_file = os.path.join(tools_dir, "apktool.jar")
logging.info("[*] Apktool Path: {}".format(apktool_file))
if platform.system() == "Windows":
machine2bits = {'AMD64':64, 'x86_64': 64, 'i386': 32, 'x86': 32}
machine2bits.get(platform.machine())
if platform.machine() == 'i386' or platform.machine() == 'x86':
strings_path = os.path.join(tools_dir,"strings.exe")
strings_file = os.path.join(tools_dir,"strings.exe")
else:
strings_file = os.path.join(tools_dir,"strings64.exe")
else:
strings_path = os.path.join(tools_dir,"strings64.exe")
strings_file ="strings"
logging.info("[*] Strings Path: {}".format(strings_file))
# Build the persistent directories
def __build_persistent_path__(self,script_root_dir):
# Current user's Documents directory
doc_path = os.path.join(os.path.expanduser("~"), 'Documents')
if os.path.exists(doc_path):
app_dir = os.path.join(doc_path,"AppInfoScanner")
else:
strings_path ="strings"
backsmali_path = os.path.join(tools_dir,"baksmali.jar")
apktool_path = os.path.join(tools_dir, "apktool.jar")
download_path = os.path.join(out_dir,"download")
txt_result_path = os.path.join(out_dir,"result_"+str(create_time)+".txt")
xls_result_path = os.path.join(out_dir,"result_"+str(create_time)+".xls")
app_history_path = os.path.join(history_path,"app_history.txt")
domain_history_path = os.path.join(history_path,"domain_history.txt")
def init(self):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("[*] Create directory {}".format(out_dir))
if os.path.exists(output_path):
app_dir = os.path.join(script_root_dir,"AppInfoScanner")
# Directory for loading task history
history_path = os.path.join(app_dir,"history")
app_history_file = os.path.join(history_path,"app_history.txt")
domain_history_file = os.path.join(history_path,"domain_history.txt")
self.__create_dir_list__.append(app_dir)
self.__create_dir_list__.append(history_path)
# Build the result output directory
def __build_result_out__path__(self,script_root_dir,user_out_path):
result_out_dir = script_root_dir
# If the user specifies an output directory, write results there
if user_out_path:
result_out_dir = user_out_path
# Unified output directory
out_dir = os.path.join(result_out_dir,"out")
# Temporary decode output directory
decode_dir = os.path.join(out_dir,"decode")
# Temporary file download directory
download_dir = os.path.join(out_dir,"download")
# Final result output directory
result_dir = os.path.join(out_dir,"result")
self.__create_dir_list__.append(out_dir)
self.__create_dir_list__.append(decode_dir)
self.__create_dir_list__.append(download_dir)
self.__create_dir_list__.append(result_dir)
self.__remove_dir_list__.append(decode_dir)
# Central directory builder
def __build_dir__(self):
for dir_path in self.__create_dir_list__:
if (os.path.exists(dir_path)) and (dir_path in self.__remove_dir_list__):
# Remove the directory
try:
shutil.rmtree(output_path)
shutil.rmtree(dir_path)
logging.info("[-] Remove Dir: {}".format(dir_path))
except Exception as e:
# Work around deletion failures for over-long paths on Windows
if not (platform.system() == "Windows"):
raise e
self.__removed_dirs_cmd__(output_path)
os.makedirs(output_path)
print("[*] Create directory {}".format(output_path))
if not os.path.exists(download_path):
# shutil.rmtree(download_path)
os.makedirs(download_path)
print("[*] Create directory {}".format(download_path))
if not os.path.exists(history_path):
os.makedirs(history_path)
print("[*] Create directory {}".format(history_path))
if os.path.exists(txt_result_path):
os.remove(txt_result_path)
self.__removed_dirs_cmd__(dir_path)
if os.path.exists(xls_result_path):
os.remove(xls_result_path)
# Create the directory
if not os.path.exists(dir_path):
os.makedirs(dir_path)
logging.info("[+] Create Dir: {}".format(dir_path))
def __removed_dirs_cmd__(self,output_path):
files = os.listdir(output_path)
@@ -121,8 +182,12 @@ class Bootstrapper(object):
old_dir = os.path.join(output_path,file)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
logging.info("[+] Create Dir: {}".format(new_dir))
os.chdir(output_path)
cmd = ("robocopy %s %s /purge") % (new_dir, old_dir)
logging.debug("[*] cmd : {}".format(cmd))
os.system(cmd)
os.removedirs(new_dir)
os.removedirs(old_dir)
logging.info("[-] Remove Dir: {}".format(new_dir))
logging.info("[-] Remove Dir: {}".format(old_dir))

libs/core/download.py
@@ -2,11 +2,14 @@
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
from genericpath import exists
import re
import os
import sys
import time
import uuid
import config
import logging
import requests
import threading
import libs.core as cores
@@ -15,14 +18,71 @@ from requests.adapters import HTTPAdapter
class DownloadThreads(threading.Thread):
def __init__(self,input_path,file_name,cache_path,types):
def __init__(self,threadID, threadName, download_file_queue, download_file_list, types):
threading.Thread.__init__(self)
self.url = input_path
self.threadID = threadID
self.threadName = threadName
self.download_file_queue = download_file_queue
self.download_file_list = download_file_list
self.types = types
self.cache_path = None
def __start__(self):
# Pull items from the queue until it is empty
while not self.download_file_queue.empty():
file_or_url = self.download_file_queue.get()
if not file_or_url:
logging.error("[x] Failed to get file!")
continue
self.__auto_update_type__(file_or_url)
# Automatically determine the file type
def __auto_update_type__(self,file_or_url):
uuid_name = str(uuid.uuid1()).replace("-","")
# apk suffix or Android type: treat as Android
if file_or_url.endswith("apk") or self.types == "Android":
types = "Android"
file_name = uuid_name + ".apk"
# dex suffix or Android type: treat as Android
elif file_or_url.endswith("dex") or self.types == "Android":
types = "Android"
file_name = uuid_name + ".dex"
# ipa suffix or iOS type: treat as iOS
elif file_or_url.endswith("ipa") or self.types == "iOS":
types = "iOS"
file_name = uuid_name + ".ipa"
else:
# Paths starting with http:// or https:// that do not exist locally: treat as Web
if (file_or_url.startswith("http://") or file_or_url.startswith("https://")) and (not os.path.exists(file_or_url)):
types = "WEB"
file_name = uuid_name + ".html"
# Other cases (WEB type, directories, standalone binaries, etc.) are handled by later logic
if file_or_url.startswith("http://") or file_or_url.startswith("https://"):
# Download the file
self.__file_deduplication__(file_name, uuid_name)
if self.cache_path:
file_path = self.cache_path
self.__download_file__(file_or_url,file_path)
# TODO: mark downloaded files to avoid re-downloading
else:
types = self.types
file_path = file_or_url
self.download_file_list.append({"path": file_path, "type": types})
# Prevent files from being overwritten when names collide
def __file_deduplication__(self,file_name, uuid_name):
cache_path = os.path.join(cores.download_dir, file_name)
if not os.path.exists(cache_path):
self.cache_path = cache_path
self.file_name = file_name
return
new_uuid_name = str(uuid.uuid1()).replace("-","")
new_file_name = file_name.replace(uuid_name,new_uuid_name)
self.__file_deduplication__(new_file_name,new_uuid_name)
def __requset__(self):
# File download
def __download_file__(self, url, file_path):
try:
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=3))
@@ -32,17 +92,17 @@ class DownloadThreads(threading.Thread):
urllib3.disable_warnings()
if config.method.upper() == "POST":
resp = session.post(url=self.url,params=config.data ,headers=config.headers,timeout=30)
resp = session.post(url=url, params=config.data, headers=config.headers, timeout=30)
else:
resp = session.get(url=self.url,data=config.data ,headers=config.headers,timeout=30)
resp = session.get(url=url, data=config.data, headers=config.headers, timeout=30)
if resp.status_code == requests.codes.ok:
# Download the binary file
if self.types == "Android" or self.types == "iOS":
count = 0
progress_tmp = 0
time1 = time.time()
length = float(resp.headers['content-length'])
with open(self.cache_path, "wb") as f:
with open(file_path, "wb") as f:
for chunk in resp.iter_content(chunk_size = 512):
if chunk:
f.write(chunk)
@@ -50,20 +110,23 @@ class DownloadThreads(threading.Thread):
progress = int(count / length * 100)
if progress != progress_tmp:
progress_tmp = progress
print("\r", end="")
print("[*] Download progress: {}%: ".format(progress), "" * (progress // 2), end="")
logging.info("\r", end="")
logging.info("[*] Download progress: {}%: ".format(progress), "" * (progress // 2), end="")
sys.stdout.flush()
f.close()
else:
html = resp.text
with open(self.cache_path,"w",encoding='utf-8',errors='ignore') as f:
with open(file_path,"w",encoding='utf-8',errors='ignore') as f:
f.write(html)
f.close()
cores.download_flag = True
else:
logging.error("[x] {} download fails, status code is {} !!!".format(url, str(resp.status_code)))
except Exception as e:
raise Exception(e)
return
logging.error("[x] {} download fails, the following exception information:".format(url))
logging.exception(e)
def run(self):
threadLock = threading.Lock()
self.__requset__()
self.__start__()
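
DownloadThreads now drains a shared queue.Queue of files and URLs instead of handling a single download, so several workers can process one batch. A minimal sketch of that consumer pattern, with the worker count capped at the queue size as base_task.py does; the names and sample items are illustrative:

import queue
import threading

def worker(work_queue, results):
    # Each worker pulls items until the shared queue is empty.
    while not work_queue.empty():
        item = work_queue.get()
        results.append(item.upper())  # stand-in for "download and record the file"

work_queue = queue.Queue()
for url in ["http://example.com/app.apk", "http://example.com/app.ipa"]:
    work_queue.put(url)

results = []
workers = [threading.Thread(target=worker, args=(work_queue, results))
           for _ in range(min(10, work_queue.qsize()))]  # never more workers than items
for t in workers:
    t.start()
for t in workers:
    t.join()
print(results)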

libs/core/net.py
@@ -4,6 +4,7 @@
# Github: https://github.com/kelvinBen/AppInfoScanner
import re
import time
import logging
import threading
import requests
import libs.core as cores
@@ -27,20 +28,22 @@ class NetThreads(threading.Thread):
url_ip = domains["url_ip"]
time.sleep(2)
result = self.__get_request_result__(url_ip)
print("[+] Processing URL address:"+url_ip)
logging.info("[+] " + url_ip)
if result != "error":
if self.lock.acquire(True):
cores.excel_row = cores.excel_row + 1
self.worksheet.write(cores.excel_row, 0, label = cores.excel_row)
self.worksheet.write(cores.excel_row, 1, label = url_ip)
self.worksheet.write(cores.excel_row, 2, label = domain)
self.worksheet.cell(row=cores.excel_row, column=1).value = cores.excel_row
self.worksheet.cell(row=cores.excel_row, column=2).value = url_ip
self.worksheet.cell(row=cores.excel_row, column=3).value = domain
if result != "timeout":
self.worksheet.write(cores.excel_row, 3, label = result["status"])
self.worksheet.write(cores.excel_row, 4, label = result["des_ip"])
self.worksheet.write(cores.excel_row, 5, label = result["server"])
self.worksheet.write(cores.excel_row, 6, label = result["title"])
self.worksheet.write(cores.excel_row, 7, label = result["cdn"])
# self.worksheet.write(cores.excel_row, 8, label = "")
self.worksheet.cell(row=cores.excel_row, column=4).value = result["status"]
self.worksheet.cell(row=cores.excel_row, column=5).value = result["des_ip"]
self.worksheet.cell(row=cores.excel_row, column=6).value = result["server"]
self.worksheet.cell(row=cores.excel_row, column=7).value = result["title"]
self.worksheet.cell(row=cores.excel_row, column=8).value = result["cdn"]
self.worksheet.cell(row=cores.excel_row, column=9).value = ""
self.lock.release()
def __get_request_result__(self,url):
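
The spreadsheet output here switches from xlwt's worksheet.write(row, col, label=...) to openpyxl's worksheet.cell(row=..., column=...).value, which is 1-indexed and produces .xlsx files; the changelog ties this switch to fixing overly long content that could not be written to Excel. A small sketch of the new write pattern, assuming openpyxl is installed (sheet layout and file name are illustrative):

from openpyxl import Workbook

workbook = Workbook()
worksheet = workbook.create_sheet("Result", 0)

# openpyxl counts rows and columns from 1, unlike xlwt's 0-based write().
headers = ["Number", "IP/URL", "Domain", "Status"]
for column, header in enumerate(headers, start=1):
    worksheet.cell(row=1, column=column).value = header

worksheet.cell(row=2, column=1).value = 1
worksheet.cell(row=2, column=2).value = "http://example.com"
workbook.save("result_demo.xlsx")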

libs/core/parses.py
@@ -6,6 +6,7 @@
import re
import os
import config
import logging
import threading
import libs.core as cores
@@ -38,9 +39,8 @@ class ParsesThreads(threading.Thread):
def __get_string_by_iOS__(self,file_path):
output_path = cores.output_path
strings_path = cores.strings_path
temp = os.path.join(output_path,"temp.txt")
cmd_str = ('"%s" "%s" > "%s"') % (str(strings_path),str(file_path),str(temp))
cmd_str = ('"%s" "%s" > "%s"') % (str(cores.strings_file),str(file_path),str(temp))
if os.system(cmd_str) == 0:
with open(temp,"r",encoding='utf-8',errors='ignore') as f:
lines = f.readlines()
@@ -59,7 +59,7 @@ class ParsesThreads(threading.Thread):
akAndSkList = re.compile(r'.*accessKeyId.*".*"|.*accessKeySecret.*".*"|.*secret.*".*"').findall(file_content)
for akAndSk in akAndSkList:
self.result_list.append(akAndSk.strip())
print("[+] AK or SK in:",akAndSk.strip())
logging.info("[+] AK or SK in:",akAndSk.strip())
# Iterate over all strings
for result in set(results):
@@ -80,7 +80,7 @@ class ParsesThreads(threading.Thread):
self.threadLock.acquire()
if cores.all_flag:
print("[+] The string searched for matching rule is: %s" % (resl_str))
logging.info("[+] String : {}".format(resl_str))
self.result_list.append(resl_str)
self.threadLock.release()
continue

libs/task/android_task.py
@@ -5,6 +5,7 @@
import os
import re
import config
import logging
import hashlib
from queue import Queue
import libs.core as cores
@@ -12,14 +13,14 @@ import libs.core as cores
class AndroidTask(object):
def __init__(self,path,package):
self.path = path
def __init__(self, file_path, package):
self.input_file_path = file_path
self.package = package
self.file_queue = Queue()
self.shell_flag=False
self.packagename=""
self.comp_list=[]
self.file_identifier=[]
self.shell_flag = False
self.packagename = ""
self.comp_list = []
self.file_identifier = []
def start(self):
# Check whether a Java environment exists
@@ -37,8 +38,6 @@ class AndroidTask(object):
return {"comp_list":self.comp_list,"shell_flag":self.shell_flag,"file_queue":self.file_queue,"packagename":self.packagename,"file_identifier":self.file_identifier}
def __decode_file__(self,file_path):
apktool_path = str(cores.apktool_path)
backsmali_path = str(cores.backsmali_path)
base_out_path = str(cores.output_path)
filename = os.path.basename(file_path)
suffix_name = filename.split(".")[-1]
@@ -46,7 +45,7 @@ class AndroidTask(object):
if suffix_name == "apk":
name = filename.split(".")[0]
output_path = os.path.join(base_out_path,name)
self.__decode_apk__(file_path,apktool_path,output_path)
self.__decode_apk__(file_path,output_path)
elif suffix_name == "dex":
f = open(file_path,'rb')
md5_obj = hashlib.md5()
@@ -60,7 +59,7 @@ class AndroidTask(object):
output_path = os.path.join(base_out_path,dex_md5)
if not os.path.exists(output_path):
os.makedirs(output_path)
self.__decode_dex__(file_path,backsmali_path,output_path)
self.__decode_dex__(file_path,output_path)
else:
return "error"
@@ -75,23 +74,25 @@ class AndroidTask(object):
continue
# Decompile the APK
def __decode_apk__(self,file_path,apktool_path,output_path):
cmd_str = ('java -jar "%s" d -f "%s" -o "%s" --only-main-classe') % (str(apktool_path),str(file_path),str(output_path))
def __decode_apk__(self,file_path,output_path):
cmd_str = ('java -jar "%s" d -f "%s" -o "%s" --only-main-classe') % (cores.apktool_file,str(file_path),str(output_path))
logging.debug("[*] cmd {}".format(cmd_str))
if os.system(cmd_str) == 0:
self.__shell_test__(output_path)
self.__scanner_file_by_apktool__(output_path)
else:
print("[-] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
logging.error("[x] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
raise Exception(file_path + ", Decompilation failed.")
# Decompile the DEX
def __decode_dex__(self,file_path,backsmali_path,output_path):
cmd_str = ('java -jar "%s" d "%s"') % (str(backsmali_path),str(file_path))
def __decode_dex__(self,file_path,output_path):
cmd_str = ('java -jar "%s" d "%s"') % (cores.backsmali_file,str(file_path))
logging.debug("[*] cmd {}".format(cmd_str))
if os.system(cmd_str) == 0:
self.__get_scanner_file__(output_path)
else:
print("[-] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
logging.error("[x] Decompilation failed, please submit error information at https://github.com/kelvinBen/AppInfoScanner/issues")
raise Exception(file_path + ", Decompilation failed.")

libs/task/base_task.py
@@ -5,6 +5,7 @@
import os
import re
import config
import logging
import threading
from queue import Queue
import libs.core as cores
@@ -13,52 +14,100 @@ from libs.task.web_task import WebTask
from libs.task.net_task import NetTask
from libs.core.parses import ParsesThreads
from libs.task.android_task import AndroidTask
from libs.task.download_task import DownloadTask
from libs.core.download import DownloadThreads
class BaseTask(object):
thread_list =[]
result_dict = {}
app_history_list=[]
domain_history_list=[]
# Unified initialization entry point
def __init__(self, types="Android", inputs="", rules="", sniffer=True, threads=10, package=""):
self.types = types
self.path = inputs
if rules:
config.filter_strs.append(r'.*'+str(rules)+'.*')
self.sniffer = not sniffer
self.threads = threads
self.package = package
self.file_queue = Queue()
def __init__(self):
if cores.user_add_rules:
config.filter_strs.append(r'.*'+str(cores.user_add_rules)+'.*')
self.file_queue = Queue()
# File download queue
self.download_file_queue = Queue()
# Downloaded file list
self.download_file_list = []
self.thread_list = []
self.app_history_list= []
self.domain_history_list = []
self.result_dict = {}
# Unified start
def start(self, types="Android", user_input_path="", package=""):
# If the input path is a directory and the type is not Web, automatically search it for DEX, IPA, APK and similar files
if not(types == "Web") and os.path.isdir(user_input_path):
self.__scanner_specified_file__(user_input_path)
# If the input path is a txt file, load its contents for batch processing
elif user_input_path.endswith("txt"):
with open(user_input_path) as f:
lines = f.readlines()
for line in lines:
# Lines starting with http:// or https://, or existing files ending with apk/dex/ipa
if (line.startswith("http://") or line.startswith("https://")) or ((line.endswith("apk") or line.endswith(".dex") or line.endswith("ipa")) and os.path.exists(line)):
self.download_file_queue.put(line)
f.close()
else:
# A single file, or a directory for the Web type
self.download_file_queue.put(user_input_path)
# If the queue is empty, the user must choose another directory
if self.download_file_queue.qsize() < 1:
raise Exception('[x] The specified DEX, IPA and APK files are not found. Please re-enter the directory to be scanned!')
# Unified file download center
self.__download_file_center__(types)
for download_file in self.download_file_list:
file_path = download_file["path"]
types = download_file["type"]
# Control center
self.__control_center__(file_path, types)
# Unified file download center
def __download_file_center__(self,types):
# Avoid wasting resources (cap threads at the queue size)
if self.download_file_queue.qsize() < cores.threads_num:
threads_num = self.download_file_queue.qsize()
else:
threads_num = cores.threads_num
# Unified scheduling platform
def start(self):
for threadID in range(1, threads_num):
threadName = "Thread - " + str(int(threadID))
thread = DownloadThreads(threadID, threadName, self.download_file_queue, self.download_file_list, types)
thread.start()
thread.join()
print("[*] AI is analyzing filtering rules......")
# Control center
def __control_center__(self, file_path, types):
logging.info("[*] Processing {}".format(file_path))
logging.info("[*] AI is analyzing filtering rules......")
# Get the history records
# Process the history records
self.__history_handle__()
print("[*] The filtering rules obtained by AI are as follows: %s" % (set(config.filter_no)) )
logging.info("[*] The filtering rules obtained by AI are as follows: {}".format(set(config.filter_no)))
# Task control center
task_info = self.__tast_control__()
task_info = self.__tast_control__(file_path, types)
if len(task_info) < 1:
return
# File queue
file_queue = task_info["file_queue"]
# Whether a shell (packer) is present
shell_flag = task_info["shell_flag"]
# Component list (Android only)
comp_list = task_info["comp_list"]
# Package name info (Android only)
packagename = task_info["packagename"]
# File identifier
file_identifier = task_info["file_identifier"]
if shell_flag:
print('[-] \033[3;31m Error: This application has shell, the retrieval results may not be accurate, Please remove the shell and try again!')
logging.error('[x] This application has shell, the retrieval results may not be accurate, Please remove the shell and try again!')
return
# Thread control center
print("[*] ========= Searching for strings that match the rules ===============")
logging.info("[*] ========= Searching for strings that match the rules ===============")
self.__threads_control__(file_queue)
# Wait for the threads to finish
@@ -69,54 +118,50 @@ class BaseTask(object):
self.__print_control__(packagename,comp_list,file_identifier)
def __tast_control__(self):
# Task control center
def __tast_control__(self, file_path, types):
task_info = {}
# Automatically corrected according to the file suffix
cache_info = DownloadTask().start(self.path,self.types)
cacar_path = cache_info["path"]
types = cache_info["type"]
if (not os.path.exists(cacar_path) and cores.download_flag):
print("[-] File download failed! Please download the file manually and try again.")
# If a file downloaded over the network does not exist, return to the task control center
if (not os.path.exists(file_path) and cores.download_flag):
logging.error("[x] {} download failed! Please download the file manually and try again.".format(file_path))
return task_info
# Invoke the Android handling logic
if types == "Android":
task_info = AndroidTask(cacar_path,self.package).start()
task_info = AndroidTask(file_path, self.package).start()
# Invoke the iOS handling logic
elif types == "iOS":
task_info = iOSTask(cacar_path).start()
task_info = iOSTask(file_path).start()
# Invoke the Web handling logic
else:
task_info = WebTask(cacar_path).start()
task_info = WebTask(file_path).start()
return task_info
# Thread control center
def __threads_control__(self,file_queue):
for threadID in range(1,self.threads):
for threadID in range(1, cores.threads_num):
name = "Thread - " + str(int(threadID))
thread = ParsesThreads(threadID,name,file_queue,self.result_dict,self.types)
thread.start()
self.thread_list.append(thread)
# Information output center
def __print_control__(self,packagename,comp_list,file_identifier):
txt_result_path = cores.txt_result_path
xls_result_path = cores.xls_result_path
all_flag = cores.all_flag
if self.sniffer:
print("[*] ========= Sniffing the URL address of the search ===============")
NetTask(self.result_dict,self.app_history_list,self.domain_history_list,file_identifier,self.threads).start()
if cores.net_sniffer_flag:
logging.info("[*] ========= Sniffing the URL address of the search ===============")
NetTask(self.result_dict,self.app_history_list,self.domain_history_list,file_identifier).start()
if packagename:
print("[*] ========= The package name of this APP is: ===============")
print(packagename)
logging.info("[*] ========= The package name of this APP is: ===============")
logging.info(packagename)
if len(comp_list) != 0:
print("[*] ========= Component information is as follows :===============")
logging.info("[*] ========= Component information is as follows :===============")
for json in comp_list:
print(json)
logging.info(json)
if all_flag:
if cores.all_flag:
value_list = []
with open(txt_result_path,"a+",encoding='utf-8',errors='ignore') as f:
for key,value in self.result_dict.items():
@@ -127,11 +172,12 @@ class BaseTask(object):
value_list.append(result)
f.write("\t"+result+"\r")
f.close()
print("[*] For more information about the search, see TXT file result: %s" %(txt_result_path))
logging.info("[>] For more information about the search, see TXT file result: {}".format(cores.txt_result_path))
if self.sniffer:
print("[*] For more information about the search, see XLS file result: %s" %(xls_result_path))
if cores.net_sniffer_flag:
logging.info("[>] For more information about the search, see XLS file result: {}".format(cores.xls_result_path))
# Get the history records
def __history_handle__(self):
domain_history_path = cores.domain_history_path
app_history_path = cores.app_history_path
@@ -159,4 +205,13 @@ class BaseTask(object):
config.filter_no.append(".*" + domain)
f.close()
# Scan for files with the specified suffixes
def __scanner_specified_file__(self, base_dir, file_suffix=['dex','ipa','apk']):
files = os.listdir(base_dir)
for file in files:
dir_or_file_path = os.path.join(base_dir,file)
if os.path.isdir(dir_or_file_path):
self.__scanner_specified_file__(dir_or_file_path,file_suffix)
else:
if dir_or_file_path.split(".")[-1] in file_suffix:
self.download_file_queue.put(dir_or_file_path)

libs/task/download_task.py
@@ -7,40 +7,72 @@ import re
import time
import config
import hashlib
import logging
from queue import Queue
import libs.core as cores
from libs.core.download import DownloadThreads
class DownloadTask(object):
def __init__(self):
self.download_file_queue = Queue()
self.thread_list = []
def start(self,path,types):
def start(self, path, types):
self.__local_or_remote__(path, types)
for threadID in range(1, cores.threads_num):
name = "Thread - " + str(int(threadID))
thread = DownloadThreads(threadID,name,self.download_file_queue)
thread.start()
thread.join()
# Decide whether the file is loaded locally or remotely
def __local_or_remote__(self,path,types):
# Add a file-suffix check
self.__update_type__(path)
# Handle local files
if not(path.startswith("http://") or path.startswith("https://")):
if not os.path.isdir(path): # not a directory
return {"path":path,"type":types}
else: # handle the directory
return {"path":path,"type":types}
else:
self.__net_header__(path,types)
# self.download_file_queue.put(path)
# Handle the network request
def __net_header__(self, path, types):
create_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
if path.endswith("apk"):
if path.endswith("apk") or types == "Android":
types = "Android"
file_name = create_time+ ".apk"
elif path.endswith("ipa"):
elif path.endswith("ipa") or types == "iOS":
types = "iOS"
file_name = create_time + ".ipa"
else:
if types == "Android":
types = "WEB"
file_name = create_time + ".html"
logging.info("[*] Detected that the task is not local, preparing to download file......")
cache_path = os.path.join(cores.download_dir, file_name)
self.download_file_queue.put({"path":path, "cache_path":cache_path, "types":types})
# thread = DownloadThreads(path,file_name,cache_path,types)
# thread.start()
# thread.join()
return {"path":cache_path,"type":types}
def __update_type__(self, path, types, file_name=None):
create_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
if path.endswith("apk") or types == "Android":
types = "Android"
if not file_name:
file_name = create_time+ ".apk"
elif types == "iOS":
elif path.endswith("ipa") or types == "iOS":
types = "iOS"
if not file_name:
file_name = create_time + ".ipa"
else:
types = "WEB"
if not file_name:
file_name = create_time + ".html"
if not(path.startswith("http://") or path.startswith("https://")):
if not os.path.isdir(path): # not a directory
return {"path":path,"type":types}
else: # handle the directory
return {"path":path,"type":types}
else:
print("[*] Detected that the task is not local, preparing to download file......")
cache_path = os.path.join(cores.download_path, file_name)
thread = DownloadThreads(path,file_name,cache_path,types)
thread.start()
thread.join()
print()
return {"path":cache_path,"type":types}
return types,file_name

libs/task/ios_task.py
@@ -3,8 +3,6 @@
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
import os
import re
import shutil
import zipfile
import binascii
import platform
@@ -23,7 +21,6 @@ class iOSTask(object):
file_path = self.path
if file_path.split(".")[-1] == 'ipa':
self.__decode_ipa__(cores.output_path)
self.__scanner_file_by_ipa__(cores.output_path)
elif self.__get_file_header__(file_path):
self.file_queue.put(file_path)
else:
@@ -31,38 +28,25 @@ class iOSTask(object):
return {"shell_flag":self.shell_flag,"file_queue":self.file_queue,"comp_list":[],"packagename":None,"file_identifier":self.file_identifier}
def __get_file_header__(self,file_path):
hex_hand = 0x0
crypt_load_command_hex = "2C000000"
macho_name = os.path.split(file_path)[-1]
self.file_identifier.append(macho_name)
with open(file_path,"rb") as macho_file:
macho_file.seek(hex_hand,0)
macho_file.seek(0x0,0)
magic = binascii.hexlify(macho_file.read(4)).decode().upper()
macho_magics = ["CFFAEDFE","CEFAEDFE","BEBAFECA","CAFEBABE"]
if magic in macho_magics:
self.__shell_test__(macho_file,hex_hand)
hex_str = binascii.hexlify(macho_file.read()).decode().upper()
if crypt_load_command_hex in hex_str:
macho_file.seek(int(hex_str.index("2C000000")/2)+20,0)
cryptid = binascii.hexlify(macho_file.read(4)).decode()
if cryptid == "01000000":
self.shell_flag = True
macho_file.close()
return True
macho_file.close()
return False
def __shell_test__(self,macho_file,hex_hand):
while True:
magic = binascii.hexlify(macho_file.read(4)).decode().upper()
if magic == "2C000000":
macho_file.seek(hex_hand,0)
encryption_info_command = binascii.hexlify(macho_file.read(24)).decode()
cryptid = encryption_info_command[-8:len(encryption_info_command)]
if cryptid == "01000000":
self.shell_flag = True
break
hex_hand = hex_hand + 4
def __scanner_file_by_ipa__(self,output):
scanner_file_suffix = ["plist","js","xml","html"]
scanner_dir = os.path.join(output,"Payload")
self.__get_scanner_file__(scanner_dir,scanner_file_suffix)
def __get_scanner_file__(self,scanner_dir,file_suffix):
dir_or_files = os.listdir(scanner_dir)
for dir_file in dir_or_files:
@@ -84,41 +68,24 @@ class iOSTask(object):
self.file_queue.put(dir_file_path)
def __decode_ipa__(self,output_path):
scanner_file_suffix = ["plist","js","xml","html"]
scanner_dir = os.path.join(output_path,"Payload")
with zipfile.ZipFile(self.path,"r") as zip_files:
zip_file_names = zip_files.namelist()
zip_files.extract(zip_file_names[0],output_path)
for zip_file_name in zip_file_names:
try:
new_zip_file = zip_file_names[0].encode('cp437').decode('utf-8')
except UnicodeEncodeError:
new_zip_file = zip_file_names[0].encode('utf-8').decode('utf-8')
old_zip_dir = self.__get_parse_dir__(output_path,zip_file_names[0])
new_zip_dir = self.__get_parse_dir__(output_path,new_zip_file)
os.rename(old_zip_dir,new_zip_dir)
for zip_file in zip_file_names:
old_ext_path = zip_files.extract(zip_file,output_path)
start = str(old_ext_path).index("Payload")
dir_path = old_ext_path[start:len(old_ext_path)]
old_ext_path = os.path.join(output_path,dir_path)
try:
new_zip_file = zip_file.encode('cp437').decode('utf-8')
except UnicodeEncodeError:
new_zip_file = zip_file.encode('utf-8').decode('utf-8')
new_ext_path = os.path.join(output_path,new_zip_file)
if platform.system() == "Windows":
new_ext_path = new_ext_path.replace("/","\\")
if not os.path.exists(new_ext_path):
dir_path = os.path.dirname(new_ext_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
shutil.move(old_ext_path, new_ext_path)
if os.path.exists(old_ext_path):
os.remove(old_ext_path)
new_file_name = zip_file_name.encode('cp437').decode('GBK')
else:
new_file_name = zip_file_name.encode('cp437').decode('utf-8')
except UnicodeEncodeError:
new_file_name = zip_file_name.encode('utf-8').decode('utf-8')
new_ext_file_path = os.path.join(output_path,new_file_name)
ext_file_path = zip_files.extract(zip_file_name,output_path)
os.rename(ext_file_path,new_ext_file_path)
self.__get_scanner_file__(scanner_dir,scanner_file_suffix)
def __get_parse_dir__(self,output_path,file_path):
start = file_path.index("Payload/")
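
The reworked shell check in __get_file_header__ reads the Mach-O only once: it verifies the 4-byte magic, then searches the hex dump for 2C000000 (LC_ENCRYPTION_INFO_64, 0x2C little-endian) and reads the cryptid field 16 bytes into that load command, where 01000000 means the binary is still App Store encrypted. A standalone sketch of the same heuristic, assuming a thin little-endian Mach-O (fat binaries would need per-slice handling; the function name is illustrative):

import binascii

MACHO_MAGICS = {"CFFAEDFE", "CEFAEDFE", "BEBAFECA", "CAFEBABE"}

def has_encrypted_segment(macho_path):
    # Mirror of the diff's check: locate LC_ENCRYPTION_INFO_64 (0x2C) in the
    # raw bytes and test whether its cryptid field equals 1.
    with open(macho_path, "rb") as f:
        magic = binascii.hexlify(f.read(4)).decode().upper()
        if magic not in MACHO_MAGICS:
            return False
        body = binascii.hexlify(f.read()).decode().upper()
    index = body.find("2C000000")
    if index == -1:
        return False
    # cryptid sits 16 bytes (32 hex chars) after the start of the command.
    return body[index + 32:index + 40] == "01000000"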

libs/task/net_task.py
@@ -2,32 +2,29 @@
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/AppInfoScanner
import re
import xlwt
import socket
import config
from queue import Queue
import libs.core as cores
from openpyxl import Workbook
from libs.core.net import NetThreads
import requests
class NetTask(object):
value_list = []
domain_list=[]
def __init__(self,result_dict,app_history_list,domain_history_list,file_identifier,threads):
def __init__(self,result_dict,app_history_list,domain_history_list,file_identifier):
self.result_dict = result_dict
self.app_history_list = app_history_list
self.domain_history_list = domain_history_list
self.file_identifier = file_identifier
self.domain_queue = Queue()
self.threads = int(threads)
self.thread_list = []
self.domain_history_list = domain_history_list
def start(self):
xls_result_path = cores.xls_result_path
workbook = xlwt.Workbook(encoding = 'utf-8')
workbook = Workbook()
worksheet = self.__creating_excel_header__(workbook)
self.__write_result_to_txt__()
@@ -40,16 +37,11 @@ class NetTask(object):
workbook.save(xls_result_path)
def __creating_excel_header__(self,workbook):
worksheet = workbook.add_sheet("Result",cell_overwrite_ok=True)
worksheet.write(0,0, label = "Number")
worksheet.write(0,1, label = "IP/URL")
worksheet.write(0,2, label = "Domain")
worksheet.write(0,3, label = "Status")
worksheet.write(0,4, label = "IP")
worksheet.write(0,5, label = "Server")
worksheet.write(0,6, label = "Title")
worksheet.write(0,7, label = "CDN")
# worksheet.write(0,8, label = "Finger")
worksheet = workbook.create_sheet("Result",0)
excel_headers = ["Number","IP/URL","Domain","Status","IP","Server","Title","CDN","Finger"]
for head_cell in excel_headers:
column = excel_headers.index(head_cell) + 1
worksheet.cell(row=1, column=column).value = head_cell
return worksheet
def __write_result_to_txt__(self):
@@ -97,7 +89,7 @@ class NetTask(object):
append_file_flag = False
def __start_threads__(self,worksheet):
for threadID in range(0,self.threads) :
for threadID in range(0, cores.threads_num):
name = "Thread - " + str(threadID)
thread = NetThreads(threadID,name,self.domain_queue,worksheet)
thread.start()

libs/task/web_task.py
@@ -4,6 +4,7 @@
# Github: https://github.com/kelvinBen/AppInfoScanner
import os
import config
import hashlib
from queue import Queue
class WebTask(object):
@@ -39,7 +40,7 @@ class WebTask(object):
else:
if len(dir_file.split("."))>1:
if dir_file.split(".")[-1] in file_suffix:
with open(file_path,'rb') as f:
with open(dir_file_path,'rb') as f:
dex_md5 = str(hashlib.md5().update(f.read()).hexdigest()).upper()
self.file_identifier.append(dex_md5)
self.file_queue.put(dir_file_path)

requirements.txt
@@ -1,3 +1,5 @@
requests
click
xlwt
pillow
openpyxl

update.md
@@ -1,7 +1,15 @@
### V1.0.9
- Add batch IPA operations
- Optimize iOS shell detection speed
- Optimize IPA file extraction efficiency
- Optimize log output
- Fix overly long content that could not be written to Excel
### V1.0.8
- Added AK and SK detection
- Added an entry point for submitting detection rules
- Added the .gitignore file
- Add AK and SK detection
- Add an entry point for submitting detection rules
- Add the .gitignore file
- Optimize the txt result output format
- Fix parsing failures when a directory contains spaces
- Fix Web page and directory scanning issues
