Foreword
A simple tool I threw together purely so I could slack off a bit more. I'm not a professional developer and wrote this entirely as an amateur, so the code has some obvious redundancy; pointers and criticism are very welcome.
Why bother with a tool
Simple: because I'm lazy.
Scanning IPs in bulk, scanning /24 ranges, scanning specific ports, then collating it all into a spreadsheet: one pass through that routine and my document-editing skills had improved quite a bit, but that's not what I was after.
After asking around and some Baidu searching, I ended up with two scanning scripts.
masscan.sh
#!/bin/bash
ip_number=`cat ip.txt|wc -l`     # total number of target IPs (informational, not used below)
split -l 500 ip.txt -d -a 3 ip__ # split the target list into chunks of 500 lines: ip__000, ip__001, ...
i=0
ls |grep ip__|while read ip_line
do
    i=$[$i+1]
    masscan -iL $ip_line -p0-65535 -oX $i.xml --rate 800           # full port range, 800 packets/s
    cat $i.xml|grep addr|awk -F '"' '{print $4":"$10}' >>dk.txt    # pull ip:port pairs out of the XML
    sleep 30s
done
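The awk at the end is what turns masscan's XML into ip:port lines. For reference, a host entry in masscan's -oX output looks roughly like the sample below (the layout is my assumption about typical masscan output, so treat it as illustrative); splitting on double quotes puts the address in field 4 and the port in field 10. The same extraction, sketched in Python:

# Minimal sketch of what the awk step does. The sample line's layout is an
# assumption about typical masscan -oX output, not taken from the post.
sample = ('<host endtime="1596014071"><address addr="10.0.0.1" addrtype="ipv4"/>'
          '<ports><port protocol="tcp" portid="443">'
          '<state state="open" reason="syn-ack" reason_ttl="64"/></port></ports></host>')
fields = sample.split('"')           # awk -F '"' splits on every double quote
ip, port = fields[3], fields[9]      # awk's $4 and $10 (awk fields are 1-indexed)
print(ip + ':' + port)               # -> 10.0.0.1:443, i.e. one line of dk.txt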
nmap.py
import nmap
import xlwt

scan_raw_result = {}                     # raw python-nmap results, keyed by host

def nmap1(host, portlist, t_numb):
    t_numb.acquire()
    Nmap = nmap.PortScanner()            # create the nmap scanner
    np = Nmap.scan(hosts=host, ports=portlist, arguments='-n -Pn')
    for host, values in np['scan'].items():
        scan_raw_result[host] = values
    t_numb.release()

def write_file(what_file, file_name):    # dump the collected results to an Excel sheet
    index = 1
    data = xlwt.Workbook()
    sheet_result = data.add_sheet('result', cell_overwrite_ok=True)
    sheet_result.write(0, 0, "IP")
    sheet_result.write(0, 1, "端口")
    sheet_result.write(0, 2, "主机端口")
    sheet_result.write(0, 3, "协议")
    sheet_result.write(0, 4, "state")
    sheet_result.write(0, 5, "服务")
    try:
        for ip in what_file:
            for name, vars in scan_raw_result[ip].items():
                if "tcp" in name:
                    for port, tvalues in scan_raw_result[ip]['tcp'].items():
                        sheet_result.write(index, 0, ip)
                        sheet_result.write(index, 1, port)
                        sheet_result.write(index, 2, str(ip) + ':' + str(port))
                        sheet_result.write(index, 3, 'tcp')
                        sheet_result.write(index, 4, tvalues['state'])
                        sheet_result.write(index, 5, tvalues['name'])
                        index += 1
                if "udp" in name:
                    for port, tvalues in scan_raw_result[ip]['udp'].items():
                        sheet_result.write(index, 0, ip)
                        sheet_result.write(index, 1, port)
                        sheet_result.write(index, 2, str(ip) + ':' + str(port))
                        sheet_result.write(index, 3, 'udp')
                        sheet_result.write(index, 4, tvalues['state'])
                        sheet_result.write(index, 5, tvalues['name'])
                        index += 1
        data.save('save.xlsx')           # xlwt actually writes the old .xls format despite the name
        print(file_name + '写入完成')
    except Exception as e:
        print(e)
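For context, a minimal single-host driver (my own sketch, not from the script; the real flow fans hosts out across threads, as the DataFormat function further down shows):

import threading

sem = threading.Semaphore(1)              # nmap1 expects a semaphore to throttle threads
nmap1('127.0.0.1', '22,80', sem)          # scan a couple of ports on one host
write_file(list(scan_raw_result.keys()), 'save.xlsx')   # flush whatever was collected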
Analysis
masscan.sh
Fairly simple, at least I could follow it: feed it a txt file of IPs and it hands back a txt file of ip:port lines. That file is then passed to nmap.py, which grabs each port's fingerprint and state. Not that hard either.
The problem
The results I need to collate come from three sources: bulk IP scans + /24 range scans + ad-hoc port scans.
But every run of nmap.py overwrites the previous results, so I had to rename files by hand and keep a separate xlsx for each. That annoyed me; for a lazy person it's simply not efficient.
The fix
Have all three scans append (a+) into the same xlsx. Sounds simple enough: just swap the w+ for a+. But after staring at the code for a while, that's not how the script actually works.
Digging into xlsx brought back some unpleasant memories; the last time I wrangled xlsx in Python it nearly drove me mad.
So, find a table format to replace xlsx: CSV.
A quick test of CSV genuinely put me in a good mood.
What more is there to say? It's easy to eyeball, it still opens in a spreadsheet program, one file does double duty. Why didn't I find something this handy earlier?
Testing
CSV read/write
Settled on Python 3 plus the csv module, and first grabbed a few snippets to see how it behaves (I can no longer find the article I borrowed them from).
import csv

csv_file = './test.csv'    # module-level path used by both functions (the name here is just for testing)

def ReadFromCSV():
    with open(csv_file, encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        print(header)
        for row in reader:
            print(row)

def Write2CSV():
    header = ['name', 'password', 'status']
    data = [
        ['abc', '123456', 'PASS'],
        ['张五', '123#456', 'PASS'],
        ['张#abc123\n123', '123456', 'PASS'],
        ['666', '123456', 'PASS'],
        ['a b', '123456', 'PASS']
    ]
    # newline='' lets the csv module manage line endings itself
    with open(csv_file, 'a+', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(data)
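One detail worth keeping from that snippet is newline='' in the open() call: the csv module does its own line-ending handling, and without it you can end up with blank lines between records on Windows.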
CSV appending
That snippet already does what I expected and runs in append mode, but every append writes the header out again as well.
So tweak the code to check whether the file already has a header (honestly this step is redundant, the extra headers could just be removed as duplicates when collating the data... but I felt like playing with it).
And so the second version was born.
import csv

def ReadCSV(*args):
    data_list = []
    try:
        csv_file = args[0]
        with open(csv_file, encoding='utf-8') as f:
            try:
                reader = csv.reader(f)
                header = next(reader)
            except StopIteration as e:
                return False, ''                  # empty file: no header, no data
            else:
                data_list.append(header)
                for row in reader:
                    data_list.append(row)
                header_flag = True
                return header_flag, data_list
    except FileNotFoundError as e:
        open(csv_file, 'w+').close()              # create the file so later opens succeed
        return False, ''

def Write2CSV(*args):
    csv_file = args[0]
    header = args[1]
    data = args[2]
    flag, csv_data = ReadCSV(csv_file)            # does the file already have a header?
    if flag == True:
        with open(csv_file, 'a+', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(data)                # append the 2-D list as-is
    if flag == False:
        with open(csv_file, 'a+', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(header)               # first write: the header goes in once
            writer.writerows(data)
CSV lookup
The idea is simply to read the file before writing and check whether a header is already there, returning a flag to mark that state; with the flag, data can be appended without the header being written again.
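A quick sanity check of the idea (my own sketch; the file name is only for illustration): call Write2CSV twice on the same file and the header should show up exactly once.

header = ['IP', '端口', '主机端口', '协议', 'state', '服务']
row1 = [['1.1.1.1', '80', '1.1.1.1:80', 'tcp', 'open', 'http']]
row2 = [['2.2.2.2', '22', '2.2.2.2:22', 'tcp', 'open', 'ssh']]

Write2CSV('test.csv', header, row1)   # new/empty file: header + first row written
Write2CSV('test.csv', header, row2)   # header already present: only the row is appended
print(ReadCSV('test.csv'))            # (True, [header, row1[0], row2[0]])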
Next round of testing, and another problem: what to do about duplicate data?
As time goes on, some ports' states are bound to change. Ranked by how quickly their results go stale, the three scan jobs come out as: ad-hoc scan > bulk IP scan > /24 range scan.
So the order of precedence for what ends up in the CSV is just as clear: ad-hoc scan > bulk IP scan > /24 range scan.
nmap.py therefore runs in the order: /24 results > bulk results > ad-hoc results.
Which result.txt gets processed I can control by hand, but the imported data has to be the freshest, so any earlier entry for the same key simply gets deleted first.
In the whole CSV only the ip:port field is unique, so before appending I can search the table for that value and delete the row if it is already there.
Obviously a row has to be looked up before it can be deleted. Since the CSV is read and written as a 2-D list, searching the CSV turns into matching an element against a 2-D list.
I dug up some code for searching a 2-D array:
class Solution:
    # array is a 2-D list sorted ascending along each row and column
    def Find(self, target, array):
        if not array or not array[0] or target is None:
            return False
        if array[0][0] > target or array[-1][-1] < target:
            return False
        rows, cols = len(array), len(array[0])
        r, c = 0, cols - 1                 # start at the top-right corner
        while r < rows and c >= 0:
            if array[r][c] == target:
                return True
            if array[r][c] < target:
                r += 1
            else:
                c -= 1
        return False                       # walked off the matrix without a hit
Took a look: it's written for a sorted matrix, which isn't my case, so I reworked it:
def Find(self, target, array):
    find_list = []
    if not array or not array[0] or target is None:
        return False, ''
    rows, cols = len(array), len(array[0])
    for r in range(rows):
        for c in range(cols):
            try:
                if array[r][c] == target:
                    find_list.append(r)      # remember the row number for the delete step
            except IndexError as e:
                break                        # ragged row: give up on this row
            else:
                continue
    if len(find_list) == 0:
        return False, ''
    else:
        return True, find_list
The reason r goes into find_list is that the delete step later needs that row number. The try was originally meant to dodge one particular edge case, but I was too lazy and simply left it alone for now (a quick usage check follows below). Find sorted; on to the next piece.
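To make the return value concrete, a tiny usage sketch against made-up rows (using the Solution class from csv_find.py further down): find_list holds the row numbers, with the header counting as row 0.

rows = [
    ['IP', '端口', '主机端口', '协议', 'state', '服务'],
    ['1.1.1.1', '80', '1.1.1.1:80', 'tcp', 'open', 'http'],
    ['2.2.2.2', '22', '2.2.2.2:22', 'tcp', 'open', 'ssh'],
]
found, hits = Solution().Find('2.2.2.2:22', rows)
print(found, hits)    # True [2]  -> third row of the 2-D list (header is row 0)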
CSV deletion
This leans on pandas: read the data in, drop the rows in question, then write everything back out. Honestly the efficiency isn't great, but my experience is limited and I couldn't come up with anything better on the spot. Besides, I'd already spent enough time on this tool; getting it running and cutting the grunt work was the priority, optimization can wait.
# just an excerpt
import pandas as pd
data = pd.read_csv("./betting.csv")
data_new = data.drop([128, 129, 130])
drop takes a list here, so I can remove several rows in one go instead of reloading the file for every single deletion.
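One wrinkle worth spelling out, because it's what the x = x - 1 in csv_save.py below is for: Find counts rows over the whole 2-D list, header included, while pandas turns the header into column names, so its row index 0 is already the first data row. A small sketch with made-up values:

import pandas as pd

hits = [1, 3]                                  # row numbers from Find (header counted as row 0)
data = pd.read_csv('2020-08-01.csv')           # file name is only for illustration
data_new = data.drop([h - 1 for h in hits])    # shift by one to line up with the DataFrame index
data_new.to_csv('2020-08-01.csv', index=False)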
After plenty of back and forth, the whole series of operations was finally in place: inserting data, lookup and deletion, replacing data.
No more agonizing over collating results for me.
All the necessary pieces assembled
For what it's worth, here is the rough code.
The code
csv_delete.py
import pandas as pd

def CSVDelete(*args):
    csv_file = args[0]
    delete_line = args[1]                    # list of DataFrame row indexes to drop
    data = pd.read_csv(csv_file)
    data_new = data.drop(delete_line)
    data_new.to_csv(csv_file, index=False)   # write back without the extra index column
csv_find.py
class Solution:
    # brute-force search of a 2-D list; returns (found, [row numbers])
    def Find(self, target, array):
        find_list = []
        if not array or not array[0] or target is None:
            return False, ''
        rows, cols = len(array), len(array[0])
        for r in range(rows):
            for c in range(cols):
                try:
                    if array[r][c] == target:
                        find_list.append(r)
                except IndexError as e:
                    break
                else:
                    continue
        if len(find_list) == 0:
            return False, ''
        else:
            return True, find_list
csv_save.py
import csv
import csv_delete as cd
import csv_find as cf

def ReadCSV(*args):
    data_list = []
    try:
        csv_file = args[0]
        with open(csv_file, encoding='utf-8') as f:
            try:
                reader = csv.reader(f)
                header = next(reader)
            except StopIteration as e:
                return False, ''                     # empty file: no header, no data
            else:
                data_list.append(header)
                for row in reader:
                    data_list.append(row)
                header_flag = True
                return header_flag, data_list
    except FileNotFoundError as e:
        open(csv_file, 'w+').close()                 # create the file so later opens succeed
        return False, ''

def Write2CSV(*args):
    csv_file = args[0]
    header = args[1]
    data = args[2]
    delete_list = []
    cfs = cf.Solution()
    flag, csv_data = ReadCSV(csv_file)
    for a in data:
        target = a[2]                                # ip:port is the unique key
        find = cfs.Find(target, csv_data)
        if find[0] == True:
            for x in find[1]:
                x = x - 1                            # Find counts the header as row 0; pandas doesn't
                delete_list.append(x)
        else:
            break
    try:
        cd.CSVDelete(csv_file, delete_list)          # drop the stale rows before appending
    except:
        pass                                         # nothing to drop (e.g. a brand-new file)
    if flag == True:
        with open(csv_file, 'a+', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(data)
    if flag == False:
        with open(csv_file, 'a+', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(header)
            writer.writerows(data)
csv_main.py
import time
import csv_save as cs

csv_name = (time.strftime("%Y-%m-%d", time.localtime()))   # one CSV per day
csv_file = './' + csv_name + '.csv'
header = ['IP', '端口', '主机端口', '协议', 'state', '服务']
data = [
    ['127.0.0.1', '22', '127.0.0.1:22', 'tcp', 'closed', '1234'],
]
cs.Write2CSV(csv_file, header, data)
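To see the dedup in action, a minimal check (assuming csv_find.py, csv_save.py and csv_delete.py sit in the same directory and are imported as above): write the same ip:port twice with a different state and the old row gets replaced instead of duplicated.

import csv_save as cs

csv_file = './demo.csv'   # throwaway file just for this check
header = ['IP', '端口', '主机端口', '协议', 'state', '服务']

cs.Write2CSV(csv_file, header, [['127.0.0.1', '22', '127.0.0.1:22', 'tcp', 'closed', 'ssh']])
cs.Write2CSV(csv_file, header, [['127.0.0.1', '22', '127.0.0.1:22', 'tcp', 'open', 'ssh']])
# demo.csv now holds one header line and a single 127.0.0.1:22 row with state 'open'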
nmap.py
import sys
import threading
import time

import nmap
from csv_save import Write2CSV

cachefile = './data/cache.txt'
ip_txt = './data/ip_port.txt'
result_list = './result_list.txt'
csv_name = (time.strftime("%Y-%m-%d", time.localtime()))
result_file = './' + csv_name + '.csv'               # one CSV per day, shared by all three scan jobs
scan_raw_result = {}
header = ['IP', '端口', '主机端口', '协议', 'state', '服务']

def nmap1(host, portlist, t_numb):
    t_numb.acquire()
    Nmap = nmap.PortScanner()                        # create the nmap scanner
    print(host, portlist)
    np = Nmap.scan(hosts=host, ports=portlist, arguments='-n -Pn')
    for host, values in np['scan'].items():
        scan_raw_result[host] = values
    t_numb.release()

def write_file(what_file, file_name):                # push the results into the CSV
    flag = 1
    try:
        for ip in what_file:
            for name, vars in scan_raw_result[ip].items():
                if "tcp" in name:                    # tcp results
                    for port, tvalues in scan_raw_result[ip]['tcp'].items():
                        ip_port = str(ip) + ':' + str(port)
                        agree = 'tcp'
                        port_status = tvalues['state']
                        server_name = tvalues['name']
                        if tvalues['name'] == '':
                            server_name = 'unknown'
                        tmp_data = [ip, port, ip_port, agree,
                                    port_status, server_name]
                        Write2CSV(result_file, header, [tmp_data])   # one row at a time, via csv_save
                if "udp" in name:                    # udp results
                    for port, tvalues in scan_raw_result[ip]['udp'].items():
                        ip_port = str(ip) + ':' + str(port)
                        agree = 'udp'
                        port_status = tvalues['state']
                        server_name = tvalues['name']
                        if tvalues['name'] == '':
                            server_name = 'unknown'
                        tmp_data = [ip, port, ip_port, agree,
                                    port_status, server_name]
                        Write2CSV(result_file, header, [tmp_data])
            flag += 1
    except Exception as e:
        print(e)
        raise

def DataFormat(readfile):
    dir = {}
    t_list = []
    with open(readfile, encoding='utf-8') as file:
        data = file.readlines()
    t_numb = threading.Semaphore(20)                 # at most 20 nmap threads at once
    for line in data:
        pattern = r'\-|\(|\)|<|\"'                   # leftover patterns, not used below
        pattern_ip_port = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{0,5}'
        line = line.strip()
        ip_port = line.split(":")                    # each line is ip:port
        if ip_port[0] not in dir:
            dir[ip_port[0]] = []
        if ip_port[1] not in dir[ip_port[0]]:
            dir[ip_port[0]].append(ip_port[1])       # group ports under their host
    for ip, value in dir.items():
        ports = ','.join(str(port) for port in value)
        t = threading.Thread(target=nmap1, args=(ip, ports, t_numb,))
        t_list.append(t)
    for t in t_list:
        t.start()
    for t in t_list:
        t.join()
    key1 = list(set(scan_raw_result.keys()))         # deduped host list
    write_file(key1, readfile)                       # flush everything collected by the threads into the CSV

if __name__ == '__main__':
    params = sys.argv
    if len(params) == 1:
        readfile = './ip.txt'
    if len(params) == 2:
        readfile = params[1]
    Clear()                                          # not shown in the post; presumably clears the cache file
    DataFormat(readfile)
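To run it, pass in the ip:port list produced by masscan.sh (one ip:port per line, i.e. the dk.txt from earlier), for example python3 nmap.py dk.txt; with no argument it falls back to ./ip.txt. Every port that comes back then lands in the day's CSV through Write2CSV, with any older row for the same ip:port replaced along the way.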