A little follow-up script for lijiejie's subdomain brute-force results~~~~

Posted by bfpiaoran on November 27, 2017

I was testing lijiejie's subdomain brute-forcer, which returns results in a domain + IP format.

But a lot of those hosts can't actually be reached.

I had originally written a single-threaded script that just fetched page titles; yesterday, with some spare time, I reworked it into a multi-process version that also probes several ports.

The speed is decent enough.
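For reference, each line of alibaba.com_full.txt (the result file from lijiejie's tool) looks roughly like the illustrative sample below, with the hostname padded out to a fixed-width column and the resolved IP(s) after it; the exact values here are made up:

www.alibaba.com                1.2.3.4
mail.alibaba.com               5.6.7.8, 9.10.11.12

That fixed column width is what the line[:31] / line[31:] split in the parsing code relies on, so adjust it if your output is formatted differently.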

import re
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool


urllist = []
# Parse the result file: the first 31 characters of each line are the padded
# hostname column, the rest is the IP list. Lines resolving to 114.80.132.160
# are skipped.
with open('alibaba.com_full.txt', 'r') as f:
    for line in f:
        if '114.80.132.160' not in line:
            domain = line[:31]
            ips = line[31:]
            domain = re.sub(r'[ \t\r\n]', '', domain)
            ips = re.sub(r'[ \t\r\n]', '', ips)
            urllist.append(domain + ' ' + ips)


def get_title(url):
    # Probe a list of common web (and a few service) ports on one host and
    # record the page title for each, then append the results to a file.
    portlist = [80, 81, 82, 8080, 8081, 8000, 8001, 9200, 6379]
    url = 'http://' + url
    ret = []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.0.1471.914 Safari/537.36'}
    for port in portlist:
        u = url + ':' + str(port)
        print('[*] start ' + u)
        try:
            response = requests.get(url=u, headers=headers, timeout=5, verify=False)
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, 'lxml')
            title = soup.title.get_text().strip()
        except Exception:
            # connection refused, timeout or a missing <title> all end up here
            title = 'empty'
        res = u + '    ' + title
        print(res)
        ret.append(res)
    # Each worker process appends its own lines to the shared result file.
    with open('aliresult.txt', 'a+') as out:
        for line in ret:
            out.write(line + '\n')

if __name__ == '__main__':
    # Work through urllist in batches of five: a fresh pool is created for each
    # batch and joined before the next batch starts.
    while urllist:
        p = Pool(5)
        for entry in urllist[:5]:
            urllist.remove(entry)
            domain = entry.split(' ')[0]   # drop the IP part, scan the hostname
            p.apply_async(get_title, args=(domain,))
        p.close()
        p.join()
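
The loop above recreates the pool for every batch of five hosts. A simpler drop-in for the __main__ block (just a sketch, reusing the get_title and urllist defined above) builds one pool and hands it the whole list via map():

# One pool, created once; map() blocks until every hostname has been processed.
if __name__ == '__main__':
    domains = [entry.split(' ')[0] for entry in urllist]   # keep only the hostname part
    p = Pool(5)
    p.map(get_title, domains)
    p.close()
    p.join()

Either way, the results end up in aliresult.txt, one "url    title" line per port probed.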