2020-10-31 | Python | UNLOCK

query_dns.aizhan.py

Note: buggy in some cases.
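The note most likely refers to the hostname cleanup in getip(): the original stripped a trailing slash with domain.replace(domain[-1], '', 1), which removes the first slash in the string and so mangles any URL that still carries a path (the listing below swaps this for a plain split on '/'). Here is a minimal sketch of that failure mode plus a urlparse-based alternative; hostname() is an illustrative helper, not part of the script:

# Reproducing the mangling: stripping a trailing "/" with str.replace()
# removes the first "/" on every pass, so the path collapses into the host.
domain = "example.com/path/"
while domain[-1] == "/":
    domain = domain.replace(domain[-1], '', 1)
print(domain)  # -> "example.compath", which getaddrinfo() cannot resolve

# A safer normalization using the standard library (hostname() is hypothetical):
from urllib.parse import urlparse

def hostname(url):
    if '//' not in url:
        url = '//' + url  # urlparse only recognises the netloc after a scheme marker
    return urlparse(url).netloc

print(hostname("https://example.com/path/"))  # -> "example.com"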

import requests
from bs4 import BeautifulSoup
import time
import sys
import socket


def getip(domain):
    # Reduce a URL to its bare hostname, then resolve it to an IP address.
    if '//' in domain:
        domain = domain.split('//')[1]
    domain = domain.split('/')[0]  # drop any path and trailing slashes
    myaddr = socket.getaddrinfo(domain, None)
    return myaddr[0][4][0]


def headers():
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded"
    }
    return headers


def get_response(url):
    response = requests.get(url, headers=headers(), timeout=None)
    soup = BeautifulSoup(response.content, 'html.parser')
    return soup


def az_link(url):
    # Collect the first result page plus every numbered page in the pager.
    soup = get_response(url)
    page = [url]
    for div in soup.find_all('div', class_='pager'):
        for href in div.find_all('a')[3:-2]:  # skip the leading/trailing pager controls
            page.append(href['href'])
    return page


def select(az_page, selecturl):
    sourceip = getip(selecturl)  # resolve the target once instead of per link
    for url in az_page:
        soup = get_response(url)
        print('get:=>' + url + '=' * 30 + '>>' + selecturl + '.txt')
        time.sleep(0.5)
        # aizhan renders this inline style on its "no result" placeholder
        if 'height:200px;text-align:center;color:#999;border:none;background:#fff' in str(soup):
            print('Nothing found, apparently')
            break
        for a in soup.select('.dns-content a'):
            link = a['href']
            print('[+]', selecturl, sourceip, ' ? ', getip(link), link)
            if getip(link) != sourceip:
                break  # stop at the first record that no longer shares the target's IP
            with open(selecturl + '.txt', 'a+') as f:
                f.write(link + '\n')


if __name__ == '__main__':
    # Accept either a bare domain or a full URL on the command line.
    selecturl = sys.argv[1].replace("http://", '').replace("https://", '').replace("/", '')
    url = 'https://dns.aizhan.com/%s' % (selecturl + '/')
    try:
        az_page = az_link(url)
        select(az_page, selecturl)
    except Exception as e:
        print(e)
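To run it, pass the target domain (or a full URL) as the only argument; example.com below stands in for the real target:

python query_dns.aizhan.py example.com

Domains listed by dns.aizhan.com that still resolve to the same IP as the target are appended to example.com.txt.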
