时间:2020-10-24 11:49:36 | 栏目:Python代码
今天得了一批域名,需要把域名解析成ip
因为量比较大所以采用了多进程和队列的方式
# Resolve a batch of domain names to IP addresses with a process pool.
# The input URL file is split into one chunk file per worker; every worker
# drains one shared queue of URLs, resolves each, and appends "url ip"
# lines to a single output file, serialized by a shared lock.
from multiprocessing import Process, Queue, Pool
import multiprocessing
import os
import socket
from queue import Empty


def write(q, lock, filename):
    """Drain URLs from queue *q*, resolve each to an IP, and append
    ``"url ip\\n"`` lines to *filename*.

    *lock* is a multiprocessing lock that serializes appends so lines
    from concurrent workers do not interleave.
    """
    while True:
        try:
            # get_nowait + Empty instead of `while not q.empty(): q.get()`:
            # with several workers draining one queue, empty() -> get() is a
            # race and get() could block forever after another worker wins.
            url = q.get_nowait()
        except Empty:
            break
        print(url)
        try:
            ip = socket.gethostbyname(url)
        except socket.gaierror:
            # Resolution failure is expected for dead domains; anything
            # else (KeyboardInterrupt, bugs) should propagate — the
            # original bare `except:` hid those too.
            ip = "unknown"  # bug fix: original wrote the typo "unknow"
        print(ip)
        # Hold the lock around open+write so concurrent appends from
        # other worker processes cannot interleave partial lines.
        with lock:
            with open(filename, 'a+') as f:
                f.write(url + " " + ip + "\n")


def readurl(q, n):
    """Put every line of ``<n>.txt`` (whitespace-stripped) onto queue *q*.

    Returns *q* so the call can be nested in ``apply_async`` args.
    """
    with open(str(n) + '.txt', 'r') as f:
        for line in f:
            q.put(line.strip())
    return q


def multi(urllist, n):
    """Split the file *urllist* into *n* chunk files ``0.txt`` .. ``<n-1>.txt``.

    Bug fixes vs. the original:
    - the last chunk now receives the remainder lines; the original
      dropped up to n-1 trailing URLs when len(lines) % n != 0;
    - chunks are opened ``'w'`` instead of ``'a+'`` so stale chunk files
      from an aborted previous run are overwritten, not appended to.
    """
    with open(urllist, 'r') as f:
        lines = f.readlines()
    size = len(lines) // n
    print(size)
    for m in range(n):
        # Last chunk absorbs the remainder so no URL is lost.
        end = len(lines) if m == n - 1 else size * (m + 1)
        with open(str(m) + '.txt', 'w') as chunk:
            chunk.writelines(lines[size * m:end])


def remove(n):
    """Delete the temporary chunk files ``0.txt`` .. ``<n-1>.txt``."""
    for i in range(n):
        os.remove(str(i) + '.txt')
    print("######清除临时文件######")


if __name__ == "__main__":
    manager = multiprocessing.Manager()
    q = manager.Queue()      # one queue shared by all workers
    lock = manager.Lock()    # serializes writes to the result file
    m = 5                    # number of resolver processes
    urllist = "url.txt"      # input: one domain per line
    filename = "test.txt"    # output: "url ip" per line
    multi(urllist, m)
    p = Pool(m)
    for i in range(m):
        # readurl() runs in the parent: each call feeds chunk i into the
        # shared queue before the worker starts draining it.
        p.apply_async(write, args=(readurl(q, i), lock, filename))
    p.close()
    p.join()
    remove(m)
    print("#######全部文件采集完成########")