python 크롤러 다중 스레드 병렬로 웹 이메일 주소 수집하기

1593 단어
엑셀 표에서 데이터를 읽어, 크롤링한 결과를 다시 엑셀 표에 저장합니다. 2만 개 기록 처리에 약 15분이 걸립니다. 웹 페이지마다 형식이 다를 수 있으니 상황에 맞게 코드를 수정하십시오.
 # -*- coding: utf-8 -*-

import requests

import openpyxl

import re

from bs4 import BeautifulSoup

from multiprocessing.dummy import Pool as ThreadPool

# Workbook holding the company names (column F); scraped emails are written
# back into column L of the same sheet by writeExcel().
wb = openpyxl.load_workbook('company_database.xlsx')

ws = wb.active

# First worksheet row to process; main() scans rows [start, 17722).
start = 10000

# Column letter that contains the company name.
f = 'F'

# Base URL of the directory site (redacted in the original post).
urlstr = 'http://directory... url/'

# Path suffix appended after the company-name query.
query = '/q/'

def getEmail(url):
  """Fetch one directory page and extract the first email address on it.

  Args:
    url: tuple of (page_url, row_number) as produced by geturl().

  Returns:
    (email, row_number); email is '' when the page has no "Email" <p> tag
    or when no address matches the pattern.
  """
  print('grab email....')
  html = requests.get(url[0])
  soup = BeautifulSoup(html.text, "lxml")
  # Only pages that label an address with a <p>Email</p> element carry one.
  pudge = soup.find_all("p", text="Email")
  if pudge:
    # The original indexed [0] unconditionally, which raises IndexError when
    # the tag exists but no address matches — guard against the empty list.
    matches = re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", html.text, re.I)
    if matches:
      return matches[0], url[1]
  return '', url[1]

def writeExcel(mailCount):
  """Store one scraped email in column L of the worksheet row it came from.

  mailCount: tuple of (email, row_number) as returned by getEmail().
  """
  email, row = mailCount
  ws['L' + str(row)].value = email
  print(f'{row} save ok: {email}')

def geturl(num):
  """Build the directory search URL for the company name in row `num`.

  Reads the company name from column F of the worksheet and returns a
  (url, row_number) tuple for the next pipeline stage.
  """
  print('Grabing url')
  company = ws[f + str(num)].value
  # Spaces in the company name become '+' for the query string.
  return urlstr + company.replace(' ', '+') + query, num

def main(start):
  """Scrape emails for worksheet rows [start, 17722) and save the workbook.

  Runs three sequential thread pools: build the search URLs, fetch each
  page and extract the email, then write the results back to the sheet.
  The original paste lost the indentation from the second pool onward,
  leaving `pool2`/`mailCount` referenced at module level; everything below
  belongs inside this function.
  """
  # Stage 1: build search URLs from the company names (worksheet reads).
  pool1 = ThreadPool(16)
  urlCount = pool1.map(geturl, range(start, 17722))
  pool1.close()
  pool1.join()

  # Stage 2: fetch each page and pull out the email (network-bound, so
  # threads overlap the I/O waits).
  pool2 = ThreadPool(16)
  mailCount = pool2.map(getEmail, urlCount)
  pool2.close()
  pool2.join()

  # Stage 3: write results back into the worksheet.
  # NOTE(review): openpyxl worksheets are not documented as thread-safe —
  # confirm that concurrent cell writes from pool3 do not race.
  pool3 = ThreadPool(16)
  pool3.map(writeExcel, mailCount)
  pool3.close()
  pool3.join()

  # Persist everything once at the end (openpyxl keeps the workbook in memory).
  wb.save('company_database.xlsx')
  print(' ok!')

if __name__ == '__main__':
  # The original paste left this call unindented (outside the guard) and
  # carried stray markdown fence residue; both are fixed here.
  main(start)

좋은 웹페이지 즐겨찾기