Python crawlers - the basic modules you need to know!

Prologue: a Python crawler (web spider) visits a site, downloads its pages, and then parses and extracts the data it needs.
The basic modules used here are urllib, urllib2, re, and so on.
Basic usage, with examples:
(1) Make a basic GET request and fetch the page HTML
#!coding=utf-8
import urllib
import urllib2

url = 'http://www.baidu.com/'
# build the Request object
request = urllib2.Request(url)
try:
    # send the request and get the response
    response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
    if hasattr(e, 'reason'):
        print e.reason
else:
    # read the response body
    html = response.read()
    # read the response headers
    headers = response.info()

(2) Form submission (POST)
#!coding=utf-8
import urllib2
import urllib

post_url = ''

post_data = urllib.urlencode({
    'username': 'username',
    'password': 'password',
})

post_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0',
}

request = urllib2.Request(
    url=post_url,
    data=post_data,
    headers=post_headers,
)

response = urllib2.urlopen(request)

html = response.read()
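For a plain GET with query parameters, the same urllib.urlencode helper can build the query string; a minimal sketch (the URL and parameter names are placeholders, not from the original article):

#!coding=utf-8
import urllib
import urllib2

# placeholder URL and parameters, for illustration only
base_url = 'http://www.example.com/search'
params = urllib.urlencode({'q': 'python', 'page': 1})

# append the encoded query string to the URL and send a GET request
response = urllib2.urlopen(base_url + '?' + params)
html = response.read()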

(3) Scrape a Baidu Tieba thread with a regular expression and save the posts to a file
#!coding=utf-8

import urllib2
import re

page_num = 1
url = 'http://tieba.baidu.com/p/3238280985?see_lz=1&pn=' + str(page_num)
myPage = urllib2.urlopen(url).read().decode('gbk')

# match the body of each post; the closing tag was lost in the original
# listing, so </div> is assumed here
myRe = re.compile(r'class="d_post_content j_d_post_content ">(.*?)</div>', re.DOTALL)
items = myRe.findall(myPage)

f = open('baidu.txt', 'a+')

import sys
reload(sys)
sys.setdefaultencoding('utf-8')

i = 0
texts = []
for item in items:
    i += 1
    print i
    # strip markup left inside the post; the tags removed here were lost
    # in the original listing, <br> is assumed
    text = item.replace('<br>', '').replace(' ', '') + '\n'
    print text
    f.write(text)
f.close()
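To grab more than the first page, the pn parameter can simply be looped over; a minimal sketch assuming the thread has five pages (the page count is not detected automatically here):

#!coding=utf-8
import urllib2
import re

myRe = re.compile(r'class="d_post_content j_d_post_content ">(.*?)</div>', re.DOTALL)

f = open('baidu.txt', 'a+')
# assume five pages for illustration; adjust to the real thread length
for page_num in range(1, 6):
    url = 'http://tieba.baidu.com/p/3238280985?see_lz=1&pn=' + str(page_num)
    myPage = urllib2.urlopen(url).read().decode('gbk')
    for item in myRe.findall(myPage):
        f.write((item.replace('<br>', '') + '\n').encode('utf-8'))
f.close()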
(4) Log in to a 163 mailbox and fetch its messages (cookies + regex + JSON)
#coding:utf-8
'''
Log in to a 163 (NetEase) mailbox and crawl its messages
'''
import urllib
import urllib2
import cookielib
import re
import time
import json

class Email163:
    header = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    user = ''
    cookie = None
    sid = None
    mailBaseUrl='http://twebmail.mail.163.com'

    def __init__(self):
        self.cookie = cookielib.CookieJar()
        cookiePro = urllib2.HTTPCookieProcessor(self.cookie)
        urllib2.install_opener(urllib2.build_opener(cookiePro))

    def login(self,user,pwd):
        '''
        Log in to the mailbox
        '''
        postdata = urllib.urlencode({
                'username':user,
                'password':pwd,
                'type':1
            })
        # POST the login form; additional options are passed as URL parameters
        req = urllib2.Request(
                url='https://ssl.mail.163.com/entry/coremail/fcg/ntesdoor2?funcid=loginone&language=-1&passtype=1&iframe=1&product=mail163&from=web&df=email163&race=-2_45_-2_hz&module=&uid='+user+'&style=10&net=t&skinid=null',
                data=postdata,
                headers=self.header,
            )
        res = str(urllib2.urlopen(req).read())
        #print res
        patt = re.compile('sid=([^"]+)',re.I)
        patt = patt.search(res)

        uname = user.split('@')[0]
        self.user = user
        if patt:
            self.sid = patt.group(1).strip()
            #print self.sid
            print '%s Login Successful.....'%(uname)
        else:
            print '%s Login failed....'%(uname)


    def getInBox(self):
        '''
        Fetch the inbox mail list
        '''
        print '\nGet mail lists.....\n'
        sid = self.sid
        url = self.mailBaseUrl+'/jy3/list/list.do?sid='+sid+'&fid=1&fr=folder'
        res = urllib2.urlopen(url).read()
        # parse sender, URL and subject out of the list page; the tag and
        # class names in this pattern were lost in the original listing and
        # are reconstructed from the old 163 webmail markup
        mailList = []
        patt = re.compile('<div\s+class="tdLike Ibx_Td_From"[^>]+>.*?href="([^"]+)"[^>]+>(.*?)</a>.*?<div\s+class="tdLike Ibx_Td_Subject"[^>]+>.*?href="[^>]+>(.*?)</a>',re.I|re.S)
        patt = patt.findall(res)
        if patt==None:
            return mailList
        for i in patt:
            line = {
                'from':i[1].decode('utf8'),
                'url':self.mailBaseUrl+i[0],
                'subject':i[2].decode('utf8')
            }
            mailList.append(line)
        return mailList

    def getMailMsg(self,url):
        '''
        Download the body of one message
        '''
        content = ''
        print '\nDownload.....%s\n'%(url)
        res = urllib2.urlopen(url).read()
        patt = re.compile('contentURL:"([^"]+)"',re.I)
        patt = patt.search(res)
        if patt==None:
            return content
        url = '%s%s'%(self.mailBaseUrl,patt.group(1))
        time.sleep(1)
        res = urllib2.urlopen(url).read()
        Djson = json.JSONDecoder(encoding='utf8')
        jsonRes = Djson.decode(res)
        if 'resultVar' in jsonRes:
            content = Djson.decode(res)['resultVar']
        time.sleep(3)
        return content

'''
Demo
'''
# initialize
mail163 = Email163()
# log in
mail163.login('[email protected]','944898186')
time.sleep(2)
# fetch the inbox list
elist = mail163.getInBox()
# print each message
for i in elist:
    print 'Subject:%s  From:%s  Content:\n%s'%(i['subject'].encode('utf8'),i['from'].encode('utf8'),mail163.getMailMsg(i['url']).encode('utf8'))

(5) Miscellaneous tips

#1 Cookie handling
 
import urllib2, cookielib
cookie_support= urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
content = urllib2.urlopen('http://XXXX').read()
 
#2 Using a proxy together with cookies
 
opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
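proxy_support is not defined in the snippet above; a minimal sketch of building it with urllib2.ProxyHandler (the proxy address is only a placeholder):

import urllib2, cookielib

# placeholder proxy address, for illustration only
proxy_support = urllib2.ProxyHandler({'http': 'http://127.0.0.1:8087'})
cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)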
 
#3 Submitting form data
 
import urllib
postdata=urllib.urlencode({
    'username':'XXXXX',
    'password':'XXXXX',
    'continueURI':'http://www.verycd.com/',
    # fk is generated dynamically by the login page; see the sketch after this snippet
    'fk':fk,
    # label of the submit button (the non-ASCII text was stripped from the original listing)
    'login_submit':'登录'
})
 
req = urllib2.Request(
    url = 'http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data = postdata
)
result = urllib2.urlopen(req).read()
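The fk value above is a token generated dynamically by the signin page; the original snippet never shows how it is obtained. A minimal sketch of the idea, pulling it out of the page with a regular expression (the pattern below is a hypothetical illustration, not the real VeryCD markup):

import re
import urllib2

# fetch the signin page and extract the fk token;
# the pattern is hypothetical and only illustrates the approach
signin_page = urllib2.urlopen('http://secure.verycd.com/signin/*/http://www.verycd.com/').read()
m = re.search(r"fk\s*:\s*'([^']+)'", signin_page)
fk = m.group(1) if m else ''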
 
#4 Disguising the request as a browser (set a User-Agent)
 
headers = {
    'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
req = urllib2.Request(
    url = 'http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data = postdata,
    headers = headers
)
 
#5 Dealing with "anti-leech" checks (set a Referer)
 
headers = {
    'Referer':'http://www.cnbeta.com/articles'
}
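The Referer header is attached the same way as the User-Agent in #4, by passing the dict to urllib2.Request; for example (the target URL is a placeholder):

req = urllib2.Request(
    # placeholder target URL
    url = 'http://www.cnbeta.com/articles',
    headers = headers
)
content = urllib2.urlopen(req).read()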

(6) Multithreading
from threading import Thread
from Queue import Queue
from time import sleep
# q is the task queue
# NUM is the number of concurrent worker threads
# JOBS is how many jobs there are
q = Queue()
NUM = 2
JOBS = 10
# this is the worker's real job: process one piece of data
def do_somthing_using(arguments):
    print arguments
# the work loop: keep taking jobs from the queue and processing them
def working():
    while True:
        arguments = q.get()
        do_somthing_using(arguments)
        sleep(1)
        q.task_done()
# fork NUM worker threads waiting on the queue
for i in range(NUM):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()
# put JOBS jobs into the queue
for i in range(JOBS):
    q.put(i)
# wait for all JOBS to finish
q.join()
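Applied to crawling, the worker would typically fetch a URL rather than just print its argument; a minimal sketch along the same lines (the URL list is a placeholder):

from threading import Thread
from Queue import Queue
import urllib2

q = Queue()
NUM = 2

# the worker: take one URL from the queue, fetch it, report its size
def working():
    while True:
        url = q.get()
        try:
            html = urllib2.urlopen(url).read()
            print url, len(html)
        except urllib2.URLError, e:
            print url, e
        q.task_done()

# start NUM daemon worker threads
for i in range(NUM):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()

# placeholder URL list
for url in ['http://www.baidu.com/', 'http://www.cnbeta.com/']:
    q.put(url)
q.join()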
