A crawler is like a spider that continuously crawls the web: whenever it encounters a resource it needs, it grabs it (the HTML content).
It works by simulating a browser and rapidly requesting page content.
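Why does the impersonation matter? By default urllib announces itself with a User-Agent like "Python-urllib/3.x", which many sites reject outright. A minimal sketch, assuming http://httpbin.org/get (a public request-echo service) is reachable, showing the headers a server actually receives:

from urllib.request import urlopen

# httpbin echoes the request back as JSON; the "User-Agent" field will
# read "Python-urllib/3.x" because no header was set explicitly.
print(urlopen('http://httpbin.org/get').read().decode('utf-8'))

Common browsers/platforms to impersonate, with sample User-Agent strings: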
1. Android
2. Firefox
3. Google Chrome
   Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36
   Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19
4. iOS
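The snippet below requests the CBRC index page while presenting a Firefox User-Agent: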
from urllib.request import Request, urlopen

url = "http://www.cbrc.gov.cn/chinese/jrjg/index.html"
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
# Attach the spoofed User-Agent when building the request object.
reqObj = Request(url, headers={'User-Agent': user_agent})
content = urlopen(reqObj).read().decode('utf-8')
print(content)
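With the header set, the server sees a Firefox browser rather than urllib's default identifier. The fuller script below goes a step further: it rotates randomly among several User-Agents, scrapes each bank's name and official site from the page, and writes the valid entries to a file: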
import random
import re
from urllib.request import urlopen, Request
from urllib.error import URLError


def get_content(url):
    """Fetch the page content, simulating a browser to evade anti-crawler checks."""
    # Rotate among several User-Agents so no single browser identity
    # makes frequent requests and gets banned.
    user_agents = [
        "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
        "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    ]
    try:
        # The header can be passed at construction time, e.g.
        # Request(url, headers={'User-Agent': user_agent}),
        # or added afterwards with add_header().
        reqObj = Request(url)
        reqObj.add_header('User-Agent', random.choice(user_agents))
        content = urlopen(reqObj).read().decode('utf-8').replace('\t', ' ')
    except URLError as e:
        print(e)
        return None
    return content


def parser_content(content):
    """Parse the page content, extracting each bank's name and official URL."""
    pattern = r'<a href="(.*)" target="_blank" style="color:#08619D">\s+(.*)\s+</a>'
    bankinfos = re.findall(pattern, content)
    if not bankinfos:
        raise Exception("No matching information was found")
    return bankinfos


def main():
    url = "http://www.cbrc.gov.cn/chinese/jrjg/index.html"
    content = get_content(url)
    if content is None:
        return
    bankinfos = parser_content(content)
    # The doc/ directory must already exist.
    with open('doc/bankinfo.txt', 'w') as f:
        # Each match looks like ('http://www.cdb.com.cn/', '國家開發銀行\r').
        for bank in bankinfos:
            name = bank[1].rstrip()
            url = bank[0]
            # Write the entry only if the bank's URL is a well-formed link.
            pattern = r'^((https|http|ftp|rtsp|mms)?:\/\/)\S+'
            if re.search(pattern, url):
                f.write('%s: %s\n' % (name, url))
            else:
                print("%s has no official website" % name)
    print("Writing finished....")


if __name__ == '__main__':
    main()
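Rotating User-Agents is only one counter-measure. Another common one (not part of the script above; a minimal sketch with an arbitrary delay range) is to pause a random interval between requests so the traffic looks less machine-like:

import random
import time


def fetch_politely(urls, fetch):
    """Fetch each URL via fetch() (e.g. get_content above), pausing between requests."""
    results = []
    for url in urls:
        results.append(fetch(url))
        # Randomised pause; the 1-3 s range is illustrative, not a rule.
        time.sleep(random.uniform(1, 3))
    return results

Even with rotating User-Agents and delays, a busy crawler's IP can still get banned, which is what the IP-proxy questions below address.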
1. Why use a proxy? A single IP that requests a site too frequently will get banned.
2. How do you keep your IP from being banned?
3. How do you get proxy IPs?
https://www.xicidaili.com/ (a free proxy list provided by the Xici Daili site)
ProxyHandler ======> plays the role of Request()
Opener ======> plays the role of urlopen()
Install the Opener (install_opener)
4. How do you check whether the proxy works? http://httpbin.org/get
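The script below wires these three steps together and fetches http://httpbin.org/get through the proxies: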
from urllib.request import ProxyHandler, build_opener, install_opener, urlopen


def use_proxy(proxies, url):
    # 1. Build a ProxyHandler from the proxy mapping.
    proxy_support = ProxyHandler(proxies=proxies)
    # 2. Build an opener; an opener plays the role urlopen() normally does.
    opener = build_opener(proxy_support)
    # 3. Install the opener so plain urlopen() calls go through it.
    install_opener(opener)
    # Simulate a browser (an iPad Safari User-Agent here).
    user_agent = 'Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'
    opener.addheaders = [('User-agent', user_agent)]
    urlObj = urlopen(url)
    content = urlObj.read().decode('utf-8')
    return content


if __name__ == '__main__':
    url = 'http://httpbin.org/get'
    proxies = {'https': "111.177.178.167:9999", 'http': '114.249.118.221:9000'}
    print(use_proxy(proxies, url))
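httpbin's /get endpoint echoes the request back as JSON, so if the proxy is in effect the "origin" field shows the proxy's IP rather than yours. Free proxies die quickly, so it helps to test candidates before trusting them. A hedged sketch (check_proxy is an illustrative helper, and the sample addresses from the dict above are unlikely to still be live):

import json
from urllib.error import URLError
from urllib.request import ProxyHandler, build_opener


def check_proxy(proxy, timeout=5):
    """Return the origin IP httpbin reports through `proxy`, or None on failure."""
    opener = build_opener(ProxyHandler({'http': proxy}))
    try:
        resp = opener.open('http://httpbin.org/get', timeout=timeout)
        return json.loads(resp.read().decode('utf-8'))['origin']
    except (URLError, OSError) as e:
        print('proxy %s failed: %s' % (proxy, e))
        return None


if __name__ == '__main__':
    # Candidate proxies scraped from a list such as xicidaili (hypothetical values).
    for candidate in ['114.249.118.221:9000', '111.177.178.167:9999']:
        origin = check_proxy(candidate)
        if origin:
            print('proxy %s works, origin seen by server: %s' % (candidate, origin))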