A Python crawler that grabs images from Google image search results
# -*- coding: utf-8 -*-
# Python 2 script: scrape the base64-encoded thumbnails inlined in a Google image-search page.
import mechanize
from BeautifulSoup import BeautifulSoup   # BeautifulSoup 3
import cookielib, re
import base64
import StringIO
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/4.0.0')]
br.open("http://www.google.com.hk/search?um=1&newwindow=1&safe=strict&hl=zh-CN&biw=1920&bih=473&tbm=isch&sa=1&q=car&btnG=")
html = br.response().read()
# The thumbnails are embedded in the page's <script> blocks as
# "data:image/jpeg;base64,...';" data URIs; extract those base64 payloads.
content = BeautifulSoup(html).findAll('script')
pattern = re.compile(r"(?<=data:image/jpeg;base64,).*(?=';)")
count = 0
for item in content:
    # print item   # debug: inspect the raw <script> block
    match = pattern.search(str(item))
    if match:
        dataStr = match.group()
        # This line is the key (it cost me most of a day): when the base64 data is not a
        # multiple of 4 in length, the page pads it with '\x3d' instead of '=', so the
        # escape must be replaced with '=' before decoding (0x3d is the ASCII code for '=').
        dataStr = dataStr.replace(r'\x3d', '=')
        bin = StringIO.StringIO(dataStr)
        bout = open('d:\\out\\data%d.jpeg' % count, 'wb')
        base64.decode(bin, bout)   # decode from the string buffer straight into the output file
        bout.close()
        count += 1
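
The '\x3d' replacement is the crux of the script, so here is a minimal standalone sketch of the same fix, using a hypothetical toy payload rather than real Google data: the escaped padding has to be turned back into '=' before the string is valid base64.

# Minimal sketch of the '\x3d' -> '=' padding fix (toy payload, not real image data).
import base64

escaped = r"aGVsbG8sIHdvcmxkIQ\x3d\x3d"   # literal backslash-x-3-d where '==' padding belongs
fixed = escaped.replace(r'\x3d', '=')      # same replacement the crawler applies
print(base64.b64decode(fixed))             # -> 'hello, world!'

Once the padding is restored, base64.decode (as used in the loop above) or base64.b64decode yields the raw JPEG bytes that get written out as d:\out\data%d.jpeg.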