Web Crawler for Images
Python code:
# coding:utf-8
import json
import os
import re
import socket
import time
import urllib.error
import urllib.parse
import urllib.request

timeout = 5
socket.setdefaulttimeout(timeout)
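# note: setdefaulttimeout() applies process-wide, so every urllib request
# below inherits the 5-second timeout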

class Crawler:
    __time_sleep = 0.1  # sleep interval between downloads
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # fetch the URL information of the pictures
    # t is the interval between downloads
    def __init__(self, t=0.1):
        self.__time_sleep = t

    # get the suffix (file extension) of a picture name
    def get_suffix(self, name):
        m = re.search(r'\.[^\.]*$', name)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpg'
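    # e.g. get_suffix('http://example.com/img/photo.png') returns '.png';
    # names with no dot, or an implausibly long "extension", fall back to '.jpg'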

    # generate the referrer from the image URL
    def get_referrer(self, url):
        par = urllib.parse.urlparse(url)
        if par.scheme:
            return par.scheme + '://' + par.netloc
        else:
            return par.netloc
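    # e.g. get_referrer('http://img.example.com/pics/1.jpg') returns
    # 'http://img.example.com', which is later sent as the Referer header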

    def save_image(self, rsp_data, word):  # save the images from one result page
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.__time_sleep)
                suffix = self.get_suffix(image_info['objURL'])
                refer = self.get_referrer(image_info['objURL'])  # reduces 403 errors
                opener = urllib.request.build_opener()
                opener.addheaders = [
                    ('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'),
                    ('Referer', refer)]
                urllib.request.install_opener(opener)
                urllib.request.urlretrieve(image_info['objURL'],
                                           './' + word + '/' + str(self.__counter) + suffix)
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("unknown error, skipping this image")
                continue
            else:
                print("+1 image, " + str(self.__counter) + " images so far")
                self.__counter += 1
        return
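    # note: install_opener() replaces urllib's global opener, and urlretrieve()
    # goes through that global opener, so it is rebuilt on every iteration to
    # refresh the per-image Referer header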

    def get_images(self, word='beautiful girls'):
        search = urllib.parse.quote(word)
        pn = self.__start_amount
        while pn < self.__amount:
            url = ('http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word='
                   + search + '&cg=girl&pn=' + str(pn)
                   + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e')
            page = None
            try:
                time.sleep(self.__time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print('-----URLError url:', url)
            except socket.timeout as e:
                print(e)
                print('-----socket timeout:', url)
            else:
                rsp_data = json.loads(rsp)
                self.save_image(rsp_data, word)
                print("download the next page")
                pn += 60
            finally:
                if page is not None:
                    page.close()
        print("Finished")
        return
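    # each request asks for rn=60 results, so pn (the result offset) advances
    # by 60 per page; the response body is escaped JSON, hence the
    # 'unicode_escape' decode before json.loads()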

    def start(self, word, spider_page_num=1, start_page=1):
        """
        Crawl entry point.
        :param word: keywords to search for
        :param spider_page_num: number of pages to fetch; the total picture count is spider_page_num x 60
        :param start_page: page to start from
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.get_images(word)
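    # e.g. start('cats', 2, 3) gives __start_amount = 120 and __amount = 240,
    # so get_images() requests offsets 120 and 180 (pages 3 and 4, up to 120 images)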

if __name__ == '__main__':
    crawler = Crawler(0.05)  # download interval: 0.05 s
    # crawler.start('beautiful girls', 10, 2)  # keyword "beautiful girls", 10 pages (10 x 60 = 600 pictures), starting from page 2
    # crawler.start("actors", 5, 3)  # keyword "actors", 5 pages (5 x 60 = 300 pictures), starting from page 3
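
Both sample calls above are commented out, so the script downloads nothing as-is. A minimal session might look like the following (the keyword, page count, and start page are arbitrary examples):

crawler = Crawler(0.05)           # pause 0.05 s between downloads
crawler.start('landscape', 2, 1)  # pages 1-2, up to 120 images saved under ./landscape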