Based on an example from an imooc course; course page: http://www.imooc.com/learn/563
Take the Baidu Baike page for the “Python” entry as an example.
The content to crawl: 1) the entry title “Python” plus the entry summary (the paragraph beginning “Python (英国发音…”); 2) the pages it links to, such as the “面向对象” (object-oriented) and “计算机程序设计语言” (computer programming language) entries, taking each linked page's title and summary in the same way, until 1000 pages have been crawled.
Requirements: 1) Python 2.x with BeautifulSoup installed; 2) Windows; 3) a text editor.
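A note on item 1: assuming pip is available, the BeautifulSoup package used below can be installed with:

    pip install beautifulsoup4

The import name is bs4, as the parser module later on shows.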
Following object-oriented design, the crawler is split into the following modules: 1) the main scheduler, spider_main.py; 2) the URL manager, url_manager.py; 3) the HTML downloader, html_downloader.py; 4) the HTML parser (BeautifulSoup + regex), html_parser.py; 5) the result outputer, html_outputer.py.
1) The main scheduler, spider_main.py:
# spider_main.py
# -*- coding: utf-8 -*-
# Created by linxiaobai on 2016-09-13
# Main scheduler: starting from an entry URL, crawls the related pages.
import url_manager, html_downloader, html_parser, html_outputer

class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()                 # URL manager
        self.downloader = html_downloader.HtmlDownloader()   # HTML downloader
        self.parser = html_parser.HtmlParser()               # HTML parser: BeautifulSoup + regex
        self.outputer = html_outputer.HtmlOutputer()         # writes the results to an HTML file

    # craw(self, root_url): the crawl loop.
    # param root_url: the entry URL.
    # Starting from the entry URL, crawl the linked pages one by one,
    # collecting each page's title and summary (the goal is 1000 pages;
    # the counter below stops at 10, presumably for testing).
    def craw(self, root_url):
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print "craw %d: %s" % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 10:   # raise to 1000 for the full crawl
                    print "finished!"
                    break
                count = count + 1
            except:
                print "craw failed!"
        self.outputer.output_html()

if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
2) The URL manager, url_manager.py:

# url_manager.py
# -*- coding: utf-8 -*-
# Created by linxiaobai on 2016-09-13
# URL manager
class UrlManager(object):
    # new_urls: URLs not yet crawled; old_urls: URLs already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    # Add a single URL to the new-URL set
    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # Add a batch of URLs to the new-URL set
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    # Hand out an uncrawled URL and move it to the crawled set
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    # Check whether any uncrawled URLs remain
    def has_new_url(self):
        return len(self.new_urls) != 0
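As a quick sanity check of the dedup behavior (my own snippet, not part of the course code; the URL is just an example):

# check_url_manager.py -- illustrative only
import url_manager

manager = url_manager.UrlManager()
manager.add_new_url("http://baike.baidu.com/view/21087.htm")
manager.add_new_url("http://baike.baidu.com/view/21087.htm")  # duplicate: ignored
print manager.has_new_url()   # True
url = manager.get_new_url()   # moves the URL into old_urls
manager.add_new_url(url)      # already crawled: ignored
print manager.has_new_url()   # False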
3) The HTML downloader, html_downloader.py. The downloader is the simplest module: it uses the urllib2 package to fetch a page's HTML.

# html_downloader.py
# -*- coding: utf-8 -*-
# Created by linxiaobai on 2016-09-13
# HTML downloader
import urllib2

class HtmlDownloader(object):
    def __init__(self):
        pass

    def download(self, url):
        if url is None:
            return None
        print "html downloading"
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            print "bad response code: %d" % response.getcode()
            return None
        return response.read()
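If the plain urlopen call is ever blocked or served unexpected content, one common workaround (my suggestion, not something the course covers) is to send a browser-like User-Agent header through urllib2.Request:

# A download() variant that sets a User-Agent header -- optional hardening,
# not part of the original course code.
import urllib2

def download(url):
    if url is None:
        return None
    request = urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0"})  # illustrative value
    response = urllib2.urlopen(request)
    if response.getcode() != 200:
        return None
    return response.read()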
4) The HTML parser (BeautifulSoup + regex), html_parser.py. The key task in this module is analyzing the target pages: working out how the entry title, the summary, and the entry links are formatted in the HTML document, and then designing BeautifulSoup + regex matching rules that extract exactly those parts.

# html_parser.py
# -*- coding: utf-8 -*-
# Created by linxiaobai on 2016-09-13
# HTML parser: BeautifulSoup + regex
from bs4 import BeautifulSoup
import re
import urlparse

class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        # build a BeautifulSoup instance over the downloaded page
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    # Extract all entry links from the page
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Use BeautifulSoup + regex to match links of the form:
        # <a target="_blank" href="/view/20965.htm">自由软件</a>
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    # Extract the entry title and summary from the page
    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # The title appears in markup of the form:
        # <dd class="lemmaWgt-lemmaTitle-title">
        #     <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()   # text only, without the tags
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
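To see what the link-matching rule does in isolation, here is a small standalone check (the HTML fragment is invented for illustration):

# demo_link_rule.py -- the snippet of HTML below is made up
from bs4 import BeautifulSoup
import re
import urlparse

html = ('<a target="_blank" href="/view/20965.htm">match me</a>'
        '<a href="/item/other.htm">not matched</a>')
soup = BeautifulSoup(html, 'html.parser')
for link in soup.find_all('a', href=re.compile(r"/view/\d+\.htm")):
    # urljoin resolves the relative href against the page URL:
    # prints http://baike.baidu.com/view/20965.htm
    print urlparse.urljoin("http://baike.baidu.com/view/21087.htm", link['href'])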
5) The result outputer, html_outputer.py:

# html_outputer.py
# -*- coding: utf-8 -*-
# Created by linxiaobai on 2016-09-13
# Writes the crawl results out as an HTML file
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<head>")
        fout.write("<meta charset='utf-8'/>")
        fout.write("</head>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
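Since get_text() returns unicode under Python 2, the version above encodes each field by hand before writing. An alternative sketch (my variation, not the course version) opens the file through the codecs module so the encoding happens automatically:

# An output_html() variant using codecs -- a sketch, not the course code.
import codecs

def output_html(self):
    fout = codecs.open('output.html', 'w', encoding='utf-8')
    fout.write(u"<html><head><meta charset='utf-8'/></head><body><table>")
    for data in self.datas:
        # unicode goes straight in; codecs encodes it on the fly
        fout.write(u"<tr><td>%s</td><td>%s</td><td>%s</td></tr>"
                   % (data['url'], data['title'], data['summary']))
    fout.write(u"</table></body></html>")
    fout.close()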