channel_extract.py — the first-tier links here are what I call the big-category links:
```python
from bs4 import BeautifulSoup
import requests

start_url = 'http://lz.ganji.com/wu/'
host_url = 'http://lz.ganji.com/'

# Collect the category links from the second-hand-goods index page:
def get_channel_urls(url):
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    links = soup.select('.fenlei > dt > a')
    #print(links)
    for link in links:
        page_url = host_url + link.get('href')
        print(page_url)

#get_channel_urls(start_url)

channel_urls = '''
http://lz.ganji.com/jiaju/
http://lz.ganji.com/rirongbaihuo/
http://lz.ganji.com/shouji/
http://lz.ganji.com/bangong/
http://lz.ganji.com/nongyongpin/
http://lz.ganji.com/jiadian/
http://lz.ganji.com/ershoubijibendiannao/
http://lz.ganji.com/ruanjiantushu/
http://lz.ganji.com/yingyouyunfu/
http://lz.ganji.com/diannao/
http://lz.ganji.com/xianzhilipin/
http://lz.ganji.com/fushixiaobaxuemao/
http://lz.ganji.com/meironghuazhuang/
http://lz.ganji.com/shuma/
http://lz.ganji.com/laonianyongpin/
http://lz.ganji.com/xuniwupin/
'''
```
Taking my earlier 58.com crawl as an example, this step scrapes the links for every category in the second-hand market, which is what I mean by big-category links: find the common pattern in these links, print them out with a function, and store them as a multi-line string.
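The multi-line string is easy to consume later: splitting it on whitespace yields one URL per entry. A minimal sketch:

```python
from channel_extract import channel_urls

channels = channel_urls.split()
print(len(channels))  # 16 channels
print(channels[0])    # http://lz.ganji.com/jiaju/
```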
page_parsing.py
Let's look at the code first:
```python
# Import the libraries
from bs4 import BeautifulSoup
import requests
import pymongo  # the Python driver for MongoDB
import re
import time

# Connect to MongoDB and set up the database
client = pymongo.MongoClient('localhost', 27017)
ceshi = client['ceshi']                   # create the 'ceshi' database
ganji_url_list = ceshi['ganji_url_list']  # collection for listing links
ganji_url_info = ceshi['ganji_url_info']  # collection for item details
```
So what type is that `link` back in channel_extract.py, and what on earth is its get method? I later discovered that the type is
`<class 'bs4.element.Tag'>`

If we want to fetch a single attribute by itself, we can do it like this, for example, to see what its class is:
```python
print(soup.p['class'])
# ['title']
```
You can also use the get method and pass in the attribute name; the two are equivalent:
```python
print(soup.p.get('class'))
# ['title']
```
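To make this concrete, here is a tiny self-contained sketch (the HTML snippet is the classic example from the BeautifulSoup documentation, included just for illustration):

```python
from bs4 import BeautifulSoup

html = '<p class="title"><b>The Dormouse\'s story</b></p>'
soup = BeautifulSoup(html, 'lxml')

print(type(soup.p))         # <class 'bs4.element.Tag'>
print(soup.p['class'])      # ['title']
print(soup.p.get('class'))  # ['title']
# Unlike subscripting, get() returns None for a missing attribute
# instead of raising a KeyError:
print(soup.p.get('id'))     # None
```

Now let me paste the code: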
```python
# Scrape the detail-page links for every item in a channel:
def get_type_links(channel, num):
    list_view = '{0}o{1}/'.format(channel, str(num))
    #print(list_view)
    wb_data = requests.get(list_view)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # .pageBox marks a valid listing page; if the selector you copied from the
    # browser looks like div.pageBox > ul > li:nth-child(1) > a > span,
    # delete the :nth-child(1) part
    linkOn = soup.select('.pageBox')
    #print(linkOn)
    if linkOn:
        link = soup.select('.zz > .zz-til > a')
        link_2 = soup.select('.js-item > a')
        link = link + link_2
        #print(len(link))
        for linkc in link:
            linkc = linkc.get('href')
            ganji_url_list.insert_one({'url': linkc})
            print(linkc)
    else:
        pass
```
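Before moving on, it may help to see what the `'{0}o{1}/'` template actually produces; a quick sketch, using a channel URL from the list above:

```python
channel = 'http://lz.ganji.com/jiaju/'
for num in range(1, 4):
    print('{0}o{1}/'.format(channel, str(num)))
# http://lz.ganji.com/jiaju/o1/
# http://lz.ganji.com/jiaju/o2/
# http://lz.ganji.com/jiaju/o3/
```

Let me paste another piece of code: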
```python
# Scrape the detail page of each Ganji listing:
def get_url_info_ganji(url):
    time.sleep(1)  # throttle requests a little
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    try:
        title = soup.select('head > title')[0].text
        timec = soup.select('.pr-5')[0].text.strip()
        type = soup.select('.det-infor > li > span > a')[0].text  # note: shadows the built-in
        price = soup.select('.det-infor > li > i')[0].text
        place = soup.select('.det-infor > li > a')[1:]
        placeb = []
        for placec in place:
            placeb.append(placec.text)
        tag = soup.select('.second-dt-bewrite > ul > li')[0].text
        tag = ''.join(tag.split())
        #print(timec.split())
        data = {
            'url': url,
            'title': title,
            'time': timec.split(),
            'type': type,
            'price': price,
            'place': placeb,
            'new': tag
        }
        ganji_url_info.insert_one(data)  # insert one document into the collection
        print(data)
    except IndexError:
        # some pages are missing fields (e.g. delisted items); skip them
        pass
```
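Once a few detail pages have been scraped, you can spot-check what landed in MongoDB; a minimal sketch, assuming the collection above already holds data:

```python
from page_parsing import ganji_url_info

doc = ganji_url_info.find_one()  # grab any one stored document
if doc:
    # the keys match the data dict built in get_url_info_ganji
    print(doc['title'])
    print(doc['price'], doc['place'])
```

main.py — let's look at the code: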
```python
# Import the functions and data from the other files:
from multiprocessing import Pool
from page_parsing import get_type_links, get_url_info_ganji, ganji_url_list
from channel_extract import channel_urls

# Scrape every listing page of one channel (pages 1-99):
def get_all_links_from(channel):
    for i in range(1, 100):
        get_type_links(channel, i)

# Step 2: run this afterwards to scrape all the detail pages:
# if __name__ == '__main__':
#     pool = Pool()
#     pool.map(get_url_info_ganji, [url['url'] for url in ganji_url_list.find()])
#     pool.close()
#     pool.join()

# Step 1: run this first to collect all the listing links:
if __name__ == '__main__':
    pool = Pool()
    pool.map(get_all_links_from, channel_urls.split())
    pool.close()
    pool.join()
```
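If the two-stage layout above looks odd, the key point is that pool.map simply fans the channel list out across worker processes. A standalone toy sketch (`crawl` is a hypothetical stand-in, not part of the project):

```python
from multiprocessing import Pool

def crawl(channel):
    # stand-in for get_all_links_from: just pull out the channel name
    return channel.rstrip('/').rsplit('/', 1)[-1]

if __name__ == '__main__':
    with Pool(4) as pool:
        results = pool.map(crawl, ['http://lz.ganji.com/jiaju/',
                                   'http://lz.ganji.com/shouji/'])
    print(results)  # ['jiaju', 'shouji']
```

count.py is used to show how many records have been scraped: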
```python
import time
from page_parsing import ganji_url_list, ganji_url_info

# Print the number of scraped records every five seconds:
while True:
    # print(ganji_url_list.find().count())
    # time.sleep(5)
    print(ganji_url_info.find().count())  # on pymongo 3.7+, prefer count_documents({})
    time.sleep(5)
```