栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

第7章 :爬取百度百科1000个页面的数据(源码)

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

第7章 :爬取百度百科1000个页面的数据(源码)

1、spider_main.py

#coding:utf8
import url_manager,html_downloader,html_outputer,html_parser

class SpiderMain(object):
    """Coordinates the crawl: URL bookkeeping, download, parse, output."""

    def __init__(self):
        # Collaborators live in sibling modules of this tutorial project.
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        """Crawl starting at root_url.

        Repeatedly pops a pending URL, downloads it, harvests new links
        and page data, and finally renders everything collected to
        output.html via the outputer.
        """
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                # Parenthesized print behaves identically on Python 2.7
                # (single argument) and is valid Python 3 syntax.
                print("craw %d:%s" % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                # NOTE(review): stops after 100 pages although the article
                # title advertises 1000 -- raise this limit if 1000 is meant.
                if count == 100:
                    break
                count += 1
            except Exception as e:
                # Best-effort crawl: one bad page must not abort the run.
                print(str(e))
                print("craw failed")

        self.outputer.output_html()

if __name__ == "__main__":
    # Seed the crawl with the Baidu Baike entry for Python.
    seed_url = "http://baike.baidu.com/item/Python"
    SpiderMain().craw(seed_url)

2、html_downloader.py

import urllib2

class HtmlDownloader(object):
    """Fetches raw page content over HTTP (Python 2 urllib2)."""

    def download(self, url):
        """Return the page body for url, or None on a missing url or a
        non-200 HTTP status."""
        if url is None:
            return None

        response = urllib2.urlopen(url)
        try:
            if response.getcode() != 200:
                return None
            return response.read()
        finally:
            # Bug fix: the response was never closed (and leaked on the
            # non-200 path); always release the connection.
            response.close()

3、html_parser.py

#encoding:utf8
from bs4 import  BeautifulSoup
import re
import urlparse

class HtmlParser(object):
    """Extracts outgoing /item/ links and the title/summary of a page."""

    def parse(self, page_url, html_content):
        """Return (set_of_new_urls, data_dict) for one downloaded page,
        or None when either argument is missing (the caller's try/except
        treats the resulting unpack failure as a failed crawl)."""
        if page_url is None or html_content is None:
            return None

        soup = BeautifulSoup(html_content, "html.parser", from_encoding="utf-8")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Collect absolute URLs for every /item/... link on the page."""
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/item/.+"))
        for link in links:
            new_url = link['href']
            # NOTE(review): the host is hard-coded; urlparse.urljoin(
            # page_url, new_url) would be the general form -- confirm
            # all links are site-relative before changing this.
            new_full_url = r"http://baike.baidu.com%s" % new_url
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Return {'url', 'title', 'summary'} scraped from the page.

        Presumably raises AttributeError when the expected title/summary
        nodes are absent (find() returns None); the crawler's except
        clause then logs the page as a failed crawl.
        """
        res_data = {}
        res_data["url"] = page_url

        # Title lives in <dd class="lemmaWgt-lemmaTitle-title"><h1>.
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data["title"] = title_node.get_text()

        # Summary lives in <div class="lemma-summary">.
        summary_node = soup.find('div', class_="lemma-summary")
        res_data["summary"] = summary_node.get_text()

        return res_data

4、url_manager.py

class UrlManager(object):
    """Tracks URLs pending crawl (new_urls) vs already crawled (old_urls)."""

    def __init__(self):
        self.new_urls = set()  # discovered but not yet handed out
        self.old_urls = set()  # already handed out by get_new_url

    def add_new_url(self, url):
        """Queue url unless it is None or already known (pending or crawled)."""
        if url is None:
            return None
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue a batch of URLs with the same de-duplication as
        add_new_url."""
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            # Bug fix: the original added straight into new_urls, so URLs
            # already in old_urls were re-queued and crawled again. Route
            # through add_new_url to honor both de-dup sets.
            self.add_new_url(url)

    def get_new_url(self):
        """Pop an arbitrary pending URL and mark it as crawled."""
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def has_new_url(self):
        """Return True while at least one URL is still pending."""
        return len(self.new_urls) != 0

5、html_outputer.py

class HtmlOutputer(object):
    """Accumulates scraped page dicts and renders them as an HTML table."""

    def __init__(self):
        self.datas = []  # list of {'url', 'title', 'summary'} dicts

    def collect_data(self, data):
        """Append one page's data dict; silently ignore None."""
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        """Write all collected rows to output.html as a three-column table.

        NOTE(review): the HTML tag literals were stripped from the scraped
        article (empty write("") calls); reconstructed here as the standard
        html/body/table layout implied by the surviving "%s%s%s" row format.
        """
        fout = open("output.html", "w")
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data["url"])
            # .encode("utf-8") matches this Python 2 tutorial's handling
            # of unicode title/summary text.
            fout.write("<td>%s</td>" % data["title"].encode("utf-8"))
            fout.write("<td>%s</td>" % data["summary"].encode("utf-8"))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        # Bug fix: the file handle was never closed, risking lost buffers.
        fout.close()
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/225850.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号