

Crawling Data from 1000 Baidu Baike Pages

Package: baike_spider
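The project is one package: an entry script plus four helper modules (URL manager, downloader, parser, outputer). A sketch of the layout follows; note that the directory needs an (empty) __init__.py so the `from baike_spider import ...` line in spider_main.py resolves when the script is run from the package's parent directory — the original post does not show this file, so it is an assumption here:

baike_spider/
    __init__.py         # empty; marks the directory as a package (assumed)
    spider_main.py      # entry point and scheduling loop
    url_manager.py      # tracks pending vs. crawled URLs
    html_downloader.py  # fetches raw page bytes
    html_parser.py      # extracts links and lemma data
    html_outputer.py    # writes the results to output.html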

spider_main.py
# coding: utf-8
from baike_spider import url_manager, html_downloader, html_parser, html_outputer
import urllib.parse


class SpiderMain(object):
    def __init__(self):
        # Wire the four collaborators together.
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                html_cont = self.downloader.download(new_url)
                # unquote turns percent-encoded Chinese back into readable text
                print("craw %d: %s" % (count, urllib.parse.unquote(new_url, encoding='utf-8')))
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)

                if count == 1000:  # stop after 1000 pages
                    break
                count += 1
            except Exception as e:
                print("spider failed:", e)

        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313?fr=aladdin"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)

url_manager.py

# coding: utf-8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # Only queue URLs we have never seen before.
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
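A quick way to see the manager's de-duplication in action (the URLs below are placeholders, not real lemma pages):

from baike_spider.url_manager import UrlManager

manager = UrlManager()
manager.add_new_urls(['https://baike.baidu.com/item/A',
                      'https://baike.baidu.com/item/A'])  # duplicate is dropped
print(manager.has_new_url())   # True
url = manager.get_new_url()    # moves the URL into old_urls
manager.add_new_url(url)       # ignored: already crawled
print(manager.has_new_url())   # False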

html_downloader.py
# coding: utf-8
import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None

        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None

        return response.read()   # raw bytes; decoded later by the parser
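The bare urlopen call sends urllib's default User-Agent. If the site ever starts rejecting it, a common variant — not part of the original code, and the header value below is only illustrative — is to wrap the URL in a Request with a browser-like header:

import urllib.request

def download_with_headers(url):
    if url is None:
        return None
    # Hypothetical helper: same contract as HtmlDownloader.download,
    # but with an explicit User-Agent header.
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as response:
        if response.getcode() != 200:
            return None
        return response.read()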

html_parser.py

# coding: utf-8
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Baike entry links look like /item/<name>; note the escaped \w
        links = soup.find_all('a', href=re.compile(r'/item/\w+'))
        for link in links:
            new_url = link['href']
            # Resolve the relative href against the current page URL.
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
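A self-contained check of the link-extraction rule on a made-up snippet: only the /item/ link survives the regex filter, and urljoin resolves it against the page URL:

from bs4 import BeautifulSoup
from baike_spider.html_parser import HtmlParser

html = b'<a href="/item/Guido">Guido</a> <a href="/other/x">skip</a>'
soup = BeautifulSoup(html, 'html.parser')
print(HtmlParser()._get_new_urls('https://baike.baidu.com/item/Python/407313', soup))
# {'https://baike.baidu.com/item/Guido'}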

html_outputer.py

# coding: utf-8
import urllib.parse


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # Dump everything collected into a simple three-column HTML table.
        fout = open('output.html', 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write('<head><meta charset="utf-8"></head>')
        fout.write('<body>')
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % urllib.parse.unquote(data['url'], encoding='utf-8'))
            fout.write('<td>%s</td>' % urllib.parse.unquote(data['title'], encoding='utf-8'))
            fout.write('<td>%s</td>' % urllib.parse.unquote(data['summary'], encoding='utf-8'))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
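Feeding the outputer one hand-made record (sample data only) verifies the table layout without running the whole crawl:

from baike_spider.html_outputer import HtmlOutputer

outputer = HtmlOutputer()
outputer.collect_data({
    'url': 'https://baike.baidu.com/item/Python/407313',
    'title': 'Python',
    'summary': 'placeholder summary text',
})
outputer.output_html()   # writes output.html with a single table row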