栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

python爬虫豆瓣TOP250电影信息并写入数据库

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

python爬虫豆瓣TOP250电影信息并写入数据库

初步完成一个页面的数据爬取与写入数据库

import requests
import pymysql
from loguru import logger
from lxml import etree

# --- Database setup: connect, then (re)create the `movie` table ---
# FIX: utf8mb4 instead of the legacy 3-byte 'utf8' so 4-byte UTF-8 characters
# that can occur in scraped text do not raise an encoding error on insert.
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234', db='db', charset='utf8mb4')
logger.info("正在连接到数据库")
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS movie")

# BUG FIX: the original split this statement across two top-level lines with no
# parentheses or continuation, which is a SyntaxError; implicit string
# concatenation inside parentheses joins the fragments into one statement.
# (Column `sorce` is a typo for "score", but the INSERT below uses the same
# name, so it is kept for consistency within this script.)
sql = ('CREATE TABLE movie(name CHAr(255),year int,country char(255),director char(255),type CHAr(20),sorce char(20), '
       'notes char(255)) ')
cursor.execute(sql)
logger.info("创建表格")


def get_html(timeout=10):
    """Fetch the first page of the Douban Top-250 list and return its HTML.

    Args:
        timeout: seconds to wait for the HTTP response. Added so a stalled
            connection cannot hang the script forever (backward-compatible
            default).

    Returns:
        The response body decoded to ``str``.

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    # Desktop-browser User-Agent: Douban serves an error page to bare clients.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/92.0.4515.159 Safari/537.36'}
    url = 'https://movie.douban.com/top250'
    resp = requests.get(url, headers=headers, timeout=timeout)
    try:
        # FIX: close the response even if .text raises (decode error),
        # instead of only on the success path.
        return resp.text
    finally:
        resp.close()


if __name__ == '__main__':
    logger.info("执行主程序...")
    xhtml = etree.HTML(get_html())
    logger.info("...")

    # Each XPath yields one entry per film on the page, so the result lists
    # are index-aligned: element i of every list describes the same movie.
    names = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[1]/a/span[1]/text()')
    logger.info("已完成电影名字的抓取...")

    notes = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[2]/span/text()')
    logger.info("已完成电影标语的抓取...")

    sorce = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/div/span[2]/text()')
    logger.info("已完成电影评分的抓取...")

    # p[1]/text()[2] is the "year / country / genre" info line; it is parsed
    # below purely by position (slice + split), which is fragile.
    years = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]')
    logger.info("已完成电影年份,国家,类型的抓取...")

    # p[1]/text()[1] is the "导演: ... 主演: ..." line.
    directors = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[1]')
    logger.info("已完成电影导演的抓取...")

    # Hoisted out of the loop: the statement text is invariant per row.
    sql = "insert into movie(name,year,country,sorce,director,type,notes) values(%s,%s,%s,%s,%s,%s,%s)"
    for i in range(len(sorce)):
        # years[i][29:33]          -> 4-digit year by fixed offset (fragile).
        # years[i].split("/")[-1]  -> LAST slash field; inserted as `country`.
        # years[i].split("/")[-2]  -> second-to-last field; inserted as `type`.
        # NOTE(review): if the line is "year / country / genre", [-1] is the
        # genre and [-2] the country — these two look swapped. Kept as-is to
        # preserve behavior; confirm against the live page before relying on
        # the `country`/`type` columns.
        # directors[i].split(':')[1][:-2] -> text after the first colon with
        # the trailing two characters dropped (strips the "主演" suffix start).
        par = (names[i], years[i][29:33], years[i].split("/")[-1], sorce[i],
               (directors[i].split(':')[1])[:-2], years[i].split("/")[-2], notes[i])
        cursor.execute(sql, par)
    # FIX: one commit for the whole page instead of one commit per row.
    db.commit()
    logger.info("数据已经写入表格中...")

    db.close()

# class linkMysql():
#     def __init__(self):
#         self.db = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234', db='db', charset='utf8')
#         logger.info("正在连接到数据库")
#         self.cursor = self.db.cursor()

以上面为模板,找到豆瓣翻页后网址的规律,重新封装函数 

去掉了 notes 字段(不再写入数据库)

import requests
import pymysql
from loguru import logger
from lxml import etree
from faker import Faker

# --- Database setup: connect, then (re)create the `movie` table ---
# FIX: utf8mb4 instead of the legacy 3-byte 'utf8' so 4-byte UTF-8 characters
# that can occur in scraped text do not raise an encoding error on insert.
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234', db='db', charset='utf8mb4')
logger.info("正在连接到数据库")
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS movie")

# This version drops the `notes` column and names the rating column `score`
# (the first version misspelled it `sorce`).
sql = 'CREATE TABLE	movie(name CHAr(255),year int,country char(255),director char(255),type varchar(255),score char(20)) '
cursor.execute(sql)
logger.info("创建表格")


def get_xhtml(num=0, timeout=10):
    """Fetch one Top-250 listing page and return it as a parsed lxml tree.

    Douban paginates via a ``start`` offset in the query string: page k
    begins at item 25*k, so callers pass ``num=25*k``.

    Args:
        num: zero-based item offset for the ``start`` query parameter.
        timeout: seconds to wait for the HTTP response. Added so a stalled
            connection cannot hang the crawl (backward-compatible default).

    Returns:
        The page HTML parsed with ``etree.HTML``.

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    # Browser-like headers: Douban serves an error page to bare clients.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/92.0.4515.159 Safari/537.36',
        'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                  "application/signed-exchange;v=b3;q=0.9 ",
        'origin': 'https://movie.douban.com'
    }

    url = f'https://movie.douban.com/top250?start={num}&filter='

    logger.info("正在发送请求...")
    resp = requests.get(url, headers=headers, timeout=timeout)
    logger.info("正在响应...")
    try:
        # FIX: close the response even if .text raises, not only on success.
        html = resp.text
    finally:
        resp.close()
    return etree.HTML(html)


if __name__ == '__main__':
    # Hoisted out of both loops: the statement text is invariant per row.
    sql = "insert into movie(name,year,country,score,director,type) values(%s,%s,%s,%s,%s,%s)"
    for i in range(10):  # 10 pages x 25 films = the full Top 250
        logger.info("执行主程序...")
        xhtml = get_xhtml(25 * i)
        logger.info(f"-----正在抓取第{i + 1}页内容-----")
        # Each XPath yields one entry per film, so the lists are index-aligned.
        names = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[1]/a/span[1]/text()')
        logger.info("已完成电影名字的抓取...")
        # FIX: dropped the unused `notes` XPath — this version of the table
        # has no `notes` column, so scraping it was wasted work.
        sorce = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/div/span[2]/text()')
        logger.info("已完成电影评分的抓取...")
        # p[1]/text()[2] is the "year / country / genre" info line.
        years = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]')
        logger.info("已完成电影年份,国家,类型的抓取...")
        # p[1]/text()[1] is the "导演: ... 主演: ..." line.
        directors = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[1]')
        logger.info("已完成电影导演的抓取...")

        for j in range(len(sorce)):
            # years[j][29:33]          -> 4-digit year by fixed offset (fragile).
            # years[j].split("/")[-1]  -> LAST slash field; inserted as `country`.
            # years[j].split("/")[-2]  -> second-to-last field; inserted as `type`.
            # NOTE(review): if the line is "year / country / genre", these two
            # look swapped. Kept as-is to preserve behavior; confirm against
            # the live page before relying on the `country`/`type` columns.
            par = (names[j], years[j][29:33], years[j].split("/")[-1], sorce[j],
                   (directors[j].split(':')[1])[:-2], years[j].split("/")[-2])
            cursor.execute(sql, par)
        # FIX: one commit per page instead of one commit per row.
        db.commit()
        logger.info("数据已经写入表格中...")

    # FIX: the original left db.close() commented out, leaking the connection.
    db.close()

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/293698.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号