栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

python爬虫 | 采集文章时自动替换标题

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

python爬虫 | 采集文章时自动替换标题

采集文章时自动替换标题,非伪原创标题,而是与原标题相关相似替换(下拉词|相关词)

from lxml import etree
import re, os, json
import requests
import random

# Pick a random User-Agent for each request so the crawler does not
# present a constant browser fingerprint to the target site.
def header():
    """Return a requests-style headers dict with one randomly chosen User-Agent."""
    user_agents = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:84.0) Gecko/20100101 Firefox/84.0",
        "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0",
        "Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00",
        "Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00",
        "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00",
    )
    return {"User-Agent": random.choice(user_agents)}

pass


# Write an article to a .txt file under a "采集库" folder on the Desktop,
# named after a suggestion-derived replacement heading.
def local(title, article):
    """Save *article* as ~/Desktop/采集库/<heading>.txt.

    The heading comes from translate(title) (a Baidu-suggestion title
    related to the original) and is scrubbed of characters that are
    illegal or awkward in filenames before being used as the file name.
    """
    heading = translate(title)
    # Character class instead of the original alternation: bare '*', '?'
    # and '+' directly after '|' are "nothing to repeat" errors in the re
    # module, so the old (…|*|…|?|…) pattern raised re.error at runtime.
    # NOTE(review): the literal 's' mirrors the original pattern — it may
    # have been meant as r'\s' (whitespace); confirm before changing.
    heading = re.sub(r'[”?!“:,【】《》s*@#%=?;`、/{}|^&~:—+"]', '', heading)
    path = os.path.join(os.path.expanduser('~'), "Desktop", '采集库')
    # exist_ok=True replaces the racy exists()/makedirs branch that
    # duplicated the whole write block in the original.
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, f'{heading}.txt'), 'w', encoding='utf-8') as f:
        f.write(article)
        print(f'【{title}】保存成功了')

pass

# Baidu drop-down suggestions: derive a related replacement heading.
def translate(title):
    """Return a suggestion-based replacement heading for *title*.

    Queries Baidu's suggestion endpoint, which answers with JSONP of the
    form ``window.baidu.sug({q:"...",p:false,s:["...", ...]})``; the
    slicing and key-quoting below turn that payload into valid JSON.
    Falls back to the original *title* when there are no suggestions or
    the payload cannot be parsed.
    """
    response = requests.get(f'http://suggestion.baidu.com/su?wd={title}',
                            headers=header(), timeout=10)
    # [17:-2] strips the 'window.baidu.sug(' wrapper and trailing ');';
    # the replaces quote the bare JS keys so json.loads accepts them.
    payload = (response.text[17:-2]
               .replace('q:', '"q":')
               .replace('p:false', '"p":"false"')
               .replace('s:', '"s":')
               .replace('p:true', '"p":"true"'))
    try:
        data = json.loads(payload)
    except ValueError:
        # The string surgery is fragile (a suggestion containing 'q:' or
        # 's:' corrupts it) — fall back to the original title instead of
        # aborting the whole crawl.
        return title
    if not data['s']:
        return title
    random_title = random.choice(data['s'])
    # Short suggestions get a second (possibly identical) suggestion
    # appended in full-width parentheses to lengthen the heading.
    if len(random_title) < 12:
        return random_title + '(' + random.choice(data['s']) + ')'
    return random_title


pass


以上 Python 代码不需要修改。

# Crawler (pcbaby.com.cn parenting channel as the example site).
def get_html(num):
    """Crawl list pages 1..num-1 of the pcbaby family channel and save each article.

    For every list page, collect the article links, fetch each article,
    extract title and body, sanitise the title for use as a filename and
    hand both to local() for writing to disk.  404 pages are skipped.
    """
    headers = header()
    for page in range(1, num):
        list_url = f'https://edu.pcbaby.com.cn/brain/family/index_{page}.html'
        list_resp = requests.get(list_url, headers=headers, timeout=10)
        list_doc = etree.HTML(list_resp.text)
        # Extract the article links from the list page.
        # NOTE(review): the empty attribute predicates '[@]' look like the
        # class filters were stripped when this code was copied — they are
        # not valid XPath and must be restored before this runs.
        links = list_doc.xpath('//div[@id="JaList"]/ul/li/dl[@]/dd[@]/p[@]/a/@href')
        # Distinct inner-loop names: the original reused i/url/html from the
        # outer loop, clobbering the page counter variable.
        for href in links:
            article_url = f'https:{href}'
            print('采集链接成功:', article_url)
            headers = header()
            article_resp = requests.get(article_url, headers=headers, timeout=10)
            article_resp.encoding = 'gbk'
            if article_resp.status_code == 404:
                print('URL打不开,跳过该页面')
                continue
            article_doc = etree.HTML(article_resp.text)
            # Content-page title (adjust the XPath per target site).
            title = article_doc.xpath('//h1[@]')[0].xpath('string(.)')
            # Content-page body text (adjust the XPath per target site).
            article = article_doc.xpath('//div[@]')[0].xpath('string(.)')
            # Character class instead of the original alternation: bare '*',
            # '?' and unescaped '(' / ')' after '|' made the old pattern
            # raise re.error ("nothing to repeat" / unbalanced group).
            title = re.sub(r'[”?!“:,【】《》s()()*@#%=?;`、]', '', title)
            title = (title.replace('-', '至').replace('~', '至').replace('~', '至')
                     .replace('|', '').replace('-', '至')
                     .replace('?', '').replace('?', ''))
            local(title, article)
pass



if __name__ == '__main__':
    # Number of list pages to crawl (range(1, 99) → pages 1..98).
    get_html(99)
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/357268.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号