# 用户键入分区名字,生成 csv 并计算分区人气。
import requests
from lxml import etree
import csv
import json
# Browser-like User-Agent so Huya serves the normal desktop markup.
headers = {
    "user-agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36"
    )
}
classification_url = 'https://www.huya.com/g'
# Fetch the category index once up front; `tree` is queried repeatedly later.
index_page = requests.get(classification_url, headers=headers).content
tree = etree.HTML(index_page)
# Interactive loop: the user types a category (分区) name; we resolve it to a
# game id on the index page, page through Huya's JSONP live-list endpoint,
# report popularity stats, and dump all rooms to <name>.csv.
while True:
    name = input("请输入想要查看的分区(0退出):")
    if name == "0":
        break
    # Explicit emptiness check instead of a bare `except:` that would also
    # swallow network errors, KeyboardInterrupt, etc.
    hrefs = tree.xpath(f'//ul[@id="js-game-list"]/li[@title="{name}"]/a/@href')
    if not hrefs:
        print("没有找到该分区")
        continue
    href = hrefs[0]
    detailed_html = requests.get(href, headers=headers).content
    detailed_tree = etree.HTML(detailed_html)
    # Numeric game id used by the cache.php live-list API.
    number = tree.xpath(f'//ul[@id="js-game-list"]/li[@title="{name}"]/a/@data-gid')[0]
    # Page count lives in the pager's data-pages attribute; select the div by
    # that attribute (the original XPath `//div[@]` was malformed).
    pages_attr = detailed_tree.xpath('//div[@data-pages]/@data-pages')
    page = (int(pages_attr[0]) if pages_attr else 1) + 1
    storage = []   # one row per live room: [nick, popularity, room id, host id, room title]
    rq_zong = []   # popularity (totalCount) per room, for max/sum stats
    for i in range(1, page):
        json_url = (
            'https://www.huya.com/cache.php?m=LiveList&do=getLiveListByPage'
            f'&gameId={number}&tagAll=0&callback=getLiveListJsonpCallback&page={i}'
        )
        text = requests.get(json_url, headers=headers).text
        # The endpoint returns JSONP: getLiveListJsonpCallback({...}).
        # Strip the wrapper by slicing from the first '{' to the last '}'.
        data_all = json.loads(text[text.find('{'):text.rfind('}') + 1])
        for j in data_all['data']['datas']:
            storage.append([j['nick'], j['totalCount'], j['profileRoom'],
                            j['privateHost'], j['roomName']])
            rq_zong.append(int(j['totalCount']))
    if not rq_zong:
        # Guard: max() on an empty list would raise ValueError.
        print("该分区当前没有直播间")
        continue
    position = rq_zong.index(max(rq_zong))
    # str() guards against totalCount being a number rather than a string.
    print("该分区最高人气是" + storage[position][0] + ",人气为" + str(storage[position][1]))
    print("该分区总人气为:" + str(sum(rq_zong)))
    # Context manager guarantees the CSV is closed even if writing fails.
    with open(f'{name}.csv', 'w', newline='', encoding='utf-8') as huya_csv:
        writer = csv.writer(huya_csv)
        writer.writerow(['主播', '人气', '房间id', '主播id', '房间名'])
        writer.writerows(storage)



