#导入模块
import requests
import pandas as pd
import time
from bs4 import BeautifulSoup
import jieba
from matplotlib import pyplot as plt
from wordcloud import WordCloud
from PIL import Image
import numpy as np
import pandas as pd
# Request setup: send a normal browser User-Agent so eastmoney does not reject us.
header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"}
url_1 = "http://gubaf10.eastmoney.com/list,zssh000001,99_"
url_2 = ".html"
# Crawl 70 list pages; append one CSV row per post to guba.csv.
for ii in range(70):
    url = url_1 + str(ii + 1) + url_2
    html = requests.get(url, headers=header)
    soup = BeautifulSoup(html.content, 'lxml')
    # read count column
    read_counts = soup.find_all('span', attrs={'class': 'l1 a1'})
    # comment count column
    comment_counts = soup.find_all('span', attrs={'class': 'l2 a2'})
    # title column
    title_counts = soup.find_all('span', attrs={'class': 'l3 a3'})
    # author column
    author_counts = soup.find_all('span', attrs={'class': 'l4 a4'})
    # update-time column
    time_counts = soup.find_all('span', attrs={'class': 'l5 a5'})
    # The first span in each column is the table header, so rows start at
    # index 1 (hence the i+1 offsets and the len()-1 bound).
    for i in range(len(read_counts) - 1):
        data1 = [(read_counts[i + 1].string,
                  comment_counts[i + 1].string,
                  title_counts[i + 1].find(name='a').get('title'),
                  author_counts[i + 1].find(name='font').string,
                  time_counts[i + 1].string)]
        # BUG FIX: pd.Dataframe does not exist (AttributeError) — the class
        # is pd.DataFrame.
        data2 = pd.DataFrame(data1)
        data2.to_csv('guba.csv', header=False, index=False, mode='a+')
    print('page' + str(ii + 1) + ' has done')
    # Be polite to the server: one request per second.
    time.sleep(1)
# Build the word-cloud text corpus from the crawled data.
data = pd.read_csv("guba.csv", header=None,
                   names=['阅读', '评论', '标题', '作者', '更新时间'])
# BUG FIX: the original did str(DataFrame), which uses pandas' display repr —
# for long frames that repr is truncated ("...") so most titles never reach
# the word cloud, and the index/column labels leak into the text. Join the
# actual title values instead.
titles = ' '.join(data['标题'].dropna().astype(str))
# Tokenize with jieba; WordCloud expects whitespace-separated terms.
cut = jieba.cut(titles)
string = ' '.join(cut)
print(string)
print(len(string))
# Render the word cloud, masked by the silhouette image 86.jpg.
img = Image.open('86.jpg')
img_array = np.array(img)  # WordCloud takes the mask as a numpy array
wc = WordCloud(
    background_color='white',
    mask=img_array,
    font_path="STXINWEI.TTF"  # CJK-capable font; the default renders Chinese as boxes
)
wc.generate_from_text(string)  # feed the pre-tokenized, space-joined text
# Draw and export the figure.
fig = plt.figure(1)  # create the figure
plt.rcParams['font.sans-serif'] = 'SimHei'  # font able to display Chinese
plt.imshow(wc)  # display the word cloud bitmap
plt.axis('off')  # hide the axes
# BUG FIX: savefig must run BEFORE show(). plt.show() blocks and clears the
# figure when its window closes, so saving afterwards writes a blank image.
plt.savefig('8.png', dpi=1000)  # dpi controls the output resolution
plt.show()  # display the generated word cloud