3. Performance rankings 4. Fund ratings 5. Fund managers 6. Fund companies
For these parts of the data, two approaches are worked through below. No lengthy explanation up front, straight to the code; readers who want more detail are welcome to get in touch.
Approach 1
# Open-end fund rankings
from selenium import webdriver
import pandas as pd
from bs4 import BeautifulSoup
import time
import datetime
url_dict = {
    "全部": "http://fund.eastmoney.com/data/fundranking.html#tall;c0;r;szzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "股票型": "http://fund.eastmoney.com/data/fundranking.html#tgp;c0;r;szzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "混合型": "http://fund.eastmoney.com/data/fundranking.html#thh;c0;r;szzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "债券型": "http://fund.eastmoney.com/data/fundranking.html#tzq;c0;r;szzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "指数型": "http://fund.eastmoney.com/data/fundranking.html#tzs;c0;r;szzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "QDII": "http://fund.eastmoney.com/data/fundranking.html#tqdii;c0;r;s6yzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "LOF": "http://fund.eastmoney.com/data/fundranking.html#tlof;c0;r;s6yzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb",
    "FOF": "http://fund.eastmoney.com/data/fundranking.html#tfof;c0;r;s6yzf;pn10000;ddesc;qsd20191227;qed20201227;qdii;zq;gg;gzbd;gzfs;bbzt;sfbb"
}
thead = ["基金代码", "基金简称", "日期", "单位净值", "累计净值", "日增长率", "近1周", "近1月",
         "近3月", "近6月", "近1年", "近2年", "近3年", "今年来", "成立来", "自定义", "手续费"]
def getData(key, url, xlsWriter):
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('user-agent={}'.format(
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'))
    # point executable_path at your local chromedriver
    driver = webdriver.Chrome(options=chrome_options,
                              executable_path="C:/Users/LENOVO/AppData/Local/Google/Chrome/Application/chromedriver.exe")
    driver.get(url)
    time.sleep(10)  # give the dynamically rendered ranking table time to load
    soup = BeautifulSoup(driver.page_source, "html.parser")
    dbtable = soup.findAll(name="table", attrs={"id": "dbtable"})
    all_dict = {name: [] for name in thead}
    trs_item = dbtable[0].tbody.findAll(name="tr")
    for tr_item in trs_item:
        ids = tr_item.findAll(name="td")
        for i in range(len(thead)):
            # the first two <td> cells are the checkbox and row number, hence the +2 offset
            all_dict[thead[i]].append(ids[i + 2].text)
    pd.DataFrame(all_dict).to_excel(excel_writer=xlsWriter, sheet_name=key, index=False)
    print("%s,nums=%d,%s" % (key, len(all_dict[thead[0]]), url))
    driver.close()
if __name__ == "__main__":
    today = datetime.datetime.now().strftime('%Y%m%d')
    xlsWriter = pd.ExcelWriter("wealth%s.xls" % today)  # one .xls workbook, one sheet per category
    for key, url in url_dict.items():
        getData(key, url, xlsWriter)
    xlsWriter.close()
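Once the run finishes, the workbook can be read straight back to check that every category landed. A minimal sanity check, assuming the script above completed and wrote wealth<date>.xls to the current directory (pandas needs an engine such as xlrd installed to read the legacy .xls format):

import datetime
import pandas as pd

today = datetime.datetime.now().strftime('%Y%m%d')
# sheet_name=None loads every sheet into a dict of DataFrames keyed by sheet name
sheets = pd.read_excel("wealth%s.xls" % today, sheet_name=None)
for name, df in sheets.items():
    print(name, df.shape)  # each sheet should have 17 columns, one per thead entry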
Approach 2
import time
import json
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
import pymysql
from sqlalchemy import create_engine
# Fund managers: the list interface is paginated, 50 records per page (pi is the page index)
url_list = []
for i in range(1, 57):
    url_list.append("https://fund.eastmoney.com/Data/FundDataPortfolio_Interface.aspx?dt=14&mc=returnjson&ft=all&pn=50&pi={}&sc=abbname&st=asc".format(i))
def find_manager(url):
    page = urllib.request.urlopen(url)
    contents = page.read()
    soup = BeautifulSoup(contents, "html.parser")  # lets bs4 sort out the response encoding
    text = soup.text
    # The interface answers with JSONP-style text: returnjson = {data:[[...],[...],...],record:...};
    # slice out the [[...]] array that sits between "{data:" and the closing "]],".
    rows = json.loads(text[text.find("{data:") + 6:text.find("]],") + 2])
    # Rows may carry fewer than 12 fields; pad with " " (and truncate any extras)
    # so the DataFrame stays rectangular.
    rows = [(row + [" "] * 12)[:12] for row in rows]
    find_manager_info = pd.DataFrame(
        rows, columns=["find_manager_info%d" % i for i in range(1, 13)])
    return find_manager_info
find_manager_base = pd.DataFrame()
find_manager_data = pd.DataFrame()
for urli in url_list:
    time.sleep(2)  # throttle the requests a little
    try:
        find_manager_data = find_manager(url=urli)
    except Exception:
        pass  # on failure the previous page gets concatenated again; drop_duplicates cleans that up
    find_manager_base = pd.concat([find_manager_data, find_manager_base])
find_manager_base = find_manager_base.drop_duplicates(subset=['find_manager_info1', 'find_manager_info2', 'find_manager_info3', 'find_manager_info4'], keep='first')
find_manager_base = find_manager_base.reset_index(drop=True)
# swap NaN for None so the frame can be written to a database afterwards
find_manager_base = find_manager_base.astype(object).where(pd.notnull(find_manager_base), None)
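That final astype(object).where(..., None) step replaces NaN with None, which is the usual preparation for loading a frame into MySQL and matches the pymysql/sqlalchemy imports at the top. A minimal sketch of that last mile, continuing from the script above; the connection string, database name funds, and table name fund_manager are placeholders for your own setup, not from the original:

from sqlalchemy import create_engine

# hypothetical credentials: adjust user, password, host and database to your environment
engine = create_engine("mysql+pymysql://user:password@localhost:3306/funds?charset=utf8mb4")
# write the deduplicated manager table, replacing it if it already exists
find_manager_base.to_sql("fund_manager", con=engine, if_exists="replace", index=False)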



