栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

提前获取彩票数据_窃取彩票数据?

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

提前获取彩票数据_窃取彩票数据?

爬取网站:https://datachart.500.com/dlt/history/history.shtml

先附上完整代码:

# -*- coding = utf-8 -*-
import gzip
import io
import json
import re
import wave
from bs4 import BeautifulSoup
import xlwt
import urllib.request
import urllib.error
import xlrd
import sqlite3
import webbrowser
from selenium import webdriver
from pyquery import PyQuery as pq

def main():
    """Entry point: fetch the lottery history page, parse it, and (optionally) save it to Excel."""
    base_url = "https://datachart.500.com/dlt/history/history.shtml"

    # Three steps: 1. fetch the page  2. parse the data  3. save the data
    datalist = getData(base_url)
    savepath = "超级大乐透开奖数据.xls"
#    saveData(datalist, savepath)

# Sample row from the page source:
# <tr class="t_tr1"> <!-- <td>2</td> --> <td class="t_tr1">22031</td> <td class="cfont2">07</td> <td class="cfont2">14</td> <td class="cfont2">16</td> <td class="cfont2">20</td> <td class="cfont2">28</td> <td class="cfont4">04</td> <td class="cfont4">08</td> <td class="t_tr1">1,208,966,802</td> <td class="t_tr1">3</td> <td class="t_tr1">10,000,000</td> <td class="t_tr1">119</td> <td class="t_tr1">102,371</td> <td class="t_tr1">275,183,386</td> <td class="t_tr1">2022-03-23</td> </tr>

# Regular expressions that pull each field out of one result row.
# NOTE(review): the published listing lost its backslashes and HTML tag
# delimiters during extraction (e.g. r'(d{5})'); the escapes are restored
# here and findAll's <tr> delimiters are reconstructed from the sample row
# above — confirm against the live page markup.
findAll = re.compile(r'<tr class="t_tr1">(.*?)</tr>')             # one full draw row
findCanL = re.compile(r'(\d{5})')                                 # draw number, e.g. 22031
findFront = re.compile(r'([0-9]*)')                               # front-zone numbers
findBack = re.compile(r'([0-9]*)')                                # back-zone numbers
findTotalTre = re.compile(r'(1,\d{3},\d{3},\d{3})')               # prize pool, e.g. 1,208,966,802
findFirstWinner = re.compile(r'(\d{1,2})')                        # first-prize winner count
findFirstPrize = re.compile(r'(0|10,000,000|[1-9],\d{3},\d{3})')  # first-prize amount
findSecondWinner = re.compile(r'([1-9][0-9]|[1-9][0-9][0-9])')    # second-prize winner count
findSecondPrize = re.compile(r'(\d{3},\d{3}|\d{2},\d{3})')        # second-prize amount
findTotalInMoney = re.compile(r'(\d{3},\d{3},\d{3})')             # total bet amount
findDate = re.compile(r'(2022-\d{2}-\d{2})')                      # draw date (2022 only)


def getData(baseurl):
    """Download the history page and return one record per draw.

    Each record is a list of ten single-field lists:
    [draw number, front-zone numbers, back-zone numbers, prize pool,
     first-prize winners, first-prize amount, second-prize winners,
     second-prize amount, total bets, draw date].
    """
    page = askUrl(baseurl)
    soup = BeautifulSoup(page, "html.parser")
    chart_html = str(soup.find_all('div', class_='chart'))
    rows = re.findall(findAll, chart_html)

    datalist = []
    for idx, row in enumerate(rows):
        record = [
            re.findall(findCanL, row),      # draw number
            re.findall(findFront, row),     # front-zone numbers
            re.findall(findBack, row),      # back-zone numbers
            re.findall(findTotalTre, row),  # prize pool
        ]

        # First-prize winner count: keep only the first match, because the
        # pattern can also pick up the second-prize count later in the row.
        record.append([re.findall(findFirstWinner, row)[0]])

        # First-prize amount: same first-match-only treatment.
        record.append([re.findall(findFirstPrize, row)[0]])

        # Second-prize winner count: when the pattern matches more than once,
        # the second match is the second-prize count (the first belongs to
        # the first prize).
        second_winners = re.findall(findSecondWinner, row)
        if len(second_winners) > 1:
            record.append([second_winners[1]])
        else:
            record.append([second_winners[0]])

        record.append(re.findall(findSecondPrize, row))    # second-prize amount
        record.append(re.findall(findTotalInMoney, row))   # total bets
        record.append(re.findall(findDate, row))           # draw date

        datalist.append(record)
        print(idx, datalist[idx])

    return datalist

def askUrl(url):
    """Fetch *url* and return its body decoded as text, or "" on error.

    The request carries a browser User-Agent header because the site does
    not serve the normal page to the default urllib client.
    """
    head = {  # Request headers that imitate a real browser
        # The User-Agent header tells the server what kind of client is asking
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.52"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Timeout so a stalled connection cannot hang the scraper forever,
        # and a context manager so the response is always closed.
        with urllib.request.urlopen(request, timeout=30) as response:
            # The page declares charset=gb2312; decode with gbk, a strict
            # superset, so the occasional character outside gb2312 does not
            # abort the whole run.
            html = response.read().decode("gbk")
    except urllib.error.URLError as e:
        # Best-effort logging; fall through and return "" so callers continue.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html

def saveData(datalist, savepath):
    """Write the parsed draw records to an .xls workbook at *savepath*.

    Layout: a two-row merged header, then one row per draw from row 2 on.
    """
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('往期大乐透开奖情况', cell_overwrite_ok=True)
    # Two-row header with merged cells for each column group.
    sheet.write_merge(0, 1, 0, 0, '期号')
    sheet.write_merge(0, 0, 1, 5, '前区号码')
    sheet.write_merge(0, 1, 6, 7, '后区')
    sheet.write_merge(0, 1, 8, 8, '奖池奖金(元)')
    sheet.write_merge(0, 0, 9, 10, '一等奖')
    sheet.write_merge(0, 0, 11, 12, '二等奖')
    sheet.write_merge(0, 1, 13, 13, '总投注额')
    sheet.write_merge(0, 1, 14, 14, '开奖日期')

    # Sub-header: number the five front-zone columns 1..5.
    for i in range(1, 6):
        sheet.write(1, i, i)

    # Sub-header under 一等奖/二等奖: alternating count / amount labels.
    col = ('注数', '奖金(元)')
    for i in range(9, 13):
        sheet.write(1, i, col[(0 if (i - 10) % 2 else 1)])

    # Data rows. Iterate over however many records were parsed instead of a
    # hard-coded 29, so shorter result sets no longer raise IndexError.
    for i in range(len(datalist)):
        index = 0
        for j in range(10):
            # BUG FIX: always write the cell *values*; the original wrote the
            # one-element list object itself when len(...) == 1, which xlwt
            # rejects as an unsupported data type.
            for value in datalist[i][j]:
                sheet.write(i + 2, index, value)
                index += 1
    book.save(savepath)



# Run the scraper only when this file is executed directly (not on import).
if __name__ == "__main__":
    main()

#测试代码
# html = askUrl(baseurl)
# soup = BeautifulSoup(html, "html.parser")
# print(soup)
# datalist = []
# html = askUrl(baseurl)
# soup = BeautifulSoup(html,"html.parser")
# item = soup.find_all('div', class_='chart')
# #print(item)
# item = str(item)
# alldata = re.findall(findAll, item)
# canl = re.findall(findCanL, alldata[0])
# print(canl)
# front = re.findall(findFront, alldata[0])
# print(front)
# back = re.findall(findBack, alldata[0])
# print(back)
# trea = re.findall(findTotalTre, alldata[0])
# print(trea)
# fiw = re.findall(findFirstWinner, alldata[0])
# print(fiw)
# fim = re.findall(findFirstPrize, alldata[0])
# print(fim)
# siw = re.findall(findSecondWinner, alldata[0])
# print(siw)
# sim = re.findall(findSecondPrize, alldata[0])
# print(sim)
# tm = re.findall(findTotalInMoney, alldata[0])
# print(tm)
# date = re.findall(findDate, alldata[0])
# print(date)

爬虫的三个步骤:1.爬取网页 2.解析数据 3.保存数据

首先爬取网页要将自己伪装成一个浏览器,相关代码如下:

def askUrl(url):
    """Fetch *url* pretending to be a browser; return the decoded page body, or "" on error."""
    head = {  # Request headers that imitate a real browser
        # The User-Agent header tells the server what kind of client (and file support) is asking
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.52"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("gb2312")  # the charset comes from the page source (search it for "charset")
    except urllib.error.URLError as e:
        # Best-effort logging; the function still returns "" so callers can continue
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html

一开始运行的时候怎样都通过不了,在网上查询后发现是解码模式并不是固定的'utf-8',

html = response.read().decode("utf-8")即这一句,

 查看源码后,这里的charset说明他是以gb2312编码的

html = response.read().decode("gb2312")改成这样后可正常运行

2.解析数据

# Regular expressions that pull each field out of one result row.
# NOTE(review): the published listing lost its backslashes and HTML tag
# delimiters during extraction (e.g. r'(d{5})'); the escapes are restored
# here and findAll's <tr> delimiters are reconstructed from the sample row
# shown earlier — confirm against the live page markup.
findAll = re.compile(r'<tr class="t_tr1">(.*?)</tr>')             # one full draw row
findCanL = re.compile(r'(\d{5})')                                 # draw number, e.g. 22031
findFront = re.compile(r'([0-9]*)')                               # front-zone numbers
findBack = re.compile(r'([0-9]*)')                                # back-zone numbers
findTotalTre = re.compile(r'(1,\d{3},\d{3},\d{3})')               # prize pool, e.g. 1,208,966,802
findFirstWinner = re.compile(r'(\d{1,2})')                        # first-prize winner count
findFirstPrize = re.compile(r'(0|10,000,000|[1-9],\d{3},\d{3})')  # first-prize amount
findSecondWinner = re.compile(r'([1-9][0-9]|[1-9][0-9][0-9])')    # second-prize winner count
findSecondPrize = re.compile(r'(\d{3},\d{3}|\d{2},\d{3})')        # second-prize amount
findTotalInMoney = re.compile(r'(\d{3},\d{3},\d{3})')             # total bet amount
findDate = re.compile(r'(2022-\d{2}-\d{2})')                      # draw date (2022 only)


def getData(baseurl):
    """Download the history page and return a list of per-draw records.

    Each record is a list of ten lists:
    [draw number, front-zone numbers, back-zone numbers, prize pool,
     first-prize winners, first-prize amount, second-prize winners,
     second-prize amount, total bets, draw date].
    """
    datalist = []
    html = askUrl(baseurl)
    soup = BeautifulSoup(html,"html.parser")
    item = soup.find_all('div', class_='chart')
    #print(item)
    item = str(item)
    alldata = re.findall(findAll, item)
    for i in range(0, len(alldata)):

        data = []
        # Draw number
        canl = re.findall(findCanL, alldata[i])
        data.append(canl)

        # Front-zone numbers
        front = re.findall(findFront, alldata[i])
        data.append(front)

        # Back-zone numbers
        back = re.findall(findBack, alldata[i])
        data.append(back)

        # Prize-pool amount
        trea = re.findall(findTotalTre, alldata[i])
        data.append(trea)

        # First-prize winner count
        fiw = re.findall(findFirstWinner, alldata[i])
        fi = []
        fi.append(fiw[0])
        data.append(fi) # keep only the first match: the pattern can also catch the second-prize winner count later in the row

        # First-prize amount
        fim = re.findall(findFirstPrize, alldata[i])
        fi1 = []
        fi1.append(fim[0])
        data.append(fi1)

        # Second-prize winner count
        siw = re.findall(findSecondWinner, alldata[i])
        fi2=[]
        if(len(siw)>1):# observed in the data: with more than one match, the second element is the second-prize count
            fi2.append(siw[1])
        else: fi2.append(siw[0])
        data.append(fi2)

        # Second-prize amount
        sim = re.findall(findSecondPrize, alldata[i])
        data.append(sim)

        # Total bet amount
        tm = re.findall(findTotalInMoney, alldata[i])
        data.append(tm)

        # Draw date
        date = re.findall(findDate, alldata[i])
        data.append(date)

        datalist.append(data)
        print(i, datalist[i])


    return datalist

用靓汤解析网页源码

 发现每一期的数据都分别藏在这些框里,因此先用findAll将每一期的数据找出来,然后再寻找每一期的详细数据

findAll = re.compile(r'<tr class="t_tr1">(.*?)</tr>')

正则表达式的匹配规则可自行上网查询

再以期数为例

发现它的位置在这里,因此再创建一个正则表达式 

findCanL = re.compile(r'(\d{5})')

其他数据以此类推

 而这里的代码额外生成了列表,以一等奖获奖人数为例,因为在运行时发现,可能会把后面的二等奖获奖人数囊括进来,因此取第一个列表元素,而为什么不直接data.append(list(fiw[0]))呢?

这是因为在保存两位数据如16时,会保存成['1','6']的情况

而采用额外生成的列表可以解决这种情况,另外两种情况也是如此

 最后各种情况处理完后,保存数据,加入datalist,函数运行完后返回datalist

3.保存数据

def saveData(datalist, savepath):
    """Write the parsed draw records to an .xls workbook at *savepath*."""
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('往期大乐透开奖情况', cell_overwrite_ok=True)
    # Two-row header with merged cells for each column group
    sheet.write_merge(0, 1, 0, 0, '期号')
    sheet.write_merge(0, 0, 1, 5, '前区号码')
    sheet.write_merge(0, 1, 6, 7, '后区')
    sheet.write_merge(0, 1, 8, 8, '奖池奖金(元)')
    sheet.write_merge(0, 0, 9, 10, '一等奖')
    sheet.write_merge(0, 0, 11, 12, '二等奖')
    sheet.write_merge(0, 1, 13, 13, '总投注额')
    sheet.write_merge(0, 1, 14 ,14, '开奖日期')

    # Number the five front-zone sub-header columns 1..5
    for i in range(1,6):
        sheet.write(1, i, i)

    # Alternating count/amount labels under the first/second prize groups
    col = ('注数', '奖金(元)')
    for i in range(9,13):
        sheet.write(1, i, col[(0 if (i-10)%2 else 1)])

    # NOTE(review): rows are hard-coded to 29; fewer parsed records raise IndexError
    for i in range(29):
        index = 0
        for j in range(10):
            if(len(datalist[i][j]) == 1):
                # NOTE(review): this writes the one-element *list* itself, not
                # datalist[i][j][0]; xlwt rejects list values — looks like a bug
                sheet.write(i+2, index, datalist[i][j])
                index += 1
            else:
                for k in range(len(datalist[i][j])):
                    sheet.write(i+2, index, datalist[i][j][k])
                    index += 1
    book.save(savepath)

 因为像按照这样的格式设置标题栏,所以代码显得有点冗杂 = =

但是最后的结果还不错~

 但是我想爬取更多数据,比如到2022年1月1日的,但是数据最多显示28条,也不知道怎么解决

T T

想法是点击这个链接,但是不知道怎么操作。。

先不弄了 弄了一个下午 过几天再学怎么弄吧~ 

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/783190.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号