主要看一下response的爬取的入门
没啥用的练习  # Create Time : Nov 11 08 14:27:00
#In[]
#没啥用的练习
from typing_extensions import ParamSpec
from urllib.request import urlopen

# Fetch the Baidu homepage and save the raw HTML to a local file.
url = 'http://www.baidu.com'
# Context managers guarantee both the HTTP response and the output file
# are closed even if an exception is raised mid-way.
with urlopen(url) as resq:
    # The body is decoded as UTF-8, so write it back out as UTF-8 too --
    # relying on the platform default encoding can fail (e.g. cp936/cp1252).
    with open("mbaidu.html", mode='w', encoding='utf-8') as f:
        f.write(resq.read().decode('utf-8'))
print("over")
页面渲染
页面渲染有客户端和服务端渲染,这边就讲解一下客户端渲染的入门爬取
GET
# %%
#web页面渲染:
# 1、服务器端渲染: 数据和html结合返回给客户端
# 2、客户端渲染: 数据和html分别传给客户端,由客户端进行合并
#大部分为第二种情况,所以需要准确找到使用对应的request返回回来的url
#HTTP协议
# 重点理解:请求头、请求行、请求体;状态行、响应头、响应体、请求方式
# %%
# requests: GET with a browser User-Agent -- one of the simplest
# anti-scraping countermeasures; without it Baidu may serve a
# verification page instead of real search results.
import requests

# quray = input("随便吧:")
url = "https://www.baidu.com/s?wd=婉儿别闹"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
}
resq = requests.get(url, headers=headers)
print(resq.text)
resq.close()  # release the underlying connection, same as the last cell does
POST
# %%
# requests: POST -- Baidu Translate's suggestion endpoint returns JSON.
url = "https://fanyi.baidu.com/sug"
s = input("请输入单词: ")
dat = {
    "kw": s  # form field name expected by the /sug endpoint
}
# Reuse the browser User-Agent defined above; some endpoints reject the
# default python-requests User-Agent.
resq = requests.post(url, data=dat, headers=headers)
# print(resq.text)
print(resq.json())
resq.close()  # release the underlying connection
params
# %%
### Query parameters and XHR endpoints
import requests

# Douban's "hot movies" JSON endpoint (discovered via the browser's XHR tab).
url = "https://movie.douban.com/j/search_subjects?"

# Pretend to be a regular browser -- a basic anti-scraping countermeasure.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
}

# Passing the query string as a dict keeps the URL short and readable;
# requests URL-encodes it for us (these keys come from the XHR request).
query = {
    "type": "movie",
    "tag": "热门",
    "sort": "recommend",
    "page_limit": "40",
    "page_start": "0",
}

# Issue the request and show the decoded JSON payload.
response = requests.get(url=url, params=query, headers=headers)
print(response.json())

# Free the underlying connection.
response.close()



