Bilibili Danmaku Analysis System
For a video demonstration, please click.
尚硅谷 Case Study
Basic usage of urllib
# Use urllib to fetch the source of the Baidu homepage
import urllib.request
# (1) Define a url, i.e. the address you want to visit
url = 'http://www.baidu.com'
# (2) Simulate a browser sending a request to the server; response holds the reply
response = urllib.request.urlopen(url)
# (3) Get the page source from the response
# read() returns the body as bytes (binary data)
# We need to turn the bytes into a string
# bytes -> string: decode with the page's encoding, e.g. decode('utf-8')
content = response.read().decode('utf-8')
# (4) Print the data
print(content)
One type and six methods
One type: HTTPResponse
Six methods: read, readline, readlines, getcode, geturl, getheaders
import urllib.request
url = 'http://www.baidu.com'
# Simulate a browser sending a request to the server; response holds the reply
response = urllib.request.urlopen(url)
# One type and six methods
# response is of type HTTPResponse
# print(type(response))
# read() with no argument reads the whole body as bytes
# content = response.read()
# print(content)
# read(n) returns at most n bytes
# content = response.read(5)
# print(content)
# Read one line
# content = response.readline()
# print(content)
# Read all lines into a list
# content = response.readlines()
# print(content)
# Return the status code; 200 means our request logic is correct
# print(response.getcode())
# Return the URL that was fetched
# print(response.geturl())
# Return the response headers (status information)
# print(response.getheaders())
Downloading
import urllib.request
# Download a web page
url_page = 'http://www.baidu.com'
# url is the address to download from; filename is the name of the local file
# Both can be variables or written inline
urllib.request.urlretrieve(url_page, 'baidu.html')
# Download an image
url_img = 'https://tse1-mm.cn.bing.net/th/id/OIP-C.zzaLy_4i4zzfAWPn03AkdgHaFI?w=194&h=135&c=7&r=0&o=5&dpr=1.6&pid=1.7'
urllib.request.urlretrieve(url_img, 'lusi.png')
url_img = 'https://tse3-mm.cn.bing.net/th/id/OIP-C.PijFe6ZDMUUR-95IU5W_dwHaNK?w=187&h=333&c=7&r=0&o=5&dpr=1.6&pid=1.7'
urllib.request.urlretrieve(url=url_img, filename='lusi2.png')
# Download a video
url_video = 'https://vd4.bdstatic.com/mda-me989nuvejzc5iws/sc/cae_h264_nowatermark/1620626259198934606/mda-me989nuvejzc5iws.mp4?v_from_s=hkapp-haokan-hbf&auth_key=1692513321-0-0-486766e5a214bed80f7b6de930400603&bcevod_channel=searchbox_feed&cr=2&cd=0&pd=1&pt=3&logid=2121127810&vid=16710300024974486498&klogid=2121127810&abtest=111803_1-112162_1-112345_2'
urllib.request.urlretrieve(url_video, 'lunyi.mp4')
Customizing the request object (when you run into anti-scraping measures)
import urllib.request
url = 'https://www.baidu.com'
# The parts of a URL:
# http/https   www.baidu.com   443    s      wd=周杰伦     #
# protocol     host            port   path   parameters   anchor
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36 Edg/115.0.1901.203'
}
# Customize the request object <= urlopen() cannot take a dict, so the headers cannot be passed to it directly
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
Encoding the query string
# https://www.baidu.com/s?wd=%E5%91%A8%E6%9D%B0%E4%BC%A6
# Goal: fetch the page source of https://www.baidu.com/s?wd=周杰伦
import urllib.request
url = 'https://www.baidu.com/s?wd=周杰伦'
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36 Edg/115.0.1901.203'
}
request = urllib.request.Request(url=url, headers=headers)  # headers must be passed by keyword; positionally it would be taken as the data argument
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
Running this raises:
UnicodeEncodeError: 'ascii' codec can't encode characters in position 11-13: ordinal not in range(128)
Solution
# https://www.baidu.com/s?wd=%E5%91%A8%E6%9D%B0%E4%BC%A6
# Goal: fetch the page source of https://www.baidu.com/s?wd=周杰伦
import urllib.request
import urllib.parse
url = 'https://www.baidu.com/s?wd='
# Percent-encode the characters 周杰伦 so they can appear in the URL
# This relies on urllib.parse
name = urllib.parse.quote('周杰伦')
url = url + name
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36 Edg/115.0.1901.203'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
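When the query string needs more than one parameter, urllib.parse also provides urlencode(), which percent-encodes a whole dict at once. A minimal sketch; the parameter names and values below are only illustrative:
import urllib.parse
# urlencode() percent-encodes every key/value pair and joins them with '&'
params = {'wd': '周杰伦', 'ie': 'utf-8'}
query = urllib.parse.urlencode(params)
print('https://www.baidu.com/s?' + query)
# https://www.baidu.com/s?wd=%E5%91%A8%E6%9D%B0%E4%BC%A6&ie=utf-8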
IT私塾 Case Study
Web scraping
Beautiful Soup
import re
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.baidu.com"
response = urllib.request.urlopen(url)
html = response.read()
bs = BeautifulSoup(html, "html.parser")
# print(bs.title)
# print(bs.a)
#
# print(type(bs.head)) # <class 'bs4.element.Tag'>
# 1. Tag: a tag and its contents; bs.<tag> returns the first matching tag it finds
#
# print(bs.title.string)
#
# print(type(bs.title.string))
# 2. NavigableString: the text inside a tag (a string)
#
# print(bs.a.attrs)
# print(type(bs))
# 3. BeautifulSoup: the object representing the whole document
# print(bs.name)
# print(bs)
# ----------------------------------
# Traversing the document; see the documentation for more
# print(bs.head.contents[1])
# Searching the document
# (1) find_all()
# String filter: finds tags whose name exactly matches the string
# t_list = bs.find_all("a")
# print(t_list)
# Regex filter: find_all matches tag names using re's search()
# t_list = bs.find_all(re.compile("a"))
# (2) keyword arguments (kwargs)
# t_list = bs.find_all(id="head")
#
# t_list = bs.find_all(class_=True)
# for item in t_list:
# print(item)
# (3) text argument: search by text content
# (4) CSS selectors (select())
# t_list = bs.select('title')  # select by tag name
# t_list = bs.select('.mnav')  # select by class name
# t_list = bs.select('#u1')    # select by id
# for item in t_list:
# print(item)
Regular expressions
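Since this heading has no content of its own, here is a minimal sketch of the re functions that the Douban case below relies on (compile, findall, search, sub); the pattern strings are made up for illustration and are not from the original project:
import re
# compile() builds a reusable pattern object
pattern = re.compile(r'<span>(.*?)</span>')
# findall() returns every captured group as a list of strings
print(pattern.findall('<span>a</span><span>b</span>'))  # ['a', 'b']
# search() returns the first match object anywhere in the string, or None
print(re.search(r'\d+', 'abc123def'))                   # <re.Match object; span=(3, 6), match='123'>
# sub() replaces every match with the given string
print(re.sub(r'\s+', ' ', 'a   b\nc'))                   # 'a b c'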
Douban Top 250 Case Study (scraper)
from bs4 import BeautifulSoup  # parse the page and extract data
import re  # regular expressions for text matching
import urllib.request, urllib.error  # build URLs and fetch page data
import xlwt  # write Excel files
import sqlite3  # SQLite database access
def main():
    baseurl = "https://movie.douban.com/top250?start="
    # 1. Crawl the pages (step 2, parsing, happens inside getData)
    datalist = getData(baseurl)
    savepath = "豆瓣电影Top250.xls"
    # 3. Save the data
    saveData(datalist, savepath)
# Rule for the film detail link
findLink = re.compile(r'<a href="(.*?)">')  # compile a regex object describing the pattern
# Film poster image
findImgsrc = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets '.' also match newlines
# Film title
findTitle = re.compile(r'<span class="title">(.*)</span>')
# Film rating
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Number of people who rated the film
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line summary
findIng = re.compile(r'<span class="inq">(.*)</span>')
# Other film details
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)
# Crawl the pages
def getData(baseurl):
    datalist = []
    for i in range(0, 10):  # call the page-fetching function 10 times (25 films per page)
        url = baseurl + str(i * 25)
        html = askURL(url)  # keep the fetched page source
        # 2. Parse the data item by item
        soup = BeautifulSoup(html, 'html.parser')
        for item in soup.find_all('div', class_="item"):  # find all matching tags, returned as a list
            # print(item)  # debug: inspect the full item for one film
            data = []
            item = str(item)
            # Link to the film detail page
            link = re.findall(findLink, item)[0]  # re.findall extracts the substrings matching the pattern
            data.append(link)  # add the link
            imgSrc = re.findall(findImgsrc, item)[0]
            data.append(imgSrc)  # add the image
            titles = re.findall(findTitle, item)
            if len(titles) == 2:
                ctitle = titles[0]
                data.append(ctitle)  # add the Chinese title
                otitle = titles[1].replace("/", "")  # strip the separator
                data.append(otitle)  # add the foreign title
            else:
                data.append(titles[0])  # add the Chinese title
                data.append(" ")  # leave the foreign title blank
            rating = re.findall(findRating, item)[0]
            data.append(rating)  # add the rating
            judgeNum = re.findall(findJudge, item)[0]
            data.append(judgeNum)  # add the number of ratings
            ing = re.findall(findIng, item)
            if len(ing) != 0:
                ing = ing[0].replace("。", "")  # drop the trailing full stop
                data.append(ing)  # add the summary
            else:
                data.append(" ")  # leave it blank
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # raw string avoids invalid-escape warnings
            bd = re.sub("/", " ", bd)
            data.append(bd.strip())
            datalist.append(data)  # append this film's cleaned-up record
    # for item in datalist:
    #     print(item)
    return datalist
# Fetch the page content for a single URL
def askURL(url):
    head = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36 Edg/115.0.1901.203'
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
        # print(html)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
# Save the data
def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create a workbook object
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)  # create a worksheet
    col = ('电影详情链接', '图片链接', '影片中文名', '影片外国名', '评分', '评价数', '概况', '相关信息')
    for i in range(0, 8):
        sheet.write(0, i, col[i])  # header row
    for i in range(0, 250):
        print("第%d条" % i)
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)
if __name__ == "__main__":
    main()
Data visualization
Flask(app.py)
from flask import Flask, render_template, request
import datetime

app = Flask(__name__)

# @app.route('/')
# def hello_world():
#     return 'Hello World!'

# URL variables: <name> is passed to the view function as a string
@app.route("/welcome/<name>")
def welcome(name):
    return "你好%s" % name

# <int:id> converts the URL segment to an int before calling the view
@app.route("/welcome/<int:id>")
def welcome2(id):
    return "您好%d号会员" % id

# Return a rendered HTML page to the user
# @app.route("/")
# def index():
#     return render_template("index.html")

# Pass variables to the page
@app.route("/")
def index():
    time = datetime.date.today()  # plain variable
    name = ["小张", "小王", "小赵"]  # list
    task = {"任务": "打扫卫生", "时间": "三小时"}  # dict
    return render_template("index.html", var=time, list=name, task=task)

# Form submission
@app.route('/test/register')
def register():
    return render_template("test/register.html")

@app.route('/result', methods=['POST', 'GET'])
def result():
    if request.method == 'POST':
        result = request.form
        return render_template("test/result.html", result=result)

if __name__ == '__main__':
    app.run()
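By Flask's default layout, render_template() resolves these names against a templates/ directory next to app.py, so index.html below belongs in templates/ and register.html / result.html in templates/test/.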
Flask(index.html)
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
欢迎光临,今天是{{ var }}<br/>
今天值班的有:<br/>
{% for data in list %} <!-- {% ... %} delimiters mark control structures; if works the same way -->
<li>{{ data }}</li>
{% endfor %}
任务:<br/> <!-- how to render a table on the page -->
<table border="1">
{% for key,value in task.items() %}
<tr>
<td>{{ key }}</td>
<td>{{ value }}</td>
</tr>
{% endfor %}
</table>
</body>
</html>
Flask(register.html)
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<form action="{{ url_for('result') }}" method="post">
<p>姓名:<input type="text" name="姓名"></p>
<p>年龄:<input type="text" name="年龄"></p>
<p>性别:<input type="text" name="性别"></p>
<p>地址:<input type="text" name="地址"></p>
<p><input type="submit" value="提交"></p>
</form>
</body>
</html>
Flask(result.html)
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<table border="1">
{% for key,value in result.items() %}
<tr>
<th>{{ key }}</th>
<td>{{ value }}</td>
</tr>
{% endfor %}
</table>
</body>
</html>