Python Web Scraping Basics and Examples


The requests module

Sending GET and POST requests

  • response = requests.get(url)
  • response = requests.post(url, data={request body dict})

Attributes and methods of the response object

  • response.text
    • This often comes back garbled; if it does, set response.encoding = "utf-8" before reading it
  • response.content.decode()
    • Decodes the binary byte stream of the response into a str (both approaches are sketched below)
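A minimal sketch of the two decoding approaches; the URL here is just a placeholder for illustration, any page will do:

import requests

url = "https://httpbin.org/get"  # placeholder URL for illustration
response = requests.get(url)

# Option 1: set the encoding, then read the decoded text
response.encoding = "utf-8"
print(response.text)

# Option 2: decode the raw bytes yourself (decode() defaults to utf-8)
print(response.content.decode())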

The session() method

  • Used in the same way as the requests module, with get and post methods, but the session keeps the cookies for you so you never have to handle them yourself

# Example: logging in to renren.com with a session
import requests

session = requests.session()
url = "http://www.renren.com/PLogin.do"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36"}
data = {
    "email": "15869117206",
    "password": "a588181"
}
# the login cookies are stored on the session, so later requests are authenticated
session.post(url, headers=headers, data=data)
url1 = "http://www.renren.com/966470757/profile"
session.get(url1)

Data extraction methods

json

  • A data interchange format that comes from JavaScript; it looks like a string holding Python types (lists, dicts)
  • json.loads
    • Converts a JSON string into the corresponding Python type
    • json.loads(json_string)
  • Where to find responses that return JSON
    • Switch the browser to the mobile version of the site
    • Capture the requests in the Network panel

import requests
import json

url = "http://fanyi.baidu.com/basetrans"

data = {
    "from": "zh",
    "to": "en",
    "query": "你好,世界"
}
headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"}

response = requests.post(url, data=data, headers=headers)
print(response)
html = response.content.decode()
dict_ret = json.loads(html)        # JSON string -> Python dict
print(dict_ret)
print(type(dict_ret))
ret = dict_ret["trans"][0]["dst"]  # the translated text
print(ret)

  • Ctrl+F lets you search within the browser's current panel (e.g., the Network or Elements panel)
  • json.dumps
    • Converts a Python type into a JSON string
    • json.dumps({"a": "A", "b": "B"})
    • json.dumps(ret1, ensure_ascii=False, indent=2)
    • ensure_ascii=False: keeps Chinese characters readable instead of escaping them
    • indent: pretty-prints the output, indenting each nested level by the given number of spaces

# The URL copied from the browser had an extra &callback=jsonp1 parameter; with it the
# decoded result is not valid JSON, so it has to be removed to get a JSON response.
import requests
import json

url = "https://m.douban.com/rexxar/api/v2/subject_collection/filter_tv_american_hot/items?os=ios&for_mobile=1&start=0&count=18&loc_id=108288&_=1529041601585"
headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
           "Referer": "https://m.douban.com/tv/american"}

response = requests.get(url, headers=headers)
json_str = response.content.decode()
ret1 = json.loads(json_str)
print(ret1)
with open("douban", "w", encoding="utf-8") as f:
    f.write(json.dumps(ret1, ensure_ascii=False, indent=2))

Case study: a Douban TV series scraper based on JSON

Scraper code

import requests
import json


class Doubanspider:

    def __init__(self):
        # Remove the callback parameter; the URL is the one in the Network panel whose
        # response actually contains the data (Ctrl+F in the Network panel helps find it)
        self.temp_url = "https://m.douban.com/rexxar/api/v2/subject_collection/filter_tv_american_hot/items?os=ios&for_mobile=1&start={}&count=18&loc_id=108288&_=0"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
            "Referer": "https://m.douban.com/tv/american"}
        # headers must include Referer because this is the mobile site; without it the request fails

    def run(self):  # main logic
        num = 0
        total = 100
        while num < total + 18:
            start_url = self.temp_url.format(num)
            response = requests.get(url=start_url, headers=self.headers)
            html_str = response.content.decode()
            content_list, total = self.get_content_list(html_str)
            self.save_content_list(content_list)
            num += 18

    def get_content_list(self, html_str):
        dict_data = json.loads(html_str)
        content_list = dict_data["subject_collection_items"]
        total = dict_data["total"]
        return content_list, total

    def save_content_list(self, content_list):
        # "a" opens the file in append mode
        with open("douban.json", "a", encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
        print("save succeed")


if __name__ == '__main__':
    douban = Doubanspider()
    douban.run()

Extracting data with XPath and lxml

  • xpath
    • A language for extracting data from HTML
  • xpath syntax
    • XPath Helper plugin: helps locate data in the Elements panel
    • /html/head/meta: selects all meta tags under head under html
    • //: start selecting from any node
    • //li: selects all li tags in the current page
    • /html/head//link: all link tags under head (at any depth)
    • //div[@class="a"]/ul/li: selects the li under the ul under the div whose class is "a"
    • //a/@href: gets the value of the a tag's href attribute
    • //a/text(): gets the text of the a tag
    • //a//text(): gets all the text under the a tag
    • | can be used to select two node sets in one expression
    • matching several attributes at once: //a[@class='noactive' and @id='next']
  • lxml
    • Install: sudo pip3 install lxml
    • Usage (a runnable sketch follows this list):
      from lxml import etree
      element = etree.HTML("html string")
      element.xpath("xpath expression")

Basic Python knowledge points

The range() function
  • Syntax: range(start, stop[, step]) (one-line examples below)
    • start: counting begins at start; defaults to 0, e.g. range(5) is equivalent to range(0, 5)
    • stop: counting ends at stop, but stop itself is excluded, e.g. range(0, 5) gives [0, 1, 2, 3, 4]
    • step: the step size, default 1, e.g. range(0, 5) is equivalent to range(0, 5, 1)
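For example, wrapping range() in list() shows the values it generates:

print(list(range(5)))         # [0, 1, 2, 3, 4]
print(list(range(1, 10, 2)))  # [1, 3, 5, 7, 9]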
List comprehensions
  • A quick way to build a list holding a batch of data
    [i + 10 for i in range(10)] --> [10, 11, 12, ..., 19]
    ["10月{}日".format(i) for i in range(1, 10)] --> ["10月1日", ..., "10月9日"]
Dict comprehensions
  • A quick way to build a dict holding a batch of data

{"a{}".format(i): 10 for i in range(3)}  # {"a0": 10, "a1": 10, "a2": 10}

Ternary operator

a = 10 if 3 < 4 else 20  # the condition is true, so a == 10
a = 10 if 4 < 3 else 20  # the condition is false, so a == 20

Python basics: the format() function
  • Placeholders in the string are written as {NUM}: 0 refers to the first argument, 1 to the second; a colon introduces formatting options. Examples below

age = 25
name = 'Caroline'

print('{0} is {1} years old. '.format(name, age))          # positional arguments
print('{0} is a girl. '.format(name))
print('{0:.3} is a decimal. '.format(1 / 3))               # three significant digits
print('{0:_^11} is a 11 length. '.format(name))            # center in a field of width 11, padded with _
print('{first} is as {second}. '.format(first=name, second='Wendy'))  # named arguments
print('My name is {0.name}'.format(open('out.txt', 'w')))  # access an attribute of the argument
print('My name is {0:8}.'.format('Fred'))                  # minimum field width of 8

The strip() function in Python
  • Removes the specified characters from the head and tail of a string (the ends only, not the middle) and returns the new string

s.strip("0")  # remove zeros from both ends
s.strip()     # remove whitespace from both ends

Commas in print()
  • The arguments are printed on the same line, separated by spaces (example below)
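For example:

print("hello", "world", 2024)  # hello world 2024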
A common Python error
  • Inside a double-quoted string you cannot use another unescaped double quote (escape it or switch quote styles, as below)
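For example, both of these work, whereas print("He said "hi"") is a syntax error:

print('He said "hi"')    # mix quote styles
print("He said \"hi\"")  # or escape the inner quotes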

Storing the scraped data in a database (using the pymysql module)

# -*- coding: utf-8 -*-  # force utf-8 source encoding
import requests
import json
import pymysql
from lxml import etree


class QiuBaiSpder:
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/8hr/page/{}"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36"}

    def get_url_list(self):
        url_list = [self.url_temp.format(i) for i in range(1, 14)]
        return url_list

    def parse_url(self, url):
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, html_str):
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            item["author_name"] = div.xpath(".//h2/text()")[0].strip() if len(div.xpath(".//h2/text()")) > 0 else None
            item["content"] = div.xpath(".//div[@class='content']/span[1]/text()")
            item["content"] = [i.strip() for i in item["content"]]
            item["stats_vote"] = div.xpath(".//span[@class='stats-vote']/i/text()")
            item["stats_vote"] = item["stats_vote"][0] if len(item["stats_vote"]) > 0 else None
            item["stats_comments"] = div.xpath(".//span[@class='stats-comments']//i/text()")
            item["stats_comments"] = item["stats_comments"][0] if len(item["stats_comments"]) > 0 else None
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):
        # "a" opens the file in append mode; the JSON is written as utf-8 text
        with open("qiubai.txt", "a", encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
        print("save")

    def save_mysql(self, content_list):
        connect = pymysql.connect(host="192.168.43.122",
                                  user="wei",
                                  password="123456",
                                  db="gaodb",
                                  charset="utf8mb4",  # posts contain emoji, so utf8mb4 is required
                                  use_unicode=True)
        cursor = connect.cursor()
        # The next three statements are important; without them the inserts fail with encoding errors
        cursor.execute("SET NAMES utf8mb4")
        cursor.execute("SET CHARACTER SET utf8mb4")
        cursor.execute("SET character_set_connection=utf8mb4")
        for content in content_list:
            i = (content["author_name"], content["content"][0], content["stats_vote"], content["stats_comments"])
            print(i)
            sql = "insert into demo1 (username,content,vote,comments) values(%s,%s,%s,%s)"
            cursor.execute(sql, i)
            connect.commit()
        cursor.close()
        connect.close()

    def run(self):
        url_list = self.get_url_list()
        for url in url_list:
            html_str = self.parse_url(url)
            content_list = self.get_content_list(html_str)
            # self.save_content_list(content_list)
            self.save_mysql(content_list)


if __name__ == '__main__':
    qiubai = QiuBaiSpder()
    qiubai.run()
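The code above assumes a table called demo1 already exists in the gaodb database. A minimal sketch of creating it with pymysql; the column types here are assumptions inferred from the insert statement, not taken from the original post:

import pymysql

connect = pymysql.connect(host="192.168.43.122", user="wei", password="123456",
                          db="gaodb", charset="utf8mb4")
cursor = connect.cursor()
# hypothetical schema matching the columns used by save_mysql() above
cursor.execute("""
    CREATE TABLE IF NOT EXISTS demo1 (
        id INT AUTO_INCREMENT PRIMARY KEY,
        username VARCHAR(255),
        content TEXT,
        vote INT,
        comments INT
    ) CHARACTER SET utf8mb4
""")
connect.commit()
cursor.close()
connect.close()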

Python data visualization module (pyecharts)

import pymysql
from pyecharts import Bar


class ShowInfo:

    def connect_mysql(self):
        connect = pymysql.connect(
            host="192.168.43.122",
            user="wei",
            password="123456",
            db="gaodb",
            charset="utf8mb4",
            use_unicode=True
        )
        return connect.cursor(), connect

    def get_info(self, cursor, connect):
        sql = "select count(id) from demo1"
        cursor.execute(sql)
        rows = cursor.fetchone()[0]
        vote = 0
        username = 0
        content = 0
        comments = 0
        for i in range(rows):
            i += 1
            sql = "select vote,username,content,comments from demo1 where id=%s"
            cursor.execute(sql, (i,))
            fetchone = cursor.fetchone()
            vote += int(fetchone[0])
            username += len(fetchone[1])
            content += len(fetchone[2])
            comments += int(fetchone[3])
        vote = int(vote / rows)
        username = int(username / rows)
        content = int(content / rows)
        comments = int(comments / rows)
        print(vote, username, content, comments)

        connect.commit()
        cursor.close()
        connect.close()
        return [vote, username, content, comments]

    def run(self):
        cursor, connect = self.connect_mysql()
        info_list = self.get_info(cursor, connect)
        print(info_list)
        self.show_html(info_list)

    def show_html(self, info_list):
        # bar chart titled "Analysis of hot jokes on Qiushibaike"
        bar = Bar("基于糗事百科热门段子的分析")
        bar.add("基于糗事百科热门段子的分析",
                ["平均好笑数", "用户名平均长度", "段子内容平均长度", "平均评论数"],  # avg votes, avg username length, avg content length, avg comments
                info_list,
                is_label_show=True)
        bar.render()  # writes render.html


if __name__ == '__main__':
    show_info = ShowInfo()
    show_info.run()
