Python Web Scraping: Basic Usage of the requests Library

Summary: basic usage of the requests library for Python web scraping.

requests is built on top of the urllib3 library.

pip install requests

A handy site for HTTP testing: http://httpbin.org/


Required imports


import requests

A simple test


def foo1():
    response = requests.get("http://www.baidu.com")
    print(type(response))
    print(response.status_code)
    print(type(response.text))
    print(len(response.text))
    print(response.cookies)
    """
    <class 'requests.models.Response'>
    200
    <class 'str'>
    2381
    <RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
    """

GET with parameters


def foo2():
    response = requests.get("http://httpbin.org/get?name=Tom&age=20")
    print(response.text)
# Build the query parameters from a dict
def foo3():
    data = {
        "name": "Tom",
        "age": 20
    }
    response = requests.get("http://httpbin.org/get", params=data)
    print(response.text)
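# requests URL-encodes the parameters automatically; the final URL can be
# checked via response.url. A minimal sketch (foo3b and the encoded URL in the
# comment are illustrative):
def foo3b():
    data = {"name": "Tom & Jerry", "tags": ["a", "b"]}  # a list becomes repeated parameters
    response = requests.get("http://httpbin.org/get", params=data)
    print(response.url)
    # e.g. http://httpbin.org/get?name=Tom+%26+Jerry&tags=a&tags=b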

Parsing JSON


def foo4():
    import json
    response = requests.get("http://httpbin.org/get")
    print(type(response.text))
    print(type(response.json()))
    print(type(json.loads(response.text)))
    """
    <class 'str'>
    <class 'dict'>
    <class 'dict'>
    """

Saving binary files


def foo5():
    response = requests.get("https://github.com/favicon.ico")
    print(type(response.text))
    print(type(response.content))
    print(response.text)
    """
    <class 'str'>
    <class 'bytes'>
    """
    # Save the file; the with block closes it automatically
    with open("favicon.ico", "wb") as f:
        f.write(response.content)
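# For large downloads, response.content loads the whole body into memory.
# A sketch using stream=True with iter_content to write the file in chunks:
def foo5b():
    # stream=True defers downloading the body until it is iterated
    with requests.get("https://github.com/favicon.ico", stream=True) as response:
        with open("favicon.ico", "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)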

Adding request headers


def foo6():
    response = requests.get("https://zhuanlan.zhihu.com/p/36085437")
    print(response.status_code)  # 500
    headers = {
        "User-Agent": "Mozilla/5.0"
    }
    response = requests.get("https://zhuanlan.zhihu.com/p/36085437",
                            headers=headers)
    print(response.status_code)  # 200
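# To verify which headers were actually sent, inspect response.request.headers,
# or let httpbin echo them back; a quick sketch:
def foo6b():
    headers = {"User-Agent": "Mozilla/5.0"}
    response = requests.get("http://httpbin.org/get", headers=headers)
    print(response.request.headers["User-Agent"])  # Mozilla/5.0
    print(response.json()["headers"])  # httpbin echoes the received headers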

POST requests


def foo7():
    data = {
        "name": "Tom",
        "age": 20
    }
    headers = {
        "User-Agent": "Mozilla/5.0"
    }
    response = requests.post("http://httpbin.org/post",
                             data=data,
                             headers=headers)
    print(response.text)
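# data= sends a form-encoded body; to send a JSON body instead, pass json=,
# which serializes the dict and sets the Content-Type header. A minimal sketch:
def foo7b():
    data = {"name": "Tom", "age": 20}
    # json= serializes the dict and sets Content-Type: application/json
    response = requests.post("http://httpbin.org/post", json=data)
    print(response.json()["json"])  # {'name': 'Tom', 'age': 20}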

The Response object


def foo8():
    response = requests.get("http://httpbin.org/get")
    print(type(response))
    print(type(response.headers))
    print(type(response.cookies))
    print(type(response.text))
    print(type(response.content))
    print(type(response.status_code))
    print(response.status_code)
    print(response.url)
    print(response.history)
    print(response.reason)
    print(response.encoding)
    print(response.apparent_encoding)
    print(response.request)
    """
    <class 'requests.models.Response'>
    <class 'requests.structures.CaseInsensitiveDict'>
    <class 'requests.cookies.RequestsCookieJar'>
    <class 'str'>
    <class 'bytes'>
    <class 'int'>
    200
    http://httpbin.org/get
    []
    OK
    None
    ascii
    <PreparedRequest [GET]>
    """

Checking the status code


def foo9():
    response = requests.get("http://httpbin.org/get")
    print(response.status_code == requests.codes.ok)
    print(response.status_code == 200)
    # True
    # True
"""
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect', 'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
421: ('misdirected_request',),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
"""

File upload


def foo10():
    # let the with block close the file after the upload
    with open("cookie.txt", "rb") as f:
        files = {"file": f}
        response = requests.post("http://httpbin.org/post", files=files)
    print(response.status_code)
    print(response.text)
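# The value in files can also be a (filename, fileobj, content_type) tuple to
# set the uploaded filename and MIME type explicitly; a sketch:
def foo10b():
    with open("cookie.txt", "rb") as f:
        files = {"file": ("cookie.txt", f, "text/plain")}
        response = requests.post("http://httpbin.org/post", files=files)
    print(response.json()["files"])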

Getting cookies


def foo11():
    response = requests.get("http://www.baidu.com")
    print(response.cookies)
    for key, value in response.cookies.items():
        print("{key} = {value}".format(key=key, value=value))
    """
    <RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
    BDORZ = 27315
    """
# Use cookies to keep a session alive and simulate a login
def foo12():
    # The second GET does not see the cookie; the two requests behave like two separate browsers
    response = requests.get("http://httpbin.org/cookies/set/number/123456789")
    print(response.text)
    # "number": "123456789"
    response = requests.get("http://httpbin.org/cookies")
    print(response.text)

Sessions

A Session object behaves like a browser session and persists cookies across requests.


# The two requests act like one browser session; the cookie is preserved
def foo13():
    session = requests.Session()
    response = session.get("http://httpbin.org/cookies/set/number/123456789")
    print(response.text)
    # "number": "123456789"
    response = session.get("http://httpbin.org/cookies")
    print(response.text)
    # "number": "123456789"

Certificate verification

def foo14():
    response = requests.get("https://www.12306.cn/")
    # Raises requests.exceptions.SSLError (CERTIFICATE_VERIFY_FAILED) if the certificate is not trusted
    print(response.status_code)
# Disable verification
def foo15():
    from requests.packages import urllib3
    urllib3.disable_warnings()  # suppress the InsecureRequestWarning shown for unverified HTTPS requests
    response = requests.get("https://www.12306.cn/", verify=False)
    print(response.status_code)  # 200
# Specify a client certificate
def foo16():
    # cert takes the path to a cert file, or a ("cert", "key") tuple
    response = requests.get("https://www.12306.cn/", cert="path")
    print(response.status_code)  # 200
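# Rather than disabling verification, verify can also point at a custom CA
# bundle; the path below is a placeholder:
def foo16b():
    response = requests.get("https://www.12306.cn/",
                            verify="/path/to/ca-bundle.crt")  # placeholder path
    print(response.status_code)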

Using proxies

def foo17():
    proxies = {
        "http": "http://127.0.0.1:8000",
        "https": "https://127.0.0.1:8000"
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200
# Using a proxy that requires authentication
def foo18():
    proxies = {
        "http": "http://user:password@127.0.0.1:8000"
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200
# Using a SOCKS proxy
# Install support first: pip install requests[socks]
def foo19():
    proxies = {
        "http": "socks5://127.0.0.1:8000",
        "https": "socks5://127.0.0.1:8000",
    }
    response = requests.get("https://www.12306.cn/", proxies=proxies)
    print(response.status_code)  # 200

Timeout settings

def foo20():
    from requests.exceptions import ConnectTimeout
    try:
        response = requests.get("http://www.google.com/", timeout=0.1)
        print(response.status_code)
    except ConnectTimeout:
        print("Connection timed out")
    # a requests.exceptions.ReadTimeout may be raised instead if the
    # connection succeeds but the server responds slowly
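# timeout can also be a (connect, read) tuple to limit the two phases
# separately; the sketch below uses httpbin's /delay endpoint to force a read
# timeout:
def foo20b():
    from requests.exceptions import Timeout
    try:
        # allow 3.05s to connect, but only 2s to receive data
        response = requests.get("http://httpbin.org/delay/5", timeout=(3.05, 2))
        print(response.status_code)
    except Timeout:  # catches both ConnectTimeout and ReadTimeout
        print("Timed out")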

Authentication

def foo21():
    from requests.auth import HTTPBasicAuth
    response = requests.get("http://www.google.com/",
                            auth=HTTPBasicAuth("user", "password"))
    print(response.status_code)
# Equivalently, pass a (user, password) tuple
def foo22():
    response = requests.get("http://www.google.com/",
                            auth=("user", "password"))
    print(response.status_code)

Exception handling

# Principle: catch subclass exceptions first, then parent classes, and the base class last
# Reference: http://cn.python-requests.org/zh_CN/latest/_modules/requests/exceptions.html
def foo23():
    from requests.exceptions import ConnectTimeout, Timeout, RequestException
    try:
        response = requests.get("http://www.google.com/", timeout=0.1)
        print(response.status_code)
    except ConnectTimeout:  # subclass exception
        print("ConnectTimeout")
    except Timeout:  # parent exception
        print("Timeout")
    except RequestException:  # base exception
        print("RequestException")