Introduction to Web Scraping, Part 8: Crawling with Threads, Processes, and Coroutines

Summary: scraping Lianjia's Hangzhou second-hand housing listings (hz.lianjia.com) with multithreading, gevent coroutines, multiprocessing, and the thread+coroutine and process+coroutine combinations.

1 Multithreaded scraping


import lxml
from lxml import etree
import requests
import threading
import time

rlock = threading.RLock()  # reentrant lock, used to serialize file writes across threads
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}

def getArea(url):
    '''
    Fetch district names and their listing URLs.
    :param url: seed URL of the second-hand housing section
    :return: dict mapping district name -> district URL
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)

    areaList = mytree.xpath('//div[@data-role="ershoufang"]/div[1]/a')
    # map district name -> URL
    areaDict = {}
    for area in areaList:
        # district name
        areaName = area.xpath('./text()')[0]
        # url
        areaurl = "https://hz.lianjia.com" + area.xpath('./@href')[0]
        print(areaName, areaurl)
        # e.g. 西湖 -> https://hz.lianjia.com/ershoufang/xihu/ ; store the pair in the dict
        areaDict[areaName] = areaurl
    return areaDict

def gethouseInfo(areaName, url):
    '''
    Scrape the listings on one district page and append them to a per-district file.
    :param areaName: district name
    :param url: district page URL
    :return:
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)

    sellList = mytree.xpath("//ul[@class='sellListContent']/li[@class=\"clear\"]")
    for house in sellList:
        # listing title
        title = house.xpath('.//div[@class="title"]/a/text()')[0]
        # url
        houseurl = house.xpath('.//div[@class="title"]/a/@href')[0]
        # house details
        houseInfo = house.xpath('.//div[@class="houseInfo"]/a/text()')[0] + \
                    house.xpath('.//div[@class="houseInfo"]/text()')[0]

        # location info
        positionInfo = house.xpath('.//div[@class="positionInfo"]/text()')[0] + \
                       house.xpath('.//div[@class="positionInfo"]/a/text()')[0]

        # total price ('万' = 10,000 RMB)
        # /html/body/div[4]/div[1]/ul/li[1]/div[1]/div[6]/div[1]/span
        totalPrice = house.xpath('.//div[@class="totalPrice"]/span/text()')[0] + '万'
        # price per square meter
        unitPrice = house.xpath('.//div[@class="unitPrice"]/span/text()')[0]
        # print(title, houseurl, houseInfo, positionInfo, totalPrice, unitPrice)

        with rlock:
            print(areaName)
            with open(areaName + '.txt', 'a+', encoding='utf-8', errors='ignore') as f:
                f.write(str((title, houseInfo, houseurl, positionInfo, totalPrice, unitPrice)) + '\n')
                f.flush()

if __name__ == '__main__':
    starUrl = "https://hz.lianjia.com/ershoufang/"
    areaDict = getArea(starUrl)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(areaDict)
    # multithreading: one thread per district
    threadList = []
    for areaName, url in areaDict.items():
        t = threading.Thread(target=gethouseInfo, args=(areaName, url))
        # start the thread
        threadList.append(t)
        t.start()

    # make sure every thread has finished
    for i in threadList:
        i.join()
    print(time.perf_counter() - start)
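
The loop above starts one thread per district by hand and joins them at the end. As a variant sketch (not part of the original script), the same fan-out can be written with concurrent.futures.ThreadPoolExecutor, which caps the number of worker threads and surfaces worker exceptions; it assumes the getArea and gethouseInfo functions defined above.

# Sketch: thread-pool variant of the manual Thread fan-out (assumes getArea/gethouseInfo are defined).
from concurrent.futures import ThreadPoolExecutor
import time

if __name__ == '__main__':
    start = time.perf_counter()
    areaDict = getArea("https://hz.lianjia.com/ershoufang/")
    with ThreadPoolExecutor(max_workers=8) as pool:  # at most 8 threads fetch pages at once
        futures = [pool.submit(gethouseInfo, name, url) for name, url in areaDict.items()]
        for f in futures:
            f.result()  # blocks until the task finishes; re-raises any worker exception
    print(time.perf_counter() - start)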

2 Coroutine scraping with gevent

import gevent
from gevent import monkey
gevent.monkey.patch_all()   # patch blocking I/O up front; some modules must be patched before first use
import os
import lxml
from lxml import etree
import requests
import threading
import time

rlock = threading.RLock()  # reentrant lock (unused in this version: all greenlets run in a single thread)
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}

def getArea(url):
    '''
    Fetch district names and their listing URLs.
    :param url: seed URL of the second-hand housing section
    :return: dict mapping district name -> district URL
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    areaList = mytree.xpath('//div[@data-role="ershoufang"]/div[1]/a')
    # map district name -> URL
    areaDict = {}
    for area in areaList:
        # district name
        areaName = area.xpath('./text()')[0]
        # url
        areaurl = "https://hz.lianjia.com" + area.xpath('./@href')[0]
        print(areaName, areaurl)
        areaDict[areaName] = areaurl
    return areaDict

def gethouseInfo(areaName, url):
    '''
    Scrape the listings on one district page and append them to a per-district file.
    :param areaName: district name
    :param url: district page URL
    :return:
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    sellList = mytree.xpath("//ul[@class='sellListContent']/li[@class=\"clear\"]")
    for house in sellList:
        # listing title
        title = house.xpath('.//div[@class="title"]/a/text()')[0]
        # url
        houseurl = house.xpath('.//div[@class="title"]/a/@href')[0]
        # house details
        houseInfo = house.xpath('.//div[@class="houseInfo"]/a/text()')[0] + \
                    house.xpath('.//div[@class="houseInfo"]/text()')[0]

        # location info
        positionInfo = house.xpath('.//div[@class="positionInfo"]/text()')[0] + \
                       house.xpath('.//div[@class="positionInfo"]/a/text()')[0]

        # total price ('万' = 10,000 RMB)
        # /html/body/div[4]/div[1]/ul/li[1]/div[1]/div[6]/div[1]/span
        totalPrice = house.xpath('.//div[@class="totalPrice"]/span/text()')[0] + '万'
        # price per square meter
        unitPrice = house.xpath('.//div[@class="unitPrice"]/span/text()')[0]
        # print(title, houseurl, houseInfo, positionInfo, totalPrice, unitPrice)

        with open("./hz/" + areaName + '.txt', 'a+', encoding='utf-8', errors='ignore') as f:
            f.write(str((title, houseInfo, houseurl, positionInfo, totalPrice, unitPrice)) + '\n')
            f.flush()

if __name__ == '__main__':
    starUrl = "https://hz.lianjia.com/ershoufang/"
    areaDict = getArea(starUrl)
    os.makedirs("./hz", exist_ok=True)  # per-district files are written into ./hz
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(areaDict)
    # coroutines: one greenlet per district
    # gevent.monkey.patch_all()  # non-blocking I/O; if patching here is too late, do it at the very top of the file
    geventList = []
    for k, v in areaDict.items():
        g = gevent.spawn(gethouseInfo, k, v)
        geventList.append(g)
    gevent.joinall(geventList)
    print(time.perf_counter() - start)
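
gevent.spawn above creates one greenlet per district with no upper bound on concurrency. If the number of simultaneous requests should be limited, gevent also provides a greenlet pool; the following is a minimal sketch under the same assumptions (getArea/gethouseInfo as defined above, monkey.patch_all() already applied at the top of the file).

# Sketch: bound greenlet concurrency with gevent.pool.Pool instead of bare gevent.spawn.
from gevent.pool import Pool

if __name__ == '__main__':
    areaDict = getArea("https://hz.lianjia.com/ershoufang/")
    pool = Pool(5)  # at most 5 greenlets make HTTP requests at the same time
    for name, url in areaDict.items():
        pool.spawn(gethouseInfo, name, url)
    pool.join()  # wait until every greenlet has finished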

3 Multiprocess scraping

import lxml
from lxml import etree
import requests

import multiprocessing
import os
import time

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}

def getArea(url):
    '''
    Fetch district names and their listing URLs.
    :param url: seed URL of the second-hand housing section
    :return: dict mapping district name -> district URL
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    areaList = mytree.xpath('//div[@data-role="ershoufang"]/div[1]/a')
    # map district name -> URL
    areaDict = {}
    for area in areaList:
        # district name
        areaName = area.xpath('./text()')[0]
        # url
        areaurl = "https://hz.lianjia.com" + area.xpath('./@href')[0]
        print(areaName, areaurl)
        areaDict[areaName] = areaurl
    return areaDict

def gethouseInfo(areaName, url):
    '''
    Scrape the listings on one district page and append them to a per-district file.
    :param areaName: district name
    :param url: district page URL
    :return:
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    sellList = mytree.xpath("//ul[@class='sellListContent']/li[@class=\"clear\"]")
    for house in sellList:
        # listing title
        title = house.xpath('.//div[@class="title"]/a/text()')[0]
        # url
        houseurl = house.xpath('.//div[@class="title"]/a/@href')[0]
        # house details
        houseInfo = house.xpath('.//div[@class="houseInfo"]/a/text()')[0] + \
                    house.xpath('.//div[@class="houseInfo"]/text()')[0]

        # location info
        positionInfo = house.xpath('.//div[@class="positionInfo"]/text()')[0] + \
                       house.xpath('.//div[@class="positionInfo"]/a/text()')[0]

        # total price ('万' = 10,000 RMB)
        # /html/body/div[4]/div[1]/ul/li[1]/div[1]/div[6]/div[1]/span
        totalPrice = house.xpath('.//div[@class="totalPrice"]/span/text()')[0] + '万'
        # price per square meter
        unitPrice = house.xpath('.//div[@class="unitPrice"]/span/text()')[0]

        with open("./hz/" + areaName + '.txt', 'a+', encoding='utf-8', errors='ignore') as f:
            f.write(str((title, houseInfo, houseurl, positionInfo, totalPrice, unitPrice)) + '\n')
            f.flush()

if __name__ == '__main__':
    starUrl = "https://hz.lianjia.com/ershoufang/"
    areaDict = getArea(starUrl)
    os.makedirs("./hz", exist_ok=True)  # per-district files are written into ./hz
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(areaDict)
    # multiprocessing: one process per district
    processList = []
    for areaName, url in areaDict.items():
        t = multiprocessing.Process(target=gethouseInfo, args=(areaName, url))  # spawn one process per district
        # start it
        processList.append(t)
        t.start()

    # make sure every process has finished
    for i in processList:
        i.join()
    print(time.perf_counter() - start)
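
Starting one Process per district is simple but heavy when there are many districts. A fixed-size pool is the usual alternative; the sketch below is illustrative only and assumes the getArea and gethouseInfo definitions above (they must stay at module level so the pool can pickle the target function).

# Sketch: the same fan-out with a fixed-size process pool.
import multiprocessing

if __name__ == '__main__':
    areaDict = getArea("https://hz.lianjia.com/ershoufang/")
    with multiprocessing.Pool(processes=4) as pool:  # 4 worker processes
        pool.starmap(gethouseInfo, areaDict.items())  # each item is an (areaName, url) pair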

4 Threads combined with coroutines

import gevent
from gevent import monkey
gevent.monkey.patch_all()  # non-blocking I/O: patch blocking calls so requests yields to other greenlets
import json
import os

import lxml
from lxml import etree
import requests
import threading
import time

rlock = threading.RLock()  # reentrant lock, used to serialize file writes across threads
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}

def getArea(url):
    '''
    Fetch district names and their listing URLs.
    :param url: seed URL of the second-hand housing section
    :return: dict mapping district name -> district URL
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    areaList = mytree.xpath('//div[@data-role="ershoufang"]/div[1]/a')
    # map district name -> URL
    areaDict = {}
    for area in areaList:
        # district name
        areaName = area.xpath('./text()')[0]
        # url
        areaurl = "https://hz.lianjia.com" + area.xpath('./@href')[0]
        print(areaName, areaurl)
        areaDict[areaName] = areaurl
    return areaDict

def gethouseInfo(areaName, url):
    '''
    Scrape the listings on one district page and append them to a per-district file.
    :param areaName: district name
    :param url: district page URL
    :return:
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    sellList = mytree.xpath("//ul[@class='sellListContent']/li[@class=\"clear\"]")
    for house in sellList:
        # listing title
        title = house.xpath('.//div[@class="title"]/a/text()')[0]
        # url
        houseurl = house.xpath('.//div[@class="title"]/a/@href')[0]
        # house details
        houseInfo = house.xpath('.//div[@class="houseInfo"]/a/text()')[0] + \
                    house.xpath('.//div[@class="houseInfo"]/text()')[0]

        # location info
        positionInfo = house.xpath('.//div[@class="positionInfo"]/text()')[0] + \
                       house.xpath('.//div[@class="positionInfo"]/a/text()')[0]

        # total price ('万' = 10,000 RMB)
        # /html/body/div[4]/div[1]/ul/li[1]/div[1]/div[6]/div[1]/span
        totalPrice = house.xpath('.//div[@class="totalPrice"]/span/text()')[0] + '万'
        # price per square meter
        unitPrice = house.xpath('.//div[@class="unitPrice"]/span/text()')[0]
        # print(title, houseurl, houseInfo, positionInfo, totalPrice, unitPrice)

        with rlock:
            print(areaName)
            with open("./hz/" + areaName + '.txt', 'a+', encoding='utf-8', errors='ignore') as f:
                f.write(str((title, houseInfo, houseurl, positionInfo, totalPrice, unitPrice)) + '\n')
                f.flush()

def getPageNum(areaName, url):
    '''
    Read the district's total page count and spawn one greenlet per result page.
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    pageNum = mytree.xpath('//div[@class="page-box house-lst-page-box"]/@page-data')[0]
    pageNum = json.loads(pageNum)  # the page-data attribute is a JSON string; totalPage holds the page count
    pageNum = pageNum['totalPage']

    geventList = []
    for i in range(1, int(pageNum) + 1):
        newurl = url + "pg%d/" % i
        g = gevent.spawn(gethouseInfo, areaName, newurl)
        geventList.append(g)
    gevent.joinall(geventList)

if __name__ == '__main__':
    starUrl = "https://hz.lianjia.com/ershoufang/"
    areaDict = getArea(starUrl)
    os.makedirs("./hz", exist_ok=True)  # per-district files are written into ./hz
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(areaDict)
    # one thread per district; each thread fans out to one greenlet per page
    threadList = []
    for areaName, url in areaDict.items():
        t = threading.Thread(target=getPageNum, args=(areaName, url))
        # start the thread
        threadList.append(t)
        t.start()

    # make sure every thread has finished
    for i in threadList:
        i.join()

    print(time.perf_counter() - start)
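
getPageNum relies on the page-data attribute carried by the listing pager: it is a JSON string whose totalPage field gives the number of result pages, and page N of a district is reached by appending pgN/ to the district URL. A tiny illustration of that parsing step (the attribute value shown is an assumed example, not fetched data):

import json

# Assumed example of a pager's page-data attribute value:
page_data = '{"totalPage": 45, "curPage": 1}'
total = int(json.loads(page_data)["totalPage"])  # -> 45
# One URL per result page of a district (the district slug here is just an example):
page_urls = ["https://hz.lianjia.com/ershoufang/xihu/pg%d/" % i for i in range(1, total + 1)]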

5 Processes combined with coroutines


import gevent
from gevent import monkey
gevent.monkey.patch_all()  # non-blocking I/O: patch blocking calls so requests yields to other greenlets
import json
import os

import lxml
from lxml import etree
import requests
import multiprocessing
import time

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}

def getArea(url):
    '''
    Fetch district names and their listing URLs.
    :param url: seed URL of the second-hand housing section
    :return: dict mapping district name -> district URL
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)

    areaList = mytree.xpath('//div[@data-role="ershoufang"]/div[1]/a')
    # map district name -> URL
    areaDict = {}
    for area in areaList:
        # district name
        areaName = area.xpath('./text()')[0]
        # url
        areaurl = "https://hz.lianjia.com" + area.xpath('./@href')[0]
        print(areaName, areaurl)
        areaDict[areaName] = areaurl
    return areaDict

def gethouseInfo(areaName, url):
    '''
    Scrape the listings on one district page and append them to a per-district file.
    :param areaName: district name
    :param url: district page URL
    :return:
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)

    sellList = mytree.xpath("//ul[@class='sellListContent']/li[@class=\"clear\"]")
    for house in sellList:
        # listing title
        title = house.xpath('.//div[@class="title"]/a/text()')[0]
        # url
        houseurl = house.xpath('.//div[@class="title"]/a/@href')[0]
        # house details
        houseInfo = house.xpath('.//div[@class="houseInfo"]/a/text()')[0] + \
                    house.xpath('.//div[@class="houseInfo"]/text()')[0]
        # location info
        positionInfo = house.xpath('.//div[@class="positionInfo"]/text()')[0] + \
                       house.xpath('.//div[@class="positionInfo"]/a/text()')[0]
        # total price ('万' = 10,000 RMB)
        totalPrice = house.xpath('.//div[@class="totalPrice"]/span/text()')[0] + '万'
        # price per square meter
        unitPrice = house.xpath('.//div[@class="unitPrice"]/span/text()')[0]
        print(areaName)
        with open("./hz/" + areaName + '.txt', 'a+', encoding='utf-8', errors='ignore') as f:
            f.write(str((title, houseInfo, houseurl, positionInfo, totalPrice, unitPrice)) + '\n')
            f.flush()


def getPageNum(areaName, url):
    '''
    Read the district's total page count and spawn one greenlet per result page.
    '''
    response = requests.get(url, headers=headers).text
    mytree = lxml.etree.HTML(response)
    pageNum = mytree.xpath('//div[@class="page-box house-lst-page-box"]/@page-data')[0]
    pageNum = json.loads(pageNum)  # the page-data attribute is a JSON string; totalPage holds the page count
    pageNum = pageNum['totalPage']

    geventList = []
    for i in range(1, int(pageNum) + 1):
        newurl = url + "pg%d/" % i
        g = gevent.spawn(gethouseInfo, areaName, newurl)
        geventList.append(g)
    gevent.joinall(geventList)

if __name__ == '__main__':
    starUrl = "https://hz.lianjia.com/ershoufang/"
    areaDict = getArea(starUrl)
    os.makedirs("./hz", exist_ok=True)  # per-district files are written into ./hz
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(areaDict)
    # multiprocessing: one process per district; each process fans out to one greenlet per page
    processList = []
    for areaName, url in areaDict.items():
        # spawn a process
        p = multiprocessing.Process(target=getPageNum, args=(areaName, url))
        processList.append(p)
        p.start()

    # make sure every process has finished
    for i in processList:
        i.join()
    print(time.perf_counter() - start)
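
For reference, a compact driver for this process-plus-coroutine version, written as a sketch that assumes the getArea and getPageNum definitions above: it creates the output directory up front, uses a small process pool instead of one Process per district, and times the run with time.perf_counter() (time.clock() no longer exists in current Python).

# Sketch: process pool + per-page greenlets (getPageNum) for the whole section.
import multiprocessing
import os
import time

if __name__ == '__main__':
    os.makedirs("./hz", exist_ok=True)  # per-district output files go here
    start = time.perf_counter()
    areaDict = getArea("https://hz.lianjia.com/ershoufang/")
    with multiprocessing.Pool(processes=4) as pool:  # 4 worker processes
        pool.starmap(getPageNum, areaDict.items())  # each worker spawns one greenlet per page
    print(time.perf_counter() - start)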