
This article walks through testing the proxy pool source code from Python3WebSpider. The example code is covered in detail and should be a useful reference for study or work.

Using metaclass attributes

Code

This section is mainly about how the metaclass is used.

The metaclass collects certain attributes of the crawler class it creates: here, the crawl functions. Every method whose name starts with the same prefix (crawl_) is gathered into a list attribute, so the functions can be called one after another. The point is that supporting a new site only requires adding another crawl function; the rest of the class needs no changes.

Partial code:

class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    # Collect the name of every attribute that contains 'crawl_'
    # (all such names in this project start with that prefix, so
    # k.startswith('crawl_') would be an equivalent, stricter test)
    for k, v in attrs.items():
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    proxies = []
    # Call the crawl function by name; getattr(self, callback)()
    # would be a safer equivalent to this eval
    for proxy in eval("self.{}()".format(callback)):
      print('Got proxy', proxy)
      proxies.append(proxy)
    return proxies
    
  def crawl_daili66(self, page_count=4):
    """
    Crawl proxies from daili66
    :param page_count: number of pages to fetch
    :return: proxies, one 'ip:port' string at a time
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)  # get_page: project helper that fetches a URL and returns the HTML
      if html:
        doc = pq(html)
        # Skip the header row, then read IP and port from the first two cells
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])
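
Because the metaclass records the method names in __CrawlFunc__, callers can walk that list and invoke each crawl function without hard-coding any names. Below is a minimal, self-contained sketch of the pattern; DemoMetaclass, DemoCrawler, the stubbed crawl_site_* methods and their proxy strings are made up for illustration, and getattr is used in place of the eval call above:

import types


class DemoMetaclass(type):
  def __new__(cls, name, bases, attrs):
    # Gather every method whose name starts with the crawl_ prefix
    attrs['__CrawlFunc__'] = [k for k in attrs if k.startswith('crawl_')]
    attrs['__CrawlFuncCount__'] = len(attrs['__CrawlFunc__'])
    return type.__new__(cls, name, bases, attrs)


class DemoCrawler(object, metaclass=DemoMetaclass):
  def crawl_site_a(self):
    yield '1.2.3.4:8080'  # stub data instead of a real request

  def crawl_site_b(self):
    yield '5.6.7.8:3128'


crawler = DemoCrawler()
for name in crawler.__CrawlFunc__:
  for proxy in getattr(crawler, name)():  # look the method up by name and call it
    print(name, '->', proxy)

Adding a new site is then just a matter of defining another crawl_xxx method; the collection and dispatch logic never changes.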

Test script

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time  : 12/19/19 4:10 PM
# @Author : yon
# @Email  : @qq.com
# @File  : test


import json
import re
from pyquery import PyQuery as pq
from proxypool.utils import get_page  # assumed import path: the project helper that fetches a URL and returns HTML


class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    for k, v in attrs.items():
      # Debug output: dump every attribute defined in the class body
      print("printing k")
      print(k)
      print("printing v")
      print(v)
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    proxies = []
    for proxy in eval("self.{}()".format(callback)):
      print('Got proxy', proxy)
      proxies.append(proxy)
    return proxies

  def crawl_daili66(self, page_count=4):
    """
    Crawl proxies from daili66
    :param page_count: number of pages to fetch
    :return: proxies, one 'ip:port' string at a time
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)
      if html:
        doc = pq(html)
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])

  def crawl_ip3366(self):
    # URL query string and regex are reconstructed; the original listing was garbled here
    for page in range(1, 4):
      start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
      html = get_page(start_url)
      ip_address = re.compile(r'<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
      re_ip_address = ip_address.findall(html)
      for address, port in re_ip_address:
        result = address + ':' + port
        yield result.replace(' ', '')


class Getter():
  def __init__(self):
    self.crawler = Crawler()

  def run(self):
    print('Getter starts running')
    for callback_label in range(self.crawler.__CrawlFuncCount__):
      print(callback_label)
      callback = self.crawler.__CrawlFunc__[callback_label]
      print(callback)
      # # Fetch the proxies and store them (disabled for this test)
      # proxies = self.crawler.get_proxies(callback)
      # sys.stdout.flush()
      # for proxy in proxies:
      #   self.redis.add(proxy)


if __name__ == '__main__':
  get = Getter()
  get.run()
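
The commented-out lines in run() point at the full project, where every fetched proxy is stored in Redis. The following is a rough sketch of that wiring, reusing the Crawler class above and assuming the proxy pool's RedisClient; the module path proxypool.db and the add() method are taken from the Python3WebSpider project and may differ in your copy:

import sys
from proxypool.db import RedisClient  # assumed: Redis wrapper from the proxy pool project


class Getter():
  def __init__(self):
    self.redis = RedisClient()  # storage backend for fetched proxies
    self.crawler = Crawler()

  def run(self):
    print('Getter starts running')
    for callback_label in range(self.crawler.__CrawlFuncCount__):
      callback = self.crawler.__CrawlFunc__[callback_label]
      proxies = self.crawler.get_proxies(callback)  # actually run the crawl function
      sys.stdout.flush()
      for proxy in proxies:
        self.redis.add(proxy)  # persist each proxy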

Test results

(The output below comes from a run of the test script that also defined crawl_kuaidaili, crawl_xicidaili, crawl_iphai and crawl_data5u; those functions are omitted from the listing above.)

/home/baixiaoxu/PycharmProjects/pytthon-tt/venv/bin/python /home/baixiaoxu/PycharmProjects/pytthon-tt/proxypool/test.py
printing k
__module__
printing v
__main__
printing k
__qualname__
printing v
Crawler
printing k
get_proxies
printing v
<function Crawler.get_proxies at 0x7f905ca5a598>
printing k
crawl_daili66
printing v
<function Crawler.crawl_daili66 at 0x7f905ca5a620>
printing k
crawl_ip3366
printing v
<function Crawler.crawl_ip3366 at 0x7f905ca5a840>
printing k
crawl_kuaidaili
printing v
<function Crawler.crawl_kuaidaili at 0x7f905ca5a730>
printing k
crawl_xicidaili
printing v
<function Crawler.crawl_xicidaili at 0x7f905ca5a7b8>
printing k
crawl_iphai
printing v
<function Crawler.crawl_iphai at 0x7f905ca5a6a8>
printing k
crawl_data5u
printing v
<function Crawler.crawl_data5u at 0x7f905ca5a8c8>
printing k
__CrawlFunc__
printing v
['crawl_daili66', 'crawl_ip3366', 'crawl_kuaidaili', 'crawl_xicidaili', 'crawl_iphai', 'crawl_data5u']
Getter starts running
0
crawl_daili66
1
crawl_ip3366
2
crawl_kuaidaili
3
crawl_xicidaili
4
crawl_iphai
5
crawl_data5u

Process finished with exit code 0

That is all for this article. I hope it helps with your study or work, and thanks for your continued support.
