Let's first take a look at the example code:
from time import sleep
import faker
import requests
from lxml import etree
fake = faker.Faker()
base_url = "http://angelimg.spbeen.com"
# Return the absolute URL of the "next page" link, or False when there is no next page
def get_next_link(url):
  content = downloadHtml(url)
  html = etree.HTML(content)
  next_url = html.xpath("//a[@class='ch next']/@href")
  if next_url:
    return base_url + next_url[0]
  else:
    return False
# Fetch a page with a random User-Agent and the site's Referer, return the HTML text
def downloadHtml(url):
  user_agent = fake.user_agent()
  headers = {'User-Agent': user_agent,"Referer":"http://angelimg.spbeen.com/"}
  response = requests.get(url, headers=headers)
  return response.text
# Extract the image URL and the article title from the page content
def getImgUrl(content):
  html = etree.HTML(content)
  img_url = html.xpath('//*[@id="content"]/a/img/@src')
  title = html.xpath(".//div[@class='article']/h2/text()")
  return img_url[0], title[0]
# Download the image and save it as txt/<title>.jpg (the txt/ directory must already exist)
def saveImg(title, img_url):
  if img_url is not None and title is not None:
    with open("txt/" + str(title) + ".jpg", 'wb') as f:
      user_agent = fake.user_agent()
      headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
      content = requests.get(img_url, headers=headers)
      # request_view(content)
      f.write(content.content)
# Debug helper: dump a response to tmp.html and open it in the browser
def request_view(response):
  import webbrowser
  request_url = response.url
  base_url = '<head><base href="%s" rel="external nofollow" >' %(request_url)
  base_url = base_url.encode()
  content = response.content.replace(b"<head>",base_url)
  tem_html = open('tmp.html','wb')
  tem_html.write(content)
  tem_html.close()
  webbrowser.open_new_tab('tmp.html')
# Download one page, extract the image URL and title, then save the image
def crawl_img(url):
  content = downloadHtml(url)
  res = getImgUrl(content)
  title = res[1]
  img_url = res[0]
  saveImg(title,img_url)
if __name__ == "__main__":
  url = "http://angelimg.spbeen.com/ang/4968/1"
  while url:
    print(url)
    crawl_img(url)
    url = get_next_link(url)
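The main loop above keeps calling get_next_link() until the "ch next" anchor disappears from the page, at which point it returns False and the loop ends. Note that sleep is imported but never used in the original script. The sketch below is my own variant, not part of the original, showing how the same follow-the-next-link pattern can be wrapped in a generator that pauses between requests so the crawl stays polite:
from time import sleep

import faker
import requests
from lxml import etree

fake = faker.Faker()
base = "http://angelimg.spbeen.com"

def fetch_html(url):
  # Same header trick as downloadHtml(): random User-Agent plus the site's Referer
  headers = {'User-Agent': fake.user_agent(), "Referer": base + "/"}
  return requests.get(url, headers=headers).text

def iter_pages(start_url, delay=1):
  # Yield (url, html) for each page, following the "ch next" link until it disappears
  url = start_url
  while url:
    content = fetch_html(url)
    yield url, content
    sleep(delay)  # pause between requests so the crawl stays polite
    nxt = etree.HTML(content).xpath("//a[@class='ch next']/@href")
    url = base + nxt[0] if nxt else None
With this helper, the extraction code stays unchanged and the main block becomes a simple for loop, e.g. for url, content in iter_pages("http://angelimg.spbeen.com/ang/4968/1"): saveImg(*reversed(getImgUrl(content))).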
How a Python crawler automatically follows the next page in a loop to load text
from bs4 import BeautifulSoup
import requests
import time
from lxml import etree
# This demo shows how to use BeautifulSoup to crawl some text
def start():
  # Send the HTTP request
  html=requests.get('http://www.baidu.com')
  # Use the encoding detected from the response body
  html.encoding = html.apparent_encoding
  # Build the BeautifulSoup parse tree
  soup = BeautifulSoup(html.text, 'html.parser')
  print(type(soup))
  print('Printing the parsed elements')
  print(soup.prettify())
  # Store the title (attribute access like this gets no IDE hints, shown directly)
  title=soup.head.title.string
  print(title)
  # Write the title to a text file
  with open(r'C:/Users/a/Desktop/a.txt', 'w', encoding='utf-8') as f:  # utf-8 so the Chinese title writes cleanly
    f.write(title)
  print(time.localtime())
 
url_2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml'
def get_html_from_bs4(url):
 
  # response = requests.get(url,headers=data,proxies=ip).content.decode('utf-8')
  response = requests.get(url).content.decode('utf-8')
  soup = BeautifulSoup(response, 'html.parser')
  # Pick the next-page link from the pager (the 9th <a> under #displaypagenum)
  next_page = soup.select('#displaypagenum a:nth-of-type(9)')[0].get('href')
  print(next_page)
  # Build the absolute URL of the next page (the link is not followed here)
  next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page
 
 
def get_html_from_etree(url):
 
  response = requests.get(url).content.decode('utf-8')
  html= etree.HTML(response)
 
  # The 8th a.PageNum element holds the next-page href
  next_page = html.xpath('.//a[@class="PageNum"][8]/@href')[0]
  print(next_page)
  # next2='http://news.gdzjdaily.com.cn/zjxw/politics/'+next_page
 
 
if __name__ == '__main__':
  start()
  get_html_from_etree(url_2)
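Both get_html_from_bs4() and get_html_from_etree() only print the next-page URL once; they never follow it. Below is a minimal sketch of my own (not from the original post) showing how that URL could drive an actual loop, assuming the pager markup (the a.PageNum links and the /zjxw/politics/ prefix) stays the same across pages:
# Sketch only: follow the next-page link until it disappears or max_pages is reached
import requests
from lxml import etree

def crawl_news_pages(start_url, max_pages=5):
  url = start_url
  for _ in range(max_pages):
    print(url)
    html = etree.HTML(requests.get(url).content.decode('utf-8'))
    next_page = html.xpath('.//a[@class="PageNum"][8]/@href')
    if not next_page:
      break  # no next-page link left, stop the loop
    url = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page[0]

# Example: crawl_news_pages(url_2)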