
A Python crawler that scrapes every movie on Tencent Video: it reads the category list on v.qq.com, pages through each category, and writes each movie's title and URL into MongoDB.

# -*- coding: utf-8 -*-
import re
import urllib2
from bs4 import BeautifulSoup
import string, time
import pymongo

NUM    = 0     # global: number of movies scraped so far
m_type = u''   # global: current movie category
m_site = u'qq' # global: movie site tag
# Fetch the raw HTML for a given URL
def gethtml(url):
  req = urllib2.Request(url)
  response = urllib2.urlopen(req)
  html = response.read()
  return html
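# Note: there is no error handling here, so a timeout or a non-200 response
# will raise and abort the whole crawl.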
# Extract the movie categories from the category list page
def gettags(html):
  global m_type
  soup = BeautifulSoup(html)   # parse the page so the category block can be filtered out
  #print soup
  #<ul class="clearfix _group" gname="mi_type" gtype="1">
  tags_all = soup.find_all('ul', {'class' : 'clearfix _group', 'gname' : 'mi_type'})
  #print len(tags_all), tags_all
  #print str(tags_all[1]).replace('\n', '')
  #<a _hot="tag.sub" class="_gtag _hotkey" href="http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html" title="动作" tvalue="0">动作</a>
  re_tags = r'<a _hot=\"tag\.sub\" class=\"_gtag _hotkey\" href=\"(.+?)\" title=\"(.+?)\" tvalue=\"(.+?)\">.+?</a>'
  p = re.compile(re_tags, re.DOTALL)
  tags = p.findall(str(tags_all[0]))
  tags_url = {}
  if tags:
    for tag in tags:
      tag_url = tag[0].decode('utf-8')
      m_type = tag[1].decode('utf-8')
      tags_url[m_type] = tag_url   # map category name -> category list URL
  else:
    print "Not Find"
  return tags_url
# Get the number of pages in each category
def get_pages(tag_url):
  tag_html = gethtml(tag_url)
  #div class="paginator"
  soup = BeautifulSoup(tag_html)   # parse out the pager markup
  #print soup
  #<div class="mod_pagenav" id="pager">
  div_page = soup.find_all('div', {'class' : 'mod_pagenav', 'id' : 'pager'})
  #print div_page #len(div_page), div_page[0]
  #<a class="c_txt6" href="http://v.qq.com/list/1_2_-1_-1_1_0_24_20_0_-1_0.html" title="25"><span>25</span></a>
  re_pages = r'<a class=.+?><span>(.+?)</span></a>'
  p = re.compile(re_pages, re.DOTALL)
  pages = p.findall(str(div_page[0]))
  #print pages
  if len(pages) > 1:
    return pages[-2]   # the second-to-last <span> holds the last page number
  else:
    return 1

# Pull each movie block out of a category list page
def getmovielist(html):
  soup = BeautifulSoup(html)
  #<ul class="mod_list_pic_130">
  divs = soup.find_all('ul', {'class' : 'mod_list_pic_130'})
  #print divs
  for div_html in divs:
    div_html = str(div_html).replace('\n', '')
    #print div_html
    getmovie(div_html)
# Parse the movies out of one list block and store them in MongoDB
def getmovie(html):
  global NUM
  global m_type
  global m_site
  re_movie = r'<li><a class=\"mod_poster_130\" href=\"(.+?)\" target=\"_blank\" title=\"(.+?)\"><img.+?</li>'
  p = re.compile(re_movie, re.DOTALL)
  movies = p.findall(html)
  if movies:
    conn = pymongo.Connection('localhost', 27017)
    movie_db = conn.dianying
    playlinks = movie_db.playlinks
    for movie in movies:
      #print movie
      print "%s : %d" % ("=" * 70, NUM)
      values = dict(
        movie_title = movie[1],
        movie_url   = movie[0],
        movie_site  = m_site,
        movie_type  = m_type
        )
      print values
      playlinks.insert(values)
      print "_" * 70
      NUM += 1
      print "%s : %d" % ("=" * 70, NUM)
  #else:
  #  print "Not Find"
# Fetch extra info (e.g. documentary/trailer links) from a movie's detail page
def getmovieinfo(url):
  html = gethtml(url)
  soup = BeautifulSoup(html)
  #pack pack_album album_cover
  divs = soup.find_all('div', {'class' : 'pack pack_album album_cover'})
  #print divs[0]
  #<a href="http://www.tudou.com/albumplay/9NyofXc_lHI/32JqhiKJykI.html" target="new" title="《血滴子》独家纪录片" wl="1"> </a>
  re_info = r'<a href=\"(.+?)\" target=\"new\" title=\"(.+?)\" wl=\".+?\"> </a>'
  p_info = re.compile(re_info, re.DOTALL)
  m_info = p_info.findall(str(divs[0]))
  if m_info:
    return m_info
  else:
    print "Not find movie info"
  return m_info
# Insert a full movie-info record into a separate collection
def insertdb(movieinfo):
  global conn
  movie_db = conn.dianying_at
  movies = movie_db.movies
  movies.insert(movieinfo)
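# Note: getmovieinfo() and insertdb() are never called from __main__ below;
# they look like hooks for a second pass over each movie's detail page.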
if __name__ == "__main__":
  global conn
  conn = pymongo.Connection('localhost', 27017)   # shared connection used by insertdb()
  tags_url = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"
  #print tags_url
  tags_html = gethtml(tags_url)
  #print tags_html
  tag_urls = gettags(tags_html)
  #print tag_urls
  for url in tag_urls.items():
    print str(url[1]).encode('utf-8') #,url[0]
    maxpage = int(get_pages(str(url[1]).encode('utf-8')))
    print maxpage
    for x in range(0, maxpage):
      #http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html
      m_url = str(url[1]).replace('0_20_0_-1_0.html', '')
      movie_url = "%s%d_20_0_-1_0.html" % (m_url, x)
      print movie_url
      movie_html = gethtml(movie_url.encode('utf-8'))
      #print movie_html
      getmovielist(movie_html)
      time.sleep(0.1)   # brief pause between requests
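The listing above targets Python 2 (urllib2, print statements) and a page layout that v.qq.com has long since replaced, so it will not run unchanged today. Below is a minimal sketch of how the fetch-and-parse core could look on Python 3 with a current pymongo; the CSS classes, list URL, and database/collection names are assumptions carried over from the original code, not a verified scraper.

# -*- coding: utf-8 -*-
# Python 3 sketch of the same fetch/parse/store flow (untested against today's site).
import time
import urllib.request

from bs4 import BeautifulSoup
import pymongo

def gethtml(url):
    # urllib2.urlopen() became urllib.request.urlopen() in Python 3
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8', errors='replace')

def gettags(html):
    # same <ul class="clearfix _group" gname="mi_type"> filter as the original
    soup = BeautifulSoup(html, 'html.parser')
    tags_url = {}
    for ul in soup.find_all('ul', {'class': 'clearfix _group', 'gname': 'mi_type'}):
        for a in ul.find_all('a', href=True, title=True):
            tags_url[a['title']] = a['href']   # category name -> list URL
    return tags_url

if __name__ == '__main__':
    client = pymongo.MongoClient('localhost', 27017)   # pymongo.Connection() was removed in pymongo 3
    playlinks = client.dianying.playlinks              # same names as above (assumed)
    html = gethtml('http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html')
    for m_type, tag_url in gettags(html).items():
        print(m_type, tag_url)
        time.sleep(0.1)                                # stay polite, as in the original

Reading the category links through bs4 attribute access, instead of regex-matching the serialized HTML as the original does, is less brittle and avoids all the quote escaping.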

Summary

That is the Python crawler for scraping all of Tencent Video's movies described above. I hope it helps; if you have any questions, leave me a comment and I will reply as soon as I can!
