This post shares working Python code for scraping Tmall product details and transaction records. The specifics are given below for reference.
1. Setting up the Python environment
This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql.
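Before walking through the full script, here is a minimal sketch of how these modules cooperate, using the same calls the script below relies on. The item URL is only a placeholder, not a real product link:

import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup

# placeholder URL -- substitute a real Tmall item link
url = 'https://detail.tmall.com/item.htm?id=XXXXXXXX'

browser = spynner.Browser()
browser.load(url, load_timeout=60)   # render the page, JavaScript included
browser.wait(10)                     # give asynchronously loaded content time to arrive

# scrapy's Selector runs XPath queries against the rendered HTML
title = Selector(text=browser.html).xpath(
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()').extract()

# BeautifulSoup handles looser, tag-based parsing of HTML fragments
soup = BeautifulSoup(browser.html, 'lxml')
print title, len(soup.find_all('li'))

spynner drives a real (QtWebKit-based) browser and executes the page's JavaScript before handing back browser.html, which is why the XPath extraction sees the same DOM a normal browser would.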
2. The Tmall data to collect
For each product, the script collects the page-level details (title, promotional price, list price, postage, stock, favorites, rating count and monthly sales), the product attribute list, and every row of the transaction-record table (buyer, style, quantity, deal date and time).
3. Data scraping workflow
The flow is: read the product URLs to crawl from SQL Server, load each item page with spynner, extract the detail fields from the rendered HTML via XPath, then switch to the transaction-record tab and page through it, writing every record back to the database.
4. Source code
#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql

#------------------------ connect to the database ------------------------#
server = "localhost"
user = "sa"
password = "123456"
conn = pymssql.connect(server, user, password, "TmallData")
if conn:
    print "DataBase connecting successfully!"
else:
    print "DataBase connecting error!"
cursor = conn.cursor()

#---------------------- web page helper functions ----------------------#
def py_click_element(browser, pos):
    # click an element on the page
    # pos example: 'a[href="#description"]'
    browser.click(pos)
    browser.wait(random.randint(3, 10))
    return browser

def py_click_xpath(browser, xpath):
    # resolve the link's href via xpath, then click the matching anchor
    xpath = xpath + '/@href'
    inner_href = Selector(text=browser.html).xpath(xpath).extract()
    pos = 'a[href="' + str(inner_href[0]) + '"]'
    browser = py_click_element(browser, pos)
    return browser

def py_webpage_load(browser, url):
    browser.load(url, load_timeout=60)
    browser.wait(10)
    return browser

def py_check_element(browser, xpath):
    # return True if the xpath matches an element, otherwise False
    if Selector(text=browser.html).xpath(xpath).extract() != []:
        return True
    else:
        return False

def py_extract_xpath(browser, xpath):
    if py_check_element(browser, xpath):
        return Selector(text=browser.html).xpath(xpath).extract()[0]
    else:
        return "none"

def py_extract_xpaths(browser, xpaths):
    # extract several xpaths in one batch
    length = len(xpaths)
    results = [0] * length
    for i in range(length):
        results[i] = py_extract_xpath(browser, xpaths[i])
    return results

#------------------------ database helper functions ------------------------#

#------------------------ data extraction functions ------------------------#
def py_getDealReord(doc):
    # parse the deal-record table into [buyer, style, quantity, date, time] rows
    soup = BeautifulSoup(doc, 'lxml')
    tr = soup.find_all('tr')
    total_dealRecord = [([0] * 5) for i in range(len(tr))]
    i = -1
    for this_tr in tr:
        i = i + 1
        td_user = this_tr.find_all('td', attrs={'class': "cell-align-l buyer"})
        for this_td in td_user:
            total_dealRecord[i][0] = this_td.getText().strip(' ')    # buyer name
        td_style = this_tr.find_all('td', attrs={'class': "cell-align-l style"})
        for this_td in td_style:
            total_dealRecord[i][1] = this_td.getText(',').strip(' ')  # style / SKU
        td_quantity = this_tr.find_all('td', attrs={'class': "quantity"})
        for this_td in td_quantity:
            total_dealRecord[i][2] = this_td.getText().strip(' ')    # quantity
        td_dealtime = this_tr.find_all('td', attrs={'class': "dealtime"})
        for this_td in td_dealtime:
            total_dealRecord[i][3] = this_td.find('p', attrs={'class': "date"}).getText()
            total_dealRecord[i][4] = this_td.find('p', attrs={'class': "time"}).getText()
    return total_dealRecord

#-------------------- fetch all product links to crawl --------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")

# open the error log in append mode so failed URLs can be written to it
file = open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt", 'a')

InProductInfo = cursor.fetchall()
browser = spynner.Browser()
for temp_InProductInfo in InProductInfo:

    url = 'https:' + temp_InProductInfo[2]
    BrandName = temp_InProductInfo[0]
    ProductType = temp_InProductInfo[1]
    print BrandName, '\t', ProductType, '\t', url
    #url= 'https://detail.tmall.com/item.htm

    # load the product page; log the URL and skip it if loading fails
    try:
        browser = py_webpage_load(browser, url)
    except:
        print "Loading webpage failed."
        file.write(url)
        file.write('\n')
        continue

    # price / title / postage / stock / favorites / ratings / sales xpaths on the item page
    xpaths = ['//*[@id="J_PromoPrice"]/dd/div/span/text()',
              '//*[@id="J_StrPriceModBox"]/dd/span/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',
              '//*[@id="J_PostageToggleCont"]/p/span/text()',
              '//*[@id="J_EmStock"]/text()',
              '//*[@id="J_CollectCount"]/text()',
              '//*[@id="J_ItemRates"]/div/span[2]/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
    out_ProductInfo = py_extract_xpaths(browser, xpaths)

    # switch to the "description" tab and collect the product attribute list
    browser = py_click_element(browser, 'a[href="#description"]')
    ProductProperty = py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
    soup = BeautifulSoup(ProductProperty, 'lxml')
    li = soup.find_all('li')
    prop = ''
    for this_li in li:
        prop = prop + this_li.getText() + '\\'
    prop = prop[0:len(prop) - 1]
    out_ProductProperty = prop
    print out_ProductProperty

    cursor.execute("""
    Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """, (BrandName, ProductType, url,
          out_ProductInfo[2], out_ProductInfo[1],
          out_ProductInfo[0], out_ProductInfo[7],
          out_ProductInfo[1], out_ProductInfo[3],
          out_ProductInfo[4], out_ProductInfo[5],
          out_ProductProperty))
    conn.commit()

    # switch to the "deal record" tab and scrape the first page of records
    Deal_PageCount = 0
    browser = py_click_element(browser, 'a[href="#J_DealRecord"]')
    #browser.browse(True)
    DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord = py_getDealReord(DealRecord)
    for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4]) == '0':
            continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """, (url, temp_DealRecord[0], temp_DealRecord[1],
              temp_DealRecord[2], temp_DealRecord[3],
              temp_DealRecord[4]))
        conn.commit()
    Deal_PageCount = Deal_PageCount + 1
    print "Page ", Deal_PageCount

    # walk the first few deal-record pages via the pager links
    for i in range(6):
        if (i == 0) or (i == 2):
            continue
        xpath = '//*[@id="J_showBuyerList"]/div/div/a[' + str(i) + ']'
        if py_check_element(browser, xpath):
            browser = py_click_xpath(browser, xpath)
            DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
            out_DealRecord = py_getDealReord(DealRecord)
            for temp_DealRecord in out_DealRecord:
                if str(temp_DealRecord[4]) == '0':
                    continue
                cursor.execute("""
                Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
                """, (url, temp_DealRecord[0], temp_DealRecord[1],
                      temp_DealRecord[2], temp_DealRecord[3],
                      temp_DealRecord[4]))
                conn.commit()
            Deal_PageCount = Deal_PageCount + 1
            print "Page ", Deal_PageCount

    # keep clicking the "next page" link (the 6th pager anchor) until it disappears
    while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
        browser = py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
        DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
        out_DealRecord = py_getDealReord(DealRecord)
        for temp_DealRecord in out_DealRecord:
            if str(temp_DealRecord[4]) == '0':
                continue
            cursor.execute("""
            Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
            """, (url, temp_DealRecord[0], temp_DealRecord[1],
                  temp_DealRecord[2], temp_DealRecord[3],
                  temp_DealRecord[4]))
            conn.commit()
        Deal_PageCount = Deal_PageCount + 1
        print "Page ", Deal_PageCount
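The crawler expects the TmallData database to already contain the ProductURLs table it reads from and the py_ProductInfo / DealRecord tables it writes to. The post does not show their schemas, so the sketch below only mirrors the column counts and value order of the INSERT statements above; every column name and type here is an assumption:

import pymssql

conn = pymssql.connect("localhost", "sa", "123456", "TmallData")
cursor = conn.cursor()

# assumed schema: URL is stored scheme-less ('//detail.tmall.com/...'),
# since the crawler prepends 'https:' to it
cursor.execute("""
CREATE TABLE ProductURLs(
    BrandName NVARCHAR(100), ProductType NVARCHAR(100), URL NVARCHAR(500))
""")

# assumed schema: 12 columns, in the same order as the product INSERT above
cursor.execute("""
CREATE TABLE py_ProductInfo(
    BrandName NVARCHAR(100), ProductType NVARCHAR(100), URL NVARCHAR(500),
    Title NVARCHAR(500), ListPrice NVARCHAR(100), PromoPrice NVARCHAR(100),
    MonthlySales NVARCHAR(100), Price2 NVARCHAR(100), Postage NVARCHAR(100),
    Stock NVARCHAR(100), CollectCount NVARCHAR(100), ProductProperty NVARCHAR(MAX))
""")

# assumed schema: 6 columns, in the same order as the deal-record INSERT above
cursor.execute("""
CREATE TABLE DealRecord(
    URL NVARCHAR(500), Buyer NVARCHAR(200), Style NVARCHAR(500),
    Quantity NVARCHAR(50), DealDate NVARCHAR(50), DealTime NVARCHAR(50))
""")
conn.commit()

If your actual tables differ, list the column names explicitly in the INSERT statements instead of relying on positional values.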
That is the whole of this article. I hope it helps with your learning, and thank you for your support.