123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139 |
#!/usr/bin/env python
# coding=utf-8
# Scraper for Sohu TV (so.tv.sohu.com): fills scrapy.wangju_url.url_sohu with
# search-result URLs, then scrapes each page into scrapy.wangju_all_url,
# driving a real Firefox browser through Selenium.
import random
import sys
import time
from selenium import webdriver
from urllib import quote
from fty_util.common import Mysql

# Python 2 hack: force UTF-8 as the default codec so implicit str/unicode
# conversions of the Chinese show titles do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')
def scrapy_url():
    """Search Sohu TV for each pending show and store the result URLs.

    Reads rows from scrapy.wangju_url whose url_sohu is still NULL, queries
    http://so.tv.sohu.com for the show name, collects the detail-page hrefs
    and writes them back comma-separated. When nothing is found an empty
    string is written instead, so the row is marked processed and not
    re-queried on the next run.
    """
    conn = Mysql.createOfflineConn()
    sql = """
        select id, tv_name from scrapy.wangju_url where url_sohu is null order by id asc
    """
    rows = Mysql.getAll(sql, conn=conn)
    driver = webdriver.Firefox()
    driver.set_page_load_timeout(10)
    try:
        for row in rows:
            _id = row['id']
            tv_name = row['tv_name']
            url = 'http://so.tv.sohu.com/mts?box=1&wd=' + quote(str(tv_name))
            try:
                driver.get(url)
            except Exception:
                # Page load exceeded the 10s timeout: stop loading and work
                # with whatever part of the DOM rendered so far.
                driver.execute_script('window.stop()')
            href_list = []
            for div in driver.find_elements_by_xpath('//div[@class="wrap cfix"]/div'):
                try:
                    href = div.find_element_by_xpath('./div/div[2]/div[1]/h2/a').get_attribute('href')
                    href_list.append(href)
                except Exception:
                    # Not every result block carries this anchor; skip it.
                    pass
            # One UPDATE covers both the found and the not-found case:
            # ','.join(href_list) is '' when nothing matched, which marks
            # the row as processed.
            # NOTE(review): string-interpolated SQL -- safe only while hrefs
            # never contain quotes; prefer a parameterized query if the
            # Mysql helper supports one.
            sql = """
                update scrapy.wangju_url set url_sohu = '%s' where id = %s
            """ % (','.join(href_list), _id)
            Mysql.execute(sql, conn=conn)
    finally:
        # Always close the browser, even if scraping fails mid-loop;
        # the original leaked the Firefox process on any exception.
        driver.quit()
def scrapy_data():
    """Visit every stored Sohu URL and scrape its title/description text.

    For each row with a non-empty url_sohu (the comma-separated list written
    by scrapy_url), loads each URL, extracts the info-panel text from one of
    the two known page layouts, and inserts a row into
    scrapy.wangju_all_url with source='sohu'. URLs matching neither layout
    are skipped silently.
    """
    conn = Mysql.createOfflineConn()
    sql = """
        select id, tv_name, url_sohu from scrapy.wangju_url where url_sohu is not null and url_sohu != '' order by id asc
    """
    rows = Mysql.getAll(sql, conn=conn)
    driver = webdriver.Firefox()
    driver.set_page_load_timeout(10)
    try:
        for row in rows:
            _id = row['id']
            tv_name = row['tv_name']
            for url in row['url_sohu'].split(','):
                try:
                    driver.get(url)
                except Exception:
                    # Load timed out; stop and parse the partial DOM.
                    driver.execute_script('window.stop()')

                # Two page layouts exist: the plain info panel (no title)
                # and the "drama" layout. Try them in order; skip the URL
                # when neither matches.
                try:
                    title = ''
                    content = driver.find_element_by_xpath('//div[@class="infoR r"]').get_attribute('textContent')
                except Exception:
                    try:
                        title = driver.find_element_by_xpath('//div[@class="drama-name area rel cfix "]').get_attribute('textContent')
                        content = driver.find_element_by_xpath('//div[@class="drama-infoR"]').get_attribute('textContent')
                    except Exception:
                        continue

                sql = """
                    insert into scrapy.wangju_all_url (id, tv_name, url, title, content, source) values (%s, %s, %s, %s, %s, %s)
                """
                value = (_id, tv_name, url, title, content, 'sohu')
                Mysql.insertOne(sql, value=value, conn=conn)
    finally:
        # Always release the browser; the original leaked it on exceptions.
        driver.quit()
def parse_wangju_all_url_data():
    """Extract the score and play-count fragments from scraped Sohu pages.

    Runs two regexes over the raw `content` text saved by scrapy_data() and
    writes the matched fragments (label included, e.g. u'评分:8.5') into the
    score/playtimes columns of scrapy.wangju_all_url for source='sohu'.
    Rows with no match get '0'.
    """
    import re

    # Compile once, not per row as the original did (it also re-imported
    # `re` inside the loop).
    # NOTE(review): `(.)` matches ANY character and was probably meant to be
    # `\.` (a literal decimal point); kept as-is to preserve the existing
    # matching behavior. Likewise `[(亿)(万)]` is a character class, not an
    # alternation -- confirm the intent before tightening either pattern.
    score_re = re.compile(u'评分:\\d+(.)\\d+')
    play_re = re.compile(u'总播放:\\d+(.)\\d+[(亿)(万)]')

    conn = Mysql.createOfflineConn()
    sql = """
        select id, tv_name, url, content from scrapy.wangju_all_url where source = 'sohu' order by id asc
    """
    rows = Mysql.getAll(sql, conn=conn)
    for row in rows:
        url = row['url']
        content = row['content']

        m = score_re.search(content)
        score = m.group(0) if m is not None else '0'

        m = play_re.search(content)
        play = m.group(0) if m is not None else '0'

        # NOTE(review): string-built SQL; tolerable only because the matched
        # fragments contain no quotes -- prefer a parameterized execute.
        sql = """
            update scrapy.wangju_all_url set score = '%s', playtimes = '%s' where url = '%s' and source = 'sohu'
        """ % (score, play, url)
        Mysql.execute(sql, conn=conn)
if __name__ == '__main__':
    # Pipeline stages are run one at a time by hand; the earlier stages are
    # left commented out once their data is in the database.
    # scrapy_data()
    # scrapy_url()
    parse_wangju_all_url_data()
|