Week 2 Homework

Author: 采矿 | Published 2016-05-29 17:17
[Screenshots: detail-page info, detail-page link records, detail-page database records]
# Grab the channel links from the category page (the Gchannel_extract module imported by the main program below)
from bs4 import BeautifulSoup
import requests
start_url = 'http://bj.ganji.com/wu/'


def get_channel_links(url):
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # each <dt><a> under div.content is a second-hand goods channel
    links = soup.select('div.content dt a')
    for link in links:
        channel_link = 'http://bj.ganji.com' + link.get('href')
        print(channel_link)

get_channel_links(start_url)
All_channel_links = '''
    http://bj.ganji.com/jiaju/
    http://bj.ganji.com/rirongbaihuo/
    http://bj.ganji.com/shouji/
    http://bj.ganji.com/bangong/
    http://bj.ganji.com/nongyongpin/
    http://bj.ganji.com/jiadian/
    http://bj.ganji.com/ershoubijibendiannao/
    http://bj.ganji.com/ruanjiantushu/
    http://bj.ganji.com/yingyouyunfu/
    http://bj.ganji.com/diannao/
    http://bj.ganji.com/xianzhilipin/
    http://bj.ganji.com/fushixiaobaxuemao/
    http://bj.ganji.com/meironghuazhuang/
    http://bj.ganji.com/shuma/
    http://bj.ganji.com/laonianyongpin/
    http://bj.ganji.com/xuniwupin/
    http://bj.ganji.com/qitawupin/
    http://bj.ganji.com/ershoufree/
    http://bj.ganji.com/wupinjiaohuan/
'''
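Since get_channel_links only prints each URL, the channel list above had to be pasted in by hand. A minimal sketch of a variant that returns the list instead (hypothetical name collect_channel_links, same selector as above):

from bs4 import BeautifulSoup
import requests


def collect_channel_links(url):
    # same selector as get_channel_links, but return the URLs as a list
    soup = BeautifulSoup(requests.get(url).text, 'lxml')
    return ['http://bj.ganji.com' + a.get('href')
            for a in soup.select('div.content dt a')]

# channels = collect_channel_links('http://bj.ganji.com/wu/')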
# Scrape the detail-page information (the Gpage_parsing module imported by the main program below)
from bs4 import BeautifulSoup
import requests
import random
import time
import pymongo
client = pymongo.MongoClient('localhost', 27017)
ganjidb = client['ganjidb']
detail_urls = ganjidb['detail_urls']
detail_info = ganjidb['detail_info']
# http://cn-proxy.com/, a site that lists free proxy IPs
proxy_list = [
    'http://101.96.11.45:8090',
    'http://115.28.31.219:8888',
    'http://27.115.75.114:8080',
]
# Pick a proxy IP at random
proxy_ip = random.choice(proxy_list)
proxies = {'http': proxy_ip}
# Pick a random User-Agent
userAgent = random.choice(['Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36',
                           'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36',
                           'Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36',
                           'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
                           'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'])
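One caveat: proxy_ip and userAgent are each chosen once, at import time, so an entire run reuses the same proxy/UA pair. A small sketch, reusing proxy_list from above, of hypothetical helpers (ua_pool, random_headers, random_proxies are not in the original) that re-roll both per request:

ua_pool = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
]


def random_headers():
    # re-roll the User-Agent on every call instead of once per run
    return {'User-Agent': random.choice(ua_pool)}


def random_proxies():
    # likewise pick a fresh proxy for every request
    return {'http': random.choice(proxy_list)}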


# Collect detail-page links from a channel's listing pages
def getdetail_links(channel_links, pages, who_sales='o'):
    headers = {
        'User-Agent': userAgent,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive'
    }
    info_links = '{}{}{}/'.format(channel_links, who_sales, pages)
    # route the request through the randomly chosen proxy
    web_data = requests.get(info_links, headers=headers, proxies=proxies)
    # pause 1-2 seconds between requests to avoid hammering the server
    time.sleep(random.randrange(1, 3))
    web_data.encoding = 'utf-8'
    if web_data.status_code == 200:
        try:
            soup = BeautifulSoup(web_data.text, 'lxml')
            links = soup.select('dd.feature > div > ul > li')
            for link in links:
                # each listing carries its puid in a data attribute
                detail_url = channel_links + str(link.get('data-puid')) + 'x.htm'
                detail_urls.insert_one({'url': detail_url})
                get_detailinfo(detail_url)
        except UnicodeEncodeError:
            pass
    else:
        pass
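To make the listing-page URL format concrete: with the default who_sales='o', page 1 of the furniture channel is assembled like this.

channel = 'http://bj.ganji.com/jiaju/'
print('{}{}{}/'.format(channel, 'o', 1))  # prints http://bj.ganji.com/jiaju/o1/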



# Scrape the fields on a single detail page
def get_detailinfo(detailpg_url):
    headers = {
        'User-Agent': userAgent,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive'
    }
    # route the request through the randomly chosen proxy
    web_data = requests.get(detailpg_url, headers=headers, proxies=proxies)
    # pause 1-2 seconds between requests
    time.sleep(random.randrange(1, 3))
    web_data.encoding = 'utf-8'
    if web_data.status_code == 200:
        try:
            soup = BeautifulSoup(web_data.text, 'lxml')
            title = soup.select('h1.title-name')[0].text
            price = soup.select('.f22.fc-orange.f-type')[0].text
            # keep the '新旧程度' (condition, i.e. new/used) field if present; note that the
            # colon passed to split() is the full-width character '：', not ASCII ':'
            condition_items = soup.select('ul.second-det-infor.clearfix > li')
            if condition_items and condition_items[0].text.split('：')[0].strip() == '新旧程度':
                degree = condition_items[0].text.split('：')[-1].strip()
            else:
                degree = None
            cate = soup.select('ul.det-infor > li:nth-of-type(1) > span')[0].text
            area = []
            areas = soup.select('div.content.clearfix > div.leftBox > div:nth-of-type(3) > div > ul > li:nth-of-type(3) > a')
            for i in areas:
                if i.text != '-':
                    area.append(i.text)
                else:
                    return
            # the element's text ends with '\xa0发布' ("published"); keep only the date part
            date = soup.select('i.pr-5')[0].text.strip().split('\xa0发布')[0] if soup.find('i', 'pr-5') else None
            data = {
                'title': title,
                'price': price,
                'date': date,
                'condition': degree,
                'category': cate,
                'area': area,
                'url': detailpg_url
            }
            detail_info.insert_one(data)
            print(data)
        except UnicodeEncodeError:
            pass
    else:
        pass
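A usage sketch for the function above; the puid in the URL is invented for illustration, but the shape (channel URL + puid + 'x.htm') matches how getdetail_links builds detail URLs:

# hypothetical puid, for illustration only
get_detailinfo('http://bj.ganji.com/jiaju/2189023587x.htm')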
# Main program
# Parallelize across channels with a process pool
from multiprocessing import Pool
from Gchannel_extract import All_channel_links
from Gpage_parsing import get_detailinfo, getdetail_links, detail_info, detail_urls


def get_all_links(channel):
    for i in range(1, 100):
        getdetail_links(channel, i)


if __name__ == '__main__':
    pool = Pool()
    pool.map(get_all_links, All_channel_links.split())
    # resume support: diff the collected links against the pages already parsed
    db_urls = [item['url'] for item in detail_urls.find()]
    index_urls = [item['url'] for item in detail_info.find()]
    x = set(db_urls)
    y = set(index_urls)
    rest_urls = x - y
    pool.map(get_detailinfo, rest_urls)
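One caveat with this resume logic: insert_one will happily store the same URL twice across restarts. A hedged refinement, assuming the same collections as above, is a unique index so duplicate links are rejected at the database level:

import pymongo

# one-time setup: reject duplicate URLs outright
detail_urls.create_index([('url', pymongo.ASCENDING)], unique=True)

# then, inside getdetail_links, upsert instead of insert so re-runs are no-ops
detail_urls.update_one({'url': detail_url}, {'$set': {'url': detail_url}}, upsert=True)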
# Monitor script: print scraping progress every few seconds
import time
from Gpage_parsing import detail_urls, detail_info


while True:
    print('Detail-page links collected:', detail_urls.find().count())
    print('Detail pages parsed:', detail_info.find().count())
    print('\n')
    time.sleep(6)
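Side note: Cursor.count() was deprecated in pymongo 3.7 and removed in 4.0; on newer drivers the equivalent calls would be:

print('Detail-page links collected:', detail_urls.count_documents({}))
print('Detail pages parsed:', detail_info.count_documents({}))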
