
Scraping Girl Pictures from Jandan.net

Author: 交易狗二哈 | Published 2017-03-24 20:41

Scraping girl pictures from jandan.net (煎蛋网) with BeautifulSoup + Requests.

1. Scraping one page of images

import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import time
import re


headers = {                                      # request headers (declared but not yet sent; see Get_url)
    'Host': 'jandan.net',                        # must match the target site; the original 'blog.csdn.net' was a leftover from another script
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate',
    'Referer': 'http://www.baidu.com',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
}


def Get_url(url):
    web_data = requests.get(url)    # web_data = requests.get(url, headers=headers)
    time.sleep(4)                   # throttle so frequent requests don't get the IP banned
    soup = BeautifulSoup(web_data.text, 'lxml')
    image_location = soup.find_all("img", {"src": re.compile(".*?")})  # image URLs live in the src attribute of <img> tags
    return image_location

def Get_links(Links):               # collect the raw image links into a list
    b = []
    for i in Links:
        b.append(i["src"])          # read the src attribute, method 1
    return b

def Download_picture(alinks):
    link = []
    for i in alinks:
        link.append('http:' + i)    # the scraped links are protocol-relative (start with //), so build a new list with "http:" prepended
    d = len(link)
    for i in range(d):
        print("file url :    {}".format(link[i]))
        urlretrieve(link[i], '{}.jpg'.format(i))
    print('Done')


if __name__ == '__main__':
    website = 'http://jandan.net/ooxx'
    links = Get_url(website)
    address = Get_links(links)
    Download_picture(address)

This crawler only fetches a single page; to keep crawling you have to keep changing the URL. Jandan's listing pages follow the pattern http://jandan.net/ooxx/page-N#comments, so the pages can be generated in a loop, as sketched below.
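
A minimal sketch of that loop, reusing the functions above (the three-page range is just an example, not from the original):

if __name__ == '__main__':
    for n in range(1, 4):          # first three pages only, as an example
        website = 'http://jandan.net/ooxx/page-{}#comments'.format(n)
        links = Get_url(website)
        Download_picture(Get_links(links))

Note that the file names '{}.jpg'.format(i) would collide across pages; that is exactly what the page counter in part 2 fixes.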

2. Scraping all the girl pictures on jandan.net

import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import time
import re
import urllib.error              # for catching URLError on dead links

urls = ['http://jandan.net/ooxx/page-{}#comments'.format(i) for i in range(1, 5, 1)]
# The site currently has 2368 pages; use range(1, 2369, 1) to crawl them all.
# The images are spread across pages, so a list comprehension builds every page URL up front.

page = 1            # module-level counter that records the current page number

headers = {                                      # request headers (declared but not yet sent; see Get_url)
    'Host': 'jandan.net',                        # must match the target site; the original 'blog.csdn.net' was a leftover from another script
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate',
    'Referer': 'http://www.baidu.com',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
}


def Get_url(url):
    web_data = requests.get(url)
    # web_data = requests.get(url, headers=headers)   # to be finished; see the sketch at the end
    time.sleep(4)                                     # throttle so frequent requests don't get the IP banned
    soup = BeautifulSoup(web_data.text, 'lxml')
    image_location = soup.find_all("img", {"src": re.compile(".*?")})  # image URLs live in the src attribute of <img> tags
    return image_location


def Get_links(Links):               # collect the raw image links into a list
    b = []
    for i in Links:
        b.append(i["src"])
    return b



def Download_picture(alinks):
    global page                                      # update the module-level page counter
    for i in range(len(alinks)):                     # some of the site's image links lack the leading "http:"
        if re.match('http:', alinks[i]) is None:     # if a link is protocol-relative, prepend "http:"
            alinks[i] = 'http:' + alinks[i]
        try:                                         # some image URLs are dead; handle the exception
            urlretrieve(alinks[i], 'page{}_{}.jpg'.format(page, i))
        except urllib.error.URLError:
            print('Picture page{}_{} URL is broken!'.format(page, i))
        else:
            print('Done')                            # report success only when the download actually worked
    page += 1



if __name__ == '__main__':
    for url in urls:
        links = Get_url(url)
        address = Get_links(links)
        # print(address)
        Download_picture(address)
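
The headers above are still unused: the requests.get(url, headers=headers) call remains commented out as unfinished. A minimal sketch of one way to wire them in, under my own assumptions (the Referer value and the 10-second timeout are placeholders, not from the original): let requests derive the Host header from each URL instead of hard-coding it, attach the rest to a Session, and fetch image bytes through that session instead of urlretrieve.

import requests

session = requests.Session()
session.headers.update({
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0',
    'Referer': 'http://jandan.net/ooxx',   # assumption: the listing page as referer
})

def fetch_page(url):                       # hypothetical replacement for the requests.get call in Get_url
    resp = session.get(url, timeout=10)    # the 10 s timeout is an assumption
    resp.raise_for_status()
    return resp.text                       # feed this to BeautifulSoup as before

def download_image(url, filename):         # hypothetical replacement for urlretrieve
    resp = session.get(url, timeout=10)
    resp.raise_for_status()                # dead links raise instead of writing junk files
    with open(filename, 'wb') as f:
        f.write(resp.content)              # write the raw image bytes

A Session also reuses the underlying connection across the thousands of requests this crawler makes, which urlretrieve cannot do.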
