From 53608353a63a2d8f1c82b5c2f26b29d8481c0c26 Mon Sep 17 00:00:00 2001
From: injetlee
Date: Sat, 28 Jul 2018 10:41:39 +0800
Subject: [PATCH] =?UTF-8?q?=E7=88=AC=E8=99=AB=E9=9B=86=E5=90=88?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 爬虫集合/README.MD        |  2 +
 爬虫集合/meizitu.py       | 77 +++++++++++++++++++++++++++++++++++
 爬虫集合/qiubai_crawer.py | 54 ++++++++++++++++++++++++
 3 files changed, 133 insertions(+)
 create mode 100644 爬虫集合/README.MD
 create mode 100644 爬虫集合/meizitu.py
 create mode 100644 爬虫集合/qiubai_crawer.py

diff --git a/爬虫集合/README.MD b/爬虫集合/README.MD
new file mode 100644
index 0000000..b73228f
--- /dev/null
+++ b/爬虫集合/README.MD
@@ -0,0 +1,2 @@
+# For a detailed explanation of the code, see the accompanying articles
+
diff --git a/爬虫集合/meizitu.py b/爬虫集合/meizitu.py
new file mode 100644
index 0000000..e26a83d
--- /dev/null
+++ b/爬虫集合/meizitu.py
@@ -0,0 +1,77 @@
+import requests
+import os
+import time
+import threading
+from bs4 import BeautifulSoup
+
+
+def download_page(url):
+    '''
+    Download a page and return its decoded text.
+    '''
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
+    r = requests.get(url, headers=headers)
+    r.encoding = 'gb2312'
+    return r.text
+
+
+def get_pic_list(html):
+    '''
+    Collect the photo-set entries on a list page, then call get_pic for each one.
+    '''
+    soup = BeautifulSoup(html, 'html.parser')
+    pic_list = soup.find_all('li', class_='wp-item')
+    for i in pic_list:
+        a_tag = i.find('h3', class_='tit').find('a')
+        link = a_tag.get('href')
+        text = a_tag.get_text()
+        get_pic(link, text)
+
+
+def get_pic(link, text):
+    '''
+    Download every image on a photo-set page and save it to disk.
+    '''
+    html = download_page(link)  # download the photo-set page
+    soup = BeautifulSoup(html, 'html.parser')
+    pic_list = soup.find('div', id="picture").find_all('img')  # all images on the page
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
+    create_dir('pic/{}'.format(text))
+    for i in pic_list:
+        pic_link = i.get('src')  # the actual image url
+        r = requests.get(pic_link, headers=headers)  # download the image, then save it to a file
+        with open('pic/{}/{}'.format(text, pic_link.split('/')[-1]), 'wb') as f:
+            f.write(r.content)
+        time.sleep(1)  # pause briefly so we don't hammer the site and get banned
+
+
+def create_dir(name):
+    if not os.path.exists(name):
+        os.makedirs(name)
+
+
+def execute(url):
+    page_html = download_page(url)
+    get_pic_list(page_html)
+
+
+def main():
+    create_dir('pic')
+    queue = list(range(1, 72))  # page numbers used to build the list-page urls
+    threads = []
+    while len(queue) > 0:
+        for thread in threads[:]:  # iterate over a copy so finished threads can be removed safely
+            if not thread.is_alive():
+                threads.remove(thread)
+        while len(threads) < 5 and len(queue) > 0:  # cap the pool at 5 worker threads
+            cur_page = queue.pop(0)
+            url = 'http://meizitu.com/a/more_{}.html'.format(cur_page)
+            thread = threading.Thread(target=execute, args=(url,))
+            thread.daemon = True
+            thread.start()
+            print('{} is downloading page {}'.format(thread.name, cur_page))
+            threads.append(thread)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/爬虫集合/qiubai_crawer.py b/爬虫集合/qiubai_crawer.py
new file mode 100644
index 0000000..e37e7e7
--- /dev/null
+++ b/爬虫集合/qiubai_crawer.py
@@ -0,0 +1,54 @@
+import requests
+from bs4 import BeautifulSoup
+
+
+def download_page(url):
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
+    r = requests.get(url, headers=headers)
+    return r.text
+
+
+def get_content(html, page):
+    output = """Page {}  Author: {}  Gender: {}  Age: {}  Votes: {}  Comments: {}\n{}\n------------\n"""
+    soup = BeautifulSoup(html, 'html.parser')
+    con = soup.find(id='content-left')
+    con_list = con.find_all('div', class_="article")
+    for i in con_list:
+        author = i.find('h2').string  # author name
+        content = i.find('div', class_='content').find('span').get_text()  # post text
+        stats = i.find('div', class_='stats')
+        vote = stats.find('span', class_='stats-vote').find('i', class_='number').string
+        comment = stats.find('span', class_='stats-comments').find('i', class_='number').string
+        author_info = i.find('div', class_='articleGender')  # holds the author's age and gender
+        if author_info is not None:  # non-anonymous user
+            class_list = author_info['class']
+            if "womenIcon" in class_list:
+                gender = 'female'
+            elif "manIcon" in class_list:
+                gender = 'male'
+            else:
+                gender = ''
+            age = author_info.string  # age
+        else:  # anonymous user
+            gender = ''
+            age = ''
+
+        save_txt(output.format(page, author, gender, age, vote, comment, content))
+
+
+def save_txt(*args):
+    for i in args:
+        with open('qiubai.txt', 'a', encoding='utf-8') as f:
+            f.write(i)
+
+
+def main():
+    # Browsing the site shows 13 pages listed at the bottom, so the urls can be built as below;
+    # ideally the page count would be read with Beautiful Soup instead of hard-coded.
+    for i in range(1, 14):
+        url = 'https://qiushibaike.com/text/page/{}'.format(i)
+        html = download_page(url)
+        get_content(html, i)
+
+if __name__ == '__main__':
+    main()
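
A note on the thread handling in meizitu.py: main() hand-rolls a pool by polling a list of Thread objects and capping it at five workers. The standard-library concurrent.futures module expresses the same idea more directly. The following is only a sketch of that alternative, not part of the patch; it assumes it is dropped into meizitu.py so that create_dir() and execute() are available.

from concurrent.futures import ThreadPoolExecutor


def main():
    create_dir('pic')
    # at most 5 pages download concurrently, mirroring the original limit of 5 threads
    with ThreadPoolExecutor(max_workers=5) as pool:
        for cur_page in range(1, 72):
            url = 'http://meizitu.com/a/more_{}.html'.format(cur_page)
            pool.submit(execute, url)
            print('queued page {}'.format(cur_page))

Because the with block waits for all submitted tasks to finish before returning, no daemon flag or manual is_alive() polling is needed.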
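
qiubai_crawer.py's main() hard-codes 13 pages, and its own comment notes that reading the page count with Beautiful Soup would be better. A minimal sketch of that idea follows; the 'ul.pagination a' selector is an assumption about the site's markup and would need to be checked against the real page.

import re

import requests
from bs4 import BeautifulSoup

HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}


def get_page_count(list_url, pagination_selector='ul.pagination a'):
    # Fetch the first listing page and take the largest number found in the
    # pagination links; fall back to 1 if nothing numeric is found.
    html = requests.get(list_url, headers=HEADERS).text
    soup = BeautifulSoup(html, 'html.parser')
    numbers = []
    for a in soup.select(pagination_selector):
        match = re.search(r'\d+', a.get_text())
        if match:
            numbers.append(int(match.group()))
    return max(numbers) if numbers else 1

main() could then loop with range(1, get_page_count('https://qiushibaike.com/text/page/1') + 1) instead of the hard-coded range(1, 14).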