mirror of https://github.com/injetlee/Python.git
Crawler collection (爬虫集合)
parent d122e5a65d
commit 53608353a6
@@ -0,0 +1,2 @@
# See the article for a detailed explanation of the code
@@ -0,0 +1,77 @@
import os
import time
import threading

import requests
from bs4 import BeautifulSoup

def download_page(url):
    '''
    Download a page and return its text.
    '''
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    r = requests.get(url, headers=headers)
    r.encoding = 'gb2312'  # the site serves GB2312-encoded pages
    return r.text
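
# A hardened variant (a sketch, not part of the original script): requests has
# no default timeout, so a stalled request would hang a worker thread forever.
# The timeout value and retry count below are illustrative choices.
def download_page_safe(url, retries=3):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    for attempt in range(retries):
        try:
            r = requests.get(url, headers=headers, timeout=10)
            r.raise_for_status()  # turn HTTP 4xx/5xx responses into exceptions
            r.encoding = 'gb2312'
            return r.text
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            time.sleep(2)  # brief back-off before retrying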

def get_pic_list(html):
    '''
    Parse the list of galleries on a list page, then call get_pic
    for each gallery found.
    '''
    soup = BeautifulSoup(html, 'html.parser')
    pic_list = soup.find_all('li', class_='wp-item')
    for i in pic_list:
        a_tag = i.find('h3', class_='tit').find('a')
        link = a_tag.get('href')  # URL of the gallery page
        text = a_tag.get_text()   # gallery title, reused as the folder name
        get_pic(link, text)

def get_pic(link, text):
    '''
    Download every image on the current gallery page and save it.
    '''
    html = download_page(link)  # fetch the gallery page
    soup = BeautifulSoup(html, 'html.parser')
    pic_list = soup.find('div', id="picture").find_all('img')  # all images on the page
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    create_dir('pic/{}'.format(text))
    for i in pic_list:
        pic_link = i.get('src')  # the image's own URL
        r = requests.get(pic_link, headers=headers)  # download the image bytes
        # name each file after the image URL rather than the page URL,
        # so images from the same gallery do not overwrite one another
        with open('pic/{}/{}'.format(text, pic_link.split('/')[-1]), 'wb') as f:
            f.write(r.content)
        time.sleep(1)  # pause briefly to avoid hammering the site and getting banned

def create_dir(name):
    # os.makedirs(name, exist_ok=True) would collapse this into one call
    if not os.path.exists(name):
        os.makedirs(name)


def execute(url):
    page_html = download_page(url)
    get_pic_list(page_html)

def main():
    create_dir('pic')
    queue = [i for i in range(1, 72)]  # page numbers used to build the list-page URLs
    threads = []
    while len(queue) > 0:
        # rebuild the list instead of removing while iterating, which would
        # skip elements and leave dead threads counted against the cap
        threads = [t for t in threads if t.is_alive()]
        while len(threads) < 5 and len(queue) > 0:  # cap the pool at 5 threads
            cur_page = queue.pop(0)
            url = 'http://meizitu.com/a/more_{}.html'.format(cur_page)
            thread = threading.Thread(target=execute, args=(url,))
            thread.daemon = True  # setDaemon() is deprecated; assign the attribute instead
            thread.start()
            # report thread.name, not current_thread(): this print runs in the main thread
            print('{} is downloading page {}'.format(thread.name, cur_page))
            threads.append(thread)
        time.sleep(0.1)  # avoid busy-spinning while the pool is full
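
# A minimal alternative sketch (not part of the original script): the manual
# thread bookkeeping in main() can be handed to concurrent.futures, which
# enforces the worker cap and joins threads automatically. Assumes the same
# execute() helper and URL pattern as above.
from concurrent.futures import ThreadPoolExecutor

def main_with_pool():
    create_dir('pic')
    urls = ['http://meizitu.com/a/more_{}.html'.format(i) for i in range(1, 72)]
    with ThreadPoolExecutor(max_workers=5) as pool:  # same five-worker cap as main()
        list(pool.map(execute, urls))  # list() re-raises any worker exception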

if __name__ == '__main__':
    main()
@@ -0,0 +1,54 @@
import requests
from bs4 import BeautifulSoup

def download_page(url):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    r = requests.get(url, headers=headers)
    return r.text

def get_content(html, page):
    # output template fields: page number, author, gender, age, votes, comments, body
    output = """第{}页 作者:{} 性别:{} 年龄:{} 点赞:{} 评论:{}\n{}\n------------\n"""
    soup = BeautifulSoup(html, 'html.parser')
    con = soup.find(id='content-left')
    con_list = con.find_all('div', class_="article")
    for i in con_list:
        author = i.find('h2').string  # author name
        content = i.find('div', class_='content').find('span').get_text()  # post body
        stats = i.find('div', class_='stats')
        vote = stats.find('span', class_='stats-vote').find('i', class_='number').string
        comment = stats.find('span', class_='stats-comments').find('i', class_='number').string
        author_info = i.find('div', class_='articleGender')  # holds the author's age and gender
        if author_info is not None:  # registered (non-anonymous) user
            class_list = author_info['class']
            if "womenIcon" in class_list:
                gender = '女'
            elif "manIcon" in class_list:
                gender = '男'
            else:
                gender = ''
            age = author_info.string  # the tag's text is the age
        else:  # anonymous user
            gender = ''
            age = ''

        save_txt(output.format(page, author, gender, age, vote, comment, content))

def save_txt(*args):
    for i in args:
        with open('qiubai.txt', 'a', encoding='utf-8') as f:
            f.write(i)

def main():
    # The list shows 13 pages at the bottom, so the range below is hard-coded;
    # a more robust approach would read the page count from the pagination bar
    # with Beautiful Soup, as sketched after this function.
    for i in range(1, 14):
        url = 'https://qiushibaike.com/text/page/{}'.format(i)
        html = download_page(url)
        get_content(html, i)
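
# A minimal sketch of the page-count idea mentioned in main(). Hypothetical:
# the 'pagination' class name and markup layout are assumptions, not verified
# against the live site. The idea is to take the largest number shown in the
# pagination bar instead of hard-coding 13 pages.
def get_page_count(html):
    soup = BeautifulSoup(html, 'html.parser')
    pagination = soup.find('ul', class_='pagination')  # assumed container element
    if pagination is None:
        return 1  # fall back to a single page if the bar is missing
    numbers = [int(s) for s in pagination.stripped_strings if s.isdigit()]
    return max(numbers) if numbers else 1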

if __name__ == '__main__':
    main()