## Storing Data

### Storing Massive Amounts of Data
The first choice for persisting data should be a relational database; there are many relational database products, including Oracle, MySQL, SQL Server, and PostgreSQL. If you need to store massive amounts of low-value data, a document database is also a good option. MongoDB is one of the best document databases, and interested readers can explore it on their own.
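
For readers who want to try the document-database route, the snippet below is a minimal sketch of saving a crawled page into MongoDB with `pymongo`; the connection URI and the `zhihu`/`pages` database and collection names are assumptions chosen for illustration rather than part of this lesson.

```Python
from pymongo import MongoClient

# Assumed local MongoDB instance; adjust the URI to your own deployment.
client = MongoClient('mongodb://localhost:27017')
db = client.zhihu

# Documents are schemaless, so the page can be stored together with any metadata.
db.pages.insert_one({
    'url': 'https://www.zhihu.com/explore',
    'page': '<html>...</html>',
})
```
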
The code below demonstrates how to use MySQL to save the links and pages crawled from Zhihu Explore.

```SQL
-- Create the database and a dedicated user that can access it.
create database zhihu default charset utf8;
create user 'hellokitty'@'%' identified by 'Hellokitty.618';
grant all privileges on zhihu.* to 'hellokitty'@'%';
flush privileges;

use zhihu;

-- Each crawled page is stored as a compressed blob, keyed by the SHA-1 digest of its URL.
create table `tb_explore`
(
    `id` integer auto_increment,
    `url` varchar(1024) not null,
    `page` longblob not null,
    `digest` char(48) unique not null,
    `idate` datetime default now(),
    primary key (`id`)
);
```
```Python
import hashlib
import pickle
import re
import zlib

from urllib.parse import urljoin

import MySQLdb
import bs4
import requests

# Connection to the zhihu database created above (the host is a placeholder).
conn = MySQLdb.connect(host='1.2.3.4', port=3306,
                       user='hellokitty', password='Hellokitty.618',
                       database='zhihu', charset='utf8',
                       autocommit=True)


def write_to_db(url, page, digest):
    """Insert the URL, the compressed page, and its digest into tb_explore."""
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                'insert into tb_explore (url, page, digest) values (%s, %s, %s)',
                (url, page, digest)
            )
    except MySQLdb.MySQLError as err:
        print(err)


def main():
    base_url = 'https://www.zhihu.com/'
    seed_url = urljoin(base_url, 'explore')
    headers = {'user-agent': 'Baiduspider'}
    try:
        resp = requests.get(seed_url, headers=headers)
        soup = bs4.BeautifulSoup(resp.text, 'lxml')
        # Only follow links that point to question pages.
        href_regex = re.compile(r'^/question')
        for a_tag in soup.find_all('a', {'href': href_regex}):
            href = a_tag.attrs['href']
            full_url = urljoin(base_url, href)
            # The SHA-1 digest of the URL serves as a unique fingerprint of the page.
            digest = hashlib.sha1(full_url.encode()).hexdigest()
            html_page = requests.get(full_url, headers=headers).text
            # Serialize and compress the page before writing it to the longblob column.
            zipped_page = zlib.compress(pickle.dumps(html_page))
            write_to_db(full_url, zipped_page, digest)
    finally:
        conn.close()


if __name__ == '__main__':
    main()
```
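
Because the `page` column holds a pickled, zlib-compressed payload, reading a page back means reversing both steps. The snippet below is a minimal sketch of such a read path; `read_from_db` is an illustrative helper that is not part of the code above, reusing the same connection settings.

```Python
import pickle
import zlib

import MySQLdb

conn = MySQLdb.connect(host='1.2.3.4', port=3306,
                       user='hellokitty', password='Hellokitty.618',
                       database='zhihu', charset='utf8')


def read_from_db(digest):
    """Fetch a page by the SHA-1 digest of its URL and restore the original HTML."""
    with conn.cursor() as cursor:
        cursor.execute('select page from tb_explore where digest = %s', (digest,))
        row = cursor.fetchone()
    if row is None:
        return None
    # Reverse the write path: decompress first, then unpickle back to the HTML string.
    return pickle.loads(zlib.decompress(row[0]))
```
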
### Caching Data

In [Web Data Collection and Parsing](./67.数据采集和解析.md) we saw how to scrape data from a given page and how to save the results, but we did not consider the case where we later need to extract more data from pages we have already scraped. Re-downloading those pages is not a big deal for a small site, but if we cache the pages instead, the application's performance improves noticeably. The example below shows how to use Redis to cache pages from Zhihu Explore.

```Python
import hashlib
import pickle
import re
import zlib

from urllib.parse import urljoin

import bs4
import redis
import requests


def main():
    base_url = 'https://www.zhihu.com/'
    seed_url = urljoin(base_url, 'explore')
    # Redis serves as the page cache (the host and password are placeholders).
    client = redis.Redis(host='1.2.3.4', port=6379, password='1qaz2wsx')
    headers = {'user-agent': 'Baiduspider'}
    resp = requests.get(seed_url, headers=headers)
    soup = bs4.BeautifulSoup(resp.text, 'lxml')
    href_regex = re.compile(r'^/question')
    for a_tag in soup.find_all('a', {'href': href_regex}):
        href = a_tag.attrs['href']
        full_url = urljoin(base_url, href)
        # The SHA-1 digest of the URL is the field name inside the Redis hash.
        field_key = hashlib.sha1(full_url.encode()).hexdigest()
        # Download the page only if it is not cached yet.
        if not client.hexists('spider:zhihu:explore', field_key):
            html_page = requests.get(full_url, headers=headers).text
            zipped_page = zlib.compress(pickle.dumps(html_page))
            client.hset('spider:zhihu:explore', field_key, zipped_page)
    print('Total %d question pages found.' % client.hlen('spider:zhihu:explore'))


if __name__ == '__main__':
    main()
```
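
To actually use the cache later, the stored value has to be decompressed and unpickled again. The function below is a minimal sketch of such a lookup; `load_cached_page` is an illustrative helper that is not part of the code above, and it assumes the same Redis connection settings.

```Python
import pickle
import zlib

import redis

client = redis.Redis(host='1.2.3.4', port=6379, password='1qaz2wsx')


def load_cached_page(field_key):
    """Return the cached HTML for a URL digest, or None if it has not been cached."""
    zipped_page = client.hget('spider:zhihu:explore', field_key)
    if zipped_page is None:
        return None
    # hget returns the raw bytes written by hset; undo the compression and pickling.
    return pickle.loads(zlib.decompress(zipped_page))
```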