mirror of https://github.com/dunwu/db-tutorial.git
🔖 Redis In Action source code
parent 87d0bb5433 · commit 11a22e2fa2
@ -0,0 +1,294 @@
# coding: utf-8

import time
import unittest


# Listing 1-1
'''
$ redis-cli                                 # Start the redis-cli client.
redis 127.0.0.1:6379> set hello world       # Set the key hello to the value world.
OK                                          # SET returns OK on success, which the Python client converts to True.
redis 127.0.0.1:6379> get hello             # Fetch the value stored at the key hello.
"world"                                     # The value is still world, just as we set it.
redis 127.0.0.1:6379> del hello             # Delete the key-value pair.
(integer) 1                                 # DEL returns the number of keys that were successfully deleted.
redis 127.0.0.1:6379> get hello             # The key no longer exists, so fetching it returns nil,
(nil)                                       # which the Python client converts to None.
redis 127.0.0.1:6379>
'''
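
# A minimal redis-py sketch of the transcript above, showing the reply
# conversions the comments mention (OK -> True, nil -> None). This is an
# illustrative addition rather than one of the book's listings; it assumes a
# local Redis server and the redis-py 2.x client used throughout this file.
def demo_listing_1_1(conn):
    print conn.set('hello', 'world')    # True  -- SET's OK reply becomes True.
    print conn.get('hello')             # 'world'
    print conn.delete('hello')          # 1     -- number of keys removed.
    print conn.get('hello')             # None  -- nil becomes None.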


# Listing 1-2
'''
redis 127.0.0.1:6379> rpush list-key item   # When pushing a new item onto a list,
(integer) 1                                 # the command returns the list's current length.
redis 127.0.0.1:6379> rpush list-key item2  #
(integer) 2                                 #
redis 127.0.0.1:6379> rpush list-key item   #
(integer) 3                                 #
redis 127.0.0.1:6379> lrange list-key 0 -1  # Using 0 as the start index and -1 as the end index
1) "item"                                   # fetches every item in the list.
2) "item2"                                  #
3) "item"                                   #
redis 127.0.0.1:6379> lindex list-key 1     # LINDEX fetches a single item from the list.
"item2"                                     #
redis 127.0.0.1:6379> lpop list-key         # Popping an item removes it from the list.
"item"                                      #
redis 127.0.0.1:6379> lrange list-key 0 -1  #
1) "item2"                                  #
2) "item"                                   #
redis 127.0.0.1:6379>
'''
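
# The same list operations from Python -- again an illustrative sketch rather
# than a book listing; it assumes a local server and redis-py 2.x.
def demo_listing_1_2(conn):
    conn.rpush('list-key', 'item', 'item2', 'item')   # Returns 3, the new length.
    print conn.lrange('list-key', 0, -1)   # ['item', 'item2', 'item']
    print conn.lindex('list-key', 1)       # 'item2'
    print conn.lpop('list-key')            # 'item'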


# Listing 1-3
'''
redis 127.0.0.1:6379> sadd set-key item     # When trying to add an item to a set,
(integer) 1                                 # the command returns 1 if the item was added,
redis 127.0.0.1:6379> sadd set-key item2    # and 0 if the item was already a member of the set.
(integer) 1                                 #
redis 127.0.0.1:6379> sadd set-key item3    #
(integer) 1                                 #
redis 127.0.0.1:6379> sadd set-key item     #
(integer) 0                                 #
redis 127.0.0.1:6379> smembers set-key      # Fetching all of a set's members returns a sequence of items,
1) "item"                                   # which the Python client converts into a Python set.
2) "item2"                                  #
3) "item3"                                  #
redis 127.0.0.1:6379> sismember set-key item4   # Check whether an item is a member of the set;
(integer) 0                                 # the Python client returns a boolean for this check.
redis 127.0.0.1:6379> sismember set-key item    #
(integer) 1                                 #
redis 127.0.0.1:6379> srem set-key item2    # When removing items from a set, the command returns
(integer) 1                                 # the number of items that were removed.
redis 127.0.0.1:6379> srem set-key item2    #
(integer) 0                                 #
redis 127.0.0.1:6379> smembers set-key
1) "item"
2) "item3"
redis 127.0.0.1:6379>
'''


# Listing 1-4
'''
redis 127.0.0.1:6379> hset hash-key sub-key1 value1   # When adding a key-value pair to a hash, the command
(integer) 1                                           # returns a value showing whether the key already existed in the hash.
redis 127.0.0.1:6379> hset hash-key sub-key2 value2   #
(integer) 1                                           #
redis 127.0.0.1:6379> hset hash-key sub-key1 value1   #
(integer) 0                                           #
redis 127.0.0.1:6379> hgetall hash-key                # Fetch all of the hash's key-value pairs;
1) "sub-key1"                                         # the Python client converts them into a Python dictionary.
2) "value1"                                           #
3) "sub-key2"                                         #
4) "value2"                                           #
redis 127.0.0.1:6379> hdel hash-key sub-key2          # When deleting a key-value pair, the command returns a value
(integer) 1                                           # showing whether the key existed in the hash before the removal.
redis 127.0.0.1:6379> hdel hash-key sub-key2          #
(integer) 0                                           #
redis 127.0.0.1:6379> hget hash-key sub-key1          # Fetch a single field from the hash.
"value1"                                              #
redis 127.0.0.1:6379> hgetall hash-key
1) "sub-key1"
2) "value1"
'''


# Listing 1-5
'''
redis 127.0.0.1:6379> zadd zset-key 728 member1       # When trying to add members to a sorted set,
(integer) 1                                           # the command returns the number of newly added members.
redis 127.0.0.1:6379> zadd zset-key 982 member0       #
(integer) 1                                           #
redis 127.0.0.1:6379> zadd zset-key 982 member0       #
(integer) 0                                           #
redis 127.0.0.1:6379> zrange zset-key 0 -1 withscores # Fetch all of the sorted set's members;
1) "member1"                                          # they come back ordered by score,
2) "728"                                              # and the Python client converts the scores into floats.
3) "member0"                                          #
4) "982"                                              #
redis 127.0.0.1:6379> zrangebyscore zset-key 0 800 withscores   # We can also fetch part of the sorted set based on score.
1) "member1"                                          #
2) "728"                                              #
redis 127.0.0.1:6379> zrem zset-key member1           # When removing sorted set members,
(integer) 1                                           # the command returns the number of members removed.
redis 127.0.0.1:6379> zrem zset-key member1           #
(integer) 0                                           #
redis 127.0.0.1:6379> zrange zset-key 0 -1 withscores
1) "member0"
2) "982"
'''
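
# A short redis-py sketch of the sorted-set transcript above, illustrating the
# float conversion of scores mentioned in the comments. Illustrative only; it
# uses the redis-py 2.x zadd() argument order (member, then score) that the
# rest of this file relies on.
def demo_listing_1_5(conn):
    conn.zadd('zset-key', 'member1', 728)
    conn.zadd('zset-key', 'member0', 982)
    print conn.zrange('zset-key', 0, -1, withscores=True)
    # [('member1', 728.0), ('member0', 982.0)] -- scores come back as floats.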


# Listing 1-6
# <start id="upvote-code"/>
# Prepare the constants we will need.
ONE_WEEK_IN_SECONDS = 7 * 86400
VOTE_SCORE = 432

def article_vote(conn, user, article):
    # Calculate the cutoff time for voting on the article.
    cutoff = time.time() - ONE_WEEK_IN_SECONDS

    # Check whether the article can still be voted on
    # (we could also fetch the article's posting time from the hash,
    # but the sorted set returns the posting time as a float,
    # which we can use without any conversion).
    if conn.zscore('time:', article) < cutoff:
        return

    # Get the article's ID from the article:id identifier.
    article_id = article.partition(':')[-1]

    # If the user is voting on this article for the first time,
    # increment the article's vote count and score.
    if conn.sadd('voted:' + article_id, user):
        conn.zincrby('score:', article, VOTE_SCORE)
        conn.hincrby(article, 'votes', 1)
# <end id="upvote-code"/>


# Listing 1-7
# <start id="post-article-code"/>
def post_article(conn, user, title, link):
    # Generate a new article ID.
    article_id = str(conn.incr('article:'))

    voted = 'voted:' + article_id
    # Add the posting user to the article's set of voted users,
    # then set that set to expire after one week
    # (chapter 3 covers expiration in more detail).
    conn.sadd(voted, user)
    conn.expire(voted, ONE_WEEK_IN_SECONDS)

    now = time.time()
    article = 'article:' + article_id
    # Store the article's information in a hash.
    conn.hmset(article, {
        'title': title,
        'link': link,
        'poster': user,
        'time': now,
        'votes': 1,
    })

    # Add the article to the time-ordered and score-ordered sorted sets.
    conn.zadd('score:', article, now + VOTE_SCORE)
    conn.zadd('time:', article, now)

    return article_id
# <end id="post-article-code"/>
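
# A hedged usage sketch tying the two listings together (illustrative, not a
# book listing; the user names and URL are placeholders):
def demo_post_and_vote(conn):
    article_id = post_article(conn, 'user1', 'A title', 'http://example.com')
    article_vote(conn, 'user2', 'article:' + article_id)  # user2 upvotes it.
    print conn.hgetall('article:' + article_id)           # Includes 'votes': '2'.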


# Listing 1-8
# <start id="fetch-articles-code"/>
ARTICLES_PER_PAGE = 25

def get_articles(conn, page, order='score:'):
    # Set up the start and end indexes for fetching articles.
    start = (page-1) * ARTICLES_PER_PAGE
    end = start + ARTICLES_PER_PAGE - 1

    # Fetch the article IDs.
    ids = conn.zrevrange(order, start, end)
    articles = []
    # Fetch the detailed information for each article ID.
    for id in ids:
        article_data = conn.hgetall(id)
        article_data['id'] = id
        articles.append(article_data)

    return articles
# <end id="fetch-articles-code"/>


# Listing 1-9
# <start id="add-remove-groups"/>
def add_remove_groups(conn, article_id, to_add=[], to_remove=[]):
    # Build the key that stores the article's information.
    article = 'article:' + article_id
    for group in to_add:
        # Add the article to the groups it belongs to.
        conn.sadd('group:' + group, article)
    for group in to_remove:
        # Remove the article from a group.
        conn.srem('group:' + group, article)
# <end id="add-remove-groups"/>


# Listing 1-10
# <start id="fetch-articles-group"/>
def get_group_articles(conn, group, page, order='score:'):
    # Create a key for each group and each sort order.
    key = order + group
    # Check whether a cached sort result exists; if not, sort now.
    if not conn.exists(key):
        # Sort the group's articles by score or by posting time.
        conn.zinterstore(key,
            ['group:' + group, order],
            aggregate='max',
        )
        # Have Redis delete the sorted set automatically after 60 seconds.
        conn.expire(key, 60)
    # Call the get_articles() function defined earlier to paginate and fetch the article data.
    return get_articles(conn, page, key)
# <end id="fetch-articles-group"/>
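
# An illustrative sketch of the grouping functions above (not a book listing;
# the group name is a placeholder):
def demo_group_articles(conn):
    article_id = post_article(conn, 'user1', 'Another title', 'http://example.com')
    add_remove_groups(conn, article_id, ['programming'])
    print get_group_articles(conn, 'programming', 1)   # First page, by score.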


#--------------- Below are helpers for testing the code --------------------------------

class TestCh01(unittest.TestCase):
    def setUp(self):
        import redis
        self.conn = redis.Redis(db=15)

    def tearDown(self):
        del self.conn
        print
        print

    def test_article_functionality(self):
        conn = self.conn
        import pprint

        article_id = str(post_article(conn, 'username', 'A title', 'http://www.google.com'))
        print "We posted a new article with id:", article_id
        print
        self.assertTrue(article_id)

        print "Its HASH looks like:"
        r = conn.hgetall('article:' + article_id)
        print r
        print
        self.assertTrue(r)

        article_vote(conn, 'other_user', 'article:' + article_id)
        print "We voted for the article, it now has votes:",
        v = int(conn.hget('article:' + article_id, 'votes'))
        print v
        print
        self.assertTrue(v > 1)

        print "The currently highest-scoring articles are:"
        articles = get_articles(conn, 1)
        pprint.pprint(articles)
        print

        self.assertTrue(len(articles) >= 1)

        add_remove_groups(conn, article_id, ['new-group'])
        print "We added the article to a new group, other articles include:"
        articles = get_group_articles(conn, 'new-group', 1)
        pprint.pprint(articles)
        print
        self.assertTrue(len(articles) >= 1)

        to_del = (
            conn.keys('time:*') + conn.keys('voted:*') + conn.keys('score:*') +
            conn.keys('article:*') + conn.keys('group:*')
        )
        if to_del:
            conn.delete(*to_del)

if __name__ == '__main__':
    unittest.main()
@ -0,0 +1,391 @@
# coding: utf-8

import json
import threading
import time
import unittest
import urlparse
import uuid

QUIT = False

# Listing 2-1
# <start id="_1311_14471_8266"/>
def check_token(conn, token):
    return conn.hget('login:', token)   # Try to fetch and return the user the token maps to.
# <end id="_1311_14471_8266"/>


# Listing 2-2
# <start id="_1311_14471_8265"/>
def update_token(conn, token, user, item=None):
    # Get the current timestamp.
    timestamp = time.time()
    # Keep the mapping from the token to the logged-in user.
    conn.hset('login:', token, user)
    # Record when the token was last seen.
    conn.zadd('recent:', token, timestamp)
    if item:
        # Record the item the user viewed.
        conn.zadd('viewed:' + token, item, timestamp)
        # Trim old entries, keeping only the user's 25 most recently viewed items.
        conn.zremrangebyrank('viewed:' + token, 0, -26)
# <end id="_1311_14471_8265"/>
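
# An illustrative look at the keys update_token() maintains (not a book
# listing; the token value is a placeholder):
def demo_token_keys(conn):
    update_token(conn, 'token-1234', 'username', 'itemX')
    print conn.hget('login:', 'token-1234')        # 'username'
    print conn.zscore('recent:', 'token-1234')     # The last-seen timestamp.
    print conn.zrange('viewed:token-1234', 0, -1)  # ['itemX']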


# Listing 2-3
# <start id="_1311_14471_8270"/>
QUIT = False
LIMIT = 10000000

def clean_sessions(conn):
    while not QUIT:
        # Find out how many tokens currently exist.
        size = conn.zcard('recent:')
        # If we are under the limit, sleep and check again later.
        if size <= LIMIT:
            time.sleep(1)
            continue

        # Fetch the token IDs that should be removed.
        end_index = min(size - LIMIT, 100)
        tokens = conn.zrange('recent:', 0, end_index-1)

        # Build the key names for the tokens that will be deleted.
        session_keys = []
        for token in tokens:
            session_keys.append('viewed:' + token)

        # Remove the oldest tokens.
        conn.delete(*session_keys)
        conn.hdel('login:', *tokens)
        conn.zrem('recent:', *tokens)
# <end id="_1311_14471_8270"/>


# Listing 2-4
# <start id="_1311_14471_8279"/>
def add_to_cart(conn, session, item, count):
    if count <= 0:
        # Remove the given item from the shopping cart
        # (HDEL removes a field from a hash).
        conn.hdel('cart:' + session, item)
    else:
        # Add the given item to the shopping cart.
        conn.hset('cart:' + session, item, count)
# <end id="_1311_14471_8279"/>


# Listing 2-5
# <start id="_1311_14471_8271"/>
def clean_full_sessions(conn):
    while not QUIT:
        size = conn.zcard('recent:')
        if size <= LIMIT:
            time.sleep(1)
            continue

        end_index = min(size - LIMIT, 100)
        sessions = conn.zrange('recent:', 0, end_index-1)

        session_keys = []
        for sess in sessions:
            session_keys.append('viewed:' + sess)
            session_keys.append('cart:' + sess)   # This new line also deletes the old session's shopping cart.

        conn.delete(*session_keys)
        conn.hdel('login:', *sessions)
        conn.zrem('recent:', *sessions)
# <end id="_1311_14471_8271"/>


# Listing 2-6
# <start id="_1311_14471_8291"/>
def cache_request(conn, request, callback):
    # For requests that cannot be cached, call the callback directly.
    if not can_cache(conn, request):
        return callback(request)

    # Convert the request into a simple string key for later lookups.
    page_key = 'cache:' + hash_request(request)
    # Try to find the cached page.
    content = conn.get(page_key)

    if not content:
        # The page has not been cached yet, so generate it.
        content = callback(request)
        # Put the newly generated page into the cache.
        conn.setex(page_key, content, 300)

    # Return the page.
    return content
# <end id="_1311_14471_8291"/>
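
# A small illustrative use of cache_request() (not a book listing; the URL and
# callback are placeholders, matching the helpers defined near the tests below):
def demo_cache_request(conn):
    def render(request):
        return "content for " + request
    update_token(conn, 'tokenZ', 'username', 'itemX')  # Give itemX a view count so can_cache() passes.
    url = 'http://test.com/?item=itemX'
    print cache_request(conn, url, render)   # Generates and caches the page.
    print cache_request(conn, url, None)     # Served from the cache; the callback is unused.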


# Listing 2-7
# <start id="_1311_14471_8287"/>
def schedule_row_cache(conn, row_id, delay):
    # First set the row's delay value.
    conn.zadd('delay:', row_id, delay)
    # Then schedule the row to be cached immediately.
    conn.zadd('schedule:', row_id, time.time())
# <end id="_1311_14471_8287"/>


# Listing 2-8
# <start id="_1311_14471_8292"/>
def cache_rows(conn):
    while not QUIT:
        # Try to fetch the next row to cache and its scheduled timestamp;
        # the command returns a list of zero or one (member, score) tuples.
        next = conn.zrange('schedule:', 0, 0, withscores=True)
        now = time.time()
        if not next or next[0][1] > now:
            # No rows need caching right now, so sleep for 50 milliseconds and retry.
            time.sleep(.05)
            continue

        row_id = next[0][0]
        # Fetch the delay before the next scheduled caching.
        delay = conn.zscore('delay:', row_id)
        if delay <= 0:
            # This row no longer needs to be cached; remove it from the cache.
            conn.zrem('delay:', row_id)
            conn.zrem('schedule:', row_id)
            conn.delete('inv:' + row_id)
            continue

        # Read the data row.
        row = Inventory.get(row_id)
        # Update the schedule and set the cached value.
        conn.zadd('schedule:', row_id, now + delay)
        conn.set('inv:' + row_id, json.dumps(row.to_dict()))
# <end id="_1311_14471_8292"/>


# Listing 2-9
# <start id="_1311_14471_8298"/>
def update_token(conn, token, user, item=None):
    timestamp = time.time()
    conn.hset('login:', token, user)
    conn.zadd('recent:', token, timestamp)
    if item:
        conn.zadd('viewed:' + token, item, timestamp)
        conn.zremrangebyrank('viewed:' + token, 0, -26)
        conn.zincrby('viewed:', item, -1)   # This line is new.
# <end id="_1311_14471_8298"/>


# Listing 2-10
# <start id="_1311_14471_8288"/>
def rescale_viewed(conn):
    while not QUIT:
        # Remove all items ranked below 20,000.
        conn.zremrangebyrank('viewed:', 20000, -1)
        # Halve every view count.
        conn.zinterstore('viewed:', {'viewed:': .5})
        # Do this again in 5 minutes.
        time.sleep(300)
# <end id="_1311_14471_8288"/>


# Listing 2-11
# <start id="_1311_14471_8289"/>
def can_cache(conn, request):
    # Try to extract the item ID from the page.
    item_id = extract_item_id(request)
    # Check whether the page can be cached, and whether it is an item page at all.
    if not item_id or is_dynamic(request):
        return False
    # Fetch the item's view-count rank.
    rank = conn.zrank('viewed:', item_id)
    # Decide whether to cache the page based on that rank.
    return rank is not None and rank < 10000
# <end id="_1311_14471_8289"/>


#--------------- Below are helpers for testing the code --------------------------------

def extract_item_id(request):
    parsed = urlparse.urlparse(request)
    query = urlparse.parse_qs(parsed.query)
    return (query.get('item') or [None])[0]

def is_dynamic(request):
    parsed = urlparse.urlparse(request)
    query = urlparse.parse_qs(parsed.query)
    return '_' in query

def hash_request(request):
    return str(hash(request))

class Inventory(object):
    def __init__(self, id):
        self.id = id

    @classmethod
    def get(cls, id):
        return Inventory(id)

    def to_dict(self):
        return {'id':self.id, 'data':'data to cache...', 'cached':time.time()}

class TestCh02(unittest.TestCase):
    def setUp(self):
        import redis
        self.conn = redis.Redis(db=15)

    def tearDown(self):
        conn = self.conn
        to_del = (
            conn.keys('login:*') + conn.keys('recent:*') + conn.keys('viewed:*') +
            conn.keys('cart:*') + conn.keys('cache:*') + conn.keys('delay:*') +
            conn.keys('schedule:*') + conn.keys('inv:*'))
        if to_del:
            self.conn.delete(*to_del)
        del self.conn
        global QUIT, LIMIT
        QUIT = False
        LIMIT = 10000000
        print
        print

    def test_login_cookies(self):
        conn = self.conn
        global LIMIT, QUIT
        token = str(uuid.uuid4())

        update_token(conn, token, 'username', 'itemX')
        print "We just logged-in/updated token:", token
        print "For user:", 'username'
        print

        print "What username do we get when we look-up that token?"
        r = check_token(conn, token)
        print r
        print
        self.assertTrue(r)

        print "Let's drop the maximum number of cookies to 0 to clean them out"
        print "We will start a thread to do the cleaning, while we stop it later"

        LIMIT = 0
        t = threading.Thread(target=clean_sessions, args=(conn,))
        t.setDaemon(1) # to make sure it dies if we ctrl+C quit
        t.start()
        time.sleep(1)
        QUIT = True
        time.sleep(2)
        if t.isAlive():
            raise Exception("The clean sessions thread is still alive?!?")

        s = conn.hlen('login:')
        print "The current number of sessions still available is:", s
        self.assertFalse(s)

    def test_shopping_cart_cookies(self):
        conn = self.conn
        global LIMIT, QUIT
        token = str(uuid.uuid4())

        print "We'll refresh our session..."
        update_token(conn, token, 'username', 'itemX')
        print "And add an item to the shopping cart"
        add_to_cart(conn, token, "itemY", 3)
        r = conn.hgetall('cart:' + token)
        print "Our shopping cart currently has:", r
        print

        self.assertTrue(len(r) >= 1)

        print "Let's clean out our sessions and carts"
        LIMIT = 0
        t = threading.Thread(target=clean_full_sessions, args=(conn,))
        t.setDaemon(1) # to make sure it dies if we ctrl+C quit
        t.start()
        time.sleep(1)
        QUIT = True
        time.sleep(2)
        if t.isAlive():
            raise Exception("The clean sessions thread is still alive?!?")

        r = conn.hgetall('cart:' + token)
        print "Our shopping cart now contains:", r

        self.assertFalse(r)

    def test_cache_request(self):
        conn = self.conn
        token = str(uuid.uuid4())

        def callback(request):
            return "content for " + request

        update_token(conn, token, 'username', 'itemX')
        url = 'http://test.com/?item=itemX'
        print "We are going to cache a simple request against", url
        result = cache_request(conn, url, callback)
        print "We got initial content:", repr(result)
        print

        self.assertTrue(result)

        print "To test that we've cached the request, we'll pass a bad callback"
        result2 = cache_request(conn, url, None)
        print "We ended up getting the same response!", repr(result2)

        self.assertEquals(result, result2)

        self.assertFalse(can_cache(conn, 'http://test.com/'))
        self.assertFalse(can_cache(conn, 'http://test.com/?item=itemX&_=1234536'))

    def test_cache_rows(self):
        import pprint
        conn = self.conn
        global QUIT

        print "First, let's schedule caching of itemX every 5 seconds"
        schedule_row_cache(conn, 'itemX', 5)
        print "Our schedule looks like:"
        s = conn.zrange('schedule:', 0, -1, withscores=True)
        pprint.pprint(s)
        self.assertTrue(s)

        print "We'll start a caching thread that will cache the data..."
        t = threading.Thread(target=cache_rows, args=(conn,))
        t.setDaemon(1)
        t.start()

        time.sleep(1)
        print "Our cached data looks like:"
        r = conn.get('inv:itemX')
        print repr(r)
        self.assertTrue(r)
        print
        print "We'll check again in 5 seconds..."
        time.sleep(5)
        print "Notice that the data has changed..."
        r2 = conn.get('inv:itemX')
        print repr(r2)
        print
        self.assertTrue(r2)
        self.assertTrue(r != r2)

        print "Let's force un-caching"
        schedule_row_cache(conn, 'itemX', -1)
        time.sleep(1)
        r = conn.get('inv:itemX')
        print "The cache was cleared?", not r
        print
        self.assertFalse(r)

        QUIT = True
        time.sleep(2)
        if t.isAlive():
            raise Exception("The database caching thread is still alive?!?")

    # We aren't going to bother testing that the top 10k requests are cached,
    # as we already tested it as part of the cached requests test.

if __name__ == '__main__':
    unittest.main()
@ -0,0 +1,507 @@
# coding: utf-8

import threading
import time
import unittest

import redis

ONE_WEEK_IN_SECONDS = 7 * 86400
VOTE_SCORE = 432
ARTICLES_PER_PAGE = 25


# Listing 3-1
'''
# <start id="string-calls-1"/>
>>> conn = redis.Redis()
>>> conn.get('key')             # Fetching a nonexistent key returns None, which the interactive shell does not display.
>>> conn.incr('key')            # We can increment keys that do not yet exist,
1                               # and we can pass an optional argument to increment by more than 1.
>>> conn.incr('key', 15)        #
16                              #
>>> conn.decr('key', 5)         # Like incrementing, decrementing also takes an
11                              # optional argument for the amount to decrement by.
>>> conn.get('key')             # When we fetch the key, the stored integer comes back as a string.
'11'                            #
>>> conn.set('key', '13')       # Even when the value we set is a string,
True                            # as long as that string can be interpreted as an integer,
>>> conn.incr('key')            # we can treat it as an integer.
14                              #
# <end id="string-calls-1"/>
'''


# Listing 3-2
'''
# <start id="string-calls-2"/>
>>> conn.append('new-string-key', 'hello ')    # Append the string 'hello ' to the not-yet-existing key 'new-string-key'.
6L                                             # APPEND returns the string's current length.
>>> conn.append('new-string-key', 'world!')
12L                                            #
>>> conn.substr('new-string-key', 3, 7)        # Redis indexes are zero-based, and ranges include their endpoints by default.
'lo wo'                                        # The string 'lo wo' sits in the middle of 'hello world!'.
>>> conn.setrange('new-string-key', 0, 'H')    # Set a range of the string.
12                                             # SETRANGE also returns the string's current total length.
>>> conn.setrange('new-string-key', 6, 'W')
12
>>> conn.get('new-string-key')                 # Look at the string's current value.
'Hello World!'                                 # The two SETRANGE calls changed the lowercase h and w to uppercase.
>>> conn.setrange('new-string-key', 11, ', how are you?')   # SETRANGE can both replace existing content and grow the string.
25
>>> conn.get('new-string-key')
'Hello World, how are you?'                    # That SETRANGE call replaced the trailing exclamation mark and appended more characters.
>>> conn.setbit('another-key', 2, 1)           # Setting bits beyond the end of a string fills the gap with null bytes.
0                                              # SETBIT returns the bit's previous value.
>>> conn.setbit('another-key', 7, 1)           # When interpreting the bits that Redis stores, remember that
0                                              # offset 0 refers to a byte's most significant bit.
>>> conn.get('another-key')                    #
'!'                                            # Setting bits 2 and 7 to 1 makes the value '!', which is character 33.
# <end id="string-calls-2"/>
'''
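
# A quick sanity check of the bit arithmetic in the transcript above -- plain
# Python, added for illustration. With offset 0 as a byte's most significant
# bit, setting bits 2 and 7 yields 0b00100001 == 33, the character '!':
def demo_setbit_byte():
    byte = (1 << (7 - 2)) | (1 << (7 - 7))   # Bit offsets 2 and 7, MSB first.
    print byte, chr(byte)                    # 33 '!'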


# Listing 3-3
'''
# <start id="list-calls-1"/>
>>> conn.rpush('list-key', 'last')          # When we push items onto the list,
1L                                          # the push returns the list's new length.
>>> conn.lpush('list-key', 'first')         # It is easy to push onto both ends of the list.
2L
>>> conn.rpush('list-key', 'new last')
3L
>>> conn.lrange('list-key', 0, -1)          # Semantically, the left end of the list is the beginning, and the right end is the end.
['first', 'last', 'new last']               #
>>> conn.lpop('list-key')                   # By repeatedly popping from the left end,
'first'                                     # we fetch the list's items in left-to-right order.
>>> conn.lpop('list-key')                   #
'last'                                      #
>>> conn.lrange('list-key', 0, -1)
['new last']
>>> conn.rpush('list-key', 'a', 'b', 'c')   # We can push several items at once.
4L
>>> conn.lrange('list-key', 0, -1)
['new last', 'a', 'b', 'c']
>>> conn.ltrim('list-key', 2, -1)           # We can trim any number of items from the left end, the right end, or both.
True                                        #
>>> conn.lrange('list-key', 0, -1)          #
['b', 'c']                                  #
# <end id="list-calls-1"/>
'''


# Listing 3-4
'''
# <start id="list-calls-2"/>
>>> conn.rpush('list', 'item1')             # Add some items to two lists.
1                                           #
>>> conn.rpush('list', 'item2')             #
2                                           #
>>> conn.rpush('list2', 'item3')            #
1                                           #
>>> conn.brpoplpush('list2', 'list', 1)     # Move an item from one list to the other,
'item3'                                     # returning the item that was moved.
>>> conn.brpoplpush('list2', 'list', 1)     # When the list is empty, the blocking pop waits up to the given timeout for an item to appear, then returns None (which the shell does not print).
>>> conn.lrange('list', 0, -1)              # The item popped from the right end of "list2"
['item3', 'item1', 'item2']                 # was pushed onto the left end of "list".
>>> conn.brpoplpush('list', 'list2', 1)
'item2'
>>> conn.blpop(['list', 'list2'], 1)        # BLPOP checks the given lists from left to right,
('list', 'item3')                           # popping from the first non-empty list it finds.
>>> conn.blpop(['list', 'list2'], 1)        #
('list', 'item1')                           #
>>> conn.blpop(['list', 'list2'], 1)        #
('list2', 'item2')                          #
>>> conn.blpop(['list', 'list2'], 1)        #
>>>
# <end id="list-calls-2"/>
'''

# <start id="exercise-update-token"/>
def update_token(conn, token, user, item=None):
    timestamp = time.time()
    conn.hset('login:', token, user)
    conn.zadd('recent:', token, timestamp)
    if item:
        key = 'viewed:' + token
        # Remove the item from the list if it is already there.
        conn.lrem(key, item)
        # Push the item onto the right end of the list, so that ZRANGE and LRANGE produce the same results.
        conn.rpush(key, item)
        # Trim the list so that it holds at most 25 items.
        conn.ltrim(key, -25, -1)
        conn.zincrby('viewed:', item, -1)
# <end id="exercise-update-token"/>


# Listing 3-5
'''
# <start id="set-calls-1"/>
>>> conn.sadd('set-key', 'a', 'b', 'c')     # SADD adds the items that are not already in the set,
3                                           # and returns the number of items added.
>>> conn.srem('set-key', 'c', 'd')          # The srem function returns True when it removes at least one item,
True                                        # and False otherwise;
>>> conn.srem('set-key', 'c', 'd')          # note that this is a bug in the Python client --
False                                       # the Redis SREM command actually returns the number of items removed, not a boolean.
>>> conn.scard('set-key')                   # Check how many items are in the set.
2                                           #
>>> conn.smembers('set-key')                # Fetch all of the set's members.
set(['a', 'b'])                             #
>>> conn.smove('set-key', 'set-key2', 'a')  # It is easy to move items from one set to another.
True                                        #
>>> conn.smove('set-key', 'set-key2', 'c')  # When we try to move an item that does not exist
False                                       # in the first set,
>>> conn.smembers('set-key2')               # the move does not happen.
set(['a'])                                  #
# <end id="set-calls-1"/>
'''


# Listing 3-6
'''
# <start id="set-calls-2"/>
>>> conn.sadd('skey1', 'a', 'b', 'c', 'd')  # First add some items to two sets.
4                                           #
>>> conn.sadd('skey2', 'c', 'd', 'e', 'f')  #
4                                           #
>>> conn.sdiff('skey1', 'skey2')            # Compute the result of removing all of the second set's items from the first set.
set(['a', 'b'])                             #
>>> conn.sinter('skey1', 'skey2')           # We can also find the items that exist in both sets.
set(['c', 'd'])                             #
>>> conn.sunion('skey1', 'skey2')           # And we can find all of the items that exist in either of the two sets.
set(['a', 'c', 'b', 'e', 'd', 'f'])         #
# <end id="set-calls-2"/>
'''


# Listing 3-7
'''
# <start id="hash-calls-1"/>
>>> conn.hmset('hash-key', {'k1':'v1', 'k2':'v2', 'k3':'v3'})   # HMSET adds several key-value pairs to a hash at once.
True                                        #
>>> conn.hmget('hash-key', ['k2', 'k3'])    # HMGET fetches the values of several keys at once.
['v2', 'v3']                                #
>>> conn.hlen('hash-key')                   # HLEN is commonly used to debug hashes with very large numbers of key-value pairs.
3                                           #
>>> conn.hdel('hash-key', 'k1', 'k3')       # HDEL returns True when it removes at least one key-value pair;
True                                        # because HDEL already handles multiple keys, Redis has no HMDEL command.
# <end id="hash-calls-1"/>
'''


# Listing 3-8
'''
# <start id="hash-calls-2"/>
>>> conn.hmset('hash-key2', {'short':'hello', 'long':1000*'1'})   # When looking at a hash, we can fetch just the keys without transferring large values.
True                                        #
>>> conn.hkeys('hash-key2')                 #
['long', 'short']                           #
>>> conn.hexists('hash-key2', 'num')        # Check whether a key exists in the hash.
False                                       #
>>> conn.hincrby('hash-key2', 'num')        # As with strings, incrementing a hash key
1L                                          # that does not yet exist
>>> conn.hexists('hash-key2', 'num')        # makes Redis treat its value as 0.
True                                        #
# <end id="hash-calls-2"/>
'''


# Listing 3-9
'''
# <start id="zset-calls-1"/>
>>> conn.zadd('zset-key', 'a', 3, 'b', 2, 'c', 1)   # In the Python client, ZADD takes the member first and the score second,
3                                                   # which is the reverse of standard Redis, where the score comes first.
>>> conn.zcard('zset-key')                  # Knowing a sorted set's size tells us, in some cases, whether it needs trimming.
3                                           #
>>> conn.zincrby('zset-key', 'c', 3)        # Like strings and hashes,
4.0                                         # sorted set members can be incremented.
>>> conn.zscore('zset-key', 'b')            # Fetching a single member's score is very useful for counters and leaderboards.
2.0                                         #
>>> conn.zrank('zset-key', 'c')             # Fetch a member's rank (ranks start at 0);
2                                           # we can then use that rank to decide the range for ZRANGE.
>>> conn.zcount('zset-key', 0, 3)           # For some tasks, counting the items
2L                                          # within a score range is very useful.
>>> conn.zrem('zset-key', 'b')              # Removing members is as easy as adding them.
True                                        #
>>> conn.zrange('zset-key', 0, -1, withscores=True)   # When debugging, we usually fetch the whole sorted set with ZRANGE,
[('a', 3.0), ('c', 4.0)]                    # but real use cases usually fetch only a small part of it at a time.
# <end id="zset-calls-1"/>
'''


# Listing 3-10
'''
# <start id="zset-calls-2"/>
>>> conn.zadd('zset-1', 'a', 1, 'b', 2, 'c', 3)     # First create two sorted sets.
3                                                   #
>>> conn.zadd('zset-2', 'b', 4, 'c', 1, 'd', 0)     #
3                                                   #
>>> conn.zinterstore('zset-i', ['zset-1', 'zset-2'])    # ZINTERSTORE and ZUNIONSTORE aggregate with sum by default,
2L                                                  # so the scores of members in several sorted sets are added together.
>>> conn.zrange('zset-i', 0, -1, withscores=True)   #
[('c', 4.0), ('b', 6.0)]                            #
>>> conn.zunionstore('zset-u', ['zset-1', 'zset-2'], aggregate='min')   # Union and intersection can use different aggregate functions;
4L                                                  # sum, min, and max are available.
>>> conn.zrange('zset-u', 0, -1, withscores=True)   #
[('d', 0.0), ('a', 1.0), ('c', 1.0), ('b', 2.0)]    #
>>> conn.sadd('set-1', 'a', 'd')                    # We can also pass plain sets as input to ZINTERSTORE and ZUNIONSTORE;
2                                                   # they are treated as sorted sets whose members all have score 1.
>>> conn.zunionstore('zset-u2', ['zset-1', 'zset-2', 'set-1'])  #
4L                                                  #
>>> conn.zrange('zset-u2', 0, -1, withscores=True)  #
[('d', 1.0), ('a', 2.0), ('c', 4.0), ('b', 6.0)]    #
# <end id="zset-calls-2"/>
'''

def publisher(n):
    time.sleep(1)
    for i in xrange(n):
        conn.publish('channel', i)
        time.sleep(1)

def run_pubsub():
    threading.Thread(target=publisher, args=(3,)).start()
    pubsub = conn.pubsub()
    pubsub.subscribe(['channel'])
    count = 0
    for item in pubsub.listen():
        print item
        count += 1
        if count == 4:
            pubsub.unsubscribe()
        if count == 5:
            break


# Listing 3-11
'''
# <start id="pubsub-calls-1"/>
>>> def publisher(n):
...     time.sleep(1)                       # Sleep first, so the subscriber has time to connect and start listening.
...     for i in xrange(n):
...         conn.publish('channel', i)      # Sleep briefly after each published message
...         time.sleep(1)                   # so that the messages arrive one by one.
...
>>> def run_pubsub():
...     threading.Thread(target=publisher, args=(3,)).start()
...     pubsub = conn.pubsub()
...     pubsub.subscribe(['channel'])
...     count = 0
...     for item in pubsub.listen():
...         print item
...         count += 1
...         if count == 4:
...             pubsub.unsubscribe()
...         if count == 5:
...             break
...

>>> def run_pubsub():
...     threading.Thread(target=publisher, args=(3,)).start()  # Start the publisher thread, which will send three messages.
...     pubsub = conn.pubsub()              # Create a pubsub object and subscribe it to the given channel.
...     pubsub.subscribe(['channel'])       #
...     count = 0
...     for item in pubsub.listen():        # Listen for messages by iterating over pubsub.listen().
...         print item                      # Print every message we receive.
...         count += 1                      # After one subscribe confirmation and the three published messages,
...         if count == 4:                  # unsubscribe so that we stop listening for new messages.
...             pubsub.unsubscribe()        #
...         if count == 5:                  # When the client receives the unsubscribe confirmation,
...             break                       # it needs to stop receiving messages.
...
>>> run_pubsub()                            # Actually run the functions and watch what they do.
{'pattern': None, 'type': 'subscribe', 'channel': 'channel', 'data': 1L}# When we first subscribe to a channel, we receive a confirmation message about the subscription.
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '0'} # These structures are the items we get when iterating over pubsub.listen().
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '1'} #
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '2'} #
{'pattern': None, 'type': 'unsubscribe', 'channel': 'channel', 'data': # When we unsubscribe, we receive a confirmation telling us which channel
0L}                                                                     # we unsubscribed from and how many channels we are still subscribed to.
# <end id="pubsub-calls-1"/>
'''


# Listing 3-12
'''
# <start id="sort-calls"/>
>>> conn.rpush('sort-input', 23, 15, 110, 7)    # First add some items to a list.
4                                               #
>>> conn.sort('sort-input')                     # Sort the items numerically.
['7', '15', '23', '110']                        #
>>> conn.sort('sort-input', alpha=True)         # Sort the items alphabetically.
['110', '15', '23', '7']                        #
>>> conn.hset('d-7', 'field', 5)                # Add some extra data to sort and fetch by.
1L                                              #
>>> conn.hset('d-15', 'field', 1)               #
1L                                              #
>>> conn.hset('d-23', 'field', 9)               #
1L                                              #
>>> conn.hset('d-110', 'field', 3)              #
1L                                              #
>>> conn.sort('sort-input', by='d-*->field')    # Sort the sort-input list using the hash field as the weight.
['15', '110', '7', '23']                        #
>>> conn.sort('sort-input', by='d-*->field', get='d-*->field')  # Fetch the external data as the return value, instead of the sorted items themselves.
['1', '3', '5', '9']                            #
# <end id="sort-calls"/>
'''


# Listing 3-13
'''
# <start id="simple-pipeline-notrans"/>
>>> def notrans():
...     print conn.incr('notrans:')             # Increment the 'notrans:' counter and print the result.
...     time.sleep(.1)                          # Wait for 100 milliseconds.
...     conn.incr('notrans:', -1)               # Decrement the 'notrans:' counter.
...
>>> if 1:
...     for i in xrange(3):                     # Start three threads that run the non-transactional increment, sleep, and decrement.
...         threading.Thread(target=notrans).start()   #
...     time.sleep(.5)                          # Wait 500 milliseconds for the operations to finish.
...
1                                               # Because there is no transaction,
2                                               # the commands from the three threads interleave,
3                                               # so the counter keeps growing during the run.
# <end id="simple-pipeline-notrans"/>
'''


# Listing 3-14
'''
# <start id="simple-pipeline-trans"/>
>>> def trans():
...     pipeline = conn.pipeline()              # Create a transactional pipeline object.
...     pipeline.incr('trans:')                 # Queue the increment of the 'trans:' counter.
...     time.sleep(.1)                          # Wait for 100 milliseconds.
...     pipeline.incr('trans:', -1)             # Queue the decrement of the 'trans:' counter.
...     print pipeline.execute()[0]             # Execute the transaction's commands and print the increment's result.
...
>>> if 1:
...     for i in xrange(3):                     # Start three threads that run the transactional increment, sleep, and decrement.
...         threading.Thread(target=trans).start() #
...     time.sleep(.5)                          # Wait 500 milliseconds for the operations to finish.
...
1                                               # Because each increment/sleep/decrement group runs inside a transaction,
1                                               # the commands do not interleave,
1                                               # and every transaction's result is 1.
# <end id="simple-pipeline-trans"/>
'''


# <start id="exercise-fix-article-vote"/>
def article_vote(conn, user, article):
    # Before voting, check whether the article is still within its voting window.
    cutoff = time.time() - ONE_WEEK_IN_SECONDS
    posted = conn.zscore('time:', article)
    if posted < cutoff:
        return

    article_id = article.partition(':')[-1]
    pipeline = conn.pipeline()
    pipeline.sadd('voted:' + article_id, user)
    # Set the article's voting set to expire at the end of the voting window.
    pipeline.expire('voted:' + article_id, int(posted-cutoff))
    if pipeline.execute()[0]:
        # The client could disconnect between the SADD/EXPIRE pair and the
        # ZINCRBY/HINCRBY pair, so a vote could go uncounted -- but that is
        # better than failing between ZINCRBY and HINCRBY and leaving an
        # inconsistent count.
        pipeline.zincrby('score:', article, VOTE_SCORE)
        pipeline.hincrby(article, 'votes', 1)
        pipeline.execute()
# <end id="exercise-fix-article-vote"/>

# Technically speaking, the article_vote() function above still has a few
# problems; they can be solved with the code below, which uses techniques
# that are not introduced until chapter 4 of the book.

def article_vote(conn, user, article):
    cutoff = time.time() - ONE_WEEK_IN_SECONDS
    posted = conn.zscore('time:', article)
    article_id = article.partition(':')[-1]
    voted = 'voted:' + article_id

    pipeline = conn.pipeline()
    while posted > cutoff:
        try:
            pipeline.watch(voted)
            if not pipeline.sismember(voted, user):
                pipeline.multi()
                pipeline.sadd(voted, user)
                pipeline.expire(voted, int(posted-cutoff))
                pipeline.zincrby('score:', article, VOTE_SCORE)
                pipeline.hincrby(article, 'votes', 1)
                pipeline.execute()
            else:
                pipeline.unwatch()
            return
        except redis.exceptions.WatchError:
            cutoff = time.time() - ONE_WEEK_IN_SECONDS

# <start id="exercise-fix-get_articles"/>
def get_articles(conn, page, order='score:'):
    start = max(page-1, 0) * ARTICLES_PER_PAGE
    end = start + ARTICLES_PER_PAGE - 1

    ids = conn.zrevrange(order, start, end)

    pipeline = conn.pipeline()
    # Queue up the pending HGETALL calls on the pipeline.
    map(pipeline.hgetall, ids)                              #A

    articles = []
    # Execute the pipelined HGETALL commands and add their results,
    # together with the article IDs, to the articles list.
    for id, article_data in zip(ids, pipeline.execute()):   #B
        article_data['id'] = id
        articles.append(article_data)

    return articles
# <end id="exercise-fix-get_articles"/>


# Listing 3-15
'''
# <start id="other-calls-1"/>
>>> conn.set('key', 'value')                    # Set a simple string value to use for the expiration examples.
True                                            #
>>> conn.get('key')                             #
'value'                                         #
>>> conn.expire('key', 2)                       # When we set a key to expire, then try to fetch it
True                                            # after it has expired, we find that it has been deleted.
>>> time.sleep(2)                               #
>>> conn.get('key')                             #
>>> conn.set('key', 'value2')
True
>>> conn.expire('key', 100); conn.ttl('key')    # It is also easy to find out how long a key has left before it expires.
True                                            #
100                                             #
# <end id="other-calls-1"/>
'''

# <start id="exercise-no-recent-zset"/>
THIRTY_DAYS = 30*86400
def check_token(conn, token):
    # To be able to expire login tokens, we store them as string values.
    return conn.get('login:' + token)

def update_token(conn, token, user, item=None):
    # Set the string key's value and expiration time in a single call.
    conn.setex('login:' + token, user, THIRTY_DAYS)
    key = 'viewed:' + token
    if item:
        conn.lrem(key, item)
        conn.rpush(key, item)
        conn.ltrim(key, -25, -1)
        # Unlike strings, Redis has no command that modifies a list and sets
        # its expiration time at the same time,
        # so we call EXPIRE here to expire the list.
        conn.expire(key, THIRTY_DAYS)
        conn.zincrby('viewed:', item, -1)

def add_to_cart(conn, session, item, count):
    key = 'cart:' + session
    if count <= 0:
        conn.hdel(key, item)
    else:
        conn.hset(key, item, count)
    # Hashes, like lists, also need an EXPIRE call to set their expiration time.
    conn.expire(key, THIRTY_DAYS)
# <end id="exercise-no-recent-zset"/>
@ -0,0 +1,377 @@
# coding: utf-8

import os
import time
import unittest
import uuid

import redis


# Listing 4-1
'''
# <start id="persistence-options"/>
save 60 1000                        # Snapshotting persistence options.
stop-writes-on-bgsave-error no      #
rdbcompression yes                  #
dbfilename dump.rdb                 #

appendonly no                       # Append-only file persistence options.
appendfsync everysec                #
no-appendfsync-on-rewrite no        #
auto-aof-rewrite-percentage 100     #
auto-aof-rewrite-min-size 64mb      #

dir ./                              # Shared option; it determines where the snapshot and append-only files are stored.
'''
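
# A small illustrative helper (not a book listing) that inspects a live
# server's persistence settings through CONFIG GET, assuming the server
# allows the CONFIG command:
def show_persistence_config(conn):
    for option in ('save', 'appendonly', 'appendfsync', 'dir'):
        print option, '=', conn.config_get(option)   # e.g. {'save': '60 1000'}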


# Listing 4-2
# <start id="process-logs-progress"/>
# The log-processing function takes a callback as one of its arguments;
# the callback takes a Redis connection and a log line,
# and it calls methods on a pipeline object to execute Redis commands.
def process_logs(conn, path, callback):
    # Fetch the file's current processing progress.
    current_file, offset = conn.mget(
        'progress:file', 'progress:position')

    pipe = conn.pipeline()

    # Use a closure to reduce duplicated code.
    def update_progress():
        # Update the name of the log file being processed and the offset reached within it.
        pipe.mset({
            'progress:file': fname,
            'progress:position': offset
        })
        # This statement actually executes the queued log updates,
        # recording the log file's name and the current progress in Redis.
        pipe.execute()

    # Iterate over the log files in sorted order.
    for fname in sorted(os.listdir(path)):
        # Skip files that have already been processed.
        if fname < current_file:
            continue

        inp = open(os.path.join(path, fname), 'rb')
        # When resuming a log file whose processing a crash interrupted, skip the portion already processed.
        if fname == current_file:
            inp.seek(int(offset, 10))
        else:
            offset = 0

        current_file = None

        # The enumerate function iterates over a sequence of file lines
        # and produces pairs of a line number lno and a line of data line,
        # with line numbers starting from 0.
        for lno, line in enumerate(inp):
            # Handle the log line.
            callback(pipe, line)
            # Update the offset into the file.
            offset = int(offset) + len(line)

            # Update the processing progress every 1000 lines,
            # and again at the end of each log file.
            if not (lno+1) % 1000:
                update_progress()

        update_progress()

        inp.close()
# <end id="process-logs-progress"/>
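
# An illustrative callback for process_logs() (not a book listing; the path
# and counter key are placeholders). Each processed line increments a counter
# on the pipeline that process_logs() manages:
def demo_process_logs(conn):
    def count_line(pipe, line):
        pipe.incr('processed:lines')       # Queued; flushed by update_progress().
    process_logs(conn, '/var/log/myapp', count_line)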


# Listing 4-3
# <start id="wait-for-sync"/>
def wait_for_sync(mconn, sconn):
    identifier = str(uuid.uuid4())
    # Add the token to the master.
    mconn.zadd('sync:wait', identifier, time.time())

    # If necessary, wait for the slave to finish syncing.
    while sconn.info()['master_link_status'] != 'up':
        time.sleep(.001)

    # Wait for the slave to receive the data update.
    while not sconn.zscore('sync:wait', identifier):
        time.sleep(.001)

    # Wait at most one second.
    deadline = time.time() + 1.01
    while time.time() < deadline:
        # Check whether the update has been synced to disk.
        if sconn.info()['aof_pending_bio_fsync'] == 0:
            break
        time.sleep(.001)

    # Clean up the token we just created, along with any old tokens that may have been left behind.
    mconn.zrem('sync:wait', identifier)
    mconn.zremrangebyscore('sync:wait', 0, time.time()-900)
# <end id="wait-for-sync"/>


# Listing 4-4
'''
# <start id="master-failover"/>
user@vpn-master ~:$ ssh root@machine-b.vpn                  # Connect to machine B over the VPN.
Last login: Wed Mar 28 15:21:06 2012 from ...               #
root@machine-b ~:$ redis-cli                                # Start the command-line Redis client to run a few simple operations.
redis 127.0.0.1:6379> SAVE                                  # Run the SAVE command,
OK                                                          # and when it finishes,
redis 127.0.0.1:6379> QUIT                                  # quit the client with QUIT.
root@machine-b ~:$ scp \\                                   # Copy the snapshot to the new master -- machine C.
> /var/local/redis/dump.rdb machine-c.vpn:/var/local/redis/ #
dump.rdb                      100%  525MB  8.1MB/s   01:05  #
root@machine-b ~:$ ssh machine-c.vpn                        # Connect to the new master and start Redis.
Last login: Tue Mar 27 12:42:31 2012 from ...               #
root@machine-c ~:$ sudo /etc/init.d/redis-server start      #
Starting Redis server...                                    #
root@machine-c ~:$ exit
root@machine-b ~:$ redis-cli                                # Tell machine B's Redis to use machine C as its new master.
redis 127.0.0.1:6379> SLAVEOF machine-c.vpn 6379            #
OK                                                          #
redis 127.0.0.1:6379> QUIT
root@machine-b ~:$ exit
user@vpn-master ~:$
# <end id="master-failover"/>
'''

# Listing 4-5
# <start id="_1313_14472_8342"/>
def list_item(conn, itemid, sellerid, price):
    inventory = "inventory:%s"%sellerid
    item = "%s.%s"%(itemid, sellerid)
    end = time.time() + 5
    pipe = conn.pipeline()

    while time.time() < end:
        try:
            # Watch for changes to the user's inventory.
            pipe.watch(inventory)
            # Verify that the user still holds the item to be listed.
            if not pipe.sismember(inventory, itemid):
                # If the item is not in the user's inventory,
                # stop watching the inventory key and return a null value.
                pipe.unwatch()
                return None

            # Add the item to the marketplace.
            pipe.multi()
            pipe.zadd("market:", item, price)
            pipe.srem(inventory, itemid)
            # If execute() does not raise a WatchError,
            # the transaction completed successfully,
            # and the watch on the inventory key has ended.
            pipe.execute()
            return True
        # The user's inventory changed; retry.
        except redis.exceptions.WatchError:
            pass
    return False
# <end id="_1313_14472_8342"/>


# Listing 4-6
# <start id="_1313_14472_8353"/>
def purchase_item(conn, buyerid, itemid, sellerid, lprice):
    buyer = "users:%s"%buyerid
    seller = "users:%s"%sellerid
    item = "%s.%s"%(itemid, sellerid)
    inventory = "inventory:%s"%buyerid
    end = time.time() + 10
    pipe = conn.pipeline()

    while time.time() < end:
        try:
            # Watch for changes to the marketplace and to the buyer's account information.
            pipe.watch("market:", buyer)

            # Check whether the item's price has changed,
            # and whether the buyer has enough money to pay for it.
            price = pipe.zscore("market:", item)
            funds = int(pipe.hget(buyer, "funds"))
            if price != lprice or price > funds:
                pipe.unwatch()
                return None

            # Transfer the buyer's money to the seller, and the seller's item to the buyer.
            pipe.multi()
            pipe.hincrby(seller, "funds", int(price))
            pipe.hincrby(buyer, "funds", int(-price))
            pipe.sadd(inventory, itemid)
            pipe.zrem("market:", item)
            pipe.execute()
            return True
        # Retry if the buyer's account or the marketplace changed.
        except redis.exceptions.WatchError:
            pass

    return False
# <end id="_1313_14472_8353"/>


# Listing 4-7
# <start id="update-token"/>
def update_token(conn, token, user, item=None):
    # Get the timestamp.
    timestamp = time.time()
    # Create the mapping from the token to the logged-in user.
    conn.hset('login:', token, user)
    # Record when the token was last seen.
    conn.zadd('recent:', token, timestamp)
    if item:
        # Record the item the user viewed.
        conn.zadd('viewed:' + token, item, timestamp)
        # Trim old items, keeping only the 25 most recently viewed.
        conn.zremrangebyrank('viewed:' + token, 0, -26)
        # Update the view count for the given item.
        conn.zincrby('viewed:', item, -1)
# <end id="update-token"/>


# Listing 4-8
# <start id="update-token-pipeline"/>
def update_token_pipeline(conn, token, user, item=None):
    timestamp = time.time()
    # Set up the pipeline.
    pipe = conn.pipeline(False)                         #A
    pipe.hset('login:', token, user)
    pipe.zadd('recent:', token, timestamp)
    if item:
        pipe.zadd('viewed:' + token, item, timestamp)
        pipe.zremrangebyrank('viewed:' + token, 0, -26)
        pipe.zincrby('viewed:', item, -1)
    # Execute the commands wrapped by the pipeline.
    pipe.execute()                                      #B
# <end id="update-token-pipeline"/>


# Listing 4-9
# <start id="simple-pipeline-benchmark-code"/>
def benchmark_update_token(conn, duration):
    # The benchmark runs both update_token() and update_token_pipeline().
    for function in (update_token, update_token_pipeline):
        # Set up the counter and the end condition for the test.
        count = 0                                       #B
        start = time.time()                             #B
        end = start + duration                          #B
        while time.time() < end:
            count += 1
            # Call one of the two functions.
            function(conn, 'token', 'user', 'item')     #C
        # Compute the elapsed time.
        delta = time.time() - start                     #D
        # Print the test results.
        print function.__name__, count, delta, count / delta    #E
# <end id="simple-pipeline-benchmark-code"/>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 4-10
|
||||||
|
'''
|
||||||
|
# <start id="redis-benchmark"/>
|
||||||
|
$ redis-benchmark -c 1 -q # 给定“-q”选项可以让程序简化输出结果,
|
||||||
|
PING (inline): 34246.57 requests per second # 给定“-c 1”选项让程序只使用一个客户端来进行测试。
|
||||||
|
PING: 34843.21 requests per second
|
||||||
|
MSET (10 keys): 24213.08 requests per second
|
||||||
|
SET: 32467.53 requests per second
|
||||||
|
GET: 33112.59 requests per second
|
||||||
|
INCR: 32679.74 requests per second
|
||||||
|
LPUSH: 33333.33 requests per second
|
||||||
|
LPOP: 33670.04 requests per second
|
||||||
|
SADD: 33222.59 requests per second
|
||||||
|
SPOP: 34482.76 requests per second
|
||||||
|
LPUSH (again, in order to bench LRANGE): 33222.59 requests per second
|
||||||
|
LRANGE (first 100 elements): 22988.51 requests per second
|
||||||
|
LRANGE (first 300 elements): 13888.89 requests per second
|
||||||
|
LRANGE (first 450 elements): 11061.95 requests per second
|
||||||
|
LRANGE (first 600 elements): 9041.59 requests per second
|
||||||
|
# <end id="redis-benchmark"/>
|
||||||
|
#A We run with the '-q' option to get simple output, and '-c 1' to use a single client
|
||||||
|
#END
|
||||||
|
'''


#--------------- Helpers for testing the code below --------------------------------


class TestCh04(unittest.TestCase):
    def setUp(self):
        import redis
        self.conn = redis.Redis(db=15)
        self.conn.flushdb()

    def tearDown(self):
        self.conn.flushdb()
        del self.conn
        print
        print

    # We can't test process_logs, as that would require writing to disk, which
    # we don't want to do.

    # We also can't test wait_for_sync, as we can't guarantee that there are
    # multiple Redis servers running with the proper configuration

    def test_list_item(self):
        import pprint
        conn = self.conn

        print "We need to set up just enough state so that a user can list an item"
        seller = 'userX'
        item = 'itemX'
        conn.sadd('inventory:' + seller, item)
        i = conn.smembers('inventory:' + seller)
        print "The user's inventory has:", i
        self.assertTrue(i)
        print

        print "Listing the item..."
        l = list_item(conn, item, seller, 10)
        print "Listing the item succeeded?", l
        self.assertTrue(l)
        r = conn.zrange('market:', 0, -1, withscores=True)
        print "The market contains:"
        pprint.pprint(r)
        self.assertTrue(r)
        self.assertTrue(any(x[0] == 'itemX.userX' for x in r))

    def test_purchase_item(self):
        self.test_list_item()
        conn = self.conn

        print "We need to set up just enough state so a user can buy an item"
        buyer = 'userY'
        conn.hset('users:userY', 'funds', 125)
        r = conn.hgetall('users:userY')
        print "The user has some money:", r
        self.assertTrue(r)
        self.assertTrue(r.get('funds'))
        print

        print "Let's purchase an item"
        p = purchase_item(conn, 'userY', 'itemX', 'userX', 10)
        print "Purchasing an item succeeded?", p
        self.assertTrue(p)
        r = conn.hgetall('users:userY')
        print "Their money is now:", r
        self.assertTrue(r)
        i = conn.smembers('inventory:' + buyer)
        print "Their inventory is now:", i
        self.assertTrue(i)
        self.assertTrue('itemX' in i)
        self.assertEquals(conn.zscore('market:', 'itemX.userX'), None)

    def test_benchmark_update_token(self):
        benchmark_update_token(self.conn, 5)

if __name__ == '__main__':
    unittest.main()

@ -0,0 +1,716 @@
# coding: utf-8

import bisect
import contextlib
import csv
from datetime import datetime
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid

import redis

QUIT = False
SAMPLE_COUNT = 100

config_connection = None


# Listing 5-1
# <start id="recent_log"/>
# Set up a mapping that can translate most log severity levels into something
# consistent.
SEVERITY = {
    logging.DEBUG: 'debug',
    logging.INFO: 'info',
    logging.WARNING: 'warning',
    logging.ERROR: 'error',
    logging.CRITICAL: 'critical',
}
SEVERITY.update((name, name) for name in SEVERITY.values())

def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
    # Try to turn the log severity into a simple string.
    severity = str(SEVERITY.get(severity, severity)).lower()
    # Create the key that the messages will be stored under.
    destination = 'recent:%s:%s'%(name, severity)
    # Prepend the current time to the message so we can record when it was sent.
    message = time.asctime() + ' ' + message
    # Use a pipeline to reduce the communication round trips to one.
    pipe = pipe or conn.pipeline()
    # Push the message onto the front of the log list.
    pipe.lpush(destination, message)
    # Trim the log list so that it only holds the 100 most recent messages.
    pipe.ltrim(destination, 0, 99)
    # Execute the two commands.
    pipe.execute()
# <end id="recent_log"/>
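
# A minimal usage sketch (not part of the original listing; it assumes a Redis
# server on localhost and a hypothetical component named 'main'):
'''
conn = redis.Redis()
log_recent(conn, 'main', 'User 235 logged in')
print conn.lrange('recent:main:info', 0, 4)   # newest messages are at the head
'''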


# Listing 5-2
# <start id="common_log"/>
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
    # Handle the log severity level.
    severity = str(SEVERITY.get(severity, severity)).lower()
    # The key that the common log messages will be stored under.
    destination = 'common:%s:%s'%(name, severity)
    # Because the program rotates the common logs every hour, it uses a key to
    # record the hour it is currently operating in.
    start_key = destination + ':start'
    pipe = conn.pipeline()
    end = time.time() + timeout
    while time.time() < end:
        try:
            # Watch the key that records the current hour so that the log
            # rotation is performed correctly.
            pipe.watch(start_key)
            # Get the current time...
            now = datetime.utcnow().timetuple()
            # ...and the current hour.
            hour_start = datetime(*now[:4]).isoformat()

            existing = pipe.get(start_key)
            # Open a transaction.
            pipe.multi()
            # If the current common-log list belongs to a previous hour...
            if existing and existing < hour_start:
                # ...archive the old common log data.
                pipe.rename(destination, destination + ':last')
                pipe.rename(start_key, destination + ':pstart')
                # Update the hour that we are currently operating in.
                pipe.set(start_key, hour_start)

            # Increment the counter that records how often this message appears.
            pipe.zincrby(destination, message)
            # log_recent() records the message and calls execute() for us.
            log_recent(pipe, name, message, severity, pipe)
            return
        except redis.exceptions.WatchError:
            # If the watch failed because another client was archiving,
            # simply retry.
            continue
# <end id="common_log"/>


# Listing 5-3
# <start id="update_counter"/>
# Counter precisions in seconds: 1 second, 5 seconds, 1 minute, 5 minutes,
# 1 hour, 5 hours, and 1 day -- adjust them as necessary.
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400] #A

def update_counter(conn, name, count=1, now=None):
    # Get the current time so we know which time slice to increment.
    now = now or time.time()
    # Create a transactional pipeline so that the later cleanup can work correctly.
    pipe = conn.pipeline()
    # Add entries for every precision that we record.
    for prec in PRECISION:
        # Get the start of the current time slice.
        pnow = int(now / prec) * prec
        # The hash that the count data will be stored in.
        hash = '%s:%s'%(prec, name)
        # Record a reference to this counter in a sorted set, with a score
        # of 0, so that the cleanup routine can find it later.
        pipe.zadd('known:', hash, 0)
        # Update the counter for the given name and precision.
        pipe.hincrby('count:' + hash, pnow, count)
    pipe.execute()
# <end id="update_counter"/>
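
# A worked example of the time-slice arithmetic above (timestamp assumed for
# illustration): with now = 1361459194 and the 5-minute precision (prec = 300),
# int(1361459194 / 300) * 300 == 1361459100, so the command issued is
# HINCRBY count:300:name 1361459100 1 -- every update made within that
# 5-minute window lands on the same hash field.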


# Listing 5-4
# <start id="get_counter"/>
def get_counter(conn, name, precision):
    # The name of the key that the counter data is stored in.
    hash = '%s:%s'%(precision, name)
    # Fetch the counter data from Redis.
    data = conn.hgetall('count:' + hash)
    # Convert the counter data into the expected format.
    to_return = []
    for key, value in data.iteritems():
        to_return.append((int(key), int(value)))
    # Sort the data so that older samples come first.
    to_return.sort()
    return to_return
# <end id="get_counter"/>

# <start id="clean_counters"/>
def clean_counters(conn):
    pipe = conn.pipeline(True)
    # Keep a record of the number of cleanup passes so that counters that are
    # updated at different rates can be handled fairly.
    passes = 0
    # Keep cleaning out counters until we are told to quit.
    while not QUIT:
        # Record the start of the pass so we can calculate its duration.
        start = time.time()
        # Incrementally iterate over all of the known counters.
        index = 0
        while index < conn.zcard('known:'):
            # Fetch the next counter to check.
            hash = conn.zrange('known:', index, index)
            index += 1
            if not hash:
                break
            hash = hash[0]
            # Get the precision of the counter.
            prec = int(hash.partition(':')[0])
            # Because the cleanup routine makes a pass roughly every 60
            # seconds, use the counter's update frequency to decide whether
            # it actually needs to be cleaned during this pass.
            bprec = int(prec // 60) or 1
            # If this counter does not need to be cleaned during this pass
            # (for example, after only three passes a counter that is updated
            # every 5 minutes does not need cleaning yet), check the next one.
            if passes % bprec:
                continue

            hkey = 'count:' + hash
            # Based on the precision and the number of samples to keep,
            # calculate the cutoff time before which samples should be removed.
            cutoff = time.time() - SAMPLE_COUNT * prec
            # Fetch the sample start times and convert them from strings to integers.
            samples = map(int, conn.hkeys(hkey))
            # Calculate the number of samples that need to be removed.
            samples.sort()
            remove = bisect.bisect_right(samples, cutoff)

            # Remove the samples as necessary.
            if remove:
                conn.hdel(hkey, *samples[:remove])
                # The hash may now be empty.
                if remove == len(samples):
                    try:
                        # Watch the counter hash before trying to modify it.
                        pipe.watch(hkey)
                        # Verify that the counter hash is empty and, if so,
                        # remove it from the sorted set of known counters.
                        if not pipe.hlen(hkey):
                            pipe.multi()
                            pipe.zrem('known:', hash)
                            pipe.execute()
                            # Because we deleted a counter, the next loop
                            # iteration can reuse the same index.
                            index -= 1
                        else:
                            # The counter hash is not empty;
                            # keep it in the sorted set of known counters.
                            pipe.unwatch()
                    # Another process added data to this counter hash, so it
                    # is no longer empty; keep it in the sorted set of known
                    # counters.
                    except redis.exceptions.WatchError:
                        pass

        # Update the pass count and pass duration so that the cleanup
        # frequency keeps up with how often the counters are updated.
        passes += 1
        duration = min(int(time.time() - start) + 1, 60)
        # Sleep for the remainder of the 60-second pass, or for at least
        # 1 second if the whole 60 seconds was used up.
        time.sleep(max(60 - duration, 1))
# <end id="clean_counters"/>


# Listing 5-6
# <start id="update_stats"/>
def update_stats(conn, context, type, value, timeout=5):
    # The key that the statistics will be stored under.
    destination = 'stats:%s:%s'%(context, type)
    # Like common_log(), handle the current hour and the previous hour.
    start_key = destination + ':start'
    pipe = conn.pipeline(True)
    end = time.time() + timeout
    while time.time() < end:
        try:
            pipe.watch(start_key)
            now = datetime.utcnow().timetuple()
            hour_start = datetime(*now[:4]).isoformat()

            existing = pipe.get(start_key)
            pipe.multi()
            if existing and existing < hour_start:
                pipe.rename(destination, destination + ':last')
                pipe.rename(start_key, destination + ':pstart')
                pipe.set(start_key, hour_start)

            tkey1 = str(uuid.uuid4())
            tkey2 = str(uuid.uuid4())
            # Add the value to the temporary keys.
            pipe.zadd(tkey1, 'min', value)
            pipe.zadd(tkey2, 'max', value)
            # Union the temporary keys into the destination stats key, using
            # the appropriate MIN and MAX aggregates.
            pipe.zunionstore(destination,
                [destination, tkey1], aggregate='min')
            pipe.zunionstore(destination,
                [destination, tkey2], aggregate='max')

            # Delete the temporary keys.
            pipe.delete(tkey1, tkey2)
            # Update the count, sum, and sum-of-squares members of the sorted set.
            pipe.zincrby(destination, 'count')
            pipe.zincrby(destination, 'sum', value)
            pipe.zincrby(destination, 'sumsq', value*value)

            # Return the base counter info so that the caller can do
            # something interesting with it if necessary.
            return pipe.execute()[-3:]
        except redis.exceptions.WatchError:
            # If the hour just turned over and the stats have already been
            # archived, retry.
            continue
# <end id="update_stats"/>


# Listing 5-7
# <start id="get_stats"/>
def get_stats(conn, context, type):
    # The key that the statistics are stored under.
    key = 'stats:%s:%s'%(context, type)
    # Fetch the basic statistics and package them as a dictionary.
    data = dict(conn.zrange(key, 0, -1, withscores=True))
    # Calculate the average.
    data['average'] = data['sum'] / data['count']
    # Prepare the first part of the standard deviation calculation.
    numerator = data['sumsq'] - data['sum'] ** 2 / data['count']
    # Finish the standard deviation.
    data['stddev'] = (numerator / (data['count'] - 1 or 1)) ** .5
    return data
# <end id="get_stats"/>
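
# The calculation above is the usual sample standard deviation computed from
# running sums: sqrt((sumsq - sum**2 / count) / (count - 1)). A quick check
# with the assumed samples 5, 10, and 15: count=3, sum=30, sumsq=350, so the
# numerator is 350 - 900/3 = 50 and the stddev is (50 / 2) ** .5 == 5.0.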


# Listing 5-8
# <start id="access_time_context_manager"/>
# Turn this Python generator into a context manager.
@contextlib.contextmanager
def access_time(conn, context):
    # Record the time before the block executes.
    start = time.time()
    # Run the wrapped block of code.
    yield

    # Calculate how long the block took to execute.
    delta = time.time() - start
    # Update the statistics for this context.
    stats = update_stats(conn, context, 'AccessTime', delta)
    # Calculate the average access time for the page.
    average = stats[1] / stats[0]

    pipe = conn.pipeline(True)
    # Add the average to the sorted set that records the slowest access times.
    pipe.zadd('slowest:AccessTime', context, average)
    # Keep only the 100 slowest entries in the AccessTime sorted set.
    pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
    pipe.execute()
# <end id="access_time_context_manager"/>


# <start id="access_time_use"/>
# This view takes a Redis connection and a callback that generates content.
def process_view(conn, callback):
    # This is how the context manager that calculates and records access
    # times wraps a block of code.
    with access_time(conn, request.path):
        # This statement runs once the yield statement inside the context
        # manager is reached.
        return callback()
# <end id="access_time_use"/>


# Listing 5-9
# <start id="_1314_14473_9188"/>
def ip_to_score(ip_address):
    score = 0
    for v in ip_address.split('.'):
        score = score * 256 + int(v, 10)
    return score
# <end id="_1314_14473_9188"/>
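
# A worked example of the conversion above: each octet acts as a base-256
# digit, so '172.16.0.1' becomes
# ((172 * 256 + 16) * 256 + 0) * 256 + 1 == 2886729729,
# directly comparable with the integer form used in the GeoLiteCity CSV files.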


# Listing 5-10
# <start id="_1314_14473_9191"/>
# This function expects to be given the location of the GeoLiteCity-Blocks.csv file.
def import_ips_to_redis(conn, filename):
    csv_file = csv.reader(open(filename, 'rb'))
    for count, row in enumerate(csv_file):
        # Convert the IP address to a score as necessary.
        start_ip = row[0] if row else ''
        if 'i' in start_ip.lower():
            continue
        if '.' in start_ip:
            start_ip = ip_to_score(start_ip)
        elif start_ip.isdigit():
            start_ip = int(start_ip, 10)
        else:
            # Skip over the file's header row and any malformed entries.
            continue

        # Construct the unique city ID.
        city_id = row[2] + '_' + str(count)
        # Add the city ID and its IP address score to the sorted set.
        conn.zadd('ip2cityid:', city_id, start_ip)
# <end id="_1314_14473_9191"/>


# Listing 5-11
# <start id="_1314_14473_9194"/>
# This function expects to be given the location of the GeoLiteCity-Location.csv file.
def import_cities_to_redis(conn, filename):
    for row in csv.reader(open(filename, 'rb')):
        if len(row) < 4 or not row[0].isdigit():
            continue
        row = [i.decode('latin-1') for i in row]
        # Prepare the information to be added to the hash.
        city_id = row[0]
        country = row[1]
        region = row[2]
        city = row[3]
        # Add the city information to Redis.
        conn.hset('cityid2city:', city_id,
            json.dumps([city, region, country]))
# <end id="_1314_14473_9194"/>


# Listing 5-12
# <start id="_1314_14473_9197"/>
def find_city_by_ip(conn, ip_address):
    # Convert the IP address to a score so it can be used with ZREVRANGEBYSCORE.
    if isinstance(ip_address, str):                             #A
        ip_address = ip_to_score(ip_address)                    #A

    # Look up the unique city ID.
    city_id = conn.zrevrangebyscore(                            #B
        'ip2cityid:', ip_address, 0, start=0, num=1)            #B

    if not city_id:
        return None

    # Convert the unique city ID to the plain city ID.
    city_id = city_id[0].partition('_')[0]                      #C
    # Fetch the city information from the hash.
    return json.loads(conn.hget('cityid2city:', city_id))       #D
# <end id="_1314_14473_9197"/>


# Listing 5-13
# <start id="is_under_maintenance"/>
LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False

def is_under_maintenance(conn):
    # Declare both variables global so we can write to them later.
    global LAST_CHECKED, IS_UNDER_MAINTENANCE       #A

    # Has it been more than 1 second since the last check?
    if LAST_CHECKED < time.time() - 1:              #B
        # Update the time of the last check.
        LAST_CHECKED = time.time()                  #C
        # Check whether the system is under maintenance.
        IS_UNDER_MAINTENANCE = bool(                #D
            conn.get('is-under-maintenance'))       #D

    # Return a boolean that says whether the system is under maintenance.
    return IS_UNDER_MAINTENANCE                     #E
# <end id="is_under_maintenance"/>


# Listing 5-14
# <start id="set_config"/>
def set_config(conn, type, component, config):
    conn.set(
        'config:%s:%s'%(type, component),
        json.dumps(config))
# <end id="set_config"/>
#END


# Listing 5-15
# <start id="get_config"/>
CONFIGS = {}
CHECKED = {}

def get_config(conn, type, component, wait=1):
    key = 'config:%s:%s'%(type, component)

    # Check to see whether this component's configuration should be refreshed.
    if CHECKED.get(key) < time.time() - wait:
        # It should, so record the time of this check.
        CHECKED[key] = time.time()
        # Fetch the configuration for the component stored in Redis.
        config = json.loads(conn.get(key) or '{}')
        # Convert potentially Unicode keyword arguments into string keyword arguments.
        config = dict((str(k), config[k]) for k in config)
        # Get the configuration that the component is currently using.
        old_config = CONFIGS.get(key)

        # If the two configurations differ...
        if config != old_config:
            # ...update the component's configuration.
            CONFIGS[key] = config

    return CONFIGS.get(key)
# <end id="get_config"/>
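
# A minimal usage sketch (not part of the original listings; the component
# name 'web' is assumed for illustration):
'''
set_config(conn, 'redis', 'web', {'db': 1})
print get_config(conn, 'redis', 'web')   # {'db': 1}; re-fetched at most once per `wait` seconds
'''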


# Listing 5-16
# <start id="redis_connection"/>
REDIS_CONNECTIONS = {}

# Pass the name of the application component to the decorator.
def redis_connection(component, wait=1):                        #A
    # Cache the configuration key, because it is needed every time the
    # decorated function is called.
    key = 'config:redis:' + component                           #B
    # The wrapper takes a function and wraps it with another function.
    def wrapper(function):                                      #C
        # Copy some useful metadata from the wrapped function onto the wrapper.
        @functools.wraps(function)                              #D
        # Create the function that manages the connection information.
        def call(*args, **kwargs):                              #E
            # Fetch the old configuration, if any.
            old_config = CONFIGS.get(key, object())             #F
            # Fetch the new configuration, if any.
            _config = get_config(                               #G
                config_connection, 'redis', component, wait)    #G

            config = {}
            # Process the configuration so it can be used to create a Redis connection.
            for k, v in _config.iteritems():                    #L
                config[k.encode('utf-8')] = v                   #L

            # If the new and old configurations differ, create a new connection.
            if config != old_config:                            #H
                REDIS_CONNECTIONS[key] = redis.Redis(**config)  #H

            # Pass the Redis connection and the other matched arguments to
            # the wrapped function, call it, and return its result.
            return function(                                    #I
                REDIS_CONNECTIONS.get(key), *args, **kwargs)    #I
        # Return the wrapped function.
        return call                                             #J
    # Return the wrapper that wraps our Redis-using function.
    return wrapper                                              #K
# <end id="redis_connection"/>


# Listing 5-17
'''
# <start id="recent_log_decorator"/>
@redis_connection('logs')               # The redis_connection() decorator is very easy to use.
def log_recent(conn, app, message):     # The function definition looks the same as before.
    'the old log_recent() code'

log_recent('main', 'User 235 logged in')    # We no longer need to pass the log server's connection when calling log_recent().
# <end id="recent_log_decorator"/>
'''

#--------------- Helpers for testing the code below --------------------------------

class request:
    pass

# a faster version with pipelines for actual testing
def import_ips_to_redis(conn, filename):
    csv_file = csv.reader(open(filename, 'rb'))
    pipe = conn.pipeline(False)
    for count, row in enumerate(csv_file):
        start_ip = row[0] if row else ''
        if 'i' in start_ip.lower():
            continue
        if '.' in start_ip:
            start_ip = ip_to_score(start_ip)
        elif start_ip.isdigit():
            start_ip = int(start_ip, 10)
        else:
            continue

        city_id = row[2] + '_' + str(count)
        pipe.zadd('ip2cityid:', city_id, start_ip)
        if not (count+1) % 1000:
            pipe.execute()
    pipe.execute()

def import_cities_to_redis(conn, filename):
    pipe = conn.pipeline(False)
    for count, row in enumerate(csv.reader(open(filename, 'rb'))):
        if len(row) < 4 or not row[0].isdigit():
            continue
        row = [i.decode('latin-1') for i in row]
        city_id = row[0]
        country = row[1]
        region = row[2]
        city = row[3]
        pipe.hset('cityid2city:', city_id,
            json.dumps([city, region, country]))
        if not (count+1) % 1000:
            pipe.execute()
    pipe.execute()

class TestCh05(unittest.TestCase):
    def setUp(self):
        global config_connection
        import redis
        self.conn = config_connection = redis.Redis(db=15)
        self.conn.flushdb()

    def tearDown(self):
        self.conn.flushdb()
        del self.conn
        global config_connection, QUIT, SAMPLE_COUNT
        config_connection = None
        QUIT = False
        SAMPLE_COUNT = 100
        print
        print

    def test_log_recent(self):
        import pprint
        conn = self.conn

        print "Let's write a few logs to the recent log"
        for msg in xrange(5):
            log_recent(conn, 'test', 'this is message %s'%msg)
        recent = conn.lrange('recent:test:info', 0, -1)
        print "The current recent message log has this many messages:", len(recent)
        print "Those messages include:"
        pprint.pprint(recent[:10])
        self.assertTrue(len(recent) >= 5)

    def test_log_common(self):
        import pprint
        conn = self.conn

        print "Let's write some items to the common log"
        for count in xrange(1, 6):
            for i in xrange(count):
                log_common(conn, 'test', "message-%s"%count)
        common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
        print "The current number of common messages is:", len(common)
        print "Those common messages are:"
        pprint.pprint(common)
        self.assertTrue(len(common) >= 5)

    def test_counters(self):
        import pprint
        global QUIT, SAMPLE_COUNT
        conn = self.conn

        print "Let's update some counters for now and a little in the future"
        now = time.time()
        for delta in xrange(10):
            update_counter(conn, 'test', count=random.randrange(1,5), now=now+delta)
        counter = get_counter(conn, 'test', 1)
        print "We have some per-second counters:", len(counter)
        self.assertTrue(len(counter) >= 10)
        counter = get_counter(conn, 'test', 5)
        print "We have some per-5-second counters:", len(counter)
        print "These counters include:"
        pprint.pprint(counter[:10])
        self.assertTrue(len(counter) >= 2)
        print

        tt = time.time
        def new_tt():
            return tt() + 2*86400
        time.time = new_tt

        print "Let's clean out some counters by setting our sample count to 0"
        SAMPLE_COUNT = 0
        t = threading.Thread(target=clean_counters, args=(conn,))
        t.setDaemon(1) # to make sure it dies if we ctrl+C quit
        t.start()
        time.sleep(1)
        QUIT = True
        time.time = tt
        counter = get_counter(conn, 'test', 86400)
        print "Did we clean out all of the counters?", not counter
        self.assertFalse(counter)

    def test_stats(self):
        import pprint
        conn = self.conn

        print "Let's add some data for our statistics!"
        for i in xrange(5):
            r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
        print "We have some aggregate statistics:", r
        rr = get_stats(conn, 'temp', 'example')
        print "Which we can also fetch manually:"
        pprint.pprint(rr)
        self.assertTrue(rr['count'] >= 5)

    def test_access_time(self):
        import pprint
        conn = self.conn

        print "Let's calculate some access times..."
        for i in xrange(10):
            with access_time(conn, "req-%s"%i):
                time.sleep(.5 + random.random())
        print "The slowest access times are:"
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)
        print

        def cb():
            time.sleep(1 + random.random())

        print "Let's use the callback version..."
        for i in xrange(5):
            request.path = 'cbreq-%s'%i
            process_view(conn, cb)
        print "The slowest access times are:"
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)

    def test_ip_lookup(self):
        conn = self.conn

        try:
            open('GeoLiteCity-Blocks.csv', 'rb')
            open('GeoLiteCity-Location.csv', 'rb')
        except:
            print "********"
            print "You do not have the GeoLiteCity database available, aborting test"
            print "Please have the following two files in the current path:"
            print "GeoLiteCity-Blocks.csv"
            print "GeoLiteCity-Location.csv"
            print "********"
            return

        print "Importing IP addresses to Redis... (this may take a while)"
        import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
        ranges = conn.zcard('ip2cityid:')
        print "Loaded ranges into Redis:", ranges
        self.assertTrue(ranges > 1000)
        print

        print "Importing Location lookups to Redis... (this may take a while)"
        import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
        cities = conn.hlen('cityid2city:')
        print "Loaded city lookups into Redis:", cities
        self.assertTrue(cities > 1000)
        print

        print "Let's lookup some locations!"
        rr = random.randrange
        for i in xrange(5):
            print find_city_by_ip(conn, '%s.%s.%s.%s'%(rr(1,255), rr(256), rr(256), rr(256)))

    def test_is_under_maintenance(self):
        print "Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn)
        self.conn.set('is-under-maintenance', 'yes')
        print "We cached this, so it should be the same:", is_under_maintenance(self.conn)
        time.sleep(1)
        print "But after a sleep, it should change:", is_under_maintenance(self.conn)
        print "Cleaning up..."
        self.conn.delete('is-under-maintenance')
        time.sleep(1)
        print "Should be False again:", is_under_maintenance(self.conn)

    def test_config(self):
        print "Let's set a config and then get a connection from that config..."
        set_config(self.conn, 'redis', 'test', {'db':15})
        @redis_connection('test')
        def test(conn2):
            return bool(conn2.info())
        print "We can run commands from the configured connection:", test()

if __name__ == '__main__':
    unittest.main()

File diff suppressed because it is too large

@ -0,0 +1,868 @@
# coding: utf-8

import math
import re
import unittest
import uuid

import redis

AVERAGE_PER_1K = {}

# Listing 7-1
# <start id="tokenize-and-index"/>
# A set of stop words, predefined from a list found on the web.
STOP_WORDS = set('''able about across after all almost also am among
an and any are as at be because been but by can cannot could dear did
do does either else ever every for from get got had has have he her
hers him his how however if in into is it its just least let like
likely may me might most must my neither no nor not of off often on
only or other our own rather said say says she should since so some
than that the their them then there these they this tis to too twas us
wants was we were what when where which while who whom why will with
would yet you your'''.split())

# A regular expression that extracts words as we defined them.
WORDS_RE = re.compile("[a-z']{2,}")

def tokenize(content):
    # The Python set that the document's words will be stored in.
    words = set()
    # Iterate over all of the words in the content.
    for match in WORDS_RE.finditer(content.lower()):
        # Strip any leading or trailing single quotes from the word.
        word = match.group().strip("'")
        # Keep any words that are still at least two characters long.
        if len(word) >= 2:
            words.add(word)
    # Return the set of kept words that are not stop words.
    return words - STOP_WORDS

def index_document(conn, docid, content):
    # Tokenize the content and get the resulting words.
    words = tokenize(content)

    pipeline = conn.pipeline(True)
    # Add the document to the appropriate inverted-index sets.
    for word in words:
        pipeline.sadd('idx:' + word, docid)
    # Return the number of unique, non-stop words that were added to the
    # index for this document.
    return len(pipeline.execute())
# <end id="tokenize-and-index"/>
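
# An illustrative run (document ID 'doc1' is assumed; set ordering may vary):
'''
print tokenize('In order to construct our index, we must first tokenize!')
# set(['tokenize', 'construct', 'index', 'order', 'first'])
index_document(conn, 'doc1', 'some searchable content')
print conn.smembers('idx:searchable')   # set(['doc1'])
'''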


# Listing 7-2
# <start id="_1314_14473_9158"/>
def _set_common(conn, method, names, ttl=30, execute=True):
    # Create a new temporary identifier.
    id = str(uuid.uuid4())
    # Set up a transactional pipeline so that each call has consistent results.
    pipeline = conn.pipeline(True) if execute else conn
    # Add the 'idx:' prefix to each term.
    names = ['idx:' + name for name in names]
    # Set up the call for one of the set operations.
    getattr(pipeline, method)('idx:' + id, *names)
    # Instruct Redis to expire the result set in the future.
    pipeline.expire('idx:' + id, ttl)
    if execute:
        # Actually execute the operation.
        pipeline.execute()
    # Return the ID of the result set to the caller for further processing.
    return id

# A helper function to perform set intersections.
def intersect(conn, items, ttl=30, _execute=True):
    return _set_common(conn, 'sinterstore', items, ttl, _execute)

# A helper function to perform set unions.
def union(conn, items, ttl=30, _execute=True):
    return _set_common(conn, 'sunionstore', items, ttl, _execute)

# A helper function to perform set differences.
def difference(conn, items, ttl=30, _execute=True):
    return _set_common(conn, 'sdiffstore', items, ttl, _execute)
# <end id="_1314_14473_9158"/>
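
# A sketch of how these helpers compose (it assumes the documents above have
# been indexed): the returned ID names a temporary 'idx:' set holding the result.
'''
result_id = intersect(conn, ['content', 'index'])
print conn.smembers('idx:' + result_id)   # documents containing both words;
                                          # the result set expires after `ttl` seconds
'''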


# Listing 7-3
# <start id="parse-query"/>
# A regular expression that finds wanted words, unwanted words, and synonyms.
QUERY_RE = re.compile("[+-]?[a-z']{2,}")

def parse(query):
    # A set of the words that are unwanted in the results.
    unwanted = set()
    # A list of the word groups that will be intersected.
    all = []
    # The set of synonyms discovered so far.
    current = set()
    # Iterate over all of the words in the search query.
    for match in QUERY_RE.finditer(query.lower()):
        # Check for a + or - prefix on the word.
        word = match.group()
        prefix = word[:1]
        if prefix in '+-':
            word = word[1:]
        else:
            prefix = None

        # Strip any leading or trailing single quotes and skip stop words.
        word = word.strip("'")
        if len(word) < 2 or word in STOP_WORDS:
            continue

        # If the word is unwanted, add it to the set of unwanted words.
        if prefix == '-':
            unwanted.add(word)
            continue

        # If the synonym set is not empty and we encounter a word without a
        # + prefix, start a new synonym set.
        if current and not prefix:
            all.append(list(current))
            current = set()
        # Add the word being processed to the current synonym set.
        current.add(word)

    # Add any remaining words to the final intersection.
    if current:
        all.append(list(current))
    return all, list(unwanted)
# <end id="parse-query"/>
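
# An illustrative parse (ordering inside each synonym group may vary): words
# prefixed with '+' join the previous word's synonym group, and words prefixed
# with '-' are unwanted.
'''
print parse('connect +connection chat -proxy')
# ([['connect', 'connection'], ['chat']], ['proxy'])
'''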


# Listing 7-4
# <start id="search-query"/>
def parse_and_search(conn, query, ttl=30):
    # Parse the query.
    all, unwanted = parse(query)
    # If the query contains only stop words, there are no search results.
    if not all:
        return None

    to_intersect = []
    # Iterate over each group of synonyms.
    for syn in all:
        # If the group contains more than one word, perform a union.
        if len(syn) > 1:
            to_intersect.append(union(conn, syn, ttl=ttl))
        # If the group contains only one word, use that word directly.
        else:
            to_intersect.append(syn[0])

    # If there is more than one word or union result, perform an intersection.
    if len(to_intersect) > 1:
        intersect_result = intersect(conn, to_intersect, ttl=ttl)
    # If there is only one, use it as the intersection result.
    else:
        intersect_result = to_intersect[0]

    # If the caller specified unwanted words, remove any documents that
    # contain them from the intersection result, then return the search result.
    if unwanted:
        unwanted.insert(0, intersect_result)
        return difference(conn, unwanted, ttl=ttl)

    # Otherwise, return the intersection result as the search result.
    return intersect_result
# <end id="search-query"/>


# Listing 7-5
# <start id="sorted-searches"/>
# Optional arguments let the caller pass a previous search result, specify the
# sort order, and paginate the results.
def search_and_sort(conn, query, id=None, ttl=300, sort="-updated",
                    start=0, num=20):
    # Determine which document attribute to sort by, and whether to sort
    # ascending or descending.
    desc = sort.startswith('-')
    sort = sort.lstrip('-')
    by = "kb:doc:*->" + sort
    # Tell Redis whether to sort numerically or alphabetically.
    alpha = sort not in ('updated', 'id', 'created')

    # If the caller passed a previous search result that still exists, extend
    # its lifetime.
    if id and not conn.expire(id, ttl):
        id = None

    # If the caller did not pass a previous result, or the passed result has
    # expired, perform a new search.
    if not id:
        id = parse_and_search(conn, query, ttl=ttl)

    pipeline = conn.pipeline(True)
    # Get the total size of the result set.
    pipeline.scard('idx:' + id)
    # Sort the result set by the chosen attribute, fetching only the part
    # that the caller asked for.
    pipeline.sort('idx:' + id, by=by, alpha=alpha,
        desc=desc, start=start, num=num)
    results = pipeline.execute()

    # Return the number of results, the results themselves, and the result
    # ID, which can be used to fetch this search's results again later.
    return results[0], results[1], id
# <end id="sorted-searches"/>


# Listing 7-6
# <start id="zset_scored_composite"/>
# As before, the function takes the ID of a previous search result as an
# optional argument so that it can be paginated while it is still available.
def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0,
                    start=0, num=20, desc=True):

    # Try to refresh the lifetime of a previous search result.
    if id and not conn.expire(id, ttl):
        id = None

    # If the passed result has expired, or this is the first time this query
    # has been searched, perform the standard set search.
    if not id:
        id = parse_and_search(conn, query, ttl=ttl)

    scored_search = {
        # The function also uses the passed-in ID key when calculating the
        # union, but it is not used as a sort weight.
        id: 0,
        # Balance the document scores between update time and vote count;
        # depending on the data being sorted, votes can be scaled by 1, 10,
        # 100, or even higher.
        'sort:update': update,
        'sort:votes': vote
    }
    # Perform the intersection using the helper defined in listing 7-7.
    id = zintersect(conn, scored_search, ttl)

    pipeline = conn.pipeline(True)
    # Get the size of the result sorted set.
    pipeline.zcard('idx:' + id)
    # Fetch one page of the search results.
    if desc:
        pipeline.zrevrange('idx:' + id, start, start + num - 1)
    else:
        pipeline.zrange('idx:' + id, start, start + num - 1)
    results = pipeline.execute()

    # Return the search results, and the ID for pagination.
    return results[0], results[1], id
# <end id="zset_scored_composite"/>


# Listing 7-7
# <start id="zset_helpers"/>
def _zset_common(conn, method, scores, ttl=30, **kw):
    # Create a new temporary identifier.
    id = str(uuid.uuid4())
    # The caller can pass an argument that decides whether the transactional
    # pipeline should be executed.
    execute = kw.pop('_execute', True)
    # Set up a transactional pipeline so that each call has consistent results.
    pipeline = conn.pipeline(True) if execute else conn
    # Add the 'idx:' prefix to the input keys.
    for key in scores.keys():
        scores['idx:' + key] = scores.pop(key)
    # Set up the call for one of the sorted-set operations.
    getattr(pipeline, method)('idx:' + id, scores, **kw)
    # Set the expiration time for the result sorted set.
    pipeline.expire('idx:' + id, ttl)
    # Unless the caller explicitly asked for execution to be delayed, actually
    # execute the operation.
    if execute:
        pipeline.execute()
    # Return the ID of the result to the caller for further processing.
    return id

# A helper function to perform sorted-set intersections.
def zintersect(conn, items, ttl=30, **kw):
    return _zset_common(conn, 'zinterstore', dict(items), ttl, **kw)

# A helper function to perform sorted-set unions.
def zunion(conn, items, ttl=30, **kw):
    return _zset_common(conn, 'zunionstore', dict(items), ttl, **kw)
# <end id="zset_helpers"/>


# Listing 7-8
# <start id="string-to-score"/>
def string_to_score(string, ignore_case=False):
    # The caller can decide whether the prefix index should be built in a
    # case-insensitive way.
    if ignore_case:
        string = string.lower()

    # Convert the first 6 characters of the string into their numeric values:
    # null becomes 0, tab becomes 9, capital A becomes 65, and so on.
    pieces = map(ord, string[:6])
    # For strings shorter than 6 characters, append placeholder values to
    # mark the string as short.
    while len(pieces) < 6:
        pieces.append(-1)

    score = 0
    # Fold each value from the conversion into the score, handling the null
    # character differently from the placeholder.
    for piece in pieces:
        score = score * 257 + piece + 1

    # Use one extra binary bit to signal whether the string is exactly
    # 6 characters long; this lets us properly distinguish "robber" from
    # "robbers", though it does not help distinguish "robbers" from "robbery".
    return score * 2 + (len(string) > 6)
# <end id="string-to-score"/>
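
# A brief check of the prefix ordering that these scores preserve: because
# each character is folded in as a base-257 digit, string_to_score('ab') <
# string_to_score('ac'), and 'robbers' scores exactly one more than 'robber'
# thanks to the final length bit.
'''
for s in ('ab', 'ac', 'robber', 'robbers'):
    print s, string_to_score(s)
'''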

def to_char_map(set):
    out = {}
    for pos, val in enumerate(sorted(set)):
        out[val] = pos-1
    return out

LOWER = to_char_map(set([-1]) | set(xrange(ord('a'), ord('z')+1)))
ALPHA = to_char_map(set(LOWER) | set(xrange(ord('A'), ord('Z')+1)))
LOWER_NUMERIC = to_char_map(set(LOWER) | set(xrange(ord('0'), ord('9')+1)))
ALPHA_NUMERIC = to_char_map(set(LOWER_NUMERIC) | set(ALPHA))

def string_to_score_generic(string, mapping):
    length = int(52 / math.log(len(mapping), 2))    #A

    pieces = map(ord, string[:length])              #B
    while len(pieces) < length:                     #C
        pieces.append(-1)                           #C

    score = 0
    for piece in pieces:                            #D
        value = mapping[piece]                      #D
        score = score * len(mapping) + value + 1    #D

    return score * 2 + (len(string) > length)       #E


# <start id="zadd-string"/>
def zadd_string(conn, name, *args, **kwargs):
    pieces = list(args)                     # Combine both kinds of argument
    for piece in kwargs.iteritems():        # that may have been passed in,
        pieces.extend(piece)                # so they can be modified below.

    for i, v in enumerate(pieces):
        if i & 1:                           # Convert string scores
            pieces[i] = string_to_score(v)  # into integer scores.

    return conn.zadd(name, *pieces)         # Call the existing ZADD method.
# <end id="zadd-string"/>


# Listing 7-9
# <start id="ecpm_helpers"/>
def cpc_to_ecpm(views, clicks, cpc):
    return 1000. * cpc * clicks / views

def cpa_to_ecpm(views, actions, cpa):
    # Because the click-through rate is clicks divided by views, and the
    # action rate is actions divided by clicks, multiplying the two gives
    # actions divided by views.
    return 1000. * cpa * actions / views
# <end id="ecpm_helpers"/>
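
# A worked example of the arithmetic (numbers assumed for illustration): a
# cost-per-click ad priced at $0.25 that earned 20 clicks over 10000 views
# has an eCPM of 1000 * 0.25 * 20 / 10000 == 0.5 dollars per thousand views.
'''
print cpc_to_ecpm(10000, 20, .25)   # 0.5
'''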


# Listing 7-10
# <start id="index_ad"/>
TO_ECPM = {
    'cpc': cpc_to_ecpm,
    'cpa': cpa_to_ecpm,
    'cpm': lambda *args:args[-1],
}

def index_ad(conn, id, locations, content, type, value):
    # Set up a pipeline so that the whole index operation needs only a single
    # communication round trip.
    pipeline = conn.pipeline(True)

    for location in locations:
        # Add the ad ID to all of the relevant location sets for targeting.
        pipeline.sadd('idx:req:'+location, id)

    words = tokenize(content)
    # Index the words contained in the ad.
    for word in words:
        pipeline.zadd('idx:' + word, id, 0)

    # To estimate the performance of a new ad, use a dictionary that records
    # the average number of clicks or actions per 1000 views for each ad type.
    rvalue = TO_ECPM[type](
        1000, AVERAGE_PER_1K.get(type, 1), value)
    # Record the type of the ad.
    pipeline.hset('type:', id, type)
    # Add the ad's eCPM to a sorted set of all ads' eCPMs.
    pipeline.zadd('idx:ad:value:', id, rvalue)
    # Add the ad's base value to a sorted set of all ads' base values.
    pipeline.zadd('ad:base_value:', id, value)
    # Keep a record of the words that can be used to target the ad.
    pipeline.sadd('terms:' + id, *list(words))
    pipeline.execute()
# <end id="index_ad"/>


# Listing 7-11
# <start id="target_ad"/>
def target_ads(conn, locations, content):
    pipeline = conn.pipeline(True)
    # Based on the location targeting passed in by the caller, find all of
    # the ads that match those locations, along with their eCPMs.
    matched_ads, base_ecpm = match_location(pipeline, locations)
    # Calculate the bonuses based on the matched content.
    words, targeted_ads = finish_scoring(
        pipeline, matched_ads, base_ecpm, content)

    # Get an ID that can be used for reporting and recording of this
    # targeted ad.
    pipeline.incr('ads:served:')
    # Fetch the ID of the ad with the highest eCPM.
    pipeline.zrevrange('idx:' + targeted_ads, 0, 0)
    target_id, targeted_ad = pipeline.execute()[-2:]

    # If no ads matched the location targeting, return nothing.
    if not targeted_ad:
        return None, None

    ad_id = targeted_ad[0]
    # Record the results of our targeting efforts as part of learning from
    # user behavior.
    record_targeting_result(conn, target_id, ad_id, words)

    # Return the ID for this targeting effort and the ID of the chosen ad.
    return target_id, ad_id
# <end id="target_ad"/>


# Listing 7-12
# <start id="location_target"/>
def match_location(pipe, locations):
    # Based on the given locations, find the keys of all of the location
    # sets that should be unioned together.
    required = ['req:' + loc for loc in locations]
    # Union those sets to produce the set of ads that match the locations.
    matched_ads = union(pipe, required, ttl=300, _execute=False)
    # Return the ID of the set that holds the matched ads, and the ID of the
    # sorted set that holds the base eCPMs of all of the matched ads.
    return matched_ads, zintersect(pipe,
        {matched_ads: 0, 'ad:value:': 1}, _execute=False)
# <end id="location_target"/>


# Listing 7-13
# <start id="finish_scoring"/>
def finish_scoring(pipe, matched, base, content):
    bonus_ecpm = {}
    # Tokenize the content for matching against ads.
    words = tokenize(content)
    for word in words:
        # Find the ads that are location-targeted and that also have one of
        # the words in the content.
        word_bonus = zintersect(
            pipe, {matched: 0, word: 1}, _execute=False)
        bonus_ecpm[word_bonus] = 1

    if bonus_ecpm:
        # Calculate the minimum and maximum eCPM bonuses for each ad.
        minimum = zunion(
            pipe, bonus_ecpm, aggregate='MIN', _execute=False)
        maximum = zunion(
            pipe, bonus_ecpm, aggregate='MAX', _execute=False)

        # Add together the ads' base prices, half of the minimum eCPM bonus,
        # and half of the maximum eCPM bonus.
        return words, zunion(
            pipe, {base:1, minimum:.5, maximum:.5}, _execute=False)
    # If none of the words in the content could be matched, return the ads'
    # base eCPMs.
    return words, base
# <end id="finish_scoring"/>


# Listing 7-14
# <start id="record_targeting"/>
def record_targeting_result(conn, target_id, ad_id, words):
    pipeline = conn.pipeline(True)

    # Find the words in the content that matched the ad.
    terms = conn.smembers('terms:' + ad_id)
    matched = list(words & terms)
    if matched:
        matched_key = 'terms:matched:%s' % target_id
        # If any words matched, record them with a 15-minute expiration time.
        pipeline.sadd(matched_key, *matched)
        pipeline.expire(matched_key, 900)

    # Keep a per-type count of the number of views for each type of ad.
    type = conn.hget('type:', ad_id)
    pipeline.incr('type:%s:views:' % type)
    # Record the view on the ad, and on every matched word of the ad.
    for word in matched:
        pipeline.zincrby('views:%s' % ad_id, word)
    pipeline.zincrby('views:%s' % ad_id, '')

    # Update the ad's eCPM every 100 views.
    if not pipeline.execute()[-1] % 100:
        update_cpms(conn, ad_id)

# <end id="record_targeting"/>


# Listing 7-15
# <start id="record_click"/>
def record_click(conn, target_id, ad_id, action=False):
    pipeline = conn.pipeline(True)
    click_key = 'clicks:%s'%ad_id

    match_key = 'terms:matched:%s'%target_id

    type = conn.hget('type:', ad_id)
    # If this is a cost-per-action ad and the matched terms still exist,
    # refresh their expiration time.
    if type == 'cpa':
        pipeline.expire(match_key, 900)
        if action:
            # Record the action instead of the click.
            click_key = 'actions:%s' % ad_id

    if action and type == 'cpa':
        # Maintain a global click or action counter for each ad type.
        pipeline.incr('type:%s:actions:' % type)
    else:
        pipeline.incr('type:%s:clicks:' % type)

    # Record the click (or action) for the ad, and for every word that was
    # targeted in the ad.
    matched = list(conn.smembers(match_key))
    matched.append('')
    for word in matched:
        pipeline.zincrby(click_key, word)
    pipeline.execute()

    # Update the eCPMs of all of the words in the ad.
    update_cpms(conn, ad_id)
# <end id="record_click"/>


# Listing 7-16
# <start id="update_cpms"/>
def update_cpms(conn, ad_id):
    pipeline = conn.pipeline(True)
    # Fetch the type and base value of the ad, along with all of the words
    # in the ad.
    pipeline.hget('type:', ad_id)
    pipeline.zscore('ad:base_value:', ad_id)
    pipeline.smembers('terms:' + ad_id)
    type, base_value, words = pipeline.execute()

    # Decide whether the ad's eCPM should be based on clicks or on actions.
    which = 'clicks'
    if type == 'cpa':
        which = 'actions'

    # Based on the ad's type, fetch the per-type number of views and number
    # of clicks (or actions). Note the trailing colon, matching the counter
    # keys incremented by record_targeting_result() and record_click().
    pipeline.get('type:%s:views:' % type)
    pipeline.get('type:%s:%s:' % (type, which))
    type_views, type_clicks = pipeline.execute()
    # Write the per-type click-through or action rate back into the global
    # dictionary.
    AVERAGE_PER_1K[type] = (
        1000. * int(type_clicks or '1') / int(type_views or '1'))

    # If we are processing a CPM ad, its eCPM is already up to date and
    # nothing else needs to be done.
    if type == 'cpm':
        return

    view_key = 'views:%s' % ad_id
    click_key = '%s:%s' % (which, ad_id)

    to_ecpm = TO_ECPM[type]

    # Fetch the number of views and clicks (or actions) for the ad itself.
    pipeline.zscore(view_key, '')
    pipeline.zscore(click_key, '')
    ad_views, ad_clicks = pipeline.execute()
    # If the ad has not received any clicks yet, use the existing eCPM.
    if (ad_clicks or 0) < 1:
        ad_ecpm = conn.zscore('idx:ad:value:', ad_id)
    else:
        # Calculate the ad's eCPM and update its value.
        ad_ecpm = to_ecpm(ad_views or 1, ad_clicks or 0, base_value)
        pipeline.zadd('idx:ad:value:', ad_id, ad_ecpm)

    for word in words:
        # Fetch the view and click (or action) counts for the word.
        pipeline.zscore(view_key, word)
        pipeline.zscore(click_key, word)
        views, clicks = pipeline.execute()[-2:]

        # If the word has not received any clicks, skip updating its eCPM.
        if (clicks or 0) < 1:
            continue

        # Calculate the word's eCPM.
        word_ecpm = to_ecpm(views or 1, clicks or 0, base_value)
        # Calculate the word's bonus.
        bonus = word_ecpm - ad_ecpm
        # Write the word's bonus back to the per-word sorted set for the ad.
        pipeline.zadd('idx:' + word, ad_id, bonus)
    pipeline.execute()
# <end id="update_cpms"/>


# Listing 7-17
# <start id="slow_job_search"/>
def add_job(conn, job_id, required_skills):
    # Add all of the required skills to the job's set.
    conn.sadd('job:' + job_id, *required_skills)

def is_qualified(conn, job_id, candidate_skills):
    temp = str(uuid.uuid4())
    pipeline = conn.pipeline(True)
    # Add the candidate's skills to a temporary set with an expiration time.
    pipeline.sadd(temp, *candidate_skills)
    pipeline.expire(temp, 5)
    # Calculate the set of skills that the job requires but the candidate
    # does not have, storing it as the result.
    pipeline.sdiff('job:' + job_id, temp)
    # Return True if the candidate has all of the skills the job requires.
    return not pipeline.execute()[-1]
# <end id="slow_job_search"/>


# Listing 7-18
# <start id="job_search_index"/>
def index_job(conn, job_id, skills):
    pipeline = conn.pipeline(True)
    for skill in skills:
        # Add the job ID to the set for each of its required skills.
        pipeline.sadd('idx:skill:' + skill, job_id)
    # Record the number of required skills in a sorted set of all jobs.
    pipeline.zadd('idx:jobs:req', job_id, len(set(skills)))
    pipeline.execute()
# <end id="job_search_index"/>


# Listing 7-19
# <start id="job_search_results"/>
def find_jobs(conn, candidate_skills):
    # Set up the dictionary used for scoring the jobs.
    skills = {}
    for skill in set(candidate_skills):
        skills['skill:' + skill] = 1

    # Calculate the candidate's score for each job.
    job_scores = zunion(conn, skills)
    # Calculate which jobs the candidate is and is not qualified for.
    final_result = zintersect(
        conn, {job_scores:-1, 'jobs:req':1})

    # Return the jobs that the candidate is qualified for.
    return conn.zrangebyscore('idx:' + final_result, 0, 0)
# <end id="job_search_results"/>
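
# Why a score of 0 means "qualified": the union gives each job one point per
# skill the candidate has, and the intersection subtracts that total from the
# job's required-skill count, so a job scores 0 exactly when every required
# skill is covered (a positive score counts the missing skills).
'''
index_job(conn, 'devops', ['linux', 'redis'])
print find_jobs(conn, ['linux', 'redis', 'python'])   # ['devops']
'''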

# 0 is beginner, 1 is intermediate, 2 is expert
SKILL_LEVEL_LIMIT = 2

def index_job_levels(conn, job_id, skill_levels):
    total_skills = len(set(skill for skill, level in skill_levels))
    pipeline = conn.pipeline(True)
    for skill, level in skill_levels:
        level = min(level, SKILL_LEVEL_LIMIT)
        for wlevel in xrange(level, SKILL_LEVEL_LIMIT+1):
            pipeline.sadd('idx:skill:%s:%s'%(skill,wlevel), job_id)
    pipeline.zadd('idx:jobs:req', job_id, total_skills)
    pipeline.execute()

def search_job_levels(conn, skill_levels):
    skills = {}
    for skill, level in skill_levels:
        level = min(level, SKILL_LEVEL_LIMIT)
        for wlevel in xrange(level, SKILL_LEVEL_LIMIT+1):
            skills['skill:%s:%s'%(skill,wlevel)] = 1

    job_scores = zunion(conn, skills)
    final_result = zintersect(conn, {job_scores:-1, 'jobs:req':1})

    return conn.zrangebyscore('idx:' + final_result, 0, 0)


def index_job_years(conn, job_id, skill_years):
    total_skills = len(set(skill for skill, level in skill_years))
    pipeline = conn.pipeline(True)
    for skill, years in skill_years:
        pipeline.zadd(
            'idx:skill:%s:years'%skill, job_id, max(years, 0))
    pipeline.sadd('idx:jobs:all', job_id)
    pipeline.zadd('idx:jobs:req', job_id, total_skills)
    # Send the queued commands (missing from the original, which never
    # executed the pipeline).
    pipeline.execute()

def search_job_years(conn, skill_years):
    skill_years = dict(skill_years)
    pipeline = conn.pipeline(True)

    union = []
    for skill, years in skill_years.iteritems():
        sub_result = zintersect(pipeline,
            {'jobs:all':-years, 'skill:%s:years'%skill:1}, _execute=False)
        pipeline.zremrangebyscore('idx:' + sub_result, '(0', 'inf')
        union.append(
            zintersect(pipeline, {'jobs:all':1, sub_result:0}, _execute=False))

    job_scores = zunion(pipeline, dict((key, 1) for key in union), _execute=False)
    final_result = zintersect(pipeline, {job_scores:-1, 'jobs:req':1}, _execute=False)

    pipeline.zrangebyscore('idx:' + final_result, 0, 0)
    return pipeline.execute()[-1]


class TestCh07(unittest.TestCase):
    content = 'this is some random content, look at how it is indexed.'
    def setUp(self):
        self.conn = redis.Redis(db=15)
        self.conn.flushdb()
    def tearDown(self):
        self.conn.flushdb()

    def test_index_document(self):
        print "We're tokenizing some content..."
        tokens = tokenize(self.content)
        print "Those tokens are:", tokens
        self.assertTrue(tokens)

        print "And now we are indexing that content..."
        r = index_document(self.conn, 'test', self.content)
        self.assertEquals(r, len(tokens))
        for t in tokens:
            self.assertEquals(self.conn.smembers('idx:' + t), set(['test']))

    def test_set_operations(self):
        index_document(self.conn, 'test', self.content)

        r = intersect(self.conn, ['content', 'indexed'])
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = intersect(self.conn, ['content', 'ignored'])
        self.assertEquals(self.conn.smembers('idx:' + r), set())

        r = union(self.conn, ['content', 'ignored'])
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = difference(self.conn, ['content', 'ignored'])
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = difference(self.conn, ['content', 'indexed'])
        self.assertEquals(self.conn.smembers('idx:' + r), set())

    def test_parse_query(self):
        query = 'test query without stopwords'
        self.assertEquals(parse(query), ([[x] for x in query.split()], []))

        query = 'test +query without -stopwords'
        self.assertEquals(parse(query), ([['test', 'query'], ['without']], ['stopwords']))

    def test_parse_and_search(self):
        print "And now we are testing search..."
        index_document(self.conn, 'test', self.content)

        r = parse_and_search(self.conn, 'content')
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = parse_and_search(self.conn, 'content indexed random')
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = parse_and_search(self.conn, 'content +indexed random')
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = parse_and_search(self.conn, 'content indexed +random')
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        r = parse_and_search(self.conn, 'content indexed -random')
        self.assertEquals(self.conn.smembers('idx:' + r), set())

        r = parse_and_search(self.conn, 'content indexed +random')
        self.assertEquals(self.conn.smembers('idx:' + r), set(['test']))

        print "Which passed!"

    def test_search_with_sort(self):
        print "And now let's test searching with sorting..."

        index_document(self.conn, 'test', self.content)
        index_document(self.conn, 'test2', self.content)
        self.conn.hmset('kb:doc:test', {'updated': 12345, 'id': 10})
        self.conn.hmset('kb:doc:test2', {'updated': 54321, 'id': 1})

        r = search_and_sort(self.conn, "content")
        self.assertEquals(r[1], ['test2', 'test'])

        r = search_and_sort(self.conn, "content", sort='-id')
        self.assertEquals(r[1], ['test', 'test2'])
        print "Which passed!"

    def test_search_with_zsort(self):
        print "And now let's test searching with sorting via zset..."

        index_document(self.conn, 'test', self.content)
        index_document(self.conn, 'test2', self.content)
        self.conn.zadd('idx:sort:update', 'test', 12345, 'test2', 54321)
        self.conn.zadd('idx:sort:votes', 'test', 10, 'test2', 1)

        r = search_and_zsort(self.conn, "content", desc=False)
        self.assertEquals(r[1], ['test', 'test2'])

        r = search_and_zsort(self.conn, "content", update=0, vote=1, desc=False)
        self.assertEquals(r[1], ['test2', 'test'])
        print "Which passed!"

    def test_string_to_score(self):
        words = 'these are some words that will be sorted'.split()
        pairs = [(word, string_to_score(word)) for word in words]
        pairs2 = list(pairs)
        pairs.sort()
        pairs2.sort(key=lambda x:x[1])
        self.assertEquals(pairs, pairs2)

        words = 'these are some words that will be sorted'.split()
        pairs = [(word, string_to_score_generic(word, LOWER)) for word in words]
        pairs2 = list(pairs)
        pairs.sort()
        pairs2.sort(key=lambda x:x[1])
        self.assertEquals(pairs, pairs2)

        zadd_string(self.conn, 'key', 'test', 'value', test2='other')
        self.assertTrue(self.conn.zscore('key', 'test'), string_to_score('value'))
        self.assertTrue(self.conn.zscore('key', 'test2'), string_to_score('other'))

    def test_index_and_target_ads(self):
        index_ad(self.conn, '1', ['USA', 'CA'], self.content, 'cpc', .25)
        index_ad(self.conn, '2', ['USA', 'VA'], self.content + ' wooooo', 'cpc', .125)

        for i in xrange(100):
            ro = target_ads(self.conn, ['USA'], self.content)
        self.assertEquals(ro[1], '1')

        r = target_ads(self.conn, ['VA'], 'wooooo')
        self.assertEquals(r[1], '2')

        self.assertEquals(self.conn.zrange('idx:ad:value:', 0, -1, withscores=True), [('2', 0.125), ('1', 0.25)])
        self.assertEquals(self.conn.zrange('ad:base_value:', 0, -1, withscores=True), [('2', 0.125), ('1', 0.25)])

        record_click(self.conn, ro[0], ro[1])

        self.assertEquals(self.conn.zrange('idx:ad:value:', 0, -1, withscores=True), [('2', 0.125), ('1', 2.5)])
        self.assertEquals(self.conn.zrange('ad:base_value:', 0, -1, withscores=True), [('2', 0.125), ('1', 0.25)])

    def test_is_qualified_for_job(self):
        add_job(self.conn, 'test', ['q1', 'q2', 'q3'])
        self.assertTrue(is_qualified(self.conn, 'test', ['q1', 'q3', 'q2']))
        self.assertFalse(is_qualified(self.conn, 'test', ['q1', 'q2']))

    def test_index_and_find_jobs(self):
        index_job(self.conn, 'test1', ['q1', 'q2', 'q3'])
        index_job(self.conn, 'test2', ['q1', 'q3', 'q4'])
        index_job(self.conn, 'test3', ['q1', 'q3', 'q5'])

        self.assertEquals(find_jobs(self.conn, ['q1']), [])
        self.assertEquals(find_jobs(self.conn, ['q1', 'q3', 'q4']), ['test2'])
        self.assertEquals(find_jobs(self.conn, ['q1', 'q3', 'q5']), ['test3'])
        self.assertEquals(find_jobs(self.conn, ['q1', 'q2', 'q3', 'q4', 'q5']), ['test1', 'test2', 'test3'])

if __name__ == '__main__':
    unittest.main()
|
|
@ -0,0 +1,972 @@
# coding: utf-8

import BaseHTTPServer
import cgi
import functools
import json
import math
import random
import socket
import SocketServer
import time
import threading
import unittest
import uuid
import urlparse

import redis

def acquire_lock_with_timeout(
    conn, lockname, acquire_timeout=10, lock_timeout=10):
    identifier = str(uuid.uuid4())                          #A
    lockname = 'lock:' + lockname
    lock_timeout = int(math.ceil(lock_timeout))             #D

    end = time.time() + acquire_timeout
    while time.time() < end:
        if conn.setnx(lockname, identifier):                #B
            conn.expire(lockname, lock_timeout)             #B
            return identifier
        elif not conn.ttl(lockname):                        #C
            conn.expire(lockname, lock_timeout)             #C

        time.sleep(.001)

    return False

def release_lock(conn, lockname, identifier):
    pipe = conn.pipeline(True)
    lockname = 'lock:' + lockname

    while True:
        try:
            pipe.watch(lockname)                            #A
            if pipe.get(lockname) == identifier:            #A
                pipe.multi()                                #B
                pipe.delete(lockname)                       #B
                pipe.execute()                              #B
                return True                                 #B

            pipe.unwatch()
            break

        except redis.exceptions.WatchError:                 #C
            pass                                            #C

    return False                                            #D
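
# A minimal usage sketch for the two locking helpers above. The key name
# 'lock-demo' and the critical-section body are hypothetical; the pattern
# (acquire, work inside try, release in a finally block) is the part that
# matters.
def _example_lock_usage(conn):
    identifier = acquire_lock_with_timeout(
        conn, 'lock-demo', acquire_timeout=5, lock_timeout=10)
    if not identifier:
        return False                        # could not get the lock in time
    try:
        conn.incr('lock-demo:counter')      # any work that must not run concurrently
    finally:
        release_lock(conn, 'lock-demo', identifier)
    return True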
CONFIGS = {}
CHECKED = {}

def get_config(conn, type, component, wait=1):
    key = 'config:%s:%s'%(type, component)

    if CHECKED.get(key) < time.time() - wait:               #A
        CHECKED[key] = time.time()                          #B
        config = json.loads(conn.get(key) or '{}')          #C
        old_config = CONFIGS.get(key)                       #D

        if config != old_config:                            #E
            CONFIGS[key] = config                           #F

    return CONFIGS.get(key)

REDIS_CONNECTIONS = {}

def redis_connection(component, wait=1):                    #A
    key = 'config:redis:' + component                       #B
    def wrapper(function):                                  #C
        @functools.wraps(function)                          #D
        def call(*args, **kwargs):                          #E
            old_config = CONFIGS.get(key, object())         #F
            _config = get_config(                           #G
                config_connection, 'redis', component, wait)#G

            config = {}
            for k, v in _config.iteritems():                #L
                config[k.encode('utf-8')] = v               #L

            if config != old_config:                        #H
                REDIS_CONNECTIONS[key] = redis.Redis(**config)  #H

            return function(                                #I
                REDIS_CONNECTIONS.get(key), *args, **kwargs)#I
        return call                                         #J
    return wrapper                                          #K
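
# A sketch of applying the redis_connection() decorator above. The component
# name 'logs' and the wrapped function are hypothetical; note that the
# decorated function receives the managed connection as its first argument,
# so callers never pass one themselves. The decorator reads its settings
# through the module-level config_connection, which must be set up before
# _example_log_event() is actually called.
@redis_connection('logs')
def _example_log_event(conn, event):
    conn.rpush('events:', event)            # any use of the injected connection

# _example_log_event('user-signup')        # the 'logs' connection is injected automatically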
def execute_later(conn, queue, name, args):
    # this is just for testing purposes
    assert conn is args[0]
    t = threading.Thread(target=globals()[name], args=tuple(args))
    t.setDaemon(1)
    t.start()

# Listing 8-1
# <start id="create-twitter-user"/>
def create_user(conn, login, name):
    llogin = login.lower()
    # Try to lock the lowercased login name, using the locking function
    # defined in chapter 6.
    lock = acquire_lock_with_timeout(conn, 'user:' + llogin, 1)
    # If the lock could not be acquired, another user already holds the
    # same login name.
    if not lock:
        return None

    # A hash maps lowercased login names to user IDs; if a given login
    # name is already mapped to a user ID, it will not be assigned to
    # anyone else.
    if conn.hget('users:', llogin):
        release_lock(conn, 'user:' + llogin, lock)
        return None

    # Every user gets a unique ID, produced by incrementing a counter.
    id = conn.incr('user:id:')
    pipeline = conn.pipeline(True)
    # Map the lowercased login name to the user ID in the hash.
    pipeline.hset('users:', llogin, id)
    # Store the user's information in the user's own hash.
    pipeline.hmset('user:%s'%id, {
        'login': login,
        'id': id,
        'name': name,
        'followers': 0,
        'following': 0,
        'posts': 0,
        'signup': time.time(),
    })
    pipeline.execute()
    # Release the lock taken on the login name earlier.
    release_lock(conn, 'user:' + llogin, lock)
    # Return the user's ID.
    return id
# <end id="create-twitter-user"/>

# Listing 8-2
# <start id="create-twitter-status"/>
def create_status(conn, uid, message, **data):
    pipeline = conn.pipeline(True)
    # Fetch the user's login name from their user ID.
    pipeline.hget('user:%s'%uid, 'login')
    # Create a new ID for this status message.
    pipeline.incr('status:id:')
    login, id = pipeline.execute()

    # Verify that the account exists before posting the status message.
    if not login:
        return None

    # Prepare and set the status message's data.
    data.update({
        'message': message,
        'posted': time.time(),
        'id': id,
        'uid': uid,
        'login': login,
    })
    pipeline.hmset('status:%s'%id, data)
    # Update the user's posted-message count.
    pipeline.hincrby('user:%s'%uid, 'posts')
    pipeline.execute()
    # Return the ID of the newly created status message.
    return id
# <end id="create-twitter-status"/>
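
# A minimal sketch tying the two listings above together: register a user,
# then post a status as that user. The login, display name, and message text
# are hypothetical; conn is assumed to be a redis.Redis instance.
def _example_user_and_status(conn):
    uid = create_user(conn, 'TestUser', 'Test User')
    if not uid:
        return None                         # the login name was already taken
    return create_status(conn, uid, 'Hello from the example sketch')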

# Listing 8-3
# <start id="fetch-page"/>
# The function takes three optional arguments: which timeline to fetch,
# which page of it to fetch, and how many status messages per page.
def get_status_messages(conn, uid, timeline='home:', page=1, count=30):
    # Fetch the IDs of the newest status messages on the timeline.
    statuses = conn.zrevrange(
        '%s%s'%(timeline, uid), (page-1)*count, page*count-1)

    pipeline = conn.pipeline(True)
    # Fetch the status messages themselves.
    for id in statuses:
        pipeline.hgetall('status:%s'%id)

    # Filter out any status messages that have been deleted.
    return filter(None, pipeline.execute())
# <end id="fetch-page"/>
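
# A sketch of paging through a profile timeline with the function above.
# The uid and page size are hypothetical; each call fetches one page of
# status hashes, newest first, until a short page signals the end.
def _example_page_timeline(conn, uid):
    page = 1
    while True:
        statuses = get_status_messages(conn, uid, 'profile:', page, 30)
        for status in statuses:
            print status['message']
        if len(statuses) < 30:
            break                           # last page reached
        page += 1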

# Listing 8-4
# <start id="follow-user"/>
HOME_TIMELINE_SIZE = 1000
def follow_user(conn, uid, other_uid):
    # Cache the key names for the following and followers sorted sets.
    fkey1 = 'following:%s'%uid
    fkey2 = 'followers:%s'%other_uid

    # If uid already follows other_uid, return right away.
    if conn.zscore(fkey1, other_uid):
        return None

    now = time.time()

    pipeline = conn.pipeline(True)
    # Add the user IDs to the appropriate following and followers sorted sets.
    pipeline.zadd(fkey1, other_uid, now)
    pipeline.zadd(fkey2, uid, now)
    # Fetch the HOME_TIMELINE_SIZE newest status messages from the followed
    # user's profile timeline.
    pipeline.zrevrange('profile:%s'%other_uid,
        0, HOME_TIMELINE_SIZE-1, withscores=True)
    following, followers, status_and_score = pipeline.execute()[-3:]

    # Update both users' hashes with their new following and followers counts.
    pipeline.hincrby('user:%s'%uid, 'following', int(following))
    pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers))
    if status_and_score:
        # Update the follower's home timeline, keeping only the newest
        # HOME_TIMELINE_SIZE status messages on it.
        pipeline.zadd('home:%s'%uid, **dict(status_and_score))
    pipeline.zremrangebyrank('home:%s'%uid, 0, -HOME_TIMELINE_SIZE-1)

    pipeline.execute()
    # Return True to signal that the follow succeeded.
    return True
# <end id="follow-user"/>

# Listing 8-5
# <start id="unfollow-user"/>
def unfollow_user(conn, uid, other_uid):
    # Cache the key names for the following and followers sorted sets.
    fkey1 = 'following:%s'%uid
    fkey2 = 'followers:%s'%other_uid

    # If uid does not follow other_uid, return right away.
    if not conn.zscore(fkey1, other_uid):
        return None

    pipeline = conn.pipeline(True)
    # Remove both user IDs from the following and followers sorted sets.
    pipeline.zrem(fkey1, other_uid)
    pipeline.zrem(fkey2, uid)
    # Fetch the HOME_TIMELINE_SIZE newest status messages posted by the
    # user who is being unfollowed.
    pipeline.zrevrange('profile:%s'%other_uid,
        0, HOME_TIMELINE_SIZE-1)
    following, followers, statuses = pipeline.execute()[-3:]

    # Update both users' hashes, decrementing their following and
    # followers counts.
    pipeline.hincrby('user:%s'%uid, 'following', -int(following))
    pipeline.hincrby('user:%s'%other_uid, 'followers', -int(followers))
    if statuses:
        # Update the unfollower's home timeline, removing every status
        # message posted by the user who was unfollowed.
        pipeline.zrem('home:%s'%uid, *statuses)

    pipeline.execute()
    # Return True to signal that the unfollow succeeded.
    return True
# <end id="unfollow-user"/>

# <start id="exercise-refilling-timelines"/>
REFILL_USERS_STEP = 50
def refill_timeline(conn, incoming, timeline, start=0):
    if not start and conn.zcard(timeline) >= 750:       # If the timeline is still at least 3/4 full,
        return                                          # don't bother refilling it.

    users = conn.zrangebyscore(incoming, start, 'inf',  # Fetch a group of users whose posts will be
        start=0, num=REFILL_USERS_STEP, withscores=True)# used to refill the timeline.

    pipeline = conn.pipeline(False)
    for uid, start in users:
        pipeline.zrevrange('profile:%s'%uid,            # Fetch the newest status messages from
            0, HOME_TIMELINE_SIZE-1, withscores=True)   # the people being followed.

    messages = []
    for results in pipeline.execute():
        messages.extend(results)                        # Gather all of the fetched status messages together.

    messages.sort(key=lambda x:-x[1])                   # Sort all of the status messages by posting time,
    del messages[HOME_TIMELINE_SIZE:]                   # keeping only the newest HOME_TIMELINE_SIZE of them.

    pipeline = conn.pipeline(True)
    if messages:
        pipeline.zadd(timeline, **dict(messages))       # Add the chosen status messages to the home timeline,
        pipeline.zremrangebyrank(                       # then trim the timeline so that it keeps only the
            timeline, 0, -HOME_TIMELINE_SIZE-1)         # newest HOME_TIMELINE_SIZE status messages.
    pipeline.execute()

    if len(users) >= REFILL_USERS_STEP:
        execute_later(conn, 'default', 'refill_timeline',   # If there are more users whose posts should be
            [conn, incoming, timeline, start])              # considered, keep going in a delayed task.
# <end id="exercise-refilling-timelines"/>

# <start id="exercise-follow-user-list"/>
def follow_user_list(conn, uid, other_uid, list_id):
    fkey1 = 'list:in:%s'%list_id                        # Cache the relevant key names.
    fkey2 = 'list:out:%s'%other_uid
    timeline = 'list:statuses:%s'%list_id

    if conn.zscore(fkey1, other_uid):                   # If other_uid is already in the list,
        return None                                     # return right away.

    now = time.time()

    pipeline = conn.pipeline(True)
    pipeline.zadd(fkey1, other_uid, now)                # Add the user IDs to the appropriate sorted sets.
    pipeline.zadd(fkey2, list_id, now)
    pipeline.zcard(fkey1)                               # Fetch the size of the sorted set.
    pipeline.zrevrange('profile:%s'%other_uid,          # Fetch the newest status messages from the
        0, HOME_TIMELINE_SIZE-1, withscores=True)       # followed user's profile timeline.
    following, status_and_score = pipeline.execute()[-2:]

    pipeline.hset('list:%s'%list_id, 'following', following)   # Record the list's new size in the hash that stores its information.
    pipeline.zadd(timeline, **dict(status_and_score))   # Update the list's timeline of status messages,
    pipeline.zremrangebyrank(timeline, 0, -HOME_TIMELINE_SIZE-1)    # keeping only the newest messages on it.

    pipeline.execute()
    return True                                         # Return True to signal that the user was added to the list.
# <end id="exercise-follow-user-list"/>

# <start id="exercise-unfollow-user-list"/>
def unfollow_user_list(conn, uid, other_uid, list_id):
    fkey1 = 'list:in:%s'%list_id                        # Cache the relevant key names.
    fkey2 = 'list:out:%s'%other_uid
    timeline = 'list:statuses:%s'%list_id

    if not conn.zscore(fkey1, other_uid):               # If other_uid is not in the list,
        return None                                     # return right away.

    pipeline = conn.pipeline(True)
    pipeline.zrem(fkey1, other_uid)                     # Remove the user IDs from the appropriate sorted sets.
    pipeline.zrem(fkey2, list_id)
    pipeline.zcard(fkey1)                               # Fetch the size of the sorted set.
    pipeline.zrevrange('profile:%s'%other_uid,          # Fetch the newest status messages posted by
        0, HOME_TIMELINE_SIZE-1)                        # the user who is being removed.
    following, statuses = pipeline.execute()[-2:]

    pipeline.hset('list:%s'%list_id, 'following', following)   # Record the list's new size in the hash that stores its information.
    if statuses:
        pipeline.zrem(timeline, *statuses)              # Remove the removed user's status messages from the timeline,
        refill_timeline(conn, fkey1, timeline)          # then refill the timeline.

    pipeline.execute()
    return True                                         # Return True to signal that the user was removed.
# <end id="exercise-unfollow-user-list"/>

# <start id="exercise-create-user-list"/>
def create_user_list(conn, uid, name):
    pipeline = conn.pipeline(True)
    pipeline.hget('user:%s'%uid, 'login')               # Fetch the login name of the user creating the list.
    pipeline.incr('list:id:')                           # Generate a new list ID.
    login, id = pipeline.execute()

    if not login:                                       # If the user does not exist,
        return None                                     # return right away.

    now = time.time()

    pipeline = conn.pipeline(True)
    pipeline.zadd('lists:%s'%uid, **{str(id): now})     # Add the new list to the sorted set of lists the user has created.
    pipeline.hmset('list:%s'%id, {                      # Create the hash that records the list's information.
        'name': name,
        'id': id,
        'uid': uid,
        'login': login,
        'following': 0,
        'created': now,
    })
    pipeline.execute()

    return id                                           # Return the new list's ID.
# <end id="exercise-create-user-list"/>

# Listing 8-6
# <start id="post-message"/>
def post_status(conn, uid, message, **data):
    # Create a new status message using the function defined earlier.
    id = create_status(conn, uid, message, **data)
    # If creating the status message failed, return right away.
    if not id:
        return None

    # Fetch the message's posting time.
    posted = conn.hget('status:%s'%id, 'posted')
    # If the posting time could not be fetched, return right away.
    if not posted:
        return None

    post = {str(id): float(posted)}
    # Add the status message to the user's profile timeline.
    conn.zadd('profile:%s'%uid, **post)

    # Push the status message out to the user's followers.
    syndicate_status(conn, uid, post)
    return id
# <end id="post-message"/>

# Listing 8-7
# <start id="syndicate-message"/>
# Each call delivers the status message to at most 1000 followers.
POSTS_PER_PASS = 1000
def syndicate_status(conn, uid, post, start=0):
    # Fetch the next 1000 followers, starting after the last follower
    # that was updated on the previous pass.
    followers = conn.zrangebyscore('followers:%s'%uid, start, 'inf',
        start=0, num=POSTS_PER_PASS, withscores=True)

    pipeline = conn.pipeline(False)
    # While iterating over the followers, keep updating the start
    # variable, which can be passed on to the next syndicate_status()
    # call if another pass is needed.
    for follower, start in followers:
        # Add the status message to each fetched follower's home
        # timeline, trimming the timeline when necessary so it never
        # grows past its maximum allowed size.
        pipeline.zadd('home:%s'%follower, **post)
        pipeline.zremrangebyrank(
            'home:%s'%follower, 0, -HOME_TIMELINE_SIZE-1)
    pipeline.execute()

    # If more than 1000 followers need to be updated, continue the
    # remaining updates in a delayed task.
    if len(followers) >= POSTS_PER_PASS:
        execute_later(conn, 'default', 'syndicate_status',
            [conn, uid, post, start])
# <end id="syndicate-message"/>
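
# A small sketch of the score-based cursor used by syndicate_status():
# because zrangebyscore() starts from the last score seen, each pass can
# resume where the previous one stopped. The key name and batch size here
# are hypothetical.
def _example_zset_cursor(conn, key, batch=1000):
    start = 0
    while True:
        members = conn.zrangebyscore(key, start, 'inf',
            start=0, num=batch, withscores=True)
        for member, start in members:       # start ends up as the last score seen
            pass                            # process each member here
        if len(members) < batch:
            break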

# <start id="syndicate-message-list"/>
def syndicate_status_list(conn, uid, post, start=0, on_lists=False):
    key = 'followers:%s'%uid                            # Depending on how far along we are, operate on
    base = 'home:%s'                                    # either home timelines or list timelines.
    if on_lists:
        key = 'list:out:%s'%uid
        base = 'list:statuses:%s'
    followers = conn.zrangebyscore(key, start, 'inf',   # Fetch the next group of up to 1000 followers or
        start=0, num=POSTS_PER_PASS, withscores=True)   # lists, starting after the last one updated.

    pipeline = conn.pipeline(False)
    for follower, start in followers:                   # Add the status message to every fetched
        pipeline.zadd(base%follower, **post)            # follower's timeline, trimming each
        pipeline.zremrangebyrank(                       # timeline as necessary.
            base%follower, 0, -HOME_TIMELINE_SIZE-1)
    pipeline.execute()

    if len(followers) >= POSTS_PER_PASS:                # If at least 1000 timelines were just updated,
        execute_later(conn, 'default', 'syndicate_status_list', # leave the remaining updates for a later pass.
            [conn, uid, post, start, on_lists])

    elif not on_lists:
        execute_later(conn, 'default', 'syndicate_status_list', # Once the followers are done, move on to lists;
            [conn, uid, post, 0, True])                 # this step is skipped once the list pass has started.
# <end id="syndicate-message-list"/>

# Listing 8-8
# <start id="delete-message"/>
def delete_status(conn, uid, status_id):
    key = 'status:%s'%status_id
    # Lock the status message so that two callers cannot delete the same
    # message at the same time.
    lock = acquire_lock_with_timeout(conn, key, 1)
    # If the lock could not be acquired, return right away.
    if not lock:
        return None

    # If uid is not the user who posted the status message, return right away.
    if conn.hget(key, 'uid') != str(uid):
        release_lock(conn, key, lock)
        return None

    pipeline = conn.pipeline(True)
    # Delete the status message itself.
    pipeline.delete(key)
    # Remove the status message ID from the user's profile timeline.
    pipeline.zrem('profile:%s'%uid, status_id)
    # Remove the status message ID from the user's home timeline.
    pipeline.zrem('home:%s'%uid, status_id)
    # Update the hash that stores the user's information, reducing the
    # posted-message count.
    pipeline.hincrby('user:%s'%uid, 'posts', -1)
    pipeline.execute()

    release_lock(conn, key, lock)
    return True
# <end id="delete-message"/>

# <start id="exercise-clean-out-timelines"/>
def clean_timelines(conn, uid, status_id, start=0, on_lists=False):
    key = 'followers:%s'%uid                            # Depending on how far along we are, operate on
    base = 'home:%s'                                    # either home timelines or list timelines.
    if on_lists:
        key = 'list:out:%s'%uid
        base = 'list:statuses:%s'
    followers = conn.zrangebyscore(key, start, 'inf',   # Fetch the next group of up to 1000 followers or
        start=0, num=POSTS_PER_PASS, withscores=True)   # lists, starting after the last one updated.

    pipeline = conn.pipeline(False)
    for follower, start in followers:                   # Remove the status message from every
        pipeline.zrem(base%follower, status_id)         # fetched follower's timeline.
    pipeline.execute()

    if len(followers) >= POSTS_PER_PASS:                # If at least 1000 followers were just handled,
        execute_later(conn, 'default', 'clean_timelines',   # leave the remaining work for a later pass.
            [conn, uid, status_id, start, on_lists])

    elif not on_lists:
        execute_later(conn, 'default', 'clean_timelines',   # Once the followers are done, move on to lists;
            [conn, uid, status_id, 0, True])            # this step is skipped once the list pass has started.
# <end id="exercise-clean-out-timelines"/>

# Listing 8-9
# <start id="streaming-http-server"/>
# Create a class named StreamingAPIServer.
class StreamingAPIServer(
    # This class is an HTTP server that is able to create a new thread
    # for every request it receives.
    SocketServer.ThreadingMixIn,
    BaseHTTPServer.HTTPServer):

    # Tell the threading server internals to shut down all client
    # request threads when the main server thread dies.
    daemon_threads = True

# Create a class named StreamingAPIRequestHandler.
class StreamingAPIRequestHandler(
    # This new class can be used to handle HTTP requests.
    BaseHTTPServer.BaseHTTPRequestHandler):

    # Create a method named do_GET() to handle GET requests sent to the server.
    def do_GET(self):
        # Call a helper function to fetch the client's identifier.
        parse_identifier(self)
        # If this GET request is not for the sample or firehose stream,
        # return a "404 not found" error.
        if self.path != '/statuses/sample.json':
            return self.send_error(404)

        # If everything checks out, call a helper function that does the
        # actual filtering work.
        process_filters(self)

    # Create a method named do_POST() to handle POST requests sent to the server.
    def do_POST(self):
        # Call a helper function to fetch the client's identifier.
        parse_identifier(self)
        # If this POST request is not for a user, keyword, or location
        # filter, return a "404 not found" error.
        if self.path != '/statuses/filter.json':
            return self.send_error(404)

        # If everything checks out, call a helper function that does the
        # actual filtering work.
        process_filters(self)
# <end id="streaming-http-server"/>

# Listing 8-11
# <start id="get-identifier"/>
def parse_identifier(handler):
    # Set the identifier and the query arguments to placeholder values.
    handler.identifier = None
    handler.query = {}
    # If the request includes query arguments, process them.
    if '?' in handler.path:
        # Split out the query-argument portion of the path, updating the path.
        handler.path, _, query = handler.path.partition('?')
        # Parse the query arguments.
        handler.query = urlparse.parse_qs(query)
        # Fetch the list of query arguments named identifier.
        identifier = handler.query.get('identifier') or [None]
        # Use the first identifier that was passed in.
        handler.identifier = identifier[0]
# <end id="get-identifier"/>
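
# A worked example of what parse_identifier() produces. The class below is
# a hypothetical stand-in for a request handler: given the path
# '/statuses/sample.json?identifier=abc&percent=15', the helper splits off
# the query string, leaving path == '/statuses/sample.json',
# query == {'identifier': ['abc'], 'percent': ['15']}, and identifier == 'abc'.
class _FakeHandler(object):
    def __init__(self, path):
        self.path = path

def _example_parse_identifier():
    handler = _FakeHandler('/statuses/sample.json?identifier=abc&percent=15')
    parse_identifier(handler)
    return handler.path, handler.identifier, handler.query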

# Listing 8-12
# <start id="stream-to-client"/>
# Keep a tuple of the filters that require arguments.
FILTERS = ('track', 'follow', 'location')
def process_filters(handler):
    id = handler.identifier
    # If the client did not provide an identifier, return an error.
    if not id:
        return handler.send_error(401, "identifier missing")

    # Fetch the method the client is asking for; it should be either
    # sample (random messages) or filter.
    method = handler.path.rsplit('/')[-1].split('.')[0]
    name = None
    args = None
    # If the client asked for the filter method, fetch the filtering arguments.
    if method == 'filter':
        # Parse the POST request to discover the type and arguments of
        # the filter.
        data = cgi.FieldStorage(
            fp=handler.rfile,
            headers=handler.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':handler.headers['Content-Type'],
            })

        # Find the filter the client specified in the request.
        for name in data:
            if name in FILTERS:
                args = data.getfirst(name).lower().split(',')
                break

        # If the client did not specify any filter, return an error.
        if not args:
            return handler.send_error(401, "no filter provided")
    else:
        # For a sample request, use the query arguments as the args variable.
        args = handler.query

    # Finally, send a response to the client, telling it that it will be
    # receiving a streaming response.
    handler.send_response(200)
    handler.send_header('Transfer-Encoding', 'chunked')
    handler.end_headers()

    # Use a Python list as a placeholder for a pass-by-reference variable,
    # which lets the content filter know when the client wants to stop
    # receiving messages.
    quit = [False]
    # Iterate over the filtered results.
    for item in filter_content(id, method, name, args, quit):
        try:
            # Send the pre-encoded reply to the client using the chunked
            # transfer encoding.
            handler.wfile.write('%X\r\n%s\r\n'%(len(item), item))
        # If sending raised an error, have the subscriber unsubscribe and
        # shut itself down.
        except socket.error:
            quit[0] = True
    if not quit[0]:
        # If the connection to the client is still up, send the
        # "end of chunks" message.
        handler.wfile.write('0\r\n\r\n')
# <end id="stream-to-client"/>

_create_status = create_status
# Listing 8-13
# <start id="create-message-streaming"/>
def create_status(conn, uid, message, **data):
    pipeline = conn.pipeline(True)
    pipeline.hget('user:%s'%uid, 'login')
    pipeline.incr('status:id:')
    login, id = pipeline.execute()

    if not login:
        return None

    data.update({
        'message': message,
        'posted': time.time(),
        'id': id,
        'uid': uid,
        'login': login,
    })
    pipeline.hmset('status:%s'%id, data)
    pipeline.hincrby('user:%s'%uid, 'posts')
    # The new line added here sends the message to the streaming filters.
    pipeline.publish('streaming:status:', json.dumps(data))
    pipeline.execute()
    return id
# <end id="create-message-streaming"/>

_delete_status = delete_status
# Listing 8-14
# <start id="delete-message-streaming"/>
def delete_status(conn, uid, status_id):
    key = 'status:%s'%status_id
    lock = acquire_lock_with_timeout(conn, key, 1)
    if not lock:
        return None

    if conn.hget(key, 'uid') != str(uid):
        release_lock(conn, key, lock)
        return None

    pipeline = conn.pipeline(True)
    # Fetch the status message so that the streaming filters can run the
    # same filters on it, to decide whether the deletion should be passed
    # along to each client.
    status = conn.hgetall(key)
    # Mark the status message as deleted.
    status['deleted'] = True
    # Publish the deleted status message to the stream.
    pipeline.publish('streaming:status:', json.dumps(status))
    pipeline.delete(key)
    pipeline.zrem('profile:%s'%uid, status_id)
    pipeline.zrem('home:%s'%uid, status_id)
    pipeline.hincrby('user:%s'%uid, 'posts', -1)
    pipeline.execute()

    release_lock(conn, key, lock)
    return True
# <end id="delete-message-streaming"/>

# Listing 8-15
# <start id="message-subscription"/>
# Use the automatic connection decorator from chapter 5.
@redis_connection('social-network')
def filter_content(conn, id, method, name, args, quit):
    # Create a filter that decides whether a message should be sent to
    # the client.
    match = create_filters(id, method, name, args)

    # Prepare the subscription.
    pubsub = conn.pubsub()
    pubsub.subscribe(['streaming:status:'])

    # Receive messages through the subscription.
    for item in pubsub.listen():
        # Take the status message out of the subscription structure.
        message = item['data']
        decoded = json.loads(message)

        # Check whether the status message matches the filter.
        if match(decoded):
            # Before sending a deleted message, attach a special
            # "deleted" placeholder to it.
            if decoded.get('deleted'):
                yield json.dumps({
                    'id': decoded['id'], 'deleted': True})
            else:
                # For a message that was not deleted, send the message itself.
                yield message

        # If the client connection has dropped, stop filtering messages.
        if quit[0]:
            break

    # Reset the Redis connection, clearing out any data that may be
    # sitting in the server's output buffer because the connection was
    # too slow to keep up.
    pubsub.reset()
# <end id="message-subscription"/>

# Listing 8-16
# <start id="create-filters"/>
def create_filters(id, method, name, args):
    # The sample method does not use the name argument; it only needs
    # the id and args arguments.
    if method == 'sample':
        return SampleFilter(id, args)
    elif name == 'track':                               # The filter method creates and returns
        return TrackFilter(args)                        # whichever filter the user asked for.
    elif name == 'follow':
        return FollowFilter(args)
    elif name == 'location':
        return LocationFilter(args)
    # If no filter was chosen, raise an exception.
    raise Exception("Unknown filter")
# <end id="create-filters"/>

# Listing 8-17
# <start id="sample-filter"/>
# Define a SampleFilter function that takes the id and args arguments.
def SampleFilter(id, args):
    # The args argument is a dictionary derived from the GET request's
    # parameters.
    percent = int(args.get('percent', ['10'])[0], 10)
    # Use the id argument to randomly choose a subset of message IDs;
    # how many are chosen is controlled by the percent argument.
    ids = range(100)
    shuffler = random.Random(id)
    shuffler.shuffle(ids)
    # Use a Python set to quickly decide whether a status message meets
    # the filter's criteria.
    keep = set(ids[:max(percent, 1)])

    # Create and return a closure; this closure is the random sampling
    # message filter itself.
    def check(status):
        # To filter a status message, fetch the message's ID, take that
        # ID modulo 100, and test whether the result is in the keep set.
        return (status['id'] % 100) in keep
    return check
# <end id="sample-filter"/>
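
# A quick sketch showing that SampleFilter() is deterministic per client
# identifier: the same identifier always keeps the same slice of status
# IDs, so a reconnecting client sees a consistent sample. The identifier
# and percentage below are hypothetical.
def _example_sample_filter():
    check = SampleFilter('client-1', {'percent': ['10']})
    kept = [sid for sid in xrange(200) if check({'id': sid})]
    return kept                             # roughly 10% of IDs, stable per client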

# Listing 8-18
# <start id="track-filter"/>
def TrackFilter(list_of_strings):
    # The function takes a list of word groups; a status message matches
    # the filter when it contains every word of at least one group.
    groups = []
    for group in list_of_strings:
        group = set(group.lower().split())
        if group:
            # Every group must contain at least one word.
            groups.append(group)

    def check(status):
        # Split the message into words on whitespace.
        message_words = set(status['message'].lower().split())
        # Iterate over all of the word groups.
        for group in groups:
            # If every word of a group appears in the message, the
            # filter accepts the message.
            if len(group & message_words) == len(group):
                return True
        return False
    return check
# <end id="track-filter"/>

# Listing 8-19
# <start id="follow-filter"/>
def FollowFilter(names):
    # The filter matches messages against a set of user names, checking
    # both the message text and the message's sender.
    nset = set()
    # Store every given user name in the form '@username'.
    for name in names:
        nset.add('@' + name.lower().lstrip('@'))

    def check(status):
        # Build a set of words from the message text and the name of the
        # message's poster.
        message_words = set(status['message'].lower().split())
        message_words.add('@' + status['login'].lower())

        # If any of the given user names appears among those words, the
        # message matches the filter.
        return message_words & nset
    return check
# <end id="follow-filter"/>

# Listing 8-20
# <start id="location-filter"/>
def LocationFilter(list_of_boxes):
    # Build the collection of boxes that defines which regions the
    # filter should accept messages from.
    boxes = []
    for start in xrange(0, len(list_of_boxes)-3, 4):
        boxes.append(map(float, list_of_boxes[start:start+4]))

    def check(status):
        # Try to fetch the location data from the status message.
        location = status.get('location')
        # If the message carries no location data, it cannot be inside
        # any of the regions.
        if not location:
            return False

        # If the message has location data, extract the latitude and
        # longitude.
        lat, lon = map(float, location.split(','))
        # Iterate over all of the regions, trying to find a match.
        for box in boxes:
            # If the status message's position falls within a given
            # region's latitude and longitude ranges, the message
            # matches the filter.
            if (box[1] <= lat <= box[3] and
                box[0] <= lon <= box[2]):
                return True
        return False
    return check
# <end id="location-filter"/>
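
# A worked example for LocationFilter(): boxes are flat sequences of
# [west, south, east, north] longitude/latitude values, and a status
# matches when its 'location' field ('lat,lon') falls inside any box.
# The rough San Francisco box below is a hypothetical illustration.
def _example_location_filter():
    check = LocationFilter(['-122.75', '36.8', '-121.75', '37.8'])
    return (check({'location': '37.42,-122.15'}),   # inside the box -> True
            check({'location': '40.71,-74.00'}))    # New York -> False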

_filter_content = filter_content
def filter_content(identifier, method, name, args, quit):
    print "got:", identifier, method, name, args
    for i in xrange(10):
        yield json.dumps({'id':i})
        if quit[0]:
            break
        time.sleep(.1)

'''
# <start id="start-http-server"/>
if __name__ == '__main__':                              # Run the block below when this module is executed from the command line.
    server = StreamingAPIServer(                        # Create an instance of the streaming API server listening on
        ('localhost', 8080), StreamingAPIRequestHandler)# localhost port 8080, handling requests with StreamingAPIRequestHandler.
    print 'Starting server, use <Ctrl-C> to stop'       # Print an informational line.
    server.serve_forever()                              # Run until the process is killed.
# <end id="start-http-server"/>
'''

class TestCh08(unittest.TestCase):
    def setUp(self):
        self.conn = redis.Redis(db=15)
        self.conn.flushdb()
    def tearDown(self):
        self.conn.flushdb()

    def test_create_user_and_status(self):
        self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
        self.assertEquals(create_user(self.conn, 'TestUser', 'Test User2'), None)

        self.assertEquals(create_status(self.conn, 1, "This is a new status message"), 1)
        self.assertEquals(self.conn.hget('user:1', 'posts'), '1')

    def test_follow_unfollow_user(self):
        self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
        self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)

        self.assertTrue(follow_user(self.conn, 1, 2))
        self.assertEquals(self.conn.zcard('followers:2'), 1)
        self.assertEquals(self.conn.zcard('followers:1'), 0)
        self.assertEquals(self.conn.zcard('following:1'), 1)
        self.assertEquals(self.conn.zcard('following:2'), 0)
        self.assertEquals(self.conn.hget('user:1', 'following'), '1')
        self.assertEquals(self.conn.hget('user:2', 'following'), '0')
        self.assertEquals(self.conn.hget('user:1', 'followers'), '0')
        self.assertEquals(self.conn.hget('user:2', 'followers'), '1')

        self.assertEquals(unfollow_user(self.conn, 2, 1), None)
        self.assertEquals(unfollow_user(self.conn, 1, 2), True)
        self.assertEquals(self.conn.zcard('followers:2'), 0)
        self.assertEquals(self.conn.zcard('followers:1'), 0)
        self.assertEquals(self.conn.zcard('following:1'), 0)
        self.assertEquals(self.conn.zcard('following:2'), 0)
        self.assertEquals(self.conn.hget('user:1', 'following'), '0')
        self.assertEquals(self.conn.hget('user:2', 'following'), '0')
        self.assertEquals(self.conn.hget('user:1', 'followers'), '0')
        self.assertEquals(self.conn.hget('user:2', 'followers'), '0')

    def test_syndicate_status(self):
        self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
        self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)
        self.assertTrue(follow_user(self.conn, 1, 2))
        self.assertEquals(self.conn.zcard('followers:2'), 1)
        self.assertEquals(self.conn.hget('user:1', 'following'), '1')
        self.assertEquals(post_status(self.conn, 2, 'this is some message content'), 1)
        self.assertEquals(len(get_status_messages(self.conn, 1)), 1)

        for i in xrange(3, 11):
            self.assertEquals(create_user(self.conn, 'TestUser%s'%i, 'Test User%s'%i), i)
            follow_user(self.conn, i, 2)

        global POSTS_PER_PASS
        POSTS_PER_PASS = 5

        self.assertEquals(post_status(self.conn, 2, 'this is some other message content'), 2)
        time.sleep(.1)
        self.assertEquals(len(get_status_messages(self.conn, 9)), 2)

        self.assertTrue(unfollow_user(self.conn, 1, 2))
        self.assertEquals(len(get_status_messages(self.conn, 1)), 0)

    def test_refill_timeline(self):
        self.assertEquals(create_user(self.conn, 'TestUser', 'Test User'), 1)
        self.assertEquals(create_user(self.conn, 'TestUser2', 'Test User2'), 2)
        self.assertEquals(create_user(self.conn, 'TestUser3', 'Test User3'), 3)

        self.assertTrue(follow_user(self.conn, 1, 2))
        self.assertTrue(follow_user(self.conn, 1, 3))

        global HOME_TIMELINE_SIZE
        HOME_TIMELINE_SIZE = 5

        for i in xrange(10):
            self.assertTrue(post_status(self.conn, 2, 'message'))
            self.assertTrue(post_status(self.conn, 3, 'message'))
            time.sleep(.05)

        self.assertEquals(len(get_status_messages(self.conn, 1)), 5)
        self.assertTrue(unfollow_user(self.conn, 1, 2))
        self.assertTrue(len(get_status_messages(self.conn, 1)) < 5)

        refill_timeline(self.conn, 'following:1', 'home:1')
        messages = get_status_messages(self.conn, 1)
        self.assertEquals(len(messages), 5)
        for msg in messages:
            self.assertEquals(msg['uid'], '3')

        delete_status(self.conn, '3', messages[-1]['id'])
        self.assertEquals(len(get_status_messages(self.conn, 1)), 4)
        self.assertEquals(self.conn.zcard('home:1'), 5)
        clean_timelines(self.conn, '3', messages[-1]['id'])
        self.assertEquals(self.conn.zcard('home:1'), 4)

if __name__ == '__main__':
    unittest.main()
@ -0,0 +1,560 @@
# coding: utf-8

import binascii
import bisect
from datetime import date, timedelta
from collections import defaultdict
import math
import time
import unittest
import uuid

import redis

def readblocks(conn, key, blocksize=2**17):
    lb = blocksize
    pos = 0
    while lb == blocksize:                              #A
        block = conn.substr(key, pos, pos + blocksize - 1)  #B
        yield block                                     #C
        lb = len(block)                                 #C
        pos += lb                                       #C
    yield ''
# Listing 9-1
'''
# <start id="ziplist-configuration-options"/>
list-max-ziplist-entries 512    # Limits for lists to use the ziplist representation.
list-max-ziplist-value 64       #

hash-max-ziplist-entries 512    # Limits for hashes to use the ziplist representation
hash-max-ziplist-value 64       # (Redis versions before 2.6 used a different encoding for hashes, with differently named options).

zset-max-ziplist-entries 128    # Limits for sorted sets to use the ziplist representation.
zset-max-ziplist-value 64       #
# <end id="ziplist-configuration-options"/>
'''

'''
# <start id="ziplist-test"/>
>>> conn.rpush('test', 'a', 'b', 'c', 'd')  # Start by pushing four elements onto a list.
4
>>> conn.debug_object('test')               # The debug object command shows information about a particular object.
{'encoding': 'ziplist', 'refcount': 1, 'lru_seconds_idle': 20,  # The "encoding" field tells us this object is a ziplist,
'lru': 274841, 'at': '0xb6c9f120', 'serializedlength': 24,      # which here uses 24 bytes of memory.
'type': 'Value'}
>>> conn.rpush('test', 'e', 'f', 'g', 'h')  # Push four more elements onto the list.
8
>>> conn.debug_object('test')
{'encoding': 'ziplist', 'refcount': 1, 'lru_seconds_idle': 0,   # The object is still encoded as a ziplist, now 36 bytes
'lru': 274846, 'at': '0xb6c9f120', 'serializedlength': 36,      # (each of the four new elements cost 1 byte of storage plus 2 bytes of overhead).
'type': 'Value'}
>>> conn.rpush('test', 65*'a')              # When an element larger than the encoding allows is pushed onto the list,
9                                           # the list is converted from a ziplist into a standard linked list.
>>> conn.debug_object('test')
{'encoding': 'linkedlist', 'refcount': 1, 'lru_seconds_idle': 10,   # Although the serialized length went down here,
'lru': 274851, 'at': '0xb6c9f120', 'serializedlength': 30,          # for encodings other than ziplists and the special set encodings, this number does not reflect the structure's actual memory usage.
'type': 'Value'}
>>> conn.rpop('test')                       # After a ziplist is converted into a regular structure,
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' # it does not get converted back,
>>> conn.debug_object('test')               # even if the structure later satisfies the configured limits again.
{'encoding': 'linkedlist', 'refcount': 1, 'lru_seconds_idle': 0,
'lru': 274853, 'at': '0xb6c9f120', 'serializedlength': 17,
'type': 'Value'}
# <end id="ziplist-test"/>
'''

'''
# <start id="intset-configuration-option"/>
set-max-intset-entries 512      # Limit for sets to use the intset representation.
# <end id="intset-configuration-option"/>
'''

'''
# <start id="intset-test"/>
>>> conn.sadd('set-object', *range(500))        # Even after adding 500 items to the set,
500                                             # it is still encoded as an intset.
>>> conn.debug_object('set-object')
{'encoding': 'intset', 'refcount': 1, 'lru_seconds_idle': 0,
'lru': 283116, 'at': '0xb6d1a1c0', 'serializedlength': 1010,
'type': 'Value'}
>>> conn.sadd('set-object', *range(500, 1000))  # When the set grows past the configured limit of 512 items,
500                                             # the intset is converted into a hash table.
>>> conn.debug_object('set-object')
{'encoding': 'hashtable', 'refcount': 1, 'lru_seconds_idle': 0,
'lru': 283118, 'at': '0xb6d1a1c0', 'serializedlength': 2874,
'type': 'Value'}
# <end id="intset-test"/>
'''

# <start id="rpoplpush-benchmark"/>
# The function is parameterized over everything being measured, so the
# benchmark can be run in different ways.
def long_ziplist_performance(conn, key, length, passes, psize):
    # Delete the named key to ensure we benchmark exactly what we intend to.
    conn.delete(key)
    # Initialize the list by pushing the requested number of elements
    # onto its right end.
    conn.rpush(key, *range(length))
    # Use a pipeline to reduce the effect of network round trips on the
    # measurement.
    pipeline = conn.pipeline(False)

    # Start the timer.
    t = time.time()
    # The passes argument decides how many pipelined runs to execute.
    for p in xrange(passes):
        # Each pipelined run makes psize calls to RPOPLPUSH.
        for pi in xrange(psize):
            # Each rpoplpush() call pops an element from the right end of
            # the list and pushes it onto the left end of the same list.
            pipeline.rpoplpush(key, key)
        # Execute the psize RPOPLPUSH calls.
        pipeline.execute()

    # Compute the number of RPOPLPUSH calls performed per second.
    return (passes * psize) / (time.time() - t or .001)
# <end id="rpoplpush-benchmark"/>

'''
# <start id="rpoplpush-performance"/>
>>> long_ziplist_performance(conn, 'list', 1, 1000, 100)    # While a ziplist-encoded list holds 1000 or fewer nodes,
52093.558416505381                                          # Redis performs roughly 50,000 operations per second.
>>> long_ziplist_performance(conn, 'list', 100, 1000, 100)
51501.154762768667
>>> long_ziplist_performance(conn, 'list', 1000, 1000, 100)
49732.490843316067
>>> long_ziplist_performance(conn, 'list', 5000, 1000, 100) # Beyond 5000 nodes, the cost of copying memory
43424.056529592635                                          # inside the ziplist grows,
>>> long_ziplist_performance(conn, 'list', 10000, 1000, 100)    # and performance starts to fall.
36727.062573334966
>>> long_ziplist_performance(conn, 'list', 50000, 1000, 100)    # At 50,000 nodes,
16695.140684975777                                          # the slowdown is substantial.
>>> long_ziplist_performance(conn, 'list', 100000, 500, 100)    # By 100,000 nodes,
553.10821080054586                                          # ziplist performance is far too low to be usable.
# <end id="rpoplpush-performance"/>
'''

def long_ziplist_index(conn, key, length, passes, psize):   #A
    conn.delete(key)                                        #B
    conn.rpush(key, *range(length))                         #C
    length >>= 1
    pipeline = conn.pipeline(False)                         #D
    t = time.time()                                         #E
    for p in xrange(passes):                                #F
        for pi in xrange(psize):                            #G
            pipeline.lindex(key, length)                    #H
        pipeline.execute()                                  #I
    return (passes * psize) / (time.time() - t or .001)     #J

def long_intset_performance(conn, key, length, passes, psize):  #A
    conn.delete(key)                                        #B
    conn.sadd(key, *range(1000000, 1000000+length))         #C
    cur = 1000000-1
    pipeline = conn.pipeline(False)                         #D
    t = time.time()                                         #E
    for p in xrange(passes):                                #F
        for pi in xrange(psize):                            #G
            pipeline.spop(key)                              #H
            pipeline.sadd(key, cur)
            cur -= 1
        pipeline.execute()                                  #I
    return (passes * psize) / (time.time() - t or .001)     #J

# Listing 9-7
# <start id="calculate-shard-key"/>
# To call shard_key(), pass the name of the base hash, the key that will
# be stored in the sharded hash, the expected total number of elements,
# and the requested shard size.
def shard_key(base, key, total_elements, shard_size):
    # If the value is an integer, or a string that looks like an integer,
    # it is used directly to compute the shard ID.
    if isinstance(key, (int, long)) or key.isdigit():
        # Integer keys are assumed to be sequentially assigned IDs, so
        # the shard ID is based on the upper bits of the integer ID.
        # The conversion uses an explicit base (together with the str()
        # call) so that a key of 010 becomes 10, not 8.
        shard_id = int(str(key), 10) // shard_size
    else:
        # For non-integer keys, first compute the total number of shards
        # that will be needed, based on the expected total number of
        # elements and the requested shard size.
        shards = 2 * total_elements // shard_size
        # Knowing the number of shards, the shard ID is the key's hash
        # value modulo the number of shards.
        shard_id = binascii.crc32(key) % shards
    # Finally, combine the base key with the shard ID to produce the
    # sharded key.
    return "%s:%s"%(base, shard_id)
# <end id="calculate-shard-key"/>
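
# A worked example of shard_key(). Integer-looking keys are binned by
# value: with shard_size=100, key '125' lands in shard 125 // 100 == 1,
# giving 'test:1'. String keys are hashed: with total_elements=1000 and
# shard_size=100 there are 2 * 1000 // 100 == 20 shards, so the CRC32 of
# the key is taken modulo 20. The base name 'test' is hypothetical.
def _example_shard_key():
    return (shard_key('test', '125', 1000, 100),        # -> 'test:1'
            shard_key('test', 'hello:7', 1000, 100))    # -> 'test:<crc32 % 20>'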

# Listing 9-8
# <start id="sharded-hset-hget"/>
def shard_hset(conn, base, key, value, total_elements, shard_size):
    # Compute which shard should store the value.
    shard = shard_key(base, key, total_elements, shard_size)
    # Store the value in the shard.
    return conn.hset(shard, key, value)

def shard_hget(conn, base, key, total_elements, shard_size):
    # Compute which shard the value may have been stored in.
    shard = shard_key(base, key, total_elements, shard_size)
    # Fetch the value stored in the shard.
    return conn.hget(shard, key)
# <end id="sharded-hset-hget"/>
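
# A minimal round-trip sketch for the sharded hash helpers above. The base
# name, key, value, and sizing constants are hypothetical; the important
# detail is that reads must use the same total_elements/shard_size values
# as writes, or the key will be looked up in the wrong shard.
def _example_sharded_hash(conn):
    shard_hset(conn, 'cityid2city:', '87', 'Oakland', 320000, 1024)
    return shard_hget(conn, 'cityid2city:', '87', 320000, 1024)    # -> 'Oakland'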

# Listing 9-9
'''
# <start id="sharded-ip-lookup"/>
TOTAL_SIZE = 320000                         # Keep the arguments passed to the sharding functions
SHARD_SIZE = 1024                           # as global constants, so the same values are always used.

def import_cities_to_redis(conn, filename):
    for row in csv.reader(open(filename)):
        ...
        shard_hset(conn, 'cityid2city:', city_id,       # To set the data, pass the TOTAL_SIZE and SHARD_SIZE
            json.dumps([city, region, country]),        # values, though TOTAL_SIZE is effectively unused here
            TOTAL_SIZE, SHARD_SIZE)                     # because the IDs being handled are numeric.

def find_city_by_ip(conn, ip_address):
    ...
    data = shard_hget(conn, 'cityid2city:', city_id,    # To fetch the data, look up the sharded key using
        TOTAL_SIZE, SHARD_SIZE)                         # the same TOTAL_SIZE and SHARD_SIZE values.
    return json.loads(data)
# <end id="sharded-ip-lookup"/>
'''

# Listing 9-10
# <start id="sharded-sadd"/>
def shard_sadd(conn, base, member, total_elements, shard_size):
    shard = shard_key(base,
        # Compute which shard set should store the member; because
        # members are not sequential IDs, the member is converted to a
        # string before its shard is computed.
        'x'+str(member), total_elements, shard_size)
    # Store the member in the shard.
    return conn.sadd(shard, member)
# <end id="sharded-sadd"/>

# Listing 9-11
# <start id="unique-visitor-count"/>
# Preset a typical shard size for intset-encoded sets.
SHARD_SIZE = 512

def count_visit(conn, session_id):
    # Fetch today's date and generate the unique-visitor counter's key.
    today = date.today()
    key = 'unique:%s'%today.isoformat()
    # Compute or fetch the expected number of unique visitors for today.
    expected = get_expected(conn, key, today)

    # Derive a 56-bit ID from the 128-bit session UUID.
    id = int(session_id.replace('-', '')[:15], 16)
    # Add the ID to the sharded set.
    if shard_sadd(conn, key, id, expected, SHARD_SIZE):
        # If the ID was not already in the sharded set, increment the
        # unique-visitor counter.
        conn.incr(key)
# <end id="unique-visitor-count"/>
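
# A sketch of feeding count_visit() from a web layer: one call per request
# with the visitor's session UUID. The session handling is hypothetical;
# only the counter key read at the end comes from the listing above.
def _example_count_todays_uniques(conn, session_ids):
    for session_id in session_ids:
        count_visit(conn, session_id)
    return int(conn.get('unique:%s'%date.today().isoformat()) or 0)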

# Listing 9-12
# <start id="expected-viewer-count"/>
# The initial expected daily visitor count is set somewhat high.
DAILY_EXPECTED = 1000000
# Keep a local copy of the computed expected counts.
EXPECTED = {}

def get_expected(conn, key, today):
    # If today's expected visitor count has already been computed or
    # fetched, use the stored value.
    if key in EXPECTED:
        return EXPECTED[key]

    exkey = key + ':expected'
    # If another client has already computed today's expected visitor
    # count, use its value.
    expected = conn.get(exkey)

    if not expected:
        # Fetch yesterday's unique visitor count, falling back to the
        # default of one million if it is missing.
        yesterday = (today - timedelta(days=1)).isoformat()
        expected = conn.get('unique:%s'%yesterday)
        expected = int(expected or DAILY_EXPECTED)

        # Assume that today's visitor count will be at least 50% higher
        # than yesterday's, so add 50% to yesterday's count, then round
        # up to the next power of two.
        expected = 2**int(math.ceil(math.log(expected*1.5, 2)))
        # Write the computed expected visitor count to Redis so that
        # other clients can use it.
        if not conn.setnx(exkey, expected):
            # If another client stored today's expected count before we
            # did, use its value instead.
            expected = conn.get(exkey)

    # Record the expected count in the local copy, and return it to the
    # caller.
    EXPECTED[key] = int(expected)
    return EXPECTED[key]
# <end id="expected-viewer-count"/>
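
# A worked example of the sizing rule in get_expected(): take yesterday's
# count, add 50%, then round up to the next power of two. With 120,000
# visitors yesterday, 120000 * 1.5 == 180000, and the next power of two
# above that is 2**18 == 262144, so that becomes today's expected count.
def _example_expected_rounding(yesterday_count=120000):
    return 2**int(math.ceil(math.log(yesterday_count * 1.5, 2)))    # -> 262144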

# Listing 9-13
# <start id="location-tables"/>
# A table of ISO3 country codes as one string; calling split() breaks the
# string on whitespace, turning it into a list of country codes.
COUNTRIES = '''
ABW AFG AGO AIA ALA ALB AND ARE ARG ARM ASM ATA ATF ATG AUS AUT AZE BDI
BEL BEN BES BFA BGD BGR BHR BHS BIH BLM BLR BLZ BMU BOL BRA BRB BRN BTN
BVT BWA CAF CAN CCK CHE CHL CHN CIV CMR COD COG COK COL COM CPV CRI CUB
CUW CXR CYM CYP CZE DEU DJI DMA DNK DOM DZA ECU EGY ERI ESH ESP EST ETH
FIN FJI FLK FRA FRO FSM GAB GBR GEO GGY GHA GIB GIN GLP GMB GNB GNQ GRC
GRD GRL GTM GUF GUM GUY HKG HMD HND HRV HTI HUN IDN IMN IND IOT IRL IRN
IRQ ISL ISR ITA JAM JEY JOR JPN KAZ KEN KGZ KHM KIR KNA KOR KWT LAO LBN
LBR LBY LCA LIE LKA LSO LTU LUX LVA MAC MAF MAR MCO MDA MDG MDV MEX MHL
MKD MLI MLT MMR MNE MNG MNP MOZ MRT MSR MTQ MUS MWI MYS MYT NAM NCL NER
NFK NGA NIC NIU NLD NOR NPL NRU NZL OMN PAK PAN PCN PER PHL PLW PNG POL
PRI PRK PRT PRY PSE PYF QAT REU ROU RUS RWA SAU SDN SEN SGP SGS SHN SJM
SLB SLE SLV SMR SOM SPM SRB SSD STP SUR SVK SVN SWE SWZ SXM SYC SYR TCA
TCD TGO THA TJK TKL TKM TLS TON TTO TUN TUR TUV TWN TZA UGA UKR UMI URY
USA UZB VAT VCT VEN VGB VIR VNM VUT WLF WSM YEM ZAF ZMB ZWE'''.split()

STATES = {
    # Province and territory information for Canada.
    'CAN':'''AB BC MB NB NL NS NT NU ON PE QC SK YT'''.split(),
    # State information for the United States.
    'USA':'''AA AE AK AL AP AR AS AZ CA CO CT DC DE FL FM GA GU HI IA ID
IL IN KS KY LA MA MD ME MH MI MN MO MP MS MT NC ND NE NH NJ NM NV NY OH
OK OR PA PR PW RI SC SD TN TX UT VA VI VT WA WI WV WY'''.split(),
}
# <end id="location-tables"/>

# Listing 9-14
# <start id="location-to-code"/>
def get_code(country, state):
    # Find the offset of the country.
    cindex = bisect.bisect_left(COUNTRIES, country)
    # If the country was not found, set its index to -1.
    if cindex >= len(COUNTRIES) or COUNTRIES[cindex] != country:
        cindex = -1
    # Because uninitialized data in Redis comes back as nulls, "country
    # not found" should map to 0, so shift the first country's index to
    # 1, and so on.
    cindex += 1

    sindex = -1
    if state and country in STATES:
        # Try to fetch the state information for the country.
        states = STATES[country]
        # Find the offset of the state.
        sindex = bisect.bisect_left(states, state)
        # Handle "state not found" the same way as "country not found".
        if sindex >= len(states) or states[sindex] != state:
            sindex = -1
    # A missing state maps to 0; a found state maps to an index
    # greater than 0.
    sindex += 1

    # chr() converts an integer between 0 and 255 into the corresponding
    # ASCII character.
    return chr(cindex) + chr(sindex)
# <end id="location-to-code"/>
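
# A worked example of get_code(). 'USA' sits at some 1-based offset within
# COUNTRIES and 'CA' within STATES['USA'], so the result is two bytes, one
# per lookup table; chr(0) in either position means "not found". The exact
# byte values depend only on the tables above.
def _example_get_code():
    return (get_code('USA', 'CA'),      # two data-carrying bytes
            get_code('USA', ''),        # second byte is chr(0): no state given
            get_code('XXX', ''))        # first byte is chr(0): unknown country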

# Listing 9-15
# <start id="set-location-information"/>
# Set the size of each shard.
USERS_PER_SHARD = 2**20

def set_location(conn, user_id, country, state):
    # Fetch the code for the user's location.
    code = get_code(country, state)

    # Find the shard ID and the user's position within the shard.
    shard_id, position = divmod(user_id, USERS_PER_SHARD)
    # Compute the offset of the user's data.
    offset = position * 2

    pipe = conn.pipeline(False)
    # Store the user's location code in the sharded location table.
    pipe.setrange('location:%s'%shard_id, offset, code)

    # Update the sorted set that records the largest user ID seen so far.
    tkey = str(uuid.uuid4())
    pipe.zadd(tkey, 'max', user_id)
    pipe.zunionstore('location:max',
        [tkey, 'location:max'], aggregate='max')
    pipe.delete(tkey)

    pipe.execute()
# <end id="set-location-information"/>
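
# A companion sketch for set_location(): reading one user's code back out
# of the sharded string, then decoding it via the lookup tables. This
# helper is not part of the original listing; it just inverts the write
# path above.
def _example_get_location(conn, user_id):
    shard_id, position = divmod(user_id, USERS_PER_SHARD)
    offset = position * 2
    code = conn.substr('location:%s'%shard_id, offset, offset + 1)
    if len(code) != 2 or code[0] == '\0':
        return None                         # no location recorded for this user
    country = COUNTRIES[ord(code[0]) - 1]
    state = ord(code[1]) - 1
    if country in STATES and 0 <= state < len(STATES[country]):
        return country, STATES[country][state]
    return country, None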

# Listing 9-16
# <start id="aggregate-population"/>
def aggregate_location(conn):
    # Initialize two special structures that make it easy to update both
    # existing and missing counters.
    countries = defaultdict(int)
    states = defaultdict(lambda: defaultdict(int))

    # Fetch the largest user ID seen so far, and use it to compute the
    # largest shard ID that needs to be visited.
    max_id = int(conn.zscore('location:max', 'max'))
    max_block = max_id // USERS_PER_SHARD

    # Process each shard in order...
    for shard_id in xrange(max_block + 1):
        # ...reading each block...
        for block in readblocks(conn, 'location:%s'%shard_id):
            # ...extracting each code from the block, looking up the
            # original location information, and aggregating it.
            for offset in xrange(0, len(block)-1, 2):
                code = block[offset:offset+2]
                # Update the aggregates.
                update_aggregates(countries, states, [code])

    return countries, states
# <end id="aggregate-population"/>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 9-17
|
||||||
|
# <start id="code-to-location"/>
|
||||||
|
def update_aggregates(countries, states, codes):
|
||||||
|
for code in codes:
|
||||||
|
# 只对合法的编码进行查找。
|
||||||
|
if len(code) != 2:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 计算出国家和州在查找表格中的实际偏移量。
|
||||||
|
country = ord(code[0]) - 1
|
||||||
|
state = ord(code[1]) - 1
|
||||||
|
|
||||||
|
# 如果国家所处的偏移量不在合法范围之内,那么跳过这个编码。
|
||||||
|
if country < 0 or country >= len(COUNTRIES):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 获取 ISO3 国家编码。
|
||||||
|
country = COUNTRIES[country]
|
||||||
|
# 在对国家信息进行解码之后,
|
||||||
|
# 把用户计入到这个国家对应的计数器里面。
|
||||||
|
countries[country] += 1
|
||||||
|
|
||||||
|
# 如果程序没有找到指定的州信息,
|
||||||
|
# 或者查找州信息时的偏移量不在合法的范围之内,
|
||||||
|
# 那么跳过这个编码。
|
||||||
|
if country not in STATES:
|
||||||
|
continue
|
||||||
|
if state < 0 or state >= STATES[country]:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 根据编码获取州名。
|
||||||
|
state = STATES[country][state]
|
||||||
|
# 对州计数器执行加一操作。
|
||||||
|
states[country][state] += 1
|
||||||
|
# <end id="code-to-location"/>


# Listing 9-18
# <start id="aggregate-limited"/>
def aggregate_location_list(conn, user_ids):
    # Set up a pipeline to reduce the number of round trips to Redis.
    pipe = conn.pipeline(False)
    # Set up the base aggregate data, as before.
    countries = defaultdict(int)
    states = defaultdict(lambda: defaultdict(int))

    for i, user_id in enumerate(user_ids):
        # Find the shard ID that holds the user's location, and the offset within the shard.
        shard_id, position = divmod(user_id, USERS_PER_SHARD)
        offset = position * 2

        # Send another pipelined command to fetch the user's location.
        pipe.substr('location:%s'%shard_id, offset, offset+1)

        # Every 1000 requests, update the aggregate data
        # with the helper function defined earlier.
        if (i+1) % 1000 == 0:
            update_aggregates(countries, states, pipe.execute())

    # Handle the final batch of users left over from the loop.
    update_aggregates(countries, states, pipe.execute())

    # Return the aggregate data.
    return countries, states
# <end id="aggregate-limited"/>

class TestCh09(unittest.TestCase):
    def setUp(self):
        self.conn = redis.Redis(db=15)
        self.conn.flushdb()
    def tearDown(self):
        self.conn.flushdb()

    def test_long_ziplist_performance(self):
        long_ziplist_performance(self.conn, 'test', 5, 10, 10)
        self.assertEquals(self.conn.llen('test'), 5)

    def test_shard_key(self):
        base = 'test'
        self.assertEquals(shard_key(base, 1, 2, 2), 'test:0')
        self.assertEquals(shard_key(base, '1', 2, 2), 'test:0')
        self.assertEquals(shard_key(base, 125, 1000, 100), 'test:1')
        self.assertEquals(shard_key(base, '125', 1000, 100), 'test:1')

        for i in xrange(50):
            self.assertTrue(0 <= int(shard_key(base, 'hello:%s'%i, 1000, 100).partition(':')[-1]) < 20)
            self.assertTrue(0 <= int(shard_key(base, i, 1000, 100).partition(':')[-1]) < 10)

    def test_sharded_hash(self):
        for i in xrange(50):
            shard_hset(self.conn, 'test', 'keyname:%s'%i, i, 1000, 100)
            self.assertEquals(shard_hget(self.conn, 'test', 'keyname:%s'%i, 1000, 100), str(i))
            shard_hset(self.conn, 'test2', i, i, 1000, 100)
            self.assertEquals(shard_hget(self.conn, 'test2', i, 1000, 100), str(i))

    def test_sharded_sadd(self):
        for i in xrange(50):
            shard_sadd(self.conn, 'testx', i, 50, 50)
        self.assertEquals(self.conn.scard('testx:0') + self.conn.scard('testx:1'), 50)

    def test_unique_visitors(self):
        global DAILY_EXPECTED
        DAILY_EXPECTED = 10000

        for i in xrange(179):
            count_visit(self.conn, str(uuid.uuid4()))
        self.assertEquals(self.conn.get('unique:%s'%(date.today().isoformat())), '179')

        self.conn.flushdb()
        self.conn.set('unique:%s'%((date.today() - timedelta(days=1)).isoformat()), 1000)
        for i in xrange(183):
            count_visit(self.conn, str(uuid.uuid4()))
        self.assertEquals(self.conn.get('unique:%s'%(date.today().isoformat())), '183')

    def test_user_location(self):
        i = 0
        for country in COUNTRIES:
            if country in STATES:
                for state in STATES[country]:
                    set_location(self.conn, i, country, state)
                    i += 1
            else:
                set_location(self.conn, i, country, '')
                i += 1

        _countries, _states = aggregate_location(self.conn)
        countries, states = aggregate_location_list(self.conn, range(i+1))

        self.assertEquals(_countries, countries)
        self.assertEquals(_states, states)

        for c in countries:
            if c in STATES:
                self.assertEquals(len(STATES[c]), countries[c])
                for s in STATES[c]:
                    self.assertEquals(states[c][s], 1)
            else:
                self.assertEquals(countries[c], 1)

if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,784 @@
# coding: utf-8

import binascii
from collections import defaultdict
from datetime import date
from decimal import Decimal
import functools
import json
from Queue import Empty, Queue
import threading
import time
import unittest
import uuid

import redis

CONFIGS = {}
CHECKED = {}

def get_config(conn, type, component, wait=1):
    key = 'config:%s:%s'%(type, component)

    if CHECKED.get(key) < time.time() - wait: #A
        CHECKED[key] = time.time() #B
        config = json.loads(conn.get(key) or '{}') #C
        config = dict((str(k), config[k]) for k in config)
        old_config = CONFIGS.get(key) #D

        if config != old_config: #E
            CONFIGS[key] = config #F

    return CONFIGS.get(key)
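
# A minimal sketch of get_config() in action; the 'logs' component name is
# hypothetical, and the same connection doubles as the configuration server.
'''
>>> conn = redis.Redis(db=15)
>>> conn.set('config:redis:logs', json.dumps({'db': 15}))
True
>>> get_config(conn, 'redis', 'logs')
{'db': 15}
'''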

REDIS_CONNECTIONS = {}
config_connection = None

def redis_connection(component, wait=1): #A
    key = 'config:redis:' + component #B
    def wrapper(function): #C
        @functools.wraps(function) #D
        def call(*args, **kwargs): #E
            old_config = CONFIGS.get(key, object()) #F
            _config = get_config( #G
                config_connection, 'redis', component, wait) #G

            config = {}
            for k, v in _config.iteritems(): #L
                config[k.encode('utf-8')] = v #L

            if config != old_config: #H
                REDIS_CONNECTIONS[key] = redis.Redis(**config) #H

            return function( #I
                REDIS_CONNECTIONS.get(key), *args, **kwargs) #I
        return call #J
    return wrapper #K

def index_document(conn, docid, words, scores):
    pipeline = conn.pipeline(True)
    for word in words: #I
        pipeline.sadd('idx:' + word, docid) #I
    pipeline.hmset('kb:doc:%s'%docid, scores)
    return len(pipeline.execute()) #J

def parse_and_search(conn, query, ttl):
    id = str(uuid.uuid4())
    conn.sinterstore('idx:' + id,
        ['idx:'+key for key in query])
    conn.expire('idx:' + id, ttl)
    return id

def search_and_sort(conn, query, id=None, ttl=300, sort="-updated", #A
                    start=0, num=20): #A
    desc = sort.startswith('-') #B
    sort = sort.lstrip('-') #B
    by = "kb:doc:*->" + sort #B
    alpha = sort not in ('updated', 'id', 'created') #I

    if id and not conn.expire(id, ttl): #C
        id = None #C

    if not id: #D
        id = parse_and_search(conn, query, ttl=ttl) #D

    pipeline = conn.pipeline(True)
    pipeline.scard('idx:' + id) #E
    pipeline.sort('idx:' + id, by=by, alpha=alpha, #F
        desc=desc, start=start, num=num) #F
    results = pipeline.execute()

    return results[0], results[1], id #G

def zintersect(conn, keys, ttl):
    id = str(uuid.uuid4())
    conn.zinterstore('idx:' + id,
        dict(('idx:'+k, v) for k,v in keys.iteritems()))
    conn.expire('idx:' + id, ttl)
    return id

def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0, #A
                    start=0, num=20, desc=True): #A

    if id and not conn.expire(id, ttl): #B
        id = None #B

    if not id: #C
        id = parse_and_search(conn, query, ttl=ttl) #C

    scored_search = { #D
        id: 0, #D
        'sort:update': update, #D
        'sort:votes': vote #D
    }
    id = zintersect(conn, scored_search, ttl) #E

    pipeline = conn.pipeline(True)
    pipeline.zcard('idx:' + id) #F
    if desc: #G
        pipeline.zrevrange('idx:' + id, start, start + num - 1) #G
    else: #G
        pipeline.zrange('idx:' + id, start, start + num - 1) #G
    results = pipeline.execute()

    return results[0], results[1], id #H

def execute_later(conn, queue, name, args):
    t = threading.Thread(target=globals()[name], args=tuple(args))
    t.setDaemon(1)
    t.start()

HOME_TIMELINE_SIZE = 1000
POSTS_PER_PASS = 1000

def shard_key(base, key, total_elements, shard_size): #A
    if isinstance(key, (int, long)) or key.isdigit(): #B
        shard_id = int(str(key), 10) // shard_size #C
    else:
        shards = 2 * total_elements // shard_size #D
        shard_id = binascii.crc32(key) % shards #E
    return "%s:%s"%(base, shard_id) #F

def shard_sadd(conn, base, member, total_elements, shard_size):
    shard = shard_key(base,
        'x'+str(member), total_elements, shard_size) #A
    return conn.sadd(shard, member) #B

SHARD_SIZE = 512
EXPECTED = defaultdict(lambda: 1000000)


# Listing 10-1
# <start id="get-connection"/>
def get_redis_connection(component, wait=1):
    key = 'config:redis:' + component
    # Try to fetch the old configuration.
    old_config = CONFIGS.get(key, object())
    # Try to fetch the new configuration.
    config = get_config(
        config_connection, 'redis', component, wait)

    # If the new and old configurations differ, create a new connection.
    if config != old_config:
        REDIS_CONNECTIONS[key] = redis.Redis(**config)

    # Return the connection object for the given component.
    return REDIS_CONNECTIONS.get(key)
# <end id="get-connection"/>


# Listing 10-2
# <start id="get-sharded-connection"/>
def get_sharded_connection(component, key, shard_count, wait=1):
    # Calculate a shard ID of the form "<component>:<shard number>".
    shard = shard_key(component, 'x'+str(key), shard_count, 2)
    # Return the connection.
    return get_redis_connection(shard, wait)
# <end id="get-sharded-connection"/>


# <start id="no-decorator-example"/>
def log_recent(conn, app, message):
    'the old log_recent() code'

log_recent = redis_connection('logs')(log_recent) # Calling the decorator chain by hand like this has the same effect as the @ syntax.
# <end id="no-decorator-example"/>

# Listing 10-3
# <start id="shard-aware-decorator"/>
# The decorator takes the component name and the expected number of shards as arguments.
def sharded_connection(component, shard_count, wait=1):
    # Create a wrapper that will decorate the passed-in function.
    def wrapper(function):
        # Copy some useful metadata from the original function to the configuration handler.
        @functools.wraps(function)
        # Create the function that calculates the shard ID for keys and sets up the connection manager.
        def call(key, *args, **kwargs):
            # Fetch the sharded connection.
            conn = get_sharded_connection(
                component, key, shard_count, wait)
            # Actually call the decorated function, passing it the sharded connection and the other arguments.
            return function(conn, key, *args, **kwargs)
        # Return the wrapped function.
        return call
    # Return a function that can wrap functions that need a sharded connection.
    return wrapper
# <end id="shard-aware-decorator"/>


# Listing 10-4
# <start id="sharded-count-unique"/>
# Shard the count_visit() call across 16 machines;
# the result is itself automatically sharded over multiple database keys on each machine.
@sharded_connection('unique', 16)
def count_visit(conn, session_id):
    today = date.today()
    key = 'unique:%s'%today.isoformat()
    # The modified get_expected() call.
    conn2, expected = get_expected(key, today)

    id = int(session_id.replace('-', '')[:15], 16)
    if shard_sadd(conn, key, id, expected, SHARD_SIZE):
        # Use the nonsharded connection returned by get_expected()
        # to increment the unique-visitor counter.
        conn2.incr(key)

# Use a nonsharded connection for get_expected().
@redis_connection('unique')
def get_expected(conn, key, today):
    'all of the same function body as before, except the last line'
    # Return the nonsharded connection
    # so that count_visit() can increment the unique-visitor
    # counter when necessary.
    return conn, EXPECTED[key]
# <end id="sharded-count-unique"/>


# Listing 10-5
# <start id="search-with-values"/>
# This function takes exactly the same arguments as search_and_sort().
def search_get_values(conn, query, id=None, ttl=300, sort="-updated",
                      start=0, num=20):
    # First get the results of the search and sort.
    count, docids, id = search_and_sort(
        conn, query, id, ttl, sort, 0, start+num)

    key = "kb:doc:%s"
    sort = sort.lstrip('-')

    pipe = conn.pipeline(False)
    # Fetch the data the results are sorted by.
    for docid in docids:
        pipe.hget(key%docid, sort)
    sort_column = pipe.execute()

    # Pair up the document IDs with the data the documents were sorted by.
    data_pairs = zip(docids, sort_column)
    # Return the number of matching documents, the sorted results, and the cache ID of the results.
    return count, data_pairs, id
# <end id="search-with-values"/>


# Listing 10-6
# <start id="search-on-shards"/>
# To know which servers to connect to, the program assumes that the information
# about all of the shard servers is stored in a standard configuration location.
def get_shard_results(component, shards, query, ids=None, ttl=300,
                      sort="-updated", start=0, num=20, wait=1):

    # Prepare structures to hold the data fetched later.
    count = 0
    data = []
    # Try to use cached search results;
    # if there are none available, re-execute the query.
    ids = ids or shards * [None]
    for shard in xrange(shards):
        # Get or create a connection to the given shard.
        conn = get_redis_connection('%s:%s'%(component, shard), wait)
        # Fetch the search results and their sort values.
        c, d, i = search_get_values(
            conn, query, ids[shard], ttl, sort, start, num)

        # Combine this shard's results with those of the other shards.
        count += c
        data.extend(d)
        ids[shard] = i

    # Return the raw results from all of the shards to the caller.
    return count, data, ids
# <end id="search-on-shards"/>

def get_values_thread(component, shard, wait, rqueue, *args, **kwargs):
    conn = get_redis_connection('%s:%s'%(component, shard), wait)
    count, results, id = search_get_values(conn, *args, **kwargs)
    rqueue.put((shard, count, results, id))

def get_shard_results_thread(component, shards, query, ids=None, ttl=300,
                             sort="-updated", start=0, num=20, wait=1, timeout=.5):

    ids = ids or shards * [None]
    rqueue = Queue()

    for shard in xrange(shards):
        t = threading.Thread(target=get_values_thread, args=(
            component, shard, wait, rqueue, query, ids[shard],
            ttl, sort, start, num))
        t.setDaemon(1)
        t.start()

    received = 0
    count = 0
    data = []
    deadline = time.time() + timeout
    while received < shards and time.time() < deadline:
        try:
            sh, c, r, i = rqueue.get(timeout=max(deadline-time.time(), .001))
        except Empty:
            break
        else:
            received += 1
            count += c
            data.extend(r)
            ids[sh] = i

    return count, data, ids


# Listing 10-7
# <start id="merge-sharded-results"/>
def to_numeric_key(data):
    try:
        # Decimal is used here because it converts integers and floats
        # sensibly, and falls back to a default value of 0 when the value
        # is missing or non-numeric.
        return Decimal(data[1] or '0')
    except:
        return Decimal('0')

def to_string_key(data):
    # Always return a string, even when the value is missing.
    return data[1] or ''

# This function takes all of the sharding and search arguments,
# most of which get passed on to lower-level functions;
# the function itself only uses the sort argument and the search offsets.
def search_shards(component, shards, query, ids=None, ttl=300,
                  sort="-updated", start=0, num=20, wait=1):

    # Fetch the unsorted results of the sharded search.
    count, data, ids = get_shard_results(
        component, shards, query, ids, ttl, sort, start, num, wait)

    # Prepare the arguments needed for sorting.
    reversed = sort.startswith('-')
    sort = sort.strip('-')
    key = to_numeric_key
    if sort not in ('updated', 'id', 'created'):
        key = to_string_key

    # Sort the results according to the sort argument.
    data.sort(key=key, reverse=reversed)

    results = []
    # Fetch just the page of results that was asked for.
    for docid, score in data[start:start+num]:
        results.append(docid)

    # Return the selected results, including the cache IDs from each of the shards.
    return count, results, ids
# <end id="merge-sharded-results"/>


# Listing 10-8
# <start id="zset-search-with-values"/>
# This function takes all of the arguments that search_and_zsort() takes.
def search_get_zset_values(conn, query, id=None, ttl=300, update=1,
                           vote=0, start=0, num=20, desc=True):

    # Call the underlying search_and_zsort() function to get the
    # cache ID of the results and the number of matching documents.
    count, r, id = search_and_zsort(
        conn, query, id, ttl, update, vote, 0, 1, desc)

    # Fetch the requested results together with their scores.
    if desc:
        data = conn.zrevrange(id, 0, start + num - 1, withscores=True)
    else:
        data = conn.zrange(id, 0, start + num - 1, withscores=True)

    # Return the number of results, the results with their scores, and the cache ID.
    return count, data, id
# <end id="zset-search-with-values"/>


# Listing 10-9
# <start id="search-shards-zset"/>
# The function takes all of the sharding arguments as well as all of the search arguments.
def search_shards_zset(component, shards, query, ids=None, ttl=300,
                       update=1, vote=0, start=0, num=20, desc=True, wait=1):

    # Prepare structures to hold the data fetched later.
    count = 0
    data = []
    # Try to use cached results;
    # if there are none available, start a new search.
    ids = ids or shards * [None]
    for shard in xrange(shards):
        # Get or create a connection to each shard.
        conn = get_redis_connection('%s:%s'%(component, shard), wait)
        # Search on the shard and fetch the scores of the results.
        c, d, i = search_get_zset_values(conn, query, ids[shard],
            ttl, update, vote, start, num, desc)

        # Merge this shard's results with those of the other shards.
        count += c
        data.extend(d)
        ids[shard] = i

    # Define a simple sort helper that returns only the score.
    def key(result):
        return result[1]

    # Sort all of the results.
    data.sort(key=key, reverse=desc)
    results = []
    # Extract the document IDs from the results, discarding the scores.
    for docid, score in data[start:start+num]:
        results.append(docid)

    # Return the results to the caller.
    return count, results, ids
# <end id="search-shards-zset"/>


# Listing 10-11
# <start id="sharded-api-base"/>
class KeyShardedConnection(object):
    # The object is initialized with the component name and the number of shards.
    def __init__(self, component, shards):
        self.component = component
        self.shards = shards
    # This method is called when an item is fetched from the object;
    # the argument passed in is the item being requested.
    def __getitem__(self, key):
        # Fetch the sharded connection for the given key,
        # using the component name and shard count set earlier.
        return get_sharded_connection(
            self.component, key, self.shards)
# <end id="sharded-api-base"/>


# Listing 10-10
# <start id="sharded-api-example"/>
# Create a connection that holds the information needed to shard
# a component with the given number of shards.
sharded_timelines = KeyShardedConnection('timelines', 8)

def follow_user(conn, uid, other_uid):
    fkey1 = 'following:%s'%uid
    fkey2 = 'followers:%s'%other_uid

    if conn.zscore(fkey1, other_uid):
        print "already followed", uid, other_uid
        return None

    now = time.time()

    pipeline = conn.pipeline(True)
    pipeline.zadd(fkey1, other_uid, now)
    pipeline.zadd(fkey2, uid, now)
    pipeline.zcard(fkey1)
    pipeline.zcard(fkey2)
    following, followers = pipeline.execute()[-2:]
    pipeline.hset('user:%s'%uid, 'following', following)
    pipeline.hset('user:%s'%other_uid, 'followers', followers)
    pipeline.execute()

    pkey = 'profile:%s'%other_uid
    # Fetch the most recent status messages from the newly followed user's profile timeline.
    status_and_score = sharded_timelines[pkey].zrevrange(
        pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)

    if status_and_score:
        hkey = 'home:%s'%uid
        # Fetch the connection for the sharded key, then get a pipeline object from it.
        pipe = sharded_timelines[hkey].pipeline(True)
        # Add the status messages to the sharded home timeline sorted set,
        # then trim the sorted set after the additions are done.
        pipe.zadd(hkey, **dict(status_and_score))
        pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
        # Execute the transaction.
        pipe.execute()

    return True
# <end id="sharded-api-example"/>


# Listing 10-13
# <start id="key-data-sharded-api"/>
class KeyDataShardedConnection(object):
    # The object is initialized with the component name and the number of shards.
    def __init__(self, component, shards):
        self.component = component
        self.shards = shards
    # This method is called when a pair of IDs is passed
    # as part of a dictionary lookup.
    def __getitem__(self, ids):
        # Unpack the two IDs and make sure that they are integers.
        id1, id2 = map(int, ids)
        # If the second ID is smaller than the first,
        # swap the two IDs so that the first ID
        # is always less than or equal to the second.
        if id2 < id1:
            id1, id2 = id2, id1
        # Construct a key from the two IDs.
        key = "%s:%s"%(id1, id2)
        # Fetch the sharded connection for the constructed key,
        # using the component name and shard count set earlier.
        return get_sharded_connection(
            self.component, key, self.shards)
# <end id="key-data-sharded-api"/>


_follow_user = follow_user
# Listing 10-12
# <start id="sharded-api-example2"/>
# Create connections that hold the information needed to shard
# components with the given numbers of shards.
sharded_timelines = KeyShardedConnection('timelines', 8)
sharded_followers = KeyDataShardedConnection('followers', 16)

def follow_user(conn, uid, other_uid):
    fkey1 = 'following:%s'%uid
    fkey2 = 'followers:%s'%other_uid

    # Fetch the connection object for the uid, other_uid pair.
    sconn = sharded_followers[uid, other_uid]
    # Check whether uid is already following other_uid.
    if sconn.zscore(fkey1, other_uid):
        return None

    now = time.time()
    spipe = sconn.pipeline(True)
    # Add the follower and followee information to the sorted sets.
    spipe.zadd(fkey1, other_uid, now)
    spipe.zadd(fkey2, uid, now)
    following, followers = spipe.execute()

    pipeline = conn.pipeline(True)
    # Update the following and followers counts for the user doing the
    # following and for the user being followed.
    pipeline.hincrby('user:%s'%uid, 'following', int(following))
    pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers))
    pipeline.execute()

    pkey = 'profile:%s'%other_uid
    status_and_score = sharded_timelines[pkey].zrevrange(
        pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)

    if status_and_score:
        hkey = 'home:%s'%uid
        pipe = sharded_timelines[hkey].pipeline(True)
        pipe.zadd(hkey, **dict(status_and_score))
        pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
        pipe.execute()

    return True
# <end id="sharded-api-example2"/>


# Listing 10-14
# <start id="sharded-zrangebyscore"/>
# The function takes the component name and shard count, along with the
# arguments that behave correctly in a sharded setting.
def sharded_zrangebyscore(component, shards, key, min, max, num):
    data = []
    for shard in xrange(shards):
        # Fetch the sharded connection for the current shard.
        conn = get_redis_connection("%s:%s"%(component, shard))
        # Fetch the data from the Redis shard.
        data.extend(conn.zrangebyscore(
            key, min, max, start=0, num=num, withscores=True))

    # Sort the data first by score and then by member.
    def key(pair):
        return pair[1], pair[0]
    data.sort(key=key)

    # Return only the number of elements that was asked for.
    return data[:num]
# <end id="sharded-zrangebyscore"/>


# Listing 10-15
# <start id="sharded-syndicate-posts"/>
def syndicate_status(uid, post, start=0, on_lists=False):
    root = 'followers'
    key = 'followers:%s'%uid
    base = 'home:%s'
    if on_lists:
        root = 'list:out'
        key = 'list:out:%s'%uid
        base = 'list:statuses:%s'

    # Find the next group of followers with a ZRANGEBYSCORE call.
    followers = sharded_zrangebyscore(root,
        sharded_followers.shards, key, start, 'inf', POSTS_PER_PASS)

    # Group the timelines by the shard they were assigned to,
    # storing the groups in a structure prepared beforehand.
    to_send = defaultdict(list)
    for follower, start in followers:
        # Construct the key that stores the timeline.
        timeline = base % follower
        # Find the shard that stores this timeline.
        shard = shard_key('timelines',
            timeline, sharded_timelines.shards, 2)
        # Append the timeline key after the other timelines on the same shard.
        to_send[shard].append(timeline)

    for timelines in to_send.itervalues():
        # Look up the connection to the server that stores this
        # group of timelines, then create a pipeline object from it.
        pipe = sharded_timelines[timelines[0]].pipeline(False)
        for timeline in timelines:
            # Add the newly posted message to each timeline,
            # and remove messages that are too old.
            pipe.zadd(timeline, **post)
            pipe.zremrangebyrank(
                timeline, 0, -HOME_TIMELINE_SIZE-1)
        pipe.execute()

    conn = redis.Redis()
    if len(followers) >= POSTS_PER_PASS:
        execute_later(conn, 'default', 'syndicate_status',
            [uid, post, start, on_lists])

    elif not on_lists:
        execute_later(conn, 'default', 'syndicate_status',
            [uid, post, 0, True])
# <end id="sharded-syndicate-posts"/>

def _fake_shards_for(conn, component, count, actual):
    assert actual <= 4
    for i in xrange(count):
        m = i % actual
        conn.set('config:redis:%s:%i'%(component, i), json.dumps({'db':14 - m}))

class TestCh10(unittest.TestCase):
    def _flush(self):
        self.conn.flushdb()
        redis.Redis(db=14).flushdb()
        redis.Redis(db=13).flushdb()
        redis.Redis(db=12).flushdb()
        redis.Redis(db=11).flushdb()

    def setUp(self):
        self.conn = redis.Redis(db=15)
        self._flush()
        global config_connection
        config_connection = self.conn
        self.conn.set('config:redis:test', json.dumps({'db':15}))

    def tearDown(self):
        self._flush()

    def test_get_sharded_connections(self):
        _fake_shards_for(self.conn, 'shard', 2, 2)

        for i in xrange(10):
            get_sharded_connection('shard', i, 2).sadd('foo', i)

        s0 = redis.Redis(db=14).scard('foo')
        s1 = redis.Redis(db=13).scard('foo')
        self.assertTrue(s0 < 10)
        self.assertTrue(s1 < 10)
        self.assertEquals(s0 + s1, 10)

    def test_count_visit(self):
        shards = {'db':13}, {'db':14}
        self.conn.set('config:redis:unique', json.dumps({'db':15}))
        for i in xrange(16):
            self.conn.set('config:redis:unique:%s'%i, json.dumps(shards[i&1]))

        for i in xrange(100):
            count_visit(str(uuid.uuid4()))
        base = 'unique:%s'%date.today().isoformat()
        total = 0
        for c in shards:
            conn = redis.Redis(**c)
            keys = conn.keys(base + ':*')
            for k in keys:
                cnt = conn.scard(k)
                total += cnt
                self.assertTrue(cnt < 100)
        self.assertEquals(total, 100)
        self.assertEquals(self.conn.get(base), '100')

    def test_sharded_search(self):
        _fake_shards_for(self.conn, 'search', 2, 2)

        docs = 'hello world how are you doing'.split(), 'this world is doing fine'.split()
        for i in xrange(50):
            c = get_sharded_connection('search', i, 2)
            index_document(c, i, docs[i&1], {'updated':time.time() + i, 'id':i, 'created':time.time() + i})
            r = search_and_sort(c, docs[i&1], sort='-id')
            self.assertEquals(r[1][0], str(i))

        total = 0
        for shard in (0,1):
            count = search_get_values(get_redis_connection('search:%s'%shard), ['this', 'world'], num=50)[0]
            total += count
            self.assertTrue(count < 50)
            self.assertTrue(count > 0)

        self.assertEquals(total, 25)

        count, r, id = get_shard_results('search', 2, ['world', 'doing'], num=50)
        self.assertEquals(count, 50)
        self.assertEquals(count, len(r))

        self.assertEquals(get_shard_results('search', 2, ['this', 'doing'], num=50)[0], 25)

        count, r, id = get_shard_results_thread('search', 2, ['this', 'doing'], num=50)
        self.assertEquals(count, 25)
        self.assertEquals(count, len(r))
        r.sort(key=lambda x:x[1], reverse=True)
        r = list(zip(*r)[0])

        count, r2, id = search_shards('search', 2, ['this', 'doing'])
        self.assertEquals(count, 25)
        self.assertEquals(len(r2), 20)
        self.assertEquals(r2, r[:20])

    def test_sharded_follow_user(self):
        _fake_shards_for(self.conn, 'timelines', 8, 4)

        sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
        for u2 in xrange(2, 11):
            sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
            _follow_user(self.conn, 1, u2)
            _follow_user(self.conn, u2, 1)

        self.assertEquals(self.conn.zcard('followers:1'), 9)
        self.assertEquals(self.conn.zcard('following:1'), 9)
        self.assertEquals(sharded_timelines['home:1'].zcard('home:1'), 9)

        for db in xrange(14, 10, -1):
            self.assertTrue(len(redis.Redis(db=db).keys()) > 0)
        for u2 in xrange(2, 11):
            self.assertEquals(self.conn.zcard('followers:%i'%u2), 1)
            self.assertEquals(self.conn.zcard('following:%i'%u2), 1)
            self.assertEquals(sharded_timelines['home:%i'%u2].zcard('home:%i'%u2), 1)

    def test_sharded_follow_user_and_syndicate_status(self):
        _fake_shards_for(self.conn, 'timelines', 8, 4)
        _fake_shards_for(self.conn, 'followers', 4, 4)
        sharded_followers.shards = 4

        sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
        for u2 in xrange(2, 11):
            sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
            follow_user(self.conn, 1, u2)
            follow_user(self.conn, u2, 1)

        allkeys = defaultdict(int)
        for db in xrange(14, 10, -1):
            c = redis.Redis(db=db)
            for k in c.keys():
                allkeys[k] += c.zcard(k)

        for k, v in allkeys.iteritems():
            part, _, owner = k.partition(':')
            if part in ('following', 'followers', 'home'):
                self.assertEquals(v, 9 if owner == '1' else 1)
            elif part == 'profile':
                self.assertEquals(v, 1)

        self.assertEquals(len(sharded_zrangebyscore('followers', 4, 'followers:1', '0', 'inf', 100)), 9)
        syndicate_status(1, {'11':time.time()})
        self.assertEquals(len(sharded_zrangebyscore('timelines', 4, 'home:2', '0', 'inf', 100)), 2)

if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,733 @@
# coding: utf-8

import bisect
import math
import threading
import time
import unittest
import uuid

import redis

# Listing 11-1
# <start id="script-load"/>
def script_load(script):
    # Store the cached SHA1 checksum that SCRIPT LOAD returns in a list,
    # so that it can be modified later from inside the call() function.
    sha = [None]
    # When calling the loaded script, the caller passes in the Redis connection,
    # the keys the script will operate on, and any other arguments to the script.
    def call(conn, keys=[], args=[], force_eval=False):
        if not force_eval:
            # The script is only loaded when its SHA1 checksum has not been cached yet.
            if not sha[0]:
                # Load the given script if its SHA1 checksum is not cached.
                sha[0] = conn.execute_command(
                    "SCRIPT", "LOAD", script, parse="LOAD")

            try:
                # Execute the command using the cached SHA1 checksum.
                return conn.execute_command(
                    "EVALSHA", sha[0], len(keys), *(keys+args))

            except redis.exceptions.ResponseError as msg:
                # Re-raise the exception if the error is unrelated to a missing script.
                if not msg.args[0].startswith("NOSCRIPT"):
                    raise

        # When a script error is received, or when the caller forces direct
        # execution, run the script with the EVAL command. After EVAL has
        # executed the script, it caches the script automatically, and the
        # SHA1 checksum of the cached script is exactly the same as the one
        # that SCRIPT LOAD would produce.
        return conn.execute_command(
            "EVAL", script, len(keys), *(keys+args))

    # Return a function that automatically loads and executes the script when called.
    return call
# <end id="script-load"/>


'''
# <start id="show-script-load"/>
>>> ret_1 = script_load("return 1") # In most cases the function reference returned by the script loader is stored for later use.
>>> ret_1(conn)                     # The function can then be called by passing in a connection object and any other arguments the script needs.
1L                                  # Where possible, the script's return value is converted into the corresponding Python type.
# <end id="show-script-load"/>
'''


# Listing 11-2
# <start id="ch08-post-status"/>
def create_status(conn, uid, message, **data):
    pipeline = conn.pipeline(True)
    # Fetch the user's login name from their user ID.
    pipeline.hget('user:%s' % uid, 'login')
    # Create a new ID for the status message.
    pipeline.incr('status:id:')
    login, id = pipeline.execute()

    # Verify that the account exists before posting the status message.
    if not login:
        return None

    # Prepare and set the status message's fields.
    data.update({
        'message': message,
        'posted': time.time(),
        'id': id,
        'uid': uid,
        'login': login,
    })
    pipeline.hmset('status:%s' % id, data)
    # Update the count of status messages the user has posted.
    pipeline.hincrby('user:%s' % uid, 'posts')
    pipeline.execute()
    # Return the ID of the newly created status message.
    return id
# <end id="ch08-post-status"/>


_create_status = create_status
# Listing 11-3
# <start id="post-status-lua"/>
# This function takes the same arguments as the original message-posting function.
def create_status(conn, uid, message, **data):
    # Prepare the arguments and attributes used to set up the status message.
    args = [
        'message', message,
        'posted', time.time(),
        'uid', uid,
    ]
    for key, value in data.iteritems():
        args.append(key)
        args.append(value)

    return create_status_lua(
        conn, ['user:%s' % uid, 'status:id:'], args)

create_status_lua = script_load('''
-- Fetch the user's login name from their user ID.
-- Remember that Lua tables are indexed starting from 1,
-- not from 0 as in Python and many other languages.
local login = redis.call('hget', KEYS[1], 'login')
-- If the user is not logged in, report that to the caller.
if not login then
    return false
end
-- Get a new ID for the status message.
local id = redis.call('incr', KEYS[2])
-- Prepare the key that will store the status message.
local key = string.format('status:%s', id)

-- Set the status message's data.
redis.call('hmset', key,
    'login', login,
    'id', id,
    unpack(ARGV))
-- Increment the user's posted-message counter.
redis.call('hincrby', KEYS[1], 'posts', 1)

-- Return the status message's ID.
return id
''')
# <end id="post-status-lua"/>


# Listing 11-4
# <start id="old-lock"/>
def acquire_lock_with_timeout(
    conn, lockname, acquire_timeout=10, lock_timeout=10):
    # A 128-bit random identifier.
    identifier = str(uuid.uuid4())
    lockname = 'lock:' + lockname
    # Make sure that only integers are passed to EXPIRE.
    lock_timeout = int(math.ceil(lock_timeout))

    end = time.time() + acquire_timeout
    while time.time() < end:
        # Acquire the lock and set its expiration time.
        if conn.setnx(lockname, identifier):
            conn.expire(lockname, lock_timeout)
            return identifier
        # Check the expiration time, and update it if necessary.
        elif not conn.ttl(lockname):
            conn.expire(lockname, lock_timeout)

        time.sleep(.001)

    return False
# <end id="old-lock"/>


_acquire_lock_with_timeout = acquire_lock_with_timeout
# Listing 11-5
# <start id="lock-in-lua"/>
def acquire_lock_with_timeout(
    conn, lockname, acquire_timeout=10, lock_timeout=10):
    identifier = str(uuid.uuid4())
    lockname = 'lock:' + lockname
    lock_timeout = int(math.ceil(lock_timeout))

    acquired = False
    end = time.time() + acquire_timeout
    while time.time() < end and not acquired:
        # Perform the actual lock acquisition, checking to make sure
        # that the Lua call completed successfully.
        acquired = acquire_lock_with_timeout_lua(
            conn, [lockname], [lock_timeout, identifier]) == 'OK'

        time.sleep(.001 * (not acquired))

    return acquired and identifier

acquire_lock_with_timeout_lua = script_load('''
-- Check whether the lock already exists. (Again, Lua tables are indexed from 1.)
if redis.call('exists', KEYS[1]) == 0 then
    -- Set the key with the provided expiration time and identifier.
    return redis.call('setex', KEYS[1], unpack(ARGV))
end
''')
# <end id="lock-in-lua"/>


def release_lock(conn, lockname, identifier):
    pipe = conn.pipeline(True)
    lockname = 'lock:' + lockname

    while True:
        try:
            pipe.watch(lockname) #A
            if pipe.get(lockname) == identifier: #A
                pipe.multi() #B
                pipe.delete(lockname) #B
                pipe.execute() #B
                return True #B

            pipe.unwatch()
            break

        except redis.exceptions.WatchError: #C
            pass #C

    return False #D


_release_lock = release_lock
# Listing 11-6
# <start id="release-lock-in-lua"/>
def release_lock(conn, lockname, identifier):
    lockname = 'lock:' + lockname
    # Call the Lua function that releases the lock.
    return release_lock_lua(conn, [lockname], [identifier])

release_lock_lua = script_load('''
-- Check whether the lock matches.
if redis.call('get', KEYS[1]) == ARGV[1] then
    -- Delete the lock and make sure that the script always returns true.
    return redis.call('del', KEYS[1]) or true
end
''')
# <end id="release-lock-in-lua"/>

# Listing 11-7
# <start id="old-acquire-semaphore"/>
def acquire_semaphore(conn, semname, limit, timeout=10):
    # A 128-bit random identifier.
    identifier = str(uuid.uuid4())
    now = time.time()

    pipeline = conn.pipeline(True)
    # Clean out expired semaphore holders.
    pipeline.zremrangebyscore(semname, '-inf', now - timeout)
    # Try to acquire the semaphore.
    pipeline.zadd(semname, identifier, now)
    # Check whether the semaphore was acquired successfully.
    pipeline.zrank(semname, identifier)
    if pipeline.execute()[-1] < limit:
        return identifier

    # The semaphore was not acquired; remove the identifier that was added earlier.
    conn.zrem(semname, identifier)
    return None
# <end id="old-acquire-semaphore"/>


_acquire_semaphore = acquire_semaphore
# Listing 11-8
# <start id="acquire-semaphore-lua"/>
def acquire_semaphore(conn, semname, limit, timeout=10):
    # Get the current timestamp, used for handling timed-out semaphores.
    now = time.time()
    # Pass all of the required arguments into the Lua function
    # that actually acquires the semaphore.
    return acquire_semaphore_lua(conn, [semname],
        [now-timeout, limit, now, str(uuid.uuid4())])

acquire_semaphore_lua = script_load('''
-- Clean out all of the expired semaphores.
redis.call('zremrangebyscore', KEYS[1], '-inf', ARGV[1])

-- If a semaphore is still available, acquire it.
if redis.call('zcard', KEYS[1]) < tonumber(ARGV[2]) then
    -- Add the identifier to the timeout sorted set with the current timestamp.
    redis.call('zadd', KEYS[1], ARGV[3], ARGV[4])
    return ARGV[4]
end
''')
# <end id="acquire-semaphore-lua"/>

def release_semaphore(conn, semname, identifier):
    return conn.zrem(semname, identifier)

# Listing 11-9
# <start id="refresh-semaphore-lua"/>
def refresh_semaphore(conn, semname, identifier):
    return refresh_semaphore_lua(conn, [semname],
        # The Lua script returns nil when the semaphore was not refreshed,
        # and Python converts that nil into None and returns it to the caller.
        [identifier, time.time()]) != None

refresh_semaphore_lua = script_load('''
-- Update the semaphore's timestamp if the semaphore still exists.
if redis.call('zscore', KEYS[1], ARGV[1]) then
    return redis.call('zadd', KEYS[1], ARGV[2], ARGV[1]) or true
end
''')
# <end id="refresh-semaphore-lua"/>

valid_characters = '`abcdefghijklmnopqrstuvwxyz{'

def find_prefix_range(prefix):
    posn = bisect.bisect_left(valid_characters, prefix[-1:])
    suffix = valid_characters[(posn or 1) - 1]
    return prefix[:-1] + suffix + '{', prefix + '{'
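
# find_prefix_range() brackets every member that starts with the given prefix:
# the left endpoint replaces the prefix's last letter with its predecessor
# plus '{', and the right endpoint is the prefix itself plus '{'.
'''
>>> find_prefix_range('abc')
('abb{', 'abc{')
'''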
|
||||||
|
|
||||||
|
# 代码清单 11-10
|
||||||
|
# <start id="old-autocomplete-code"/>
|
||||||
|
def autocomplete_on_prefix(conn, guild, prefix):
|
||||||
|
# 根据给定的前缀计算出查找范围的起点和终点。
|
||||||
|
start, end = find_prefix_range(prefix)
|
||||||
|
identifier = str(uuid.uuid4())
|
||||||
|
start += identifier
|
||||||
|
end += identifier
|
||||||
|
zset_name = 'members:' + guild
|
||||||
|
|
||||||
|
# 将范围的起始元素和结束元素添加到有序集合里面。
|
||||||
|
conn.zadd(zset_name, start, 0, end, 0)
|
||||||
|
pipeline = conn.pipeline(True)
|
||||||
|
while 1:
|
||||||
|
try:
|
||||||
|
pipeline.watch(zset_name)
|
||||||
|
# 找到两个被插入元素在有序集合中的排名。
|
||||||
|
sindex = pipeline.zrank(zset_name, start)
|
||||||
|
eindex = pipeline.zrank(zset_name, end)
|
||||||
|
erange = min(sindex + 9, eindex - 2)
|
||||||
|
pipeline.multi()
|
||||||
|
# 获取范围内的值,然后删除之前插入的起始元素和结束元素。
|
||||||
|
pipeline.zrem(zset_name, start, end)
|
||||||
|
pipeline.zrange(zset_name, sindex, erange)
|
||||||
|
items = pipeline.execute()[-1]
|
||||||
|
break
|
||||||
|
# 如果自动补完有序集合已经被其他客户端修改过了,
|
||||||
|
# 那么进行重试。
|
||||||
|
except redis.exceptions.WatchError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 如果有其他自动补完操作正在执行,
|
||||||
|
# 那么从获取到的元素里面移除起始元素和终结元素。
|
||||||
|
return [item for item in items if '{' not in item]
|
||||||
|
# <end id="old-autocomplete-code"/>
|
||||||
|
|
||||||
|
|
||||||
|
_autocomplete_on_prefix = autocomplete_on_prefix
|
||||||
|
# 代码清单 11-11
|
||||||
|
# <start id="autocomplete-on-prefix-lua"/>
|
||||||
|
def autocomplete_on_prefix(conn, guild, prefix):
|
||||||
|
# 取得范围和标识符。
|
||||||
|
start, end = find_prefix_range(prefix)
|
||||||
|
identifier = str(uuid.uuid4())
|
||||||
|
|
||||||
|
# 使用 Lua 脚本从 Redis 里面获取数据。
|
||||||
|
items = autocomplete_on_prefix_lua(conn,
|
||||||
|
['members:' + guild],
|
||||||
|
[start+identifier, end+identifier])
|
||||||
|
|
||||||
|
# 过滤掉所有不想要的元素。
|
||||||
|
return [item for item in items if '{' not in item]
|
||||||
|
|
||||||
|
autocomplete_on_prefix_lua = script_load('''
|
||||||
|
-- 把标记起始范围和结束范围的元素添加到有序集合里面。
|
||||||
|
redis.call('zadd', KEYS[1], 0, ARGV[1], 0, ARGV[2])
|
||||||
|
-- 在有序集合里面找到范围元素的位置。
|
||||||
|
local sindex = redis.call('zrank', KEYS[1], ARGV[1])
|
||||||
|
local eindex = redis.call('zrank', KEYS[1], ARGV[2])
|
||||||
|
-- 计算出想要获取的元素所处的范围。
|
||||||
|
eindex = math.min(sindex + 9, eindex - 2)
|
||||||
|
|
||||||
|
-- 移除范围元素。
|
||||||
|
redis.call('zrem', KEYS[1], unpack(ARGV))
|
||||||
|
-- 获取并返回结果。
|
||||||
|
return redis.call('zrange', KEYS[1], sindex, eindex)
|
||||||
|
''')
|
||||||
|
# <end id="autocomplete-on-prefix-lua"/>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 11-12
|
||||||
|
# <start id="ch06-purchase-item-with-lock"/>
|
||||||
|
def purchase_item_with_lock(conn, buyerid, itemid, sellerid):
|
||||||
|
buyer = "users:%s" % buyerid
|
||||||
|
seller = "users:%s" % sellerid
|
||||||
|
item = "%s.%s" % (itemid, sellerid)
|
||||||
|
inventory = "inventory:%s" % buyerid
|
||||||
|
|
||||||
|
# 尝试获取锁。
|
||||||
|
locked = acquire_lock(conn, 'market:')
|
||||||
|
if not locked:
|
||||||
|
return False
|
||||||
|
|
||||||
|
pipe = conn.pipeline(True)
|
||||||
|
try:
|
||||||
|
# 检查物品是否已经售出,以及买家是否有足够的金钱来购买物品。
|
||||||
|
pipe.zscore("market:", item)
|
||||||
|
pipe.hget(buyer, 'funds')
|
||||||
|
price, funds = pipe.execute()
|
||||||
|
if price is None or price > funds:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# 将买家支付的货款转移给卖家,并将售出的物品转移给买家。
|
||||||
|
pipe.hincrby(seller, 'funds', int(price))
|
||||||
|
pipe.hincrby(buyer, 'funds', int(-price))
|
||||||
|
pipe.sadd(inventory, itemid)
|
||||||
|
pipe.zrem("market:", item)
|
||||||
|
pipe.execute()
|
||||||
|
return True
|
||||||
|
finally:
|
||||||
|
# 释放锁
|
||||||
|
release_lock(conn, 'market:', locked)
|
||||||
|
# <end id="ch06-purchase-item-with-lock"/>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 11-13
|
||||||
|
# <start id="purchase-item-lua"/>
|
||||||
|
def purchase_item(conn, buyerid, itemid, sellerid):
|
||||||
|
# 准备好执行 Lua 脚本所需的所有键和参数。
|
||||||
|
buyer = "users:%s" % buyerid
|
||||||
|
seller = "users:%s" % sellerid
|
||||||
|
item = "%s.%s"%(itemid, sellerid)
|
||||||
|
inventory = "inventory:%s" % buyerid
|
||||||
|
|
||||||
|
return purchase_item_lua(conn,
|
||||||
|
['market:', buyer, seller, inventory], [item, itemid])
|
||||||
|
|
||||||
|
purchase_item_lua = script_load('''
|
||||||
|
-- 获取物品的价格以及买家可用的金钱数量。
|
||||||
|
local price = tonumber(redis.call('zscore', KEYS[1], ARGV[1]))
|
||||||
|
local funds = tonumber(redis.call('hget', KEYS[2], 'funds'))
|
||||||
|
|
||||||
|
-- 如果物品仍在销售,并且买家也有足够的金钱,那么对物品和金钱进行相应的转移。
|
||||||
|
if price and funds and funds >= price then
|
||||||
|
redis.call('hincrby', KEYS[3], 'funds', price)
|
||||||
|
redis.call('hincrby', KEYS[2], 'funds', -price)
|
||||||
|
redis.call('sadd', KEYS[4], ARGV[2])
|
||||||
|
redis.call('zrem', KEYS[1], ARGV[1])
|
||||||
|
-- 返回真值表示购买操作执行成功。
|
||||||
|
return true
|
||||||
|
end
|
||||||
|
''')
|
||||||
|
# <end id="purchase-item-lua"/>
|
||||||
|
|
||||||
|
def list_item(conn, itemid, sellerid, price):
|
||||||
|
inv = "inventory:%s" % sellerid
|
||||||
|
item = "%s.%s" % (itemid, sellerid)
|
||||||
|
return list_item_lua(conn, [inv, 'market:'], [itemid, item, price])
|
||||||
|
|
||||||
|
list_item_lua = script_load('''
|
||||||
|
if redis.call('sismember', KEYS[1], ARGV[1]) ~= 0 then
|
||||||
|
redis.call('zadd', KEYS[2], ARGV[2], ARGV[3])
|
||||||
|
redis.call('srem', KEYS[1], ARGV[1])
|
||||||
|
return true
|
||||||
|
end
|
||||||
|
''')
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 11-14
|
||||||
|
# <start id="sharded-list-push"/>
|
||||||
|
def sharded_push_helper(conn, key, *items, **kwargs):
|
||||||
|
# 把元素组成的序列转换成列表。
|
||||||
|
items = list(items)
|
||||||
|
total = 0
|
||||||
|
# 仍然有元素需要推入……
|
||||||
|
while items:
|
||||||
|
# ……通过调用 Lua 脚本,把元素推入到分片列表里面。
|
||||||
|
pushed = sharded_push_lua(conn,
|
||||||
|
[key+':', key+':first', key+':last'],
|
||||||
|
# 这个程序目前每次最多只会推入 64 个元素,
|
||||||
|
# 读者可以根据自己的压缩列表最大长度来调整这个数值。
|
||||||
|
[kwargs['cmd']] + items[:64])
|
||||||
|
# 计算被推入的元素数量。
|
||||||
|
total += pushed
|
||||||
|
# 移除那些已经被推入到分片列表里面的元素。
|
||||||
|
del items[:pushed]
|
||||||
|
# 返回被推入元素的总数量。
|
||||||
|
return total
|
||||||
|
|
||||||
|
def sharded_lpush(conn, key, *items):
|
||||||
|
# 调用 sharded_push_helper() 函数,
|
||||||
|
# 并通过指定的参数告诉它应该执行左端推入操作还是右端推入操作。
|
||||||
|
return sharded_push_helper(conn, key, *items, cmd='lpush')
|
||||||
|
|
||||||
|
def sharded_rpush(conn, key, *items):
|
||||||
|
# 调用 sharded_push_helper() 函数,
|
||||||
|
# 并通过指定的参数告诉它应该执行左端推入操作还是右端推入操作。
|
||||||
|
return sharded_push_helper(conn, key, *items, cmd='rpush')
|
||||||
|
|
||||||
|
sharded_push_lua = script_load('''
|
||||||
|
-- 确定每个列表分片的最大长度。
|
||||||
|
local max = tonumber(redis.call(
|
||||||
|
'config', 'get', 'list-max-ziplist-entries')[2])
|
||||||
|
-- 如果没有元素需要进行推入,又或者压缩列表的最大长度太小,那么返回 0 。
|
||||||
|
if #ARGV < 2 or max < 2 then return 0 end
|
||||||
|
|
||||||
|
-- 弄清楚程序要对列表的左端还是右端进行推入,然后取得那一端对应的分片。
|
||||||
|
local skey = ARGV[1] == 'lpush' and KEYS[2] or KEYS[3]
|
||||||
|
local shard = redis.call('get', skey) or '0'
|
||||||
|
|
||||||
|
while 1 do
|
||||||
|
-- 取得分片的当前长度。
|
||||||
|
local current = tonumber(redis.call('llen', KEYS[1]..shard))
|
||||||
|
-- 计算出在不超过限制的情况下,可以将多少个元素推入到目前的列表里面。
|
||||||
|
-- 此外,在列表里面保留一个节点的空间以便处理之后可能发生的阻塞弹出操作。
|
||||||
|
local topush = math.min(#ARGV - 1, max - current - 1)
|
||||||
|
-- 在条件允许的情况下,向列表推入尽可能多的元素。
|
||||||
|
if topush > 0 then
|
||||||
|
redis.call(ARGV[1], KEYS[1]..shard, unpack(ARGV, 2, topush+1))
|
||||||
|
return topush
|
||||||
|
end
|
||||||
|
-- 否则的话,生成一个新的分片并继续进行未完成的推入工作。
|
||||||
|
shard = redis.call(ARGV[1] == 'lpush' and 'decr' or 'incr', skey)
|
||||||
|
end
|
||||||
|
''')
|
||||||
|
# <end id="sharded-list-push"/>
|
||||||
|
|
||||||
|
def sharded_llen(conn, key):
|
||||||
|
return sharded_llen_lua(conn, [key+':', key+':first', key+':last'])
|
||||||
|
|
||||||
|
sharded_llen_lua = script_load('''
|
||||||
|
local shardsize = tonumber(redis.call(
|
||||||
|
'config', 'get', 'list-max-ziplist-entries')[2])
|
||||||
|
|
||||||
|
local first = tonumber(redis.call('get', KEYS[2]) or '0')
|
||||||
|
local last = tonumber(redis.call('get', KEYS[3]) or '0')
|
||||||
|
|
||||||
|
local total = 0
|
||||||
|
total = total + tonumber(redis.call('llen', KEYS[1]..first))
|
||||||
|
if first ~= last then
|
||||||
|
total = total + (last - first - 1) * (shardsize-1)
|
||||||
|
total = total + tonumber(redis.call('llen', KEYS[1]..last))
|
||||||
|
end
|
||||||
|
|
||||||
|
return total
|
||||||
|
''')
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 11-15
|
||||||
|
# <start id="sharded-list-pop-lua"/>
|
||||||
|
def sharded_lpop(conn, key):
|
||||||
|
return sharded_list_pop_lua(
|
||||||
|
conn, [key+':', key+':first', key+':last'], ['lpop'])
|
||||||
|
|
||||||
|
def sharded_rpop(conn, key):
|
||||||
|
return sharded_list_pop_lua(
|
||||||
|
conn, [key+':', key+':first', key+':last'], ['rpop'])
|
||||||
|
|
||||||
|
sharded_list_pop_lua = script_load('''
|
||||||
|
-- 找到需要执行弹出操作的分片。
|
||||||
|
local skey = ARGV[1] == 'lpop' and KEYS[2] or KEYS[3]
|
||||||
|
-- 找到不需要执行弹出操作的分片。
|
||||||
|
local okey = ARGV[1] ~= 'lpop' and KEYS[2] or KEYS[3]
|
||||||
|
-- 获取需要执行弹出操作的分片的 ID 。
|
||||||
|
local shard = redis.call('get', skey) or '0'
|
||||||
|
|
||||||
|
-- 从分片对应的列表里面弹出一个元素。
|
||||||
|
local ret = redis.call(ARGV[1], KEYS[1]..shard)
|
||||||
|
-- 如果程序因为分片为空而没有得到弹出元素,
|
||||||
|
-- 又或者弹出操作使得分片变空了,那么对分片端点进行清理。
|
||||||
|
if not ret or redis.call('llen', KEYS[1]..shard) == '0' then
|
||||||
|
-- 获取不需要执行弹出操作的分片的 ID 。
|
||||||
|
local oshard = redis.call('get', okey) or '0'
|
||||||
|
|
||||||
|
-- 如果分片列表的两端相同,那么说明它已经不包含任何元素,操作执行完毕。
|
||||||
|
if shard == oshard then
|
||||||
|
return ret
|
||||||
|
end
|
||||||
|
|
||||||
|
-- 根据被弹出的元素来自列表的左端还是右端,
|
||||||
|
-- 决定应该增加还是减少分片的 ID 。
|
||||||
|
local cmd = ARGV[1] == 'lpop' and 'incr' or 'decr'
|
||||||
|
-- 调整分片的端点(endpoint)。
|
||||||
|
shard = redis.call(cmd, skey)
|
||||||
|
if not ret then
|
||||||
|
-- 如果之前没有取得弹出元素,那么尝试对新分片进行弹出。
|
||||||
|
ret = redis.call(ARGV[1], KEYS[1]..shard)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
return ret
|
||||||
|
''')
|
||||||
|
# <end id="sharded-list-pop-lua"/>
|
||||||
|
|
||||||
|
|
||||||
|
# 代码清单 11-16
|
||||||
|
# <start id="sharded-blocking-list-pop"/>
|
||||||
|
# 预先定义好的伪元素,读者也可以按自己的需要,
|
||||||
|
# 把这个伪元素替换成某个不可能出现在分片列表里面的值。
|
||||||
|
DUMMY = str(uuid.uuid4())
|
||||||
|
|
||||||
|
# 定义一个辅助函数,
|
||||||
|
# 这个函数会为左端阻塞弹出操作以及右端阻塞弹出操作执行实际的弹出动作。
|
||||||
|
def sharded_bpop_helper(conn, key, timeout, pop, bpop, endp, push):
|
||||||
|
# 准备好流水线对象和超时信息。
|
||||||
|
pipe = conn.pipeline(False)
|
||||||
|
timeout = max(timeout, 0) or 2**64
|
||||||
|
end = time.time() + timeout
|
||||||
|
|
||||||
|
while time.time() < end:
|
||||||
|
# 尝试执行一次非阻塞弹出,
|
||||||
|
# 如果这个操作成功取得了一个弹出值,
|
||||||
|
# 并且这个值并不是伪元素,那么返回这个值。
|
||||||
|
result = pop(conn, key)
|
||||||
|
if result not in (None, DUMMY):
|
||||||
|
return result
|
||||||
|
|
||||||
|
# 取得程序认为需要对其执行弹出操作的分片。
|
||||||
|
shard = conn.get(key + endp) or '0'
|
||||||
|
# 运行 Lua 脚本辅助程序,
|
||||||
|
# 它会在程序尝试从错误的分片里面弹出元素的时候,
|
||||||
|
# 将一个伪元素推入到那个分片里面。
|
||||||
|
sharded_bpop_helper_lua(pipe, [key + ':', key + endp],
|
||||||
|
# 因为程序不能在流水线里面执行一个可能会失败的 EVALSHA 调用,
|
||||||
|
# 所以这里需要使用 force_eval 参数,
|
||||||
|
# 确保程序调用的是 EVAL 命令而不是 EVALSHA 命令。
|
||||||
|
[shard, push, DUMMY], force_eval=True)
|
||||||
|
# 使用用户传入的 BLPOP 命令或 BRPOP 命令,对列表执行阻塞弹出操作。
|
||||||
|
getattr(pipe, bpop)(key + ':' + shard, 1)
|
||||||
|
|
||||||
|
# 如果命令返回了一个元素,那么程序执行完毕;否则的话,进行重试。
|
||||||
|
result = (pipe.execute()[-1] or [None])[-1]
|
||||||
|
if result not in (None, DUMMY):
|
||||||
|
return result
|
||||||
|
|
||||||
|
# 这个函数负责调用底层的阻塞弹出操作。
|
||||||
|
def sharded_blpop(conn, key, timeout=0):
|
||||||
|
return sharded_bpop_helper(
|
||||||
|
conn, key, timeout, sharded_lpop, 'blpop', ':first', 'lpush')
|
||||||
|
|
||||||
|
# 这个函数负责调用底层的阻塞弹出操作。
|
||||||
|
def sharded_brpop(conn, key, timeout=0):
|
||||||
|
return sharded_bpop_helper(
|
||||||
|
conn, key, timeout, sharded_rpop, 'brpop', ':last', 'rpush')
|
||||||
|
|
||||||
|
sharded_bpop_helper_lua = script_load('''
|
||||||
|
-- 找到程序想要对其执行弹出操作的列表端,并取得这个列表端对应的分片。
|
||||||
|
local shard = redis.call('get', KEYS[2]) or '0'
|
||||||
|
-- 如果程序接下来要从错误的分片里面弹出元素,那么将伪元素推入到那个分片里面。
|
||||||
|
if shard ~= ARGV[1] then
|
||||||
|
redis.call(ARGV[2], KEYS[1]..ARGV[1], ARGV[3])
|
||||||
|
end
|
||||||
|
''')
|
||||||
|
# <end id="sharded-blocking-list-pop"/>
|
||||||
|
|
||||||
|
# These tests expect a Redis server on localhost:6379, and they FLUSH
# database 15, so point them at a scratch instance only.
class TestCh11(unittest.TestCase):
    def setUp(self):
        self.conn = redis.Redis(db=15)
        self.conn.flushdb()

    def tearDown(self):
        self.conn.flushdb()

    def test_load_script(self):
        self.assertEquals(script_load("return 1")(self.conn), 1)

    def test_create_status(self):
        self.conn.hset('user:1', 'login', 'test')
        sid = _create_status(self.conn, 1, 'hello')
        sid2 = create_status(self.conn, 1, 'hello')

        self.assertEquals(self.conn.hget('user:1', 'posts'), '2')
        # Apart from their ids and timestamps, the two statuses should be identical.
        data = self.conn.hgetall('status:%s' % sid)
        data2 = self.conn.hgetall('status:%s' % sid2)
        data.pop('posted'); data.pop('id')
        data2.pop('posted'); data2.pop('id')
        self.assertEquals(data, data2)

    def test_locking(self):
        identifier = acquire_lock_with_timeout(self.conn, 'test', 1, 5)
        self.assertTrue(identifier)
        self.assertFalse(acquire_lock_with_timeout(self.conn, 'test', 1, 5))
        release_lock(self.conn, 'test', identifier)
        self.assertTrue(acquire_lock_with_timeout(self.conn, 'test', 1, 5))

    def test_semaphore(self):
        ids = []
        for i in xrange(5):
            ids.append(acquire_semaphore(self.conn, 'test', 5, timeout=1))
        self.assertTrue(None not in ids)
        self.assertFalse(acquire_semaphore(self.conn, 'test', 5, timeout=1))
        time.sleep(.01)
        id = acquire_semaphore(self.conn, 'test', 5, timeout=0)
        self.assertTrue(id)
        self.assertFalse(refresh_semaphore(self.conn, 'test', ids[-1]))
        self.assertFalse(release_semaphore(self.conn, 'test', ids[-1]))

        self.assertTrue(refresh_semaphore(self.conn, 'test', id))
        self.assertTrue(release_semaphore(self.conn, 'test', id))
        self.assertFalse(release_semaphore(self.conn, 'test', id))

    def test_autocomplete_on_prefix(self):
        for word in 'these are some words that we will be autocompleting on'.split():
            self.conn.zadd('members:test', word, 0)

        self.assertEquals(autocomplete_on_prefix(self.conn, 'test', 'th'), ['that', 'these'])
        self.assertEquals(autocomplete_on_prefix(self.conn, 'test', 'w'), ['we', 'will', 'words'])
        self.assertEquals(autocomplete_on_prefix(self.conn, 'test', 'autocompleting'), ['autocompleting'])

    def test_marketplace(self):
        self.conn.sadd('inventory:1', '1')
        self.conn.hset('users:2', 'funds', 5)
        self.assertFalse(list_item(self.conn, 2, 1, 10))
        self.assertTrue(list_item(self.conn, 1, 1, 10))
        self.assertFalse(purchase_item(self.conn, 2, '1', 1))
        self.conn.zadd('market:', '1.1', 4)
        self.assertTrue(purchase_item(self.conn, 2, '1', 1))

    def test_sharded_list(self):
        self.assertEquals(sharded_lpush(self.conn, 'lst', *range(100)), 100)
        self.assertEquals(sharded_llen(self.conn, 'lst'), 100)

        self.assertEquals(sharded_lpush(self.conn, 'lst2', *range(1000)), 1000)
        self.assertEquals(sharded_llen(self.conn, 'lst2'), 1000)
        self.assertEquals(sharded_rpush(self.conn, 'lst2', *range(-1, -1001, -1)), 1000)
        self.assertEquals(sharded_llen(self.conn, 'lst2'), 2000)

        self.assertEquals(sharded_lpop(self.conn, 'lst2'), '999')
        self.assertEquals(sharded_rpop(self.conn, 'lst2'), '-1000')

        # Pop 999 more items from the left end; the last of them should be '0'.
        for i in xrange(999):
            r = sharded_lpop(self.conn, 'lst2')
        self.assertEquals(r, '0')

        results = []
        def pop_some(conn, fcn, lst, count, timeout):
            for i in xrange(count):
                results.append(fcn(conn, lst, timeout))

        t = threading.Thread(target=pop_some, args=(self.conn, sharded_blpop, 'lst3', 10, 1))
        t.setDaemon(1)
        t.start()

        # Each blocking pop waits at most 1 second, so each batch of four
        # pushed values is followed by a single timed-out (None) result.
        self.assertEquals(sharded_rpush(self.conn, 'lst3', *range(4)), 4)
        time.sleep(2)
        self.assertEquals(sharded_rpush(self.conn, 'lst3', *range(4, 8)), 4)
        time.sleep(2)
        self.assertEquals(results, ['0', '1', '2', '3', None, '4', '5', '6', '7', None])


if __name__ == '__main__':
    unittest.main()
@ -0,0 +1,119 @@
'''
# <start id="linux-redis-install"/>
~:$ wget -q http://redis.googlecode.com/files/redis-2.6.2.tar.gz    # Download the most recent version of Redis from http://redis.io/download. At the time of writing, the latest version was 2.6.
~:$ tar -xzf redis-2.6.2.tar.gz                     # Extract the source code.
~:$ cd redis-2.6.2/
~/redis-2.6.2:$ make                                # Compile Redis.
cd src && make all                                  # Watch the compilation messages;
[trimmed]                                           # you should not see any errors here.
make[1]: Leaving directory `~/redis-2.6.2/src'      #
~/redis-2.6.2:$ sudo make install                   # Install Redis.
cd src && make install                              # Watch the installation messages;
[trimmed]                                           # you should not see any errors here.
make[1]: Leaving directory `~/redis-2.6.2/src'      #
~/redis-2.6.2:$ redis-server redis.conf             # Start the Redis server.
[13792] 26 Aug 17:53:16.523 * Max number of open files set to 10032    # Confirm from the log messages that Redis started successfully.
[trimmed]                                           #
[13792] 26 Aug 17:53:16.529 * The server is now ready to accept        #
connections on port 6379                            #
# <end id="linux-redis-install"/>
'''
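
# A quick post-install check (an addition, not part of the book's appendix):
# if the server started above is listening on the default port, PING should
# succeed. Host and port here are the Redis defaults.
def _check_redis_server(host='localhost', port=6379):
    import redis
    conn = redis.Redis(host=host, port=port)
    return conn.ping()  # True when the server is reachable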

'''
# <start id="linux-python-install"/>
~:$ wget -q http://peak.telecommunity.com/dist/ez_setup.py          # Download the ez_setup module.
~:$ sudo python ez_setup.py                         # Run the ez_setup module to download and install setuptools.
Downloading http://pypi.python.org/packages/2.7/s/setuptools/...    #
[trimmed]                                           #
Finished processing dependencies for setuptools==0.6c11             #
~:$ sudo python -m easy_install redis hiredis       # Use setuptools' easy_install package to install the redis and hiredis packages.
Searching for redis                                 # The redis package provides Python with an interface for connecting to Redis.
[trimmed]                                           #
Finished processing dependencies for redis          #
Searching for hiredis                               # The hiredis package is a C library that speeds up Python's Redis client.
[trimmed]                                           #
Finished processing dependencies for hiredis        #
~:$
# <end id="linux-python-install"/>
'''
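
# A small verification sketch (an addition, not part of the book's appendix):
# importing redis must succeed, while hiredis is an optional C accelerator
# that the client picks up automatically when present.
def _check_client_install():
    import redis  # the Python client library installed above
    try:
        import hiredis  # optional: speeds up reply parsing
        accelerated = True
    except ImportError:
        accelerated = False
    return redis.VERSION, accelerated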

'''
# <start id="mac-redis-install"/>
~:$ curl -O http://rudix.googlecode.com/hg/Ports/rudix/rudix.py     # Download the bootstrap script that installs Rudix.
[trimmed]
~:$ sudo python rudix.py install rudix              # Tell Rudix to install itself.
Downloading rudix.googlecode.com/files/rudix-12.6-0.pkg             # Rudix downloads and installs itself.
[trimmed]                                           #
installer: The install was successful.              #
All done                                            #
~:$ sudo rudix install redis                        # Tell Rudix to install Redis.
Downloading rudix.googlecode.com/files/redis-2.4.15-0.pkg           # Rudix downloads and installs Redis.
[trimmed]                                           #
installer: The install was successful.              #
All done                                            #
~:$ redis-server                                    # Start the Redis server.
[699] 13 Jul 21:18:09 # Warning: no config file specified, using the   # Redis starts up and runs with the default configuration.
default config. In order to specify a config file use 'redis-server    #
/path/to/redis.conf'                                #
[699] 13 Jul 21:18:09 * Server started, Redis version 2.4.15        #
[699] 13 Jul 21:18:09 * The server is now ready to accept connections  #
on port 6379                                        #
[699] 13 Jul 21:18:09 - 0 clients connected (0 slaves), 922304 bytes   #
in use                                              #
# <end id="mac-redis-install"/>
'''
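
# A follow-up check (an addition, not part of the book's appendix): the
# server's version, 2.4.15 in the transcript above, can be confirmed from
# Python via the INFO command.
def _check_server_version():
    import redis
    return redis.Redis().info()['redis_version']  # e.g. '2.4.15'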

'''
# <start id="mac-python-install"/>
~:$ sudo rudix install pip                          # Install the pip Python package manager via Rudix.
Downloading rudix.googlecode.com/files/pip-1.1-1.pkg                # Rudix is installing pip.
[trimmed]                                           #
installer: The install was successful.              #
All done                                            #
~:$ sudo pip install redis                          # Pip can now be used to install the Redis client library for Python.
Downloading/unpacking redis                         # Pip is installing the Redis client library for Python.
[trimmed]                                           #
Cleaning up...                                      #
~:$
# <end id="mac-python-install"/>
'''

'''
# <start id="windows-python-install"/>
C:\Users\josiah>c:\python27\python                  # Start Python in interactive mode.
Python 2.7.3 (default, Apr 10 2012, 23:31:26) [MSC v.1500 32 bit...
Type "help", "copyright", "credits" or "license" for more information.
>>> from urllib import urlopen                      # Load the urlopen factory function from the urllib module.
>>> data = urlopen('http://peak.telecommunity.com/dist/ez_setup.py')   # Fetch a module that will help you install other packages.
>>> open('ez_setup.py', 'wb').write(data.read())    # Write the downloaded module to a file on disk.
>>> exit()                                          # Quit the Python interpreter by running the built-in exit() function.

C:\Users\josiah>c:\python27\python ez_setup.py      # Run the ez_setup helper module.
Downloading http://pypi.python.org/packages/2.7/s/setuptools/...    # The ez_setup helper downloads and installs setuptools,
[trimmed]                                           # which makes it easy to download and install the Redis client library.
Finished processing dependencies for setuptools==0.6c11             #

C:\Users\josiah>c:\python27\python -m easy_install redis            # Use setuptools' easy_install module to download and install the redis package.
Searching for redis                                 #
[trimmed]                                           #
Finished processing dependencies for redis          #
C:\Users\josiah>
# <end id="windows-python-install"/>
'''

'''
# <start id="hello-redis-appendix"/>
~:$ python                                          # Start Python, and use it to verify that Redis is working properly.
Python 2.6.5 (r265:79063, Apr 16 2010, 13:09:56)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import redis                                    # Import the Redis client library; it will automatically use the hiredis C accelerator library if it is installed.
>>> conn = redis.Redis()                            # Create a connection to Redis.
>>> conn.set('hello', 'world')                      # Set a value,
True                                                # and confirm from the return value that the operation succeeded.
>>> conn.get('hello')                               # Fetch the value we just set.
'world'                                             #
# <end id="hello-redis-appendix"/>
'''