// Store each user's tweets as a linked list (newest at the head); use a max-heap to merge the feeds and fetch the 10 most recent tweets.
class Twitter {
Map> followMap;
//规定最新的放到最后
Map postMap;
//优先队列(堆)
PriorityQueue priorityQueue;
int timeStamp = 0;
int limit = 10;
public Twitter() {
followMap = new HashMap();
postMap = new HashMap();
//按照每一个推特发送的时间戳由大到小排布
priorityQueue = new PriorityQueue((t1,t2) -> t2.timeStamp - t1.timeStamp);
}
//userId发送推特
public void postTweet(int userId, int tweetId) {
//首先根据postMap来获取userId对应发送到文章
Tweet tweet = postMap.get(userId);
//生成新的tweet
Tweet newTweet = new Tweet(tweetId, timeStamp++, tweet);
postMap.put(userId,newTweet);
}
//根据userId获得自己和关注用户的10条推特,按时间顺序由近到远排序
public List getNewsFeed(int userId) {
//因为每一个用户都有自己的优先队列,所以先清空优先队列
priorityQueue.clear();
//将自己和关注的用户发送的最新的推特id先放入到优先队列
if (postMap.containsKey(userId))
priorityQueue.offer(postMap.get(userId));
Set follows = followMap.get(userId);
if (follows != null){
for (Integer follow : follows) {
if (postMap.containsKey(follow))
priorityQueue.offer(postMap.get(follow));
}
}
//现在用户和所有关注的推特都已经放入到优先队列,开始获取前10条
int count = 0;
ArrayList result = new ArrayList();
while (!priorityQueue.isEmpty() && count follows = followMap.getOrDefault(followerId, new HashSet());
follows.add(followeeId);
followMap.put(followerId,follows);
}
//取关
public void unfollow(int followerId, int followeeId) {
// 被关注人不能是自己
if (followeeId == followerId) {
return;
}
Set follows = followMap.getOrDefault(followerId, new HashSet());
follows.remove(followeeId);
followMap.put(followerId,follows);
}
}
class Tweet{
int id;
int timeStamp;
Tweet next;
public Tweet(int id, int timeStamp) {
this.id = id;
this.timeStamp = timeStamp;
}
public Tweet(int id, 服务器托管网int timeStamp, Tweet next) {
this.id = id;
this.timeStamp = timeStamp;
this.next = next;
}
}
/**
 * Your Twitter object will be instantiated and called as such:
 * Twitter obj = new Twitter();
 * obj.postTweet(userId, tweetId);
 * List&lt;Integer&gt; param_2 = obj.getNewsFeed(userId);
 * obj.follow(followerId, followeeId);
 * obj.unfollow(followerId, followeeId);
 */