#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That's why keys need not be unique (the
 * hash of different data may be the same). However, the user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */

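/*
 * A minimal usage sketch (illustrative only, not part of this module): a
 * hypothetical caller creates a cache, inserts an entry keyed by a content
 * hash, looks it up again and finally tears the cache down. Error handling
 * is omitted and the identifiers hash/blocknr are made-up local variables.
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *ce;
 *
 *	mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *	ce = mb_cache_entry_find_first(cache, hash);
 *	if (ce) {
 *		... reuse the block that ce->e_value refers to ...
 *		mb_cache_entry_put(cache, ce);
 *	}
 *	mb_cache_destroy(cache);
 */
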
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function returns
 * -EBUSY if entry with the same key and value already exists in cache.
 * Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/*
	 * We create entry with two references. One reference is kept by the
	 * hash table, the other reference is used to protect us from
	 * mb_cache_entry_delete_or_get() until the entry is fully setup. This
	 * avoids nesting of cache->c_list_lock into hash table bit locks which
	 * is problematic for RT.
	 */
	atomic_set(&entry->e_refcnt, 2);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_flags = 0;
	if (reusable)
		set_bit(MBE_REUSABLE_B, &entry->e_flags);
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);
	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);
	mb_cache_entry_put(cache, entry);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

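/*
 * __mb_cache_entry_free - release an entry whose last reference is gone
 * @cache - cache the entry belongs to
 * @entry - entry to free
 *
 * Removes the entry from its hash chain and frees its memory. Called once
 * the last reference to the entry has been dropped (see mb_cache_entry_put()).
 */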
void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, entry->e_key);
	hlist_bl_lock(head);
	hlist_bl_del(&entry->e_hash_list);
	hlist_bl_unlock(head);
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Wait to be the last user of the entry, i.e. until the only remaining
 * references are the hash table's and the caller's own (refcount <= 2).
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

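/*
 * __entry_find - find the next reusable entry with the given key
 * @cache - cache to search in
 * @entry - entry after which the search continues (or NULL to start from the
 *	    beginning of the hash chain)
 * @key - key to look for
 *
 * Walks the hash chain for @key and returns the next reusable entry with a
 * reference grabbed, or NULL if there is none. If @entry is non-NULL, the
 * reference it holds is dropped before returning.
 */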
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key &&
		    test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * the reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Search @cache for an entry with key @key and value @value, grab a reference
 * to it and return it. Returns NULL if there is no such entry.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - try to remove a cache entry
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no such entry in the cache.
 * Otherwise the function grabs a reference to the entry that we failed to
 * delete because it still has users, and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, key, value);
	if (!entry)
		return NULL;

	/*
	 * Drop the ref we got from mb_cache_entry_get() and the initial hash
	 * ref if we are the last user
	 */
	if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
		return entry;

	spin_lock(&cache->c_list_lock);
	if (!list_empty(&entry->e_list))
		list_del_init(&entry->e_list);
	cache->c_entry_count--;
	spin_unlock(&cache->c_list_lock);
	__mb_cache_entry_free(cache, entry);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);

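/*
 * A sketch of how mb_cache_entry_delete_or_get() and
 * mb_cache_entry_wait_unused() can be combined by a caller (illustrative
 * only; any serialization the caller needs around the object being reused
 * is omitted):
 *
 *	entry = mb_cache_entry_delete_or_get(cache, hash, blocknr);
 *	if (entry) {
 *		mb_cache_entry_wait_unused(entry);
 *		mb_cache_entry_put(cache, entry);
 *	}
 */
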
/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it a higher chance of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);

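/*
 * Shrinker callback reporting how many entries are currently in the cache and
 * thus available for reclaim.
 */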
static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Drop initial hash reference if there is no user */
		if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
		    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
			clear_bit(MBE_REFERENCED_B, &entry->e_flags);
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		spin_unlock(&cache->c_list_lock);
		__mb_cache_entry_free(cache, entry);
		shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

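/*
 * Shrinker callback asking us to reclaim up to sc->nr_to_scan entries under
 * memory pressure.
 */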
static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

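/*
 * Background work scheduled from mb_cache_entry_create() when the cache grows
 * past c_max_entries; reclaims a fixed fraction of the cache.
 */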
static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");