/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where head of the list has a lock in the lowest
 * bit. This is useful for scalable hash tables without increasing memory
 * footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
 */
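
/*
 * Illustrative usage sketch only (not part of this API): "struct foo",
 * "foo_table", "FOO_HASH_BITS" and "foo_hash()" are hypothetical names.
 * The head's bit lock must be held across any modification of the chain:
 *
 *	struct foo {
 *		int key;
 *		struct hlist_bl_node node;
 *	};
 *
 *	static struct hlist_bl_head foo_table[1 << FOO_HASH_BITS];
 *
 *	static void foo_insert(struct foo *f)
 *	{
 *		struct hlist_bl_head *head = &foo_table[foo_hash(f->key)];
 *
 *		hlist_bl_lock(head);
 *		hlist_bl_add_head(&f->node, head);
 *		hlist_bl_unlock(head);
 *	}
 */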

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif


struct hlist_bl_head {
	struct hlist_bl_node *first;
};

struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)

static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)

static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
	return !h->pprev;
}

static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_set_first(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}

static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
{
	return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_add_head(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;
	hlist_bl_set_first(h, n);
}

static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	WRITE_ONCE(*pprev,
		   (struct hlist_bl_node *)
			((unsigned long)next |
			 ((unsigned long)*pprev & LIST_BL_LOCKMASK)));
	if (next)
		next->pprev = pprev;
}

static inline void hlist_bl_del(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		INIT_HLIST_BL_NODE(n);
	}
}

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)b);
}

static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
	return bit_spin_is_locked(0, (unsigned long *)b);
}

/**
 * hlist_bl_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
	for (pos = hlist_bl_first(head);				\
	     pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
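
/*
 * Illustrative lookup sketch (hypothetical "struct foo" and "node" member as
 * in the example near the top of this file); the head's bit lock is held so
 * the chain cannot change underneath the walk:
 *
 *	static struct foo *foo_lookup(struct hlist_bl_head *head, int key)
 *	{
 *		struct hlist_bl_node *pos;
 *		struct foo *f;
 *
 *		hlist_bl_lock(head);
 *		hlist_bl_for_each_entry(f, pos, head, node) {
 *			if (f->key == key) {
 *				hlist_bl_unlock(head);
 *				return f;
 *			}
 *		}
 *		hlist_bl_unlock(head);
 *		return NULL;
 *	}
 */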

/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @n:		another &struct hlist_bl_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	 \
	for (pos = hlist_bl_first(head);				 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
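
/*
 * Illustrative removal sketch (same hypothetical names): the _safe variant
 * caches the next pointer in @n, so the entry under the cursor may be
 * deleted while walking the chain, as long as the bit lock is held:
 *
 *	static void foo_remove_key(struct hlist_bl_head *head, int key)
 *	{
 *		struct hlist_bl_node *pos, *n;
 *		struct foo *f;
 *
 *		hlist_bl_lock(head);
 *		hlist_bl_for_each_entry_safe(f, pos, n, head, node) {
 *			if (f->key == key)
 *				hlist_bl_del(&f->node);
 *		}
 *		hlist_bl_unlock(head);
 *	}
 */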

#endif