xref: /wlan-driver/qcacld-3.0/os_if/sync/src/osif_vdev_sync.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
/*
 * Copyright (c) 2018-2019, 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "linux/device.h"
#include "linux/netdevice.h"
#include "__osif_psoc_sync.h"
#include "__osif_vdev_sync.h"
#include "osif_vdev_sync.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_types.h"
#include <qdf_trace.h>
#include <wlan_cfg80211.h>
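/*
 * Pool of vdev sync contexts, one slot per potential vdev (including ML
 * vdevs). Slots are claimed, released, and looked up under
 * __osif_vdev_sync_lock.
 */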
static struct osif_vdev_sync __osif_vdev_sync_arr[WLAN_MAX_VDEVS +
						  WLAN_MAX_ML_VDEVS];
static qdf_spinlock_t __osif_vdev_sync_lock;

#define osif_vdev_sync_lock_create() qdf_spinlock_create(&__osif_vdev_sync_lock)
#define osif_vdev_sync_lock_destroy() \
	qdf_spinlock_destroy(&__osif_vdev_sync_lock)
#define osif_vdev_sync_lock() qdf_spin_lock_bh(&__osif_vdev_sync_lock)
#define osif_vdev_sync_unlock() qdf_spin_unlock_bh(&__osif_vdev_sync_lock)
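/**
 * osif_vdev_sync_lookup() - find the in-use sync context bound to @net_dev
 * @net_dev: the net_device to look up
 *
 * The caller must hold __osif_vdev_sync_lock.
 *
 * Return: the matching vdev sync context, or NULL if none is registered
 */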
static struct osif_vdev_sync *osif_vdev_sync_lookup(struct net_device *net_dev)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(__osif_vdev_sync_arr); i++) {
		struct osif_vdev_sync *vdev_sync = __osif_vdev_sync_arr + i;

		if (!vdev_sync->in_use)
			continue;

		if (vdev_sync->net_dev == net_dev)
			return vdev_sync;
	}

	return NULL;
}

struct osif_vdev_sync *osif_get_vdev_sync_arr(void)
{
	return __osif_vdev_sync_arr;
}
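/**
 * osif_vdev_sync_get() - claim the first free slot in the sync context pool
 *
 * The caller must hold __osif_vdev_sync_lock.
 *
 * Return: the claimed vdev sync context, or NULL if the pool is exhausted
 */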
static struct osif_vdev_sync *osif_vdev_sync_get(void)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(__osif_vdev_sync_arr); i++) {
		struct osif_vdev_sync *vdev_sync = __osif_vdev_sync_arr + i;

		if (!vdev_sync->in_use) {
			vdev_sync->in_use = true;
			return vdev_sync;
		}
	}

	return NULL;
}
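/**
 * osif_vdev_sync_put() - release a sync context back to the pool
 * @vdev_sync: the context to release; zeroing it clears the in_use flag
 *
 * The caller must hold __osif_vdev_sync_lock.
 */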
static void osif_vdev_sync_put(struct osif_vdev_sync *vdev_sync)
{
	qdf_mem_zero(vdev_sync, sizeof(*vdev_sync));
}

int osif_vdev_sync_create(struct device *dev,
			  struct osif_vdev_sync **out_vdev_sync)
{
	struct osif_vdev_sync *vdev_sync;
	QDF_STATUS status;

	QDF_BUG(dev);
	if (!dev)
		return -EINVAL;

	QDF_BUG(out_vdev_sync);
	if (!out_vdev_sync)
		return -EINVAL;

	osif_vdev_sync_lock();
	vdev_sync = osif_vdev_sync_get();
	osif_vdev_sync_unlock();
	if (!vdev_sync)
		return -ENOMEM;

	status = osif_psoc_sync_dsc_vdev_create(dev, &vdev_sync->dsc_vdev);
	if (QDF_IS_STATUS_ERROR(status))
		goto sync_put;

	*out_vdev_sync = vdev_sync;

	return 0;

sync_put:
	osif_vdev_sync_lock();
	osif_vdev_sync_put(vdev_sync);
	osif_vdev_sync_unlock();

	return qdf_status_to_os_return(status);
}

int __osif_vdev_sync_create_and_trans(struct device *dev,
				      struct osif_vdev_sync **out_vdev_sync,
				      const char *desc)
{
	struct osif_vdev_sync *vdev_sync;
	QDF_STATUS status;
	int errno;

	errno = osif_vdev_sync_create(dev, &vdev_sync);
	if (errno)
		return errno;

	status = dsc_vdev_trans_start(vdev_sync->dsc_vdev, desc);
	if (QDF_IS_STATUS_ERROR(status))
		goto sync_destroy;

	*out_vdev_sync = vdev_sync;

	return 0;

sync_destroy:
	osif_vdev_sync_destroy(vdev_sync);

	return qdf_status_to_os_return(status);
}

void osif_vdev_sync_destroy(struct osif_vdev_sync *vdev_sync)
{
	QDF_BUG(vdev_sync);
	if (!vdev_sync)
		return;

	dsc_vdev_destroy(&vdev_sync->dsc_vdev);

	osif_vdev_sync_lock();
	osif_vdev_sync_put(vdev_sync);
	osif_vdev_sync_unlock();
}

void osif_vdev_sync_register(struct net_device *net_dev,
			     struct osif_vdev_sync *vdev_sync)
{
	QDF_BUG(net_dev);
	QDF_BUG(vdev_sync);
	if (!vdev_sync)
		return;

	osif_vdev_sync_lock();
	vdev_sync->net_dev = net_dev;
	osif_vdev_sync_unlock();
}

struct osif_vdev_sync *osif_vdev_sync_unregister(struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	QDF_BUG(net_dev);
	if (!net_dev)
		return NULL;

	osif_vdev_sync_lock();
	vdev_sync = osif_vdev_sync_lookup(net_dev);
	if (vdev_sync)
		vdev_sync->net_dev = NULL;
	osif_vdev_sync_unlock();

	return vdev_sync;
}
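/*
 * Common signature of dsc_vdev_trans_start(), dsc_vdev_trans_start_wait(),
 * and _dsc_vdev_op_start(), passed to the start helpers below.
 */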
typedef QDF_STATUS (*vdev_start_func)(struct dsc_vdev *, const char *);
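/**
 * __osif_vdev_sync_start_callback() - look up @net_dev and start a
 *	transition/operation via @vdev_start_cb
 * @net_dev: the net_device to look up
 * @out_vdev_sync: output; set as soon as the lookup succeeds
 * @desc: description of the transition/operation being started
 * @vdev_start_cb: DSC start API to call; invoked with __osif_vdev_sync_lock
 *	held by the caller, so it must not sleep
 *
 * Return: 0 on success, -EAGAIN if @net_dev is not registered, otherwise the
 *	errno mapped from the DSC start status
 */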
static int
__osif_vdev_sync_start_callback(struct net_device *net_dev,
				struct osif_vdev_sync **out_vdev_sync,
				const char *desc,
				vdev_start_func vdev_start_cb)
{
	QDF_STATUS status;
	struct osif_vdev_sync *vdev_sync;

	*out_vdev_sync = NULL;

	vdev_sync = osif_vdev_sync_lookup(net_dev);
	if (!vdev_sync)
		return -EAGAIN;

	*out_vdev_sync = vdev_sync;

	status = vdev_start_cb(vdev_sync->dsc_vdev, desc);
	if (QDF_IS_STATUS_ERROR(status))
		return qdf_status_to_os_return(status);

	return 0;
}
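/**
 * __osif_vdev_sync_start_wait_callback() - like
 *	__osif_vdev_sync_start_callback(), but for start APIs that may sleep
 * @net_dev: the net_device to look up
 * @out_vdev_sync: output; set only once @vdev_start_cb succeeds
 * @desc: description of the transition being started
 * @vdev_start_cb: DSC start API to call; invoked after the lookup spinlock is
 *	dropped, so it is allowed to sleep
 *
 * Return: 0 on success, -EAGAIN if @net_dev is not registered, otherwise the
 *	errno mapped from the DSC start status
 */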
static int
__osif_vdev_sync_start_wait_callback(struct net_device *net_dev,
				     struct osif_vdev_sync **out_vdev_sync,
				     const char *desc,
				     vdev_start_func vdev_start_cb)
{
	QDF_STATUS status;
	struct osif_vdev_sync *vdev_sync;

	*out_vdev_sync = NULL;

	osif_vdev_sync_lock();
	vdev_sync = osif_vdev_sync_lookup(net_dev);
	osif_vdev_sync_unlock();
	if (!vdev_sync)
		return -EAGAIN;

	status = vdev_start_cb(vdev_sync->dsc_vdev, desc);
	if (QDF_IS_STATUS_ERROR(status))
		return qdf_status_to_os_return(status);

	*out_vdev_sync = vdev_sync;

	return 0;
}
/**
 * osif_vdev_sync_wait_for_uptree_ops() - wait for psoc/driver operations
 * @vdev_sync: vdev sync pointer
 *
 * If any psoc/driver operations are in progress, a vdev trans/op should
 * wait for them to complete in order to avoid memory domain mismatches.
 * For example, if the modules are closed because of idle shutdown, the
 * memory domain is the init domain. If a psoc op starts at that point,
 * the memory it allocates goes into the init domain. If a vdev up then
 * starts concurrently, it triggers a vdev trans, starts the modules and
 * switches the memory domain to the active domain. When the psoc op
 * completes, the memory allocated for it is released in the active
 * domain, which leads to a memory domain mismatch.
 *
 * Return: None.
 */
static void osif_vdev_sync_wait_for_uptree_ops(struct osif_vdev_sync *vdev_sync)
{
	dsc_vdev_wait_for_uptree_ops(vdev_sync->dsc_vdev);
}

int __osif_vdev_sync_trans_start(struct net_device *net_dev,
				 struct osif_vdev_sync **out_vdev_sync,
				 const char *desc)
{
	int errno;

	osif_vdev_sync_lock();
	errno = __osif_vdev_sync_start_callback(net_dev, out_vdev_sync, desc,
						dsc_vdev_trans_start);
	osif_vdev_sync_unlock();

	if (!errno) {
		osif_vdev_sync_wait_for_ops(*out_vdev_sync);
		osif_vdev_sync_wait_for_uptree_ops(*out_vdev_sync);
	}

	return errno;
}
int __osif_vdev_sync_trans_start_wait(struct net_device *net_dev,
				      struct osif_vdev_sync **out_vdev_sync,
				      const char *desc)
{
	int errno;

	/* dsc_vdev_trans_start_wait() may sleep, so do not take the lock here */
	errno = __osif_vdev_sync_start_wait_callback(net_dev,
						     out_vdev_sync, desc,
						     dsc_vdev_trans_start_wait);

	if (!errno) {
		osif_vdev_sync_wait_for_ops(*out_vdev_sync);
		osif_vdev_sync_wait_for_uptree_ops(*out_vdev_sync);
	}

	return errno;
}

void osif_vdev_sync_trans_stop(struct osif_vdev_sync *vdev_sync)
{
	dsc_vdev_trans_stop(vdev_sync->dsc_vdev);
}

void osif_vdev_sync_assert_trans_protected(struct net_device *net_dev)
{
	struct osif_vdev_sync *vdev_sync;

	osif_vdev_sync_lock();

	vdev_sync = osif_vdev_sync_lookup(net_dev);
	QDF_BUG(vdev_sync);
	if (vdev_sync)
		dsc_vdev_assert_trans_protected(vdev_sync->dsc_vdev);

	osif_vdev_sync_unlock();
}

int __osif_vdev_sync_op_start(struct net_device *net_dev,
			      struct osif_vdev_sync **out_vdev_sync,
			      const char *func)
{
	int errno;

	osif_vdev_sync_lock();
	errno = __osif_vdev_sync_start_callback(net_dev, out_vdev_sync, func,
						_dsc_vdev_op_start);
	osif_vdev_sync_unlock();

	return errno;
}

void __osif_vdev_sync_op_stop(struct osif_vdev_sync *vdev_sync,
			      const char *func)
{
	_dsc_vdev_op_stop(vdev_sync->dsc_vdev, func);
}

void osif_vdev_sync_wait_for_ops(struct osif_vdev_sync *vdev_sync)
{
	dsc_vdev_wait_for_ops(vdev_sync->dsc_vdev);
}

void osif_vdev_sync_init(void)
{
	osif_vdev_sync_lock_create();
}

void osif_vdev_sync_deinit(void)
{
	osif_vdev_sync_lock_destroy();
}

uint8_t osif_vdev_get_cached_cmd(struct osif_vdev_sync *vdev_sync)
{
	return dsc_vdev_get_cached_cmd(vdev_sync->dsc_vdev);
}

void osif_vdev_cache_command(struct osif_vdev_sync *vdev_sync, uint8_t cmd_id)
{
	dsc_vdev_cache_command(vdev_sync->dsc_vdev, cmd_id);
	osif_debug("Set cache cmd to %d", cmd_id);
}