xref: /wlan-driver/platform/cnss2/pci_qcom.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
3 
4 #include "pci_platform.h"
5 #include "debug.h"
6 
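/*
 * Default MSI layout: 32 vectors shared by the users below; MHI uses
 * vectors 0-2, CE 3-12, WAKE 13 and DP 14-31.
 */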
7 static struct cnss_msi_config msi_config = {
8 	.total_vectors = 32,
9 	.total_users = MSI_USERS,
10 	.users = (struct cnss_msi_user[]) {
11 		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
12 		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
13 		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
14 		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
15 	},
16 };
17 
18 #ifdef CONFIG_ONE_MSI_VECTOR
19 /**
20  * All the users share the same MSI vector and MSI data.
21  * For the MHI user, we need to pass IRQ array information to the MHI
22  * component; MHI_IRQ_NUMBER specifies the size of that MHI IRQ array.
23  */
24 #define MHI_IRQ_NUMBER 3
25 static struct cnss_msi_config msi_config_one_msi = {
26 	.total_vectors = 1,
27 	.total_users = 4,
28 	.users = (struct cnss_msi_user[]) {
29 		{ .name = "MHI", .num_vectors = 1, .base_vector = 0 },
30 		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
31 		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
32 		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
33 	},
34 };
35 #endif
36 
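/* Enumerate the given PCIe root complex via the MSM PCIe RC driver. */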
37 int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
38 {
39 	return msm_pcie_enumerate(rc_num);
40 }
41 
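/*
 * Assert PERST on the endpoint by requesting MSM_PCIE_HANDLE_LINKDOWN
 * handling from the MSM PCIe RC driver.
 */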
42 int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
43 {
44 	struct pci_dev *pci_dev = pci_priv->pci_dev;
45 
46 	return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
47 				   pci_dev->bus->number, pci_dev, NULL,
48 				   PM_OPTIONS_DEFAULT);
49 }
50 
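/*
 * Vote to disable (vote == true) or re-enable (vote == false) PCIe power
 * collapse for this device via the MSM PCIe RC driver.
 */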
51 int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
52 {
53 	struct pci_dev *pci_dev = pci_priv->pci_dev;
54 
55 	return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
56 				   MSM_PCIE_ENABLE_PC,
57 				   pci_dev->bus->number, pci_dev, NULL,
58 				   PM_OPTIONS_DEFAULT);
59 }
60 
61 int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
62 				u16 link_speed, u16 link_width)
63 {
64 	return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
65 					   link_speed, link_width);
66 }
67 
68 int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
69 				u32 rc_num, u16 link_speed)
70 {
71 	return msm_pcie_set_target_link_speed(rc_num, link_speed, false);
72 }
73 
74 /**
75  * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
76  * @pci_priv: driver PCI bus context pointer
77  *
78  * This function shall call the corresponding PCIe root complex driver
79  * APIs to prevent the PCIe link from entering L1 and L1 sub-states. The
80  * APIs should also bring the link out of L1 or L1 sub-states if needed,
81  * and avoid any synchronization issues.
82  *
83  * Return: 0 for success, negative value for error
84  */
85 static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
86 {
87 	return msm_pcie_prevent_l1(pci_priv->pci_dev);
88 }
89 
90 /**
91  * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
92  * @pci_priv: driver PCI bus context pointer
93  *
94  * This function shall call the corresponding PCIe root complex driver
95  * APIs to allow the PCIe link to enter L1 and L1 sub-states. The APIs
96  * should avoid any synchronization issues.
97  *
98  * Return: None
99  */
100 static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
101 {
102 	msm_pcie_allow_l1(pci_priv->pci_dev);
103 }
104 
105 /**
106  * cnss_pci_set_link_up() - Power on or resume PCIe link
107  * @pci_priv: driver PCI bus context pointer
108  *
109  * This function shall call the corresponding PCIe root complex driver
110  * APIs to power on or resume the PCIe link.
111  *
112  * Return: 0 for success, negative value for error
113  */
114 static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
115 {
116 	struct pci_dev *pci_dev = pci_priv->pci_dev;
117 	enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
118 	u32 pm_options = PM_OPTIONS_DEFAULT;
119 	int ret;
120 
121 	ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
122 				  NULL, pm_options);
123 	if (ret)
124 		cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
125 			    ret);
126 
127 	return ret;
128 }
129 
130 /**
131  * cnss_pci_set_link_down() - Power off or suspend PCIe link
132  * @pci_priv: driver PCI bus context pointer
133  *
134  * This function shall call the corresponding PCIe root complex driver
135  * APIs to power off or suspend the PCIe link.
136  *
137  * Return: 0 for success, negative value for error
138  */
139 static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
140 {
141 	struct pci_dev *pci_dev = pci_priv->pci_dev;
142 	enum msm_pcie_pm_opt pm_ops;
143 	u32 pm_options = PM_OPTIONS_DEFAULT;
144 	int ret;
145 
146 	if (pci_priv->drv_connected_last) {
147 		cnss_pr_vdbg("Use PCIe DRV suspend\n");
148 		pm_ops = MSM_PCIE_DRV_SUSPEND;
149 	} else {
150 		pm_ops = MSM_PCIE_SUSPEND;
151 	}
152 
153 	ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
154 				  NULL, pm_options);
155 	if (ret)
156 		cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
157 			    ret);
158 
159 	return ret;
160 }
161 
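/*
 * Determine whether PCIe DRV is supported by checking the root port's
 * parent DT node for "qcom,drv-supported" or "qcom,drv-name", then cache
 * the result and advertise the capability when it is present.
 */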
162 void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv)
163 {
164 	struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
165 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
166 	struct device_node *root_of_node;
167 	bool drv_supported = false;
168 
169 	if (!root_port) {
170 		cnss_pr_err("PCIe DRV is not supported as root port is null\n");
171 		pci_priv->drv_supported = false;
172 		return;
173 	}
174 
175 	root_of_node = root_port->dev.of_node;
176 
177 	if (root_of_node->parent) {
178 		drv_supported = of_property_read_bool(root_of_node->parent,
179 						      "qcom,drv-supported") ||
180 				of_property_read_bool(root_of_node->parent,
181 						      "qcom,drv-name");
182 	}
183 
184 	cnss_pr_dbg("PCIe DRV is %s\n",
185 		    drv_supported ? "supported" : "not supported");
186 	pci_priv->drv_supported = drv_supported;
187 
188 	if (drv_supported) {
189 		plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
190 		cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
191 	}
192 }
193 
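/*
 * Event callback registered with the MSM PCIe RC driver. Handles link
 * recovery, link down, wakeup and DRV connect/disconnect notifications
 * for the WLAN endpoint.
 */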
194 static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
195 {
196 	struct pci_dev *pci_dev;
197 	struct cnss_pci_data *pci_priv;
198 	struct device *dev;
199 	struct cnss_plat_data *plat_priv = NULL;
200 	int ret = 0;
201 
202 	if (!notify)
203 		return;
204 
205 	pci_dev = notify->user;
206 	if (!pci_dev)
207 		return;
208 
209 	pci_priv = cnss_get_pci_priv(pci_dev);
210 	if (!pci_priv)
211 		return;
212 	dev = &pci_priv->pci_dev->dev;
213 
214 	switch (notify->event) {
215 	case MSM_PCIE_EVENT_LINK_RECOVER:
216 		cnss_pr_dbg("PCI link recover callback\n");
217 
218 		plat_priv = pci_priv->plat_priv;
219 		if (!plat_priv) {
220 			cnss_pr_err("plat_priv is NULL\n");
221 			return;
222 		}
223 
224 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
225 
226 		ret = msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
227 					  pci_dev->bus->number, pci_dev, NULL,
228 					  PM_OPTIONS_DEFAULT);
229 		if (ret)
230 			cnss_pci_handle_linkdown(pci_priv);
231 		break;
232 	case MSM_PCIE_EVENT_LINKDOWN:
233 		cnss_pr_dbg("PCI link down event callback\n");
234 		cnss_pci_handle_linkdown(pci_priv);
235 		break;
236 	case MSM_PCIE_EVENT_WAKEUP:
237 		cnss_pr_dbg("PCI Wake up event callback\n");
238 		if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
239 		     cnss_pci_get_auto_suspended(pci_priv)) ||
240 		     dev->power.runtime_status == RPM_SUSPENDING) {
241 			cnss_pci_set_monitor_wake_intr(pci_priv, false);
242 			cnss_pci_pm_request_resume(pci_priv);
243 		}
244 		complete(&pci_priv->wake_event_complete);
245 		break;
246 	case MSM_PCIE_EVENT_DRV_CONNECT:
247 		cnss_pr_dbg("DRV subsystem is connected\n");
248 		cnss_pci_set_drv_connected(pci_priv, 1);
249 		break;
250 	case MSM_PCIE_EVENT_DRV_DISCONNECT:
251 		cnss_pr_dbg("DRV subsystem is disconnected\n");
252 		if (cnss_pci_get_auto_suspended(pci_priv))
253 			cnss_pci_pm_request_resume(pci_priv);
254 		cnss_pci_set_drv_connected(pci_priv, 0);
255 		break;
256 	default:
257 		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
258 	}
259 }
260 
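/*
 * Register cnss_pci_event_cb() with the MSM PCIe RC driver for the events
 * handled above; DRV connect/disconnect notifications are requested only
 * when DRV is supported.
 */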
261 int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
262 {
263 	int ret = 0;
264 	struct msm_pcie_register_event *pci_event;
265 
266 	pci_event = &pci_priv->msm_pci_event;
267 	pci_event->events = MSM_PCIE_EVENT_LINK_RECOVER |
268 			    MSM_PCIE_EVENT_LINKDOWN |
269 			    MSM_PCIE_EVENT_WAKEUP;
270 
271 	if (cnss_pci_get_drv_supported(pci_priv))
272 		pci_event->events = pci_event->events |
273 			MSM_PCIE_EVENT_DRV_CONNECT |
274 			MSM_PCIE_EVENT_DRV_DISCONNECT;
275 
276 	pci_event->user = pci_priv->pci_dev;
277 	pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
278 	pci_event->callback = cnss_pci_event_cb;
279 	pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
280 
281 	ret = msm_pcie_register_event(pci_event);
282 	if (ret)
283 		cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
284 			    ret);
285 
286 	return ret;
287 }
288 
289 void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
290 {
291 	msm_pcie_deregister_event(&pci_priv->msm_pci_event);
292 }
293 
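/*
 * Enable or disable ADSP power collapse via the MSM_PCIE_DRV_PC_CTRL PM
 * control. No-op when DRV is not supported or the requested state is
 * already in effect.
 */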
294 int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
295 			     bool control)
296 {
297 	struct pci_dev *pci_dev = pci_priv->pci_dev;
298 	int ret = 0;
299 	u32 pm_options = PM_OPTIONS_DEFAULT;
300 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
301 
302 	if (!cnss_pci_get_drv_supported(pci_priv))
303 		return 0;
304 
305 	if (plat_priv->adsp_pc_enabled == control) {
306 		cnss_pr_dbg("ADSP power collapse already %s\n",
307 			    control ? "Enabled" : "Disabled");
308 		return 0;
309 	}
310 
311 	if (control)
312 		pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
313 	else
314 		pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
315 
316 	ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
317 				  pci_dev, NULL, pm_options);
318 	if (ret)
319 		return ret;
320 
321 	cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
322 	plat_priv->adsp_pc_enabled = control;
323 	return 0;
324 }
325 
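/*
 * Translate the requested link status (Gen1, Gen2 or the probed default)
 * into a link speed/width and apply it via cnss_pci_set_link_bandwidth();
 * the current link speed is cached on success.
 */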
326 static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
327 				    enum pci_link_status status)
328 {
329 	u16 link_speed, link_width = pci_priv->def_link_width;
330 	u16 one_lane = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
331 	int ret;
332 
333 	cnss_pr_vdbg("Set PCI link status to: %u\n", status);
334 
335 	switch (status) {
336 	case PCI_GEN1:
337 		link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
338 		if (!link_width)
339 			link_width = one_lane;
340 		break;
341 	case PCI_GEN2:
342 		link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
343 		if (!link_width)
344 			link_width = one_lane;
345 		break;
346 	case PCI_DEF:
347 		link_speed = pci_priv->def_link_speed;
348 		if (!link_speed || !link_width) {
349 			cnss_pr_err("PCI link speed or width is not valid\n");
350 			return -EINVAL;
351 		}
352 		break;
353 	default:
354 		cnss_pr_err("Unknown PCI link status config: %u\n", status);
355 		return -EINVAL;
356 	}
357 
358 	ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
359 	if (!ret)
360 		pci_priv->cur_link_speed = link_speed;
361 
362 	return ret;
363 }
364 
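/*
 * Bring the PCIe link up or down. Link training is retried up to
 * LINK_TRAINING_RETRY_MAX_TIMES on failure; when DRV suspend is used, the
 * link is dropped to Gen2 before suspend (DRV suspend cannot be done in
 * Gen3) and the default bandwidth is restored once the link is resumed.
 */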
365 int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
366 {
367 	int ret = 0, retry = 0;
368 	struct cnss_plat_data *plat_priv;
369 	int sw_ctrl_gpio;
370 
371 	plat_priv = pci_priv->plat_priv;
372 	sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
373 
374 	cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
375 
376 	if (link_up) {
377 retry:
378 		ret = cnss_pci_set_link_up(pci_priv);
379 		if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
380 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
381 			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
382 				    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
383 			if (pci_priv->pci_link_down_ind)
384 				msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
385 			goto retry;
386 		}
387 	} else {
388 		/* Since DRV suspend cannot be done in Gen 3, set it to
389 		 * Gen 2 if current link speed is larger than Gen 2.
390 		 */
391 		if (pci_priv->drv_connected_last &&
392 		    pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
393 			cnss_set_pci_link_status(pci_priv, PCI_GEN2);
394 
395 		ret = cnss_pci_set_link_down(pci_priv);
396 	}
397 
398 	if (pci_priv->drv_connected_last) {
399 		if ((link_up && !ret) || (!link_up && ret))
400 			cnss_set_pci_link_status(pci_priv, PCI_DEF);
401 	}
402 
403 	return ret;
404 }
405 
406 int cnss_pci_prevent_l1(struct device *dev)
407 {
408 	struct pci_dev *pci_dev = to_pci_dev(dev);
409 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
410 	int ret;
411 
412 	if (!pci_priv) {
413 		cnss_pr_err("pci_priv is NULL\n");
414 		return -ENODEV;
415 	}
416 
417 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
418 		cnss_pr_dbg("PCIe link is in suspend state\n");
419 		return -EIO;
420 	}
421 
422 	if (pci_priv->pci_link_down_ind) {
423 		cnss_pr_err("PCIe link is down\n");
424 		return -EIO;
425 	}
426 
427 	ret = _cnss_pci_prevent_l1(pci_priv);
428 	if (ret == -EIO) {
429 		cnss_pr_err("Failed to prevent PCIe L1, considered as link down\n");
430 		cnss_pci_link_down(dev);
431 	}
432 
433 	return ret;
434 }
435 EXPORT_SYMBOL(cnss_pci_prevent_l1);
436 
437 void cnss_pci_allow_l1(struct device *dev)
438 {
439 	struct pci_dev *pci_dev = to_pci_dev(dev);
440 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
441 
442 	if (!pci_priv) {
443 		cnss_pr_err("pci_priv is NULL\n");
444 		return;
445 	}
446 
447 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
448 		cnss_pr_dbg("PCIe link is in suspend state\n");
449 		return;
450 	}
451 
452 	if (pci_priv->pci_link_down_ind) {
453 		cnss_pr_err("PCIe link is down\n");
454 		return;
455 	}
456 
457 	_cnss_pci_allow_l1(pci_priv);
458 }
459 EXPORT_SYMBOL(cnss_pci_allow_l1);
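/*
 * Illustrative usage by a bus client (not taken from this driver): direct
 * register access is typically bracketed by these calls, e.g.
 *
 *	if (!cnss_pci_prevent_l1(dev)) {
 *		val = readl(bar + offset);
 *		cnss_pci_allow_l1(dev);
 *	}
 */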
460 
461 int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
462 {
463 	pci_priv->msi_config = &msi_config;
464 
465 	return 0;
466 }
467 
468 #ifdef CONFIG_ONE_MSI_VECTOR
469 int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
470 {
471 	pci_priv->msi_config = &msi_config_one_msi;
472 
473 	return 0;
474 }
475 
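/*
 * Fall back to the single shared MSI vector configuration when the full
 * set of vectors cannot be allocated; returns true and updates
 * *num_vectors on success.
 */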
476 bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
477 			       int *num_vectors)
478 {
479 	struct pci_dev *pci_dev = pci_priv->pci_dev;
480 	struct cnss_msi_config *msi_config;
481 
482 	cnss_pci_get_one_msi_assignment(pci_priv);
483 	msi_config = pci_priv->msi_config;
484 	if (!msi_config) {
485 		cnss_pr_err("one msi_config is NULL!\n");
486 		return false;
487 	}
488 	*num_vectors = pci_alloc_irq_vectors(pci_dev,
489 					     msi_config->total_vectors,
490 					     msi_config->total_vectors,
491 					     PCI_IRQ_MSI);
492 	if (*num_vectors < 0) {
493 		cnss_pr_err("Failed to get one MSI vector!\n");
494 		return false;
495 	}
496 	cnss_pr_dbg("request MSI one vector\n");
497 	cnss_pr_dbg("Requested one MSI vector\n");
498 	return true;
499 }
500 
501 bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
502 {
503 	return pci_priv && pci_priv->msi_config &&
504 	       (pci_priv->msi_config->total_vectors == 1);
505 }
506 
507 int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
508 {
509 	return MHI_IRQ_NUMBER;
510 }
511 
512 bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
513 {
514 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
515 
516 	return test_bit(FORCE_ONE_MSI, &plat_priv->ctrl_params.quirks);
517 }
518 #else
519 int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
520 {
521 	return 0;
522 }
523 
524 bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
525 			       int *num_vectors)
526 {
527 	return false;
528 }
529 
530 bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
531 {
532 	return false;
533 }
534 
535 int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
536 {
537 	return 0;
538 }
539 
540 bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
541 {
542 	return false;
543 }
544 #endif
545 
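/*
 * IOMMU fault handler: record the SMMU fault, mark firmware as down and
 * force a firmware assert.
 */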
546 static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
547 				       struct device *dev, unsigned long iova,
548 				       int flags, void *handler_token)
549 {
550 	struct cnss_pci_data *pci_priv = handler_token;
551 
552 	cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);
553 
554 	if (!pci_priv) {
555 		cnss_pr_err("pci_priv is NULL\n");
556 		return -ENODEV;
557 	}
558 
559 	pci_priv->is_smmu_fault = true;
560 	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
561 	cnss_force_fw_assert(&pci_priv->pci_dev->dev);
562 
563 	/* IOMMU driver requires -ENOSYS to print debug info. */
564 	return -ENOSYS;
565 }
566 
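/*
 * Parse the "qcom,iommu-group" node referenced by the PCI device: enable
 * the SMMU S1 stage and register fault handlers when "qcom,iommu-dma" is
 * "fastmap", and record the IOVA windows from "qcom,iommu-dma-addr-pool"
 * and the optional "smmu_iova_ipa" platform resource.
 */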
567 int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
568 {
569 	struct pci_dev *pci_dev = pci_priv->pci_dev;
570 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
571 	struct device_node *of_node;
572 	struct resource *res;
573 	const char *iommu_dma_type;
574 	u32 addr_win[2];
575 	int ret = 0;
576 
577 	of_node = of_parse_phandle(pci_dev->dev.of_node, "qcom,iommu-group", 0);
578 	if (!of_node)
579 		return ret;
580 
581 	cnss_pr_dbg("Initializing SMMU\n");
582 
583 	pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
584 	ret = of_property_read_string(of_node, "qcom,iommu-dma",
585 				      &iommu_dma_type);
586 	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
587 		cnss_pr_dbg("Enabling SMMU S1 stage\n");
588 		pci_priv->smmu_s1_enable = true;
589 		iommu_set_fault_handler(pci_priv->iommu_domain,
590 					cnss_pci_smmu_fault_handler, pci_priv);
591 		cnss_register_iommu_fault_handler_irq(pci_priv);
592 	}
593 
594 	ret = of_property_read_u32_array(of_node,  "qcom,iommu-dma-addr-pool",
595 					 addr_win, ARRAY_SIZE(addr_win));
596 	if (ret) {
597 		cnss_pr_err("Invalid SMMU size window, err = %d\n", ret);
598 		of_node_put(of_node);
599 		return ret;
600 	}
601 
602 	pci_priv->smmu_iova_start = addr_win[0];
603 	pci_priv->smmu_iova_len = addr_win[1];
604 	cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
605 		    &pci_priv->smmu_iova_start,
606 		    pci_priv->smmu_iova_len);
607 
608 	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
609 					   "smmu_iova_ipa");
610 	if (res) {
611 		pci_priv->smmu_iova_ipa_start = res->start;
612 		pci_priv->smmu_iova_ipa_current = res->start;
613 		pci_priv->smmu_iova_ipa_len = resource_size(res);
614 		cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
615 			    &pci_priv->smmu_iova_ipa_start,
616 			    pci_priv->smmu_iova_ipa_len);
617 	}
618 
619 	pci_priv->iommu_geometry = of_property_read_bool(of_node,
620 							 "qcom,iommu-geometry");
621 	cnss_pr_dbg("iommu_geometry: %d\n", pci_priv->iommu_geometry);
622 
623 	of_node_put(of_node);
624 
625 	return 0;
626 }
627 
628 int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv,
629 			   u8 *buf, u32 len)
630 {
631 	return msm_pcie_reg_dump(pci_priv->pci_dev, buf, len);
632 }
633 
634 #if IS_ENABLED(CONFIG_ARCH_QCOM)
635 /**
636  * cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
637  *                                          to given PCI device
638  * @pci_priv: driver PCI bus context pointer
639  *
640  * This function shall call the corresponding of_reserved_mem_device* API
641  * to assign a reserved memory region to the PCI device, based on where
642  * the memory is defined and attached (platform device of_node or PCI
643  * device of_node) in the device tree.
644  *
645  * Return: 0 for success, negative value for error
646  */
647 int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
648 {
649 	struct device *dev_pci = &pci_priv->pci_dev->dev;
650 	int ret;
651 
652 	/* Use of_reserved_mem_device_init_by_idx() if reserved memory is
653 	 * attached to platform device of_node.
654 	 */
655 	ret = of_reserved_mem_device_init(dev_pci);
656 	if (ret) {
657 		if (ret == -EINVAL)
658 			cnss_pr_vdbg("Ignore, no specific reserved-memory assigned\n");
659 		else
660 			cnss_pr_err("Failed to init reserved mem device, err = %d\n",
661 				    ret);
662 	}
663 	if (dev_pci->cma_area)
664 		cnss_pr_dbg("CMA area is %s\n",
665 			    cma_get_name(dev_pci->cma_area));
666 
667 	return ret;
668 }
669 
670 int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
671 {
672 	return 0;
673 }
674 
675 void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
676 {
677 }
678 #endif
679 
680