1 /* Driver for Realtek PCI-Express card reader
2  *
3  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License as published by the
7  * Free Software Foundation; either version 2, or (at your option) any
8  * later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  * Author:
19  *   Wei WANG <wei_wang@realsil.com.cn>
20  */
21 
22 #include <linux/pci.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/highmem.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/idr.h>
30 #include <linux/platform_device.h>
31 #include <linux/mfd/core.h>
32 #include <linux/rtsx_pci.h>
33 #include <linux/mmc/card.h>
34 #include <asm/unaligned.h>
35 
36 #include "rtsx_pcr.h"
37 
38 static bool msi_en = true;
39 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(msi_en, "Enable MSI");
41 
42 static DEFINE_IDR(rtsx_pci_idr);
43 static DEFINE_SPINLOCK(rtsx_pci_lock);
44 
45 static struct mfd_cell rtsx_pcr_cells[] = {
46 	[RTSX_SD_CARD] = {
47 		.name = DRV_NAME_RTSX_PCI_SDMMC,
48 	},
49 	[RTSX_MS_CARD] = {
50 		.name = DRV_NAME_RTSX_PCI_MS,
51 	},
52 };
53 
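/*
 * Supported Realtek card readers.  Each entry matches on vendor/device ID
 * and additionally requires the PCI base class to be PCI_CLASS_OTHERS
 * (class mask 0xFF0000).
 */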
54 static const struct pci_device_id rtsx_pci_ids[] = {
55 	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
60 	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
61 	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
62 	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
63 	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
64 	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
65 	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
66 	{ 0, }
67 };
68 
69 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
70 
71 static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
72 {
73 	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
74 		0xFC, pcr->aspm_en);
75 }
76 
77 static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
78 {
79 	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
80 		0xFC, 0);
81 }
82 
83 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
84 {
85 	rtsx_pci_write_register(pcr, MSGTXDATA0,
86 				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
87 	rtsx_pci_write_register(pcr, MSGTXDATA1,
88 				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
89 	rtsx_pci_write_register(pcr, MSGTXDATA2,
90 				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
91 	rtsx_pci_write_register(pcr, MSGTXDATA3,
92 				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
93 	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
94 		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
95 
96 	return 0;
97 }
98 
99 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
100 {
101 	if (pcr->ops->set_ltr_latency)
102 		return pcr->ops->set_ltr_latency(pcr, latency);
103 	else
104 		return rtsx_comm_set_ltr_latency(pcr, latency);
105 }
106 
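/*
 * ASPM is controlled in one of two ways depending on dev_aspm_mode:
 * DEV_ASPM_DYNAMIC updates the PCIe Link Control register directly, while
 * DEV_ASPM_BACKDOOR forces the setting through the vendor ASPM_FORCE_CTL
 * register.  Other modes only record the new state in pcr->aspm_enabled.
 */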
107 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
108 {
109 	struct rtsx_cr_option *option = &pcr->option;
110 
111 	if (pcr->aspm_enabled == enable)
112 		return;
113 
114 	if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
115 		if (enable)
116 			rtsx_pci_enable_aspm(pcr);
117 		else
118 			rtsx_pci_disable_aspm(pcr);
119 	} else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
120 		u8 mask = FORCE_ASPM_VAL_MASK;
121 		u8 val = 0;
122 
123 		if (enable)
124 			val = pcr->aspm_en;
125 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL,  mask, val);
126 	}
127 
128 	pcr->aspm_enabled = enable;
129 }
130 
131 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
132 {
133 	if (pcr->ops->set_aspm)
134 		pcr->ops->set_aspm(pcr, false);
135 	else
136 		rtsx_comm_set_aspm(pcr, false);
137 }
138 
139 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
140 {
141 	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
142 
143 	return 0;
144 }
145 
146 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
147 {
148 	if (pcr->ops->set_l1off_cfg_sub_d0)
149 		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
150 }
151 
152 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
153 {
154 	struct rtsx_cr_option *option = &pcr->option;
155 
156 	rtsx_disable_aspm(pcr);
157 
158 	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
159 	msleep(1);
160 
161 	if (option->ltr_enabled)
162 		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
163 
164 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
165 		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
166 }
167 
168 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
169 {
170 	if (pcr->ops->full_on)
171 		pcr->ops->full_on(pcr);
172 	else
173 		rtsx_comm_pm_full_on(pcr);
174 }
175 
176 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
177 {
178 	/* If the PCI device has been removed, don't queue idle work any more */
179 	if (pcr->remove_pci)
180 		return;
181 
182 	if (pcr->state != PDEV_STAT_RUN) {
183 		pcr->state = PDEV_STAT_RUN;
184 		if (pcr->ops->enable_auto_blink)
185 			pcr->ops->enable_auto_blink(pcr);
186 		rtsx_pm_full_on(pcr);
187 	}
188 
189 	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
190 }
191 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
192 
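/*
 * Internal registers are accessed through the HAIMR window: one 32-bit
 * write packs the transfer-start bit, a 14-bit register address, an 8-bit
 * mask and 8 bits of data.  The hardware clears HAIMR_TRANS_END when the
 * access has completed, so these helpers poll that bit up to
 * MAX_RW_REG_CNT times; on writes the data byte read back is also
 * compared against the value that was written.
 */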
193 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
194 {
195 	int i;
196 	u32 val = HAIMR_WRITE_START;
197 
198 	val |= (u32)(addr & 0x3FFF) << 16;
199 	val |= (u32)mask << 8;
200 	val |= (u32)data;
201 
202 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
203 
204 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
205 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
206 		if ((val & HAIMR_TRANS_END) == 0) {
207 			if (data != (u8)val)
208 				return -EIO;
209 			return 0;
210 		}
211 	}
212 
213 	return -ETIMEDOUT;
214 }
215 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
216 
217 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
218 {
219 	u32 val = HAIMR_READ_START;
220 	int i;
221 
222 	val |= (u32)(addr & 0x3FFF) << 16;
223 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
224 
225 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
226 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
227 		if ((val & HAIMR_TRANS_END) == 0)
228 			break;
229 	}
230 
231 	if (i >= MAX_RW_REG_CNT)
232 		return -ETIMEDOUT;
233 
234 	if (data)
235 		*data = (u8)(val & 0xFF);
236 
237 	return 0;
238 }
239 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
240 
241 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
242 {
243 	int err, i, finished = 0;
244 	u8 tmp;
245 
246 	rtsx_pci_init_cmd(pcr);
247 
248 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
249 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
250 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
251 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
252 
253 	err = rtsx_pci_send_cmd(pcr, 100);
254 	if (err < 0)
255 		return err;
256 
257 	for (i = 0; i < 100000; i++) {
258 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
259 		if (err < 0)
260 			return err;
261 
262 		if (!(tmp & 0x80)) {
263 			finished = 1;
264 			break;
265 		}
266 	}
267 
268 	if (!finished)
269 		return -ETIMEDOUT;
270 
271 	return 0;
272 }
273 
274 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
275 {
276 	if (pcr->ops->write_phy)
277 		return pcr->ops->write_phy(pcr, addr, val);
278 
279 	return __rtsx_pci_write_phy_register(pcr, addr, val);
280 }
281 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
282 
283 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
284 {
285 	int err, i, finished = 0;
286 	u16 data;
287 	u8 *ptr, tmp;
288 
289 	rtsx_pci_init_cmd(pcr);
290 
291 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
292 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
293 
294 	err = rtsx_pci_send_cmd(pcr, 100);
295 	if (err < 0)
296 		return err;
297 
298 	for (i = 0; i < 100000; i++) {
299 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
300 		if (err < 0)
301 			return err;
302 
303 		if (!(tmp & 0x80)) {
304 			finished = 1;
305 			break;
306 		}
307 	}
308 
309 	if (!finished)
310 		return -ETIMEDOUT;
311 
312 	rtsx_pci_init_cmd(pcr);
313 
314 	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
315 	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
316 
317 	err = rtsx_pci_send_cmd(pcr, 100);
318 	if (err < 0)
319 		return err;
320 
321 	ptr = rtsx_pci_get_cmd_data(pcr);
322 	data = ((u16)ptr[1] << 8) | ptr[0];
323 
324 	if (val)
325 		*val = data;
326 
327 	return 0;
328 }
329 
330 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
331 {
332 	if (pcr->ops->read_phy)
333 		return pcr->ops->read_phy(pcr, addr, val);
334 
335 	return __rtsx_pci_read_phy_register(pcr, addr, val);
336 }
337 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
338 
339 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
340 {
341 	if (pcr->ops->stop_cmd)
342 		return pcr->ops->stop_cmd(pcr);
343 
344 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
345 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
346 
347 	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
348 	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
349 }
350 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
351 
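/*
 * Register accesses can also be batched through the host command buffer.
 * Each queued command is a 32-bit little-endian word: bits 31:30 hold the
 * command type, bits 29:16 the register address, bits 15:8 the mask and
 * bits 7:0 the data.  Commands are appended at index pcr->ci and executed
 * when rtsx_pci_send_cmd() (or the _no_wait variant) kicks off the buffer.
 */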
352 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
353 		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
354 {
355 	unsigned long flags;
356 	u32 val = 0;
357 	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
358 
359 	val |= (u32)(cmd_type & 0x03) << 30;
360 	val |= (u32)(reg_addr & 0x3FFF) << 16;
361 	val |= (u32)mask << 8;
362 	val |= (u32)data;
363 
364 	spin_lock_irqsave(&pcr->lock, flags);
365 	ptr += pcr->ci;
366 	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
367 		put_unaligned_le32(val, ptr);
368 		ptr++;
369 		pcr->ci++;
370 	}
371 	spin_unlock_irqrestore(&pcr->lock, flags);
372 }
373 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
374 
375 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
376 {
377 	u32 val = 1 << 31;
378 
379 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
380 
381 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
382 	/* Hardware Auto Response */
383 	val |= 0x40000000;
384 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
385 }
386 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
387 
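/*
 * Start the queued command buffer and wait for the interrupt handler to
 * report the outcome.  The DMA address of the buffer goes into HCBAR;
 * HCBCTLR receives the byte count together with the start (bit 31) and
 * hardware auto-response (bit 30) flags.  trans_result, filled in by the
 * ISR, is then translated into an errno.
 */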
388 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
389 {
390 	struct completion trans_done;
391 	u32 val = 1 << 31;
392 	long timeleft;
393 	unsigned long flags;
394 	int err = 0;
395 
396 	spin_lock_irqsave(&pcr->lock, flags);
397 
398 	/* set up data structures for the wakeup system */
399 	pcr->done = &trans_done;
400 	pcr->trans_result = TRANS_NOT_READY;
401 	init_completion(&trans_done);
402 
403 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
404 
405 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
406 	/* Hardware Auto Response */
407 	val |= 0x40000000;
408 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
409 
410 	spin_unlock_irqrestore(&pcr->lock, flags);
411 
412 	/* Wait for TRANS_OK_INT */
413 	timeleft = wait_for_completion_interruptible_timeout(
414 			&trans_done, msecs_to_jiffies(timeout));
415 	if (timeleft <= 0) {
416 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
417 		err = -ETIMEDOUT;
418 		goto finish_send_cmd;
419 	}
420 
421 	spin_lock_irqsave(&pcr->lock, flags);
422 	if (pcr->trans_result == TRANS_RESULT_FAIL)
423 		err = -EINVAL;
424 	else if (pcr->trans_result == TRANS_RESULT_OK)
425 		err = 0;
426 	else if (pcr->trans_result == TRANS_NO_DEVICE)
427 		err = -ENODEV;
428 	spin_unlock_irqrestore(&pcr->lock, flags);
429 
430 finish_send_cmd:
431 	spin_lock_irqsave(&pcr->lock, flags);
432 	pcr->done = NULL;
433 	spin_unlock_irqrestore(&pcr->lock, flags);
434 
435 	if ((err < 0) && (err != -ENODEV))
436 		rtsx_pci_stop_cmd(pcr);
437 
438 	if (pcr->finish_me)
439 		complete(pcr->finish_me);
440 
441 	return err;
442 }
443 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
444 
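/*
 * Data transfers use a scatter/gather descriptor table in host memory.
 * Each entry is a 64-bit little-endian word with the 32-bit DMA address
 * in the upper half, the segment length shifted into bits 31:12 and the
 * RTSX_SG_* option flags in the low bits; the last entry carries
 * RTSX_SG_END.
 */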
445 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
446 		dma_addr_t addr, unsigned int len, int end)
447 {
448 	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
449 	u64 val;
450 	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
451 
452 	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
453 
454 	if (end)
455 		option |= RTSX_SG_END;
456 	val = ((u64)addr << 32) | ((u64)len << 12) | option;
457 
458 	put_unaligned_le64(val, ptr);
459 	pcr->sgi++;
460 }
461 
462 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
463 		int num_sg, bool read, int timeout)
464 {
465 	int err = 0, count;
466 
467 	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
468 	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
469 	if (count < 1)
470 		return -EINVAL;
471 	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
472 
473 	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
474 
475 	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
476 
477 	return err;
478 }
479 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
480 
481 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
482 		int num_sg, bool read)
483 {
484 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
485 
486 	if (pcr->remove_pci)
487 		return -EINVAL;
488 
489 	if ((sglist == NULL) || (num_sg <= 0))
490 		return -EINVAL;
491 
492 	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
493 }
494 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
495 
496 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
497 		int num_sg, bool read)
498 {
499 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
500 
501 	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
502 }
503 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
504 
505 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
506 		int count, bool read, int timeout)
507 {
508 	struct completion trans_done;
509 	struct scatterlist *sg;
510 	dma_addr_t addr;
511 	long timeleft;
512 	unsigned long flags;
513 	unsigned int len;
514 	int i, err = 0;
515 	u32 val;
516 	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
517 
518 	if (pcr->remove_pci)
519 		return -ENODEV;
520 
521 	if ((sglist == NULL) || (count < 1))
522 		return -EINVAL;
523 
524 	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
525 	pcr->sgi = 0;
526 	for_each_sg(sglist, sg, count, i) {
527 		addr = sg_dma_address(sg);
528 		len = sg_dma_len(sg);
529 		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
530 	}
531 
532 	spin_lock_irqsave(&pcr->lock, flags);
533 
534 	pcr->done = &trans_done;
535 	pcr->trans_result = TRANS_NOT_READY;
536 	init_completion(&trans_done);
537 	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
538 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
539 
540 	spin_unlock_irqrestore(&pcr->lock, flags);
541 
542 	timeleft = wait_for_completion_interruptible_timeout(
543 			&trans_done, msecs_to_jiffies(timeout));
544 	if (timeleft <= 0) {
545 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
546 		err = -ETIMEDOUT;
547 		goto out;
548 	}
549 
550 	spin_lock_irqsave(&pcr->lock, flags);
551 	if (pcr->trans_result == TRANS_RESULT_FAIL) {
552 		err = -EILSEQ;
553 		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
554 			pcr->dma_error_count++;
555 	}
556 
557 	else if (pcr->trans_result == TRANS_NO_DEVICE)
558 		err = -ENODEV;
559 	spin_unlock_irqrestore(&pcr->lock, flags);
560 
561 out:
562 	spin_lock_irqsave(&pcr->lock, flags);
563 	pcr->done = NULL;
564 	spin_unlock_irqrestore(&pcr->lock, flags);
565 
566 	if ((err < 0) && (err != -ENODEV))
567 		rtsx_pci_stop_cmd(pcr);
568 
569 	if (pcr->finish_me)
570 		complete(pcr->finish_me);
571 
572 	return err;
573 }
574 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
575 
576 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
577 {
578 	int err;
579 	int i, j;
580 	u16 reg;
581 	u8 *ptr;
582 
583 	if (buf_len > 512)
584 		buf_len = 512;
585 
586 	ptr = buf;
587 	reg = PPBUF_BASE2;
588 	for (i = 0; i < buf_len / 256; i++) {
589 		rtsx_pci_init_cmd(pcr);
590 
591 		for (j = 0; j < 256; j++)
592 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
593 
594 		err = rtsx_pci_send_cmd(pcr, 250);
595 		if (err < 0)
596 			return err;
597 
598 		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
599 		ptr += 256;
600 	}
601 
602 	if (buf_len % 256) {
603 		rtsx_pci_init_cmd(pcr);
604 
605 		for (j = 0; j < buf_len % 256; j++)
606 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
607 
608 		err = rtsx_pci_send_cmd(pcr, 250);
609 		if (err < 0)
610 			return err;
611 	}
612 
613 	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
614 
615 	return 0;
616 }
617 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
618 
619 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
620 {
621 	int err;
622 	int i, j;
623 	u16 reg;
624 	u8 *ptr;
625 
626 	if (buf_len > 512)
627 		buf_len = 512;
628 
629 	ptr = buf;
630 	reg = PPBUF_BASE2;
631 	for (i = 0; i < buf_len / 256; i++) {
632 		rtsx_pci_init_cmd(pcr);
633 
634 		for (j = 0; j < 256; j++) {
635 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
636 					reg++, 0xFF, *ptr);
637 			ptr++;
638 		}
639 
640 		err = rtsx_pci_send_cmd(pcr, 250);
641 		if (err < 0)
642 			return err;
643 	}
644 
645 	if (buf_len % 256) {
646 		rtsx_pci_init_cmd(pcr);
647 
648 		for (j = 0; j < buf_len % 256; j++) {
649 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
650 					reg++, 0xFF, *ptr);
651 			ptr++;
652 		}
653 
654 		err = rtsx_pci_send_cmd(pcr, 250);
655 		if (err < 0)
656 			return err;
657 	}
658 
659 	return 0;
660 }
661 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
662 
663 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
664 {
665 	rtsx_pci_init_cmd(pcr);
666 
667 	while (*tbl & 0xFFFF0000) {
668 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
669 				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
670 		tbl++;
671 	}
672 
673 	return rtsx_pci_send_cmd(pcr, 100);
674 }
675 
676 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
677 {
678 	const u32 *tbl;
679 
680 	if (card == RTSX_SD_CARD)
681 		tbl = pcr->sd_pull_ctl_enable_tbl;
682 	else if (card == RTSX_MS_CARD)
683 		tbl = pcr->ms_pull_ctl_enable_tbl;
684 	else
685 		return -EINVAL;
686 
687 	return rtsx_pci_set_pull_ctl(pcr, tbl);
688 }
689 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
690 
691 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
692 {
693 	const u32 *tbl;
694 
695 	if (card == RTSX_SD_CARD)
696 		tbl = pcr->sd_pull_ctl_disable_tbl;
697 	else if (card == RTSX_MS_CARD)
698 		tbl = pcr->ms_pull_ctl_disable_tbl;
699 	else
700 		return -EINVAL;
701 
702 
703 	return rtsx_pci_set_pull_ctl(pcr, tbl);
704 }
705 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
706 
707 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
708 {
709 	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
710 
711 	if (pcr->num_slots > 1)
712 		pcr->bier |= MS_INT_EN;
713 
714 	/* Enable Bus Interrupt */
715 	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
716 
717 	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
718 }
719 
720 static inline u8 double_ssc_depth(u8 depth)
721 {
722 	return ((depth > 1) ? (depth - 1) : depth);
723 }
724 
725 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
726 {
727 	if (div > CLK_DIV_1) {
728 		if (ssc_depth > (div - 1))
729 			ssc_depth -= (div - 1);
730 		else
731 			ssc_depth = SSC_DEPTH_4M;
732 	}
733 
734 	return ssc_depth;
735 }
736 
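/*
 * Reprogram the SSC clock generator for a new card clock.  With the
 * default conversion (no conv_clk_and_div_n hook) the divider value is
 * n = clk - 2; if n falls below MIN_DIV_N_PCR, the generated clock is
 * doubled and the post-divider stepped from CLK_DIV_1 towards CLK_DIV_8
 * until it fits.  The SSC depth is adjusted for double-clock mode and
 * revised against the chosen post-divider before the new settings are
 * written out and the low-frequency mode is released.
 */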
737 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
738 		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
739 {
740 	int err, clk;
741 	u8 n, clk_divider, mcu_cnt, div;
742 	static const u8 depth[] = {
743 		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
744 		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
745 		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
746 		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
747 		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
748 	};
749 
750 	if (initial_mode) {
751 		/* Use a card clock of around 250kHz (30MHz / 128) in the initial stage */
752 		clk_divider = SD_CLK_DIVIDE_128;
753 		card_clock = 30000000;
754 	} else {
755 		clk_divider = SD_CLK_DIVIDE_0;
756 	}
757 	err = rtsx_pci_write_register(pcr, SD_CFG1,
758 			SD_CLK_DIVIDE_MASK, clk_divider);
759 	if (err < 0)
760 		return err;
761 
762 	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
763 	if (card_clock == UHS_SDR104_MAX_DTR &&
764 	    pcr->dma_error_count &&
765 	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
766 		card_clock = UHS_SDR104_MAX_DTR -
767 			(pcr->dma_error_count * 20000000);
768 
769 	card_clock /= 1000000;
770 	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
771 
772 	clk = card_clock;
773 	if (!initial_mode && double_clk)
774 		clk = card_clock * 2;
775 	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
776 		clk, pcr->cur_clock);
777 
778 	if (clk == pcr->cur_clock)
779 		return 0;
780 
781 	if (pcr->ops->conv_clk_and_div_n)
782 		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
783 	else
784 		n = (u8)(clk - 2);
785 	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
786 		return -EINVAL;
787 
788 	mcu_cnt = (u8)(125/clk + 3);
789 	if (mcu_cnt > 15)
790 		mcu_cnt = 15;
791 
792 	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
793 	div = CLK_DIV_1;
794 	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
795 		if (pcr->ops->conv_clk_and_div_n) {
796 			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
797 					DIV_N_TO_CLK) * 2;
798 			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
799 					CLK_TO_DIV_N);
800 		} else {
801 			n = (n + 2) * 2 - 2;
802 		}
803 		div++;
804 	}
805 	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
806 
807 	ssc_depth = depth[ssc_depth];
808 	if (double_clk)
809 		ssc_depth = double_ssc_depth(ssc_depth);
810 
811 	ssc_depth = revise_ssc_depth(ssc_depth, div);
812 	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
813 
814 	rtsx_pci_init_cmd(pcr);
815 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
816 			CLK_LOW_FREQ, CLK_LOW_FREQ);
817 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
818 			0xFF, (div << 4) | mcu_cnt);
819 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
820 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
821 			SSC_DEPTH_MASK, ssc_depth);
822 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
823 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
824 	if (vpclk) {
825 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
826 				PHASE_NOT_RESET, 0);
827 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
828 				PHASE_NOT_RESET, PHASE_NOT_RESET);
829 	}
830 
831 	err = rtsx_pci_send_cmd(pcr, 2000);
832 	if (err < 0)
833 		return err;
834 
835 	/* Wait for the SSC clock to stabilize */
836 	udelay(SSC_CLOCK_STABLE_WAIT);
837 	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
838 	if (err < 0)
839 		return err;
840 
841 	pcr->cur_clock = clk;
842 	return 0;
843 }
844 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
845 
846 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
847 {
848 	if (pcr->ops->card_power_on)
849 		return pcr->ops->card_power_on(pcr, card);
850 
851 	return 0;
852 }
853 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
854 
855 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
856 {
857 	if (pcr->ops->card_power_off)
858 		return pcr->ops->card_power_off(pcr, card);
859 
860 	return 0;
861 }
862 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
863 
864 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
865 {
866 	static const unsigned int cd_mask[] = {
867 		[RTSX_SD_CARD] = SD_EXIST,
868 		[RTSX_MS_CARD] = MS_EXIST
869 	};
870 
871 	if (!(pcr->flags & PCR_MS_PMOS)) {
872 		/* When a single PMOS is used, accessing the card is not
873 		 * permitted if the inserted card is not the designated one.
874 		 */
875 		if (pcr->card_exist & (~cd_mask[card]))
876 			return -EIO;
877 	}
878 
879 	return 0;
880 }
881 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
882 
883 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
884 {
885 	if (pcr->ops->switch_output_voltage)
886 		return pcr->ops->switch_output_voltage(pcr, voltage);
887 
888 	return 0;
889 }
890 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
891 
892 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
893 {
894 	unsigned int val;
895 
896 	val = rtsx_pci_readl(pcr, RTSX_BIPR);
897 	if (pcr->ops->cd_deglitch)
898 		val = pcr->ops->cd_deglitch(pcr);
899 
900 	return val;
901 }
902 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
903 
904 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
905 {
906 	struct completion finish;
907 
908 	pcr->finish_me = &finish;
909 	init_completion(&finish);
910 
911 	if (pcr->done)
912 		complete(pcr->done);
913 
914 	if (!pcr->remove_pci)
915 		rtsx_pci_stop_cmd(pcr);
916 
917 	wait_for_completion_interruptible_timeout(&finish,
918 			msecs_to_jiffies(2));
919 	pcr->finish_me = NULL;
920 }
921 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
922 
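/*
 * Delayed work scheduled from the interrupt handler whenever a card
 * insertion or removal has been latched.  It folds the latched bits into
 * pcr->card_exist (running the chip-specific deglitch hook if present)
 * and notifies the SD/MS slot drivers through their card_event callbacks.
 */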
923 static void rtsx_pci_card_detect(struct work_struct *work)
924 {
925 	struct delayed_work *dwork;
926 	struct rtsx_pcr *pcr;
927 	unsigned long flags;
928 	unsigned int card_detect = 0, card_inserted, card_removed;
929 	u32 irq_status;
930 
931 	dwork = to_delayed_work(work);
932 	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
933 
934 	pcr_dbg(pcr, "--> %s\n", __func__);
935 
936 	mutex_lock(&pcr->pcr_mutex);
937 	spin_lock_irqsave(&pcr->lock, flags);
938 
939 	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
940 	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
941 
942 	irq_status &= CARD_EXIST;
943 	card_inserted = pcr->card_inserted & irq_status;
944 	card_removed = pcr->card_removed;
945 	pcr->card_inserted = 0;
946 	pcr->card_removed = 0;
947 
948 	spin_unlock_irqrestore(&pcr->lock, flags);
949 
950 	if (card_inserted || card_removed) {
951 		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
952 			card_inserted, card_removed);
953 
954 		if (pcr->ops->cd_deglitch)
955 			card_inserted = pcr->ops->cd_deglitch(pcr);
956 
957 		card_detect = card_inserted | card_removed;
958 
959 		pcr->card_exist |= card_inserted;
960 		pcr->card_exist &= ~card_removed;
961 	}
962 
963 	mutex_unlock(&pcr->pcr_mutex);
964 
965 	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
966 		pcr->slots[RTSX_SD_CARD].card_event(
967 				pcr->slots[RTSX_SD_CARD].p_dev);
968 	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
969 		pcr->slots[RTSX_MS_CARD].card_event(
970 				pcr->slots[RTSX_MS_CARD].p_dev);
971 }
972 
973 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
974 {
975 	if (pcr->ops->process_ocp)
976 		pcr->ops->process_ocp(pcr);
977 }
978 
979 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
980 {
981 	if (pcr->option.ocp_en)
982 		rtsx_pci_process_ocp(pcr);
983 
984 	return 0;
985 }
986 
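/*
 * Interrupt handler.  Pending bits are read from RTSX_BIPR and written
 * back to acknowledge them.  A status with none of the enabled bits set
 * is not ours; an all-ones read most likely means the device is gone and
 * is simply acknowledged.  Card insert/remove events are latched for the
 * card-detect work, and transfer completion or failure is signalled to
 * whoever is waiting on pcr->done.
 */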
987 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
988 {
989 	struct rtsx_pcr *pcr = dev_id;
990 	u32 int_reg;
991 
992 	if (!pcr)
993 		return IRQ_NONE;
994 
995 	spin_lock(&pcr->lock);
996 
997 	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
998 	/* Clear interrupt flag */
999 	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
1000 	if ((int_reg & pcr->bier) == 0) {
1001 		spin_unlock(&pcr->lock);
1002 		return IRQ_NONE;
1003 	}
1004 	if (int_reg == 0xFFFFFFFF) {
1005 		spin_unlock(&pcr->lock);
1006 		return IRQ_HANDLED;
1007 	}
1008 
1009 	int_reg &= (pcr->bier | 0x7FFFFF);
1010 
1011 	if (int_reg & SD_OC_INT)
1012 		rtsx_pci_process_ocp_interrupt(pcr);
1013 
1014 	if (int_reg & SD_INT) {
1015 		if (int_reg & SD_EXIST) {
1016 			pcr->card_inserted |= SD_EXIST;
1017 		} else {
1018 			pcr->card_removed |= SD_EXIST;
1019 			pcr->card_inserted &= ~SD_EXIST;
1020 		}
1021 		pcr->dma_error_count = 0;
1022 	}
1023 
1024 	if (int_reg & MS_INT) {
1025 		if (int_reg & MS_EXIST) {
1026 			pcr->card_inserted |= MS_EXIST;
1027 		} else {
1028 			pcr->card_removed |= MS_EXIST;
1029 			pcr->card_inserted &= ~MS_EXIST;
1030 		}
1031 	}
1032 
1033 	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1034 		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1035 			pcr->trans_result = TRANS_RESULT_FAIL;
1036 			if (pcr->done)
1037 				complete(pcr->done);
1038 		} else if (int_reg & TRANS_OK_INT) {
1039 			pcr->trans_result = TRANS_RESULT_OK;
1040 			if (pcr->done)
1041 				complete(pcr->done);
1042 		}
1043 	}
1044 
1045 	if (pcr->card_inserted || pcr->card_removed)
1046 		schedule_delayed_work(&pcr->carddet_work,
1047 				msecs_to_jiffies(200));
1048 
1049 	spin_unlock(&pcr->lock);
1050 	return IRQ_HANDLED;
1051 }
1052 
1053 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1054 {
1055 	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1056 			__func__, pcr->msi_en, pcr->pci->irq);
1057 
1058 	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1059 			pcr->msi_en ? 0 : IRQF_SHARED,
1060 			DRV_NAME_RTSX_PCI, pcr)) {
1061 		dev_err(&(pcr->pci->dev),
1062 			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1063 			pcr->pci->irq);
1064 		return -1;
1065 	}
1066 
1067 	pcr->irq = pcr->pci->irq;
1068 	pci_intx(pcr->pci, !pcr->msi_en);
1069 
1070 	return 0;
1071 }
1072 
1073 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1074 {
1075 	if (pcr->ops->set_aspm)
1076 		pcr->ops->set_aspm(pcr, true);
1077 	else
1078 		rtsx_comm_set_aspm(pcr, true);
1079 }
1080 
1081 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1082 {
1083 	struct rtsx_cr_option *option = &pcr->option;
1084 
1085 	if (option->ltr_enabled) {
1086 		u32 latency = option->ltr_l1off_latency;
1087 
1088 		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1089 			mdelay(option->l1_snooze_delay);
1090 
1091 		rtsx_set_ltr_latency(pcr, latency);
1092 	}
1093 
1094 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1095 		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1096 
1097 	rtsx_enable_aspm(pcr);
1098 }
1099 
1100 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1101 {
1102 	if (pcr->ops->power_saving)
1103 		pcr->ops->power_saving(pcr);
1104 	else
1105 		rtsx_comm_pm_power_saving(pcr);
1106 }
1107 
1108 static void rtsx_pci_idle_work(struct work_struct *work)
1109 {
1110 	struct delayed_work *dwork = to_delayed_work(work);
1111 	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1112 
1113 	pcr_dbg(pcr, "--> %s\n", __func__);
1114 
1115 	mutex_lock(&pcr->pcr_mutex);
1116 
1117 	pcr->state = PDEV_STAT_IDLE;
1118 
1119 	if (pcr->ops->disable_auto_blink)
1120 		pcr->ops->disable_auto_blink(pcr);
1121 	if (pcr->ops->turn_off_led)
1122 		pcr->ops->turn_off_led(pcr);
1123 
1124 	rtsx_pm_power_saving(pcr);
1125 
1126 	mutex_unlock(&pcr->pcr_mutex);
1127 }
1128 
1129 #ifdef CONFIG_PM
1130 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1131 {
1132 	if (pcr->ops->turn_off_led)
1133 		pcr->ops->turn_off_led(pcr);
1134 
1135 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1136 	pcr->bier = 0;
1137 
1138 	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1139 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1140 
1141 	if (pcr->ops->force_power_down)
1142 		pcr->ops->force_power_down(pcr, pm_state);
1143 }
1144 #endif
1145 
1146 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1147 {
1148 	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1149 
1150 	if (pcr->ops->enable_ocp)
1151 		pcr->ops->enable_ocp(pcr);
1152 	else
1153 		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1154 
1155 }
1156 
1157 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1158 {
1159 	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1160 
1161 	if (pcr->ops->disable_ocp)
1162 		pcr->ops->disable_ocp(pcr);
1163 	else
1164 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1165 }
1166 
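/*
 * Over-current protection (OCP) setup.  When ocp_en is set, the detection
 * time, the 400mA threshold and the glitch filter from the vendor
 * settings are programmed and OCP interrupts are enabled; otherwise the
 * over-current block is powered down via FPDCTL.
 */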
1167 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1168 {
1169 	if (pcr->ops->init_ocp) {
1170 		pcr->ops->init_ocp(pcr);
1171 	} else {
1172 		struct rtsx_cr_option *option = &(pcr->option);
1173 
1174 		if (option->ocp_en) {
1175 			u8 val = option->sd_400mA_ocp_thd;
1176 
1177 			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1178 			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1179 				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1180 			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1181 				SD_OCP_THD_MASK, val);
1182 			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1183 				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1184 			rtsx_pci_enable_ocp(pcr);
1185 		} else {
1186 			/* OC power down */
1187 			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1188 				OC_POWER_DOWN);
1189 		}
1190 	}
1191 }
1192 
1193 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1194 {
1195 	if (pcr->ops->get_ocpstat)
1196 		return pcr->ops->get_ocpstat(pcr, val);
1197 	else
1198 		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1199 }
1200 
1201 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1202 {
1203 	if (pcr->ops->clear_ocpstat) {
1204 		pcr->ops->clear_ocpstat(pcr);
1205 	} else {
1206 		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1207 		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1208 
1209 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1210 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1211 	}
1212 }
1213 
1214 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1215 {
1216 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1217 		MS_CLK_EN | SD40_CLK_EN, 0);
1218 	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1219 
1220 	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1221 
1222 	msleep(50);
1223 
1224 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1225 
1226 	return 0;
1227 }
1228 
1229 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1230 {
1231 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1232 		MS_CLK_EN | SD40_CLK_EN, 0);
1233 
1234 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1235 
1236 	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1237 	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1238 
1239 	return 0;
1240 }
1241 
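/*
 * Bring the controller into a known state: power up the SSC block,
 * disable ASPM, run the optional PHY tuning hook, then program the common
 * register set through the command buffer (clock dividers, link state,
 * card drive strength, SSC setup, interrupt behaviour).  Chip-specific
 * extra_init_hw runs near the end, and the initial card-present state is
 * sampled last because no card-detect interrupt is raised for a card that
 * was already inserted at probe time.
 */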
1242 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1243 {
1244 	int err;
1245 
1246 	pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
1247 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1248 
1249 	rtsx_pci_enable_bus_int(pcr);
1250 
1251 	/* Power on SSC */
1252 	err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1253 	if (err < 0)
1254 		return err;
1255 
1256 	/* Wait for SSC power to stabilize */
1257 	udelay(200);
1258 
1259 	rtsx_pci_disable_aspm(pcr);
1260 	if (pcr->ops->optimize_phy) {
1261 		err = pcr->ops->optimize_phy(pcr);
1262 		if (err < 0)
1263 			return err;
1264 	}
1265 
1266 	rtsx_pci_init_cmd(pcr);
1267 
1268 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1269 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1270 
1271 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1272 	/* Disable card clock */
1273 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1274 	/* Reset delink mode */
1275 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1276 	/* Card driving select */
1277 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1278 			0xFF, pcr->card_drive_sel);
1279 	/* Enable SSC Clock */
1280 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1281 			0xFF, SSC_8X_EN | SSC_SEL_4M);
1282 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1283 	/* Disable cd_pwr_save */
1284 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1285 	/* Clear Link Ready Interrupt */
1286 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1287 			LINK_RDY_INT, LINK_RDY_INT);
1288 	/* Enlarge the estimation window for PERST# glitches
1289 	 * to reduce the chance of spurious card interrupts
1290 	 */
1291 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1292 	/* Update RC oscillator to 400k
1293 	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1294 	 *                1: 2M  0: 400k
1295 	 */
1296 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1297 	/* Set interrupt write clear
1298 	 * bit 1: U_elbi_if_rd_clr_en
1299 	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1300 	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1301 	 */
1302 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1303 
1304 	err = rtsx_pci_send_cmd(pcr, 100);
1305 	if (err < 0)
1306 		return err;
1307 
1308 	switch (PCI_PID(pcr)) {
1309 	case PID_5250:
1310 	case PID_524A:
1311 	case PID_525A:
1312 	case PID_5260:
1313 		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1314 		break;
1315 	default:
1316 		break;
1317 	}
1318 
1319 	/* Enable clk_request_n to enable clock power management */
1320 	rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1321 	/* Enter L1 when host tx idle */
1322 	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
1323 
1324 	if (pcr->ops->extra_init_hw) {
1325 		err = pcr->ops->extra_init_hw(pcr);
1326 		if (err < 0)
1327 			return err;
1328 	}
1329 
1330 	/* No card-detect interrupt is raised when the driver is probed
1331 	 * with a card already inserted, so pcr->card_exist is initialized here.
1332 	 */
1333 	if (pcr->ops->cd_deglitch)
1334 		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1335 	else
1336 		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1337 
1338 	return 0;
1339 }
1340 
1341 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1342 {
1343 	int err;
1344 
1345 	spin_lock_init(&pcr->lock);
1346 	mutex_init(&pcr->pcr_mutex);
1347 
1348 	switch (PCI_PID(pcr)) {
1349 	default:
1350 	case 0x5209:
1351 		rts5209_init_params(pcr);
1352 		break;
1353 
1354 	case 0x5229:
1355 		rts5229_init_params(pcr);
1356 		break;
1357 
1358 	case 0x5289:
1359 		rtl8411_init_params(pcr);
1360 		break;
1361 
1362 	case 0x5227:
1363 		rts5227_init_params(pcr);
1364 		break;
1365 
1366 	case 0x522A:
1367 		rts522a_init_params(pcr);
1368 		break;
1369 
1370 	case 0x5249:
1371 		rts5249_init_params(pcr);
1372 		break;
1373 
1374 	case 0x524A:
1375 		rts524a_init_params(pcr);
1376 		break;
1377 
1378 	case 0x525A:
1379 		rts525a_init_params(pcr);
1380 		break;
1381 
1382 	case 0x5287:
1383 		rtl8411b_init_params(pcr);
1384 		break;
1385 
1386 	case 0x5286:
1387 		rtl8402_init_params(pcr);
1388 		break;
1389 	case 0x5260:
1390 		rts5260_init_params(pcr);
1391 		break;
1392 	}
1393 
1394 	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1395 			PCI_PID(pcr), pcr->ic_version);
1396 
1397 	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1398 			GFP_KERNEL);
1399 	if (!pcr->slots)
1400 		return -ENOMEM;
1401 
1402 	if (pcr->ops->fetch_vendor_settings)
1403 		pcr->ops->fetch_vendor_settings(pcr);
1404 
1405 	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1406 	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1407 			pcr->sd30_drive_sel_1v8);
1408 	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1409 			pcr->sd30_drive_sel_3v3);
1410 	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1411 			pcr->card_drive_sel);
1412 	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1413 
1414 	pcr->state = PDEV_STAT_IDLE;
1415 	err = rtsx_pci_init_hw(pcr);
1416 	if (err < 0) {
1417 		kfree(pcr->slots);
1418 		return err;
1419 	}
1420 
1421 	return 0;
1422 }
1423 
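/*
 * Probe: a 32-bit DMA mask is set before the device is enabled and its
 * BAR mapped (BAR 1 on the RTS525A, BAR 0 otherwise).  One coherent
 * buffer is allocated and split into the host command buffer and the
 * scatter/gather table, MSI is enabled when requested, and the SD/MS
 * sub-devices are registered as MFD cells.
 */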
1424 static int rtsx_pci_probe(struct pci_dev *pcidev,
1425 			  const struct pci_device_id *id)
1426 {
1427 	struct rtsx_pcr *pcr;
1428 	struct pcr_handle *handle;
1429 	u32 base, len;
1430 	int ret, i, bar = 0;
1431 
1432 	dev_dbg(&(pcidev->dev),
1433 		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1434 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1435 		(int)pcidev->revision);
1436 
1437 	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1438 	if (ret < 0)
1439 		return ret;
1440 
1441 	ret = pci_enable_device(pcidev);
1442 	if (ret)
1443 		return ret;
1444 
1445 	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1446 	if (ret)
1447 		goto disable;
1448 
1449 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1450 	if (!pcr) {
1451 		ret = -ENOMEM;
1452 		goto release_pci;
1453 	}
1454 
1455 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1456 	if (!handle) {
1457 		ret = -ENOMEM;
1458 		goto free_pcr;
1459 	}
1460 	handle->pcr = pcr;
1461 
1462 	idr_preload(GFP_KERNEL);
1463 	spin_lock(&rtsx_pci_lock);
1464 	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1465 	if (ret >= 0)
1466 		pcr->id = ret;
1467 	spin_unlock(&rtsx_pci_lock);
1468 	idr_preload_end();
1469 	if (ret < 0)
1470 		goto free_handle;
1471 
1472 	pcr->pci = pcidev;
1473 	dev_set_drvdata(&pcidev->dev, handle);
1474 
1475 	if (CHK_PCI_PID(pcr, 0x525A))
1476 		bar = 1;
1477 	len = pci_resource_len(pcidev, bar);
1478 	base = pci_resource_start(pcidev, bar);
1479 	pcr->remap_addr = ioremap_nocache(base, len);
1480 	if (!pcr->remap_addr) {
1481 		ret = -ENOMEM;
1482 		goto free_idr;
1483 	}
1484 
1485 	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1486 			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1487 			GFP_KERNEL);
1488 	if (pcr->rtsx_resv_buf == NULL) {
1489 		ret = -ENXIO;
1490 		goto unmap;
1491 	}
1492 	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1493 	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1494 	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1495 	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1496 
1497 	pcr->card_inserted = 0;
1498 	pcr->card_removed = 0;
1499 	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1500 	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1501 
1502 	pcr->msi_en = msi_en;
1503 	if (pcr->msi_en) {
1504 		ret = pci_enable_msi(pcidev);
1505 		if (ret)
1506 			pcr->msi_en = false;
1507 	}
1508 
1509 	ret = rtsx_pci_acquire_irq(pcr);
1510 	if (ret < 0)
1511 		goto disable_msi;
1512 
1513 	pci_set_master(pcidev);
1514 	synchronize_irq(pcr->irq);
1515 
1516 	ret = rtsx_pci_init_chip(pcr);
1517 	if (ret < 0)
1518 		goto disable_irq;
1519 
1520 	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1521 		rtsx_pcr_cells[i].platform_data = handle;
1522 		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1523 	}
1524 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1525 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1526 	if (ret < 0)
1527 		goto free_slots;
1528 
1529 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1530 
1531 	return 0;
1532 
1533 free_slots:
1534 	kfree(pcr->slots);
1535 disable_irq:
1536 	free_irq(pcr->irq, (void *)pcr);
1537 disable_msi:
1538 	if (pcr->msi_en)
1539 		pci_disable_msi(pcr->pci);
1540 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1541 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1542 unmap:
1543 	iounmap(pcr->remap_addr);
1544 free_idr:
1545 	spin_lock(&rtsx_pci_lock);
1546 	idr_remove(&rtsx_pci_idr, pcr->id);
1547 	spin_unlock(&rtsx_pci_lock);
1548 free_handle:
1549 	kfree(handle);
1550 free_pcr:
1551 	kfree(pcr);
1552 release_pci:
1553 	pci_release_regions(pcidev);
1554 disable:
1555 	pci_disable_device(pcidev);
1556 
1557 	return ret;
1558 }
1559 
1560 static void rtsx_pci_remove(struct pci_dev *pcidev)
1561 {
1562 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1563 	struct rtsx_pcr *pcr = handle->pcr;
1564 
1565 	pcr->remove_pci = true;
1566 
1567 	/* Disable interrupts at the pcr level */
1568 	spin_lock_irq(&pcr->lock);
1569 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1570 	pcr->bier = 0;
1571 	spin_unlock_irq(&pcr->lock);
1572 
1573 	cancel_delayed_work_sync(&pcr->carddet_work);
1574 	cancel_delayed_work_sync(&pcr->idle_work);
1575 
1576 	mfd_remove_devices(&pcidev->dev);
1577 
1578 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1579 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1580 	free_irq(pcr->irq, (void *)pcr);
1581 	if (pcr->msi_en)
1582 		pci_disable_msi(pcr->pci);
1583 	iounmap(pcr->remap_addr);
1584 
1585 	pci_release_regions(pcidev);
1586 	pci_disable_device(pcidev);
1587 
1588 	spin_lock(&rtsx_pci_lock);
1589 	idr_remove(&rtsx_pci_idr, pcr->id);
1590 	spin_unlock(&rtsx_pci_lock);
1591 
1592 	kfree(pcr->slots);
1593 	kfree(pcr);
1594 	kfree(handle);
1595 
1596 	dev_dbg(&(pcidev->dev),
1597 		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1598 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1599 }
1600 
1601 #ifdef CONFIG_PM
1602 
1603 static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
1604 {
1605 	struct pcr_handle *handle;
1606 	struct rtsx_pcr *pcr;
1607 
1608 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1609 
1610 	handle = pci_get_drvdata(pcidev);
1611 	pcr = handle->pcr;
1612 
1613 	cancel_delayed_work(&pcr->carddet_work);
1614 	cancel_delayed_work(&pcr->idle_work);
1615 
1616 	mutex_lock(&pcr->pcr_mutex);
1617 
1618 	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1619 
1620 	pci_save_state(pcidev);
1621 	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1622 	pci_disable_device(pcidev);
1623 	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1624 
1625 	mutex_unlock(&pcr->pcr_mutex);
1626 	return 0;
1627 }
1628 
1629 static int rtsx_pci_resume(struct pci_dev *pcidev)
1630 {
1631 	struct pcr_handle *handle;
1632 	struct rtsx_pcr *pcr;
1633 	int ret = 0;
1634 
1635 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1636 
1637 	handle = pci_get_drvdata(pcidev);
1638 	pcr = handle->pcr;
1639 
1640 	mutex_lock(&pcr->pcr_mutex);
1641 
1642 	pci_set_power_state(pcidev, PCI_D0);
1643 	pci_restore_state(pcidev);
1644 	ret = pci_enable_device(pcidev);
1645 	if (ret)
1646 		goto out;
1647 	pci_set_master(pcidev);
1648 
1649 	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1650 	if (ret)
1651 		goto out;
1652 
1653 	ret = rtsx_pci_init_hw(pcr);
1654 	if (ret)
1655 		goto out;
1656 
1657 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1658 
1659 out:
1660 	mutex_unlock(&pcr->pcr_mutex);
1661 	return ret;
1662 }
1663 
1664 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1665 {
1666 	struct pcr_handle *handle;
1667 	struct rtsx_pcr *pcr;
1668 
1669 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1670 
1671 	handle = pci_get_drvdata(pcidev);
1672 	pcr = handle->pcr;
1673 	rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1674 
1675 	pci_disable_device(pcidev);
1676 	free_irq(pcr->irq, (void *)pcr);
1677 	if (pcr->msi_en)
1678 		pci_disable_msi(pcr->pci);
1679 }
1680 
1681 #else /* CONFIG_PM */
1682 
1683 #define rtsx_pci_suspend NULL
1684 #define rtsx_pci_resume NULL
1685 #define rtsx_pci_shutdown NULL
1686 
1687 #endif /* CONFIG_PM */
1688 
1689 static struct pci_driver rtsx_pci_driver = {
1690 	.name = DRV_NAME_RTSX_PCI,
1691 	.id_table = rtsx_pci_ids,
1692 	.probe = rtsx_pci_probe,
1693 	.remove = rtsx_pci_remove,
1694 	.suspend = rtsx_pci_suspend,
1695 	.resume = rtsx_pci_resume,
1696 	.shutdown = rtsx_pci_shutdown,
1697 };
1698 module_pci_driver(rtsx_pci_driver);
1699 
1700 MODULE_LICENSE("GPL");
1701 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1702 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1703