1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Realtek PCI-Express card reader
3  *
4  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5  *
6  * Author:
7  *   Wei WANG <wei_wang@realsil.com.cn>
8  */
9 
10 #include <linux/pci.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <linux/delay.h>
17 #include <linux/idr.h>
18 #include <linux/platform_device.h>
19 #include <linux/mfd/core.h>
20 #include <linux/rtsx_pci.h>
21 #include <linux/mmc/card.h>
22 #include <asm/unaligned.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 
26 #include "rtsx_pcr.h"
27 #include "rts5261.h"
28 #include "rts5228.h"
29 
30 static bool msi_en = true;
31 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
32 MODULE_PARM_DESC(msi_en, "Enable MSI");
33 
34 static DEFINE_IDR(rtsx_pci_idr);
35 static DEFINE_SPINLOCK(rtsx_pci_lock);
36 
37 static struct mfd_cell rtsx_pcr_cells[] = {
38 	[RTSX_SD_CARD] = {
39 		.name = DRV_NAME_RTSX_PCI_SDMMC,
40 	},
41 };
42 
43 static const struct pci_device_id rtsx_pci_ids[] = {
44 	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 	{ 0, }
58 };
59 
60 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
61 
62 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
63 {
64 	rtsx_pci_write_register(pcr, MSGTXDATA0,
65 				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
66 	rtsx_pci_write_register(pcr, MSGTXDATA1,
67 				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
68 	rtsx_pci_write_register(pcr, MSGTXDATA2,
69 				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
70 	rtsx_pci_write_register(pcr, MSGTXDATA3,
71 				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
72 	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
73 		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
74 
75 	return 0;
76 }
77 
78 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
79 {
80 	return rtsx_comm_set_ltr_latency(pcr, latency);
81 }
82 
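/*
 * Force ASPM on or off through ASPM_FORCE_CTL.  When ASPM L1 is in use
 * (aspm_en bit 1), disabling is followed by a 10 ms delay so the link can
 * settle before further register access.
 */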
83 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
84 {
85 	if (pcr->aspm_enabled == enable)
86 		return;
87 
88 	if (pcr->aspm_en & 0x02)
89 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
90 			FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
91 	else
92 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
93 			FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
94 
95 	if (!enable && (pcr->aspm_en & 0x02))
96 		mdelay(10);
97 
98 	pcr->aspm_enabled = enable;
99 }
100 
101 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
102 {
103 	if (pcr->ops->set_aspm)
104 		pcr->ops->set_aspm(pcr, false);
105 	else
106 		rtsx_comm_set_aspm(pcr, false);
107 }
108 
109 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
110 {
111 	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
112 
113 	return 0;
114 }
115 
116 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
117 {
118 	if (pcr->ops->set_l1off_cfg_sub_d0)
119 		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
120 }
121 
122 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
123 {
124 	struct rtsx_cr_option *option = &pcr->option;
125 
126 	rtsx_disable_aspm(pcr);
127 
128 	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
129 	msleep(1);
130 
131 	if (option->ltr_enabled)
132 		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
133 
134 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
135 		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
136 }
137 
138 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
139 {
140 	rtsx_comm_pm_full_on(pcr);
141 }
142 
143 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
144 {
145 	/* If the PCI device has been removed, don't queue idle work any more */
146 	if (pcr->remove_pci)
147 		return;
148 
149 	if (pcr->rtd3_en)
150 		if (pcr->is_runtime_suspended) {
151 			pm_runtime_get(&(pcr->pci->dev));
152 			pcr->is_runtime_suspended = false;
153 		}
154 
155 	if (pcr->state != PDEV_STAT_RUN) {
156 		pcr->state = PDEV_STAT_RUN;
157 		if (pcr->ops->enable_auto_blink)
158 			pcr->ops->enable_auto_blink(pcr);
159 		rtsx_pm_full_on(pcr);
160 	}
161 
162 	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
163 }
164 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
165 
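/*
 * Write one internal register through the HAIMR window: pack the start
 * bit, 14-bit address, mask and data into a 32-bit word, then poll until
 * HAIMR_TRANS_END clears.  Returns -EIO if the read-back data does not
 * match, -ETIMEDOUT if the poll limit is exhausted.
 */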
166 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
167 {
168 	int i;
169 	u32 val = HAIMR_WRITE_START;
170 
171 	val |= (u32)(addr & 0x3FFF) << 16;
172 	val |= (u32)mask << 8;
173 	val |= (u32)data;
174 
175 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
176 
177 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
178 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
179 		if ((val & HAIMR_TRANS_END) == 0) {
180 			if (data != (u8)val)
181 				return -EIO;
182 			return 0;
183 		}
184 	}
185 
186 	return -ETIMEDOUT;
187 }
188 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
189 
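/*
 * Read one internal register through the HAIMR window; the low byte of
 * the final HAIMR value holds the register contents.
 */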
190 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
191 {
192 	u32 val = HAIMR_READ_START;
193 	int i;
194 
195 	val |= (u32)(addr & 0x3FFF) << 16;
196 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
197 
198 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
199 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
200 		if ((val & HAIMR_TRANS_END) == 0)
201 			break;
202 	}
203 
204 	if (i >= MAX_RW_REG_CNT)
205 		return -ETIMEDOUT;
206 
207 	if (data)
208 		*data = (u8)(val & 0xFF);
209 
210 	return 0;
211 }
212 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
213 
214 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
215 {
216 	int err, i, finished = 0;
217 	u8 tmp;
218 
219 	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
220 	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
221 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
222 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
223 
224 	for (i = 0; i < 100000; i++) {
225 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
226 		if (err < 0)
227 			return err;
228 
229 		if (!(tmp & 0x80)) {
230 			finished = 1;
231 			break;
232 		}
233 	}
234 
235 	if (!finished)
236 		return -ETIMEDOUT;
237 
238 	return 0;
239 }
240 
241 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
242 {
243 	if (pcr->ops->write_phy)
244 		return pcr->ops->write_phy(pcr, addr, val);
245 
246 	return __rtsx_pci_write_phy_register(pcr, addr, val);
247 }
248 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
249 
250 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
251 {
252 	int err, i, finished = 0;
253 	u16 data;
254 	u8 tmp, val1, val2;
255 
256 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
257 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
258 
259 	for (i = 0; i < 100000; i++) {
260 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
261 		if (err < 0)
262 			return err;
263 
264 		if (!(tmp & 0x80)) {
265 			finished = 1;
266 			break;
267 		}
268 	}
269 
270 	if (!finished)
271 		return -ETIMEDOUT;
272 
273 	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
274 	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
275 	data = val1 | (val2 << 8);
276 
277 	if (val)
278 		*val = data;
279 
280 	return 0;
281 }
282 
283 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
284 {
285 	if (pcr->ops->read_phy)
286 		return pcr->ops->read_phy(pcr, addr, val);
287 
288 	return __rtsx_pci_read_phy_register(pcr, addr, val);
289 }
290 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
291 
292 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
293 {
294 	if (pcr->ops->stop_cmd)
295 		return pcr->ops->stop_cmd(pcr);
296 
297 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
298 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
299 
300 	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
301 	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
302 }
303 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
304 
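/*
 * Queue one host command: cmd_type in bits 31:30, register address in
 * bits 29:16, mask and data in the low 16 bits.  The entry is silently
 * dropped once the command buffer (HOST_CMDS_BUF_LEN / 4 entries) is full.
 */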
305 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
306 		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
307 {
308 	unsigned long flags;
309 	u32 val = 0;
310 	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
311 
312 	val |= (u32)(cmd_type & 0x03) << 30;
313 	val |= (u32)(reg_addr & 0x3FFF) << 16;
314 	val |= (u32)mask << 8;
315 	val |= (u32)data;
316 
317 	spin_lock_irqsave(&pcr->lock, flags);
318 	ptr += pcr->ci;
319 	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
320 		put_unaligned_le32(val, ptr);
321 		ptr++;
322 		pcr->ci++;
323 	}
324 	spin_unlock_irqrestore(&pcr->lock, flags);
325 }
326 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
327 
328 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
329 {
330 	u32 val = 1 << 31;
331 
332 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
333 
334 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
335 	/* Hardware Auto Response */
336 	val |= 0x40000000;
337 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
338 }
339 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
340 
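/*
 * Kick off the queued host commands (HCBAR = buffer address, HCBCTLR =
 * trigger, length and auto-response) and sleep on a completion until the
 * interrupt handler reports the result or the timeout expires.
 */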
341 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
342 {
343 	struct completion trans_done;
344 	u32 val = 1 << 31;
345 	long timeleft;
346 	unsigned long flags;
347 	int err = 0;
348 
349 	spin_lock_irqsave(&pcr->lock, flags);
350 
351 	/* set up data structures for the wakeup system */
352 	pcr->done = &trans_done;
353 	pcr->trans_result = TRANS_NOT_READY;
354 	init_completion(&trans_done);
355 
356 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
357 
358 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
359 	/* Hardware Auto Response */
360 	val |= 0x40000000;
361 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
362 
363 	spin_unlock_irqrestore(&pcr->lock, flags);
364 
365 	/* Wait for TRANS_OK_INT */
366 	timeleft = wait_for_completion_interruptible_timeout(
367 			&trans_done, msecs_to_jiffies(timeout));
368 	if (timeleft <= 0) {
369 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
370 		err = -ETIMEDOUT;
371 		goto finish_send_cmd;
372 	}
373 
374 	spin_lock_irqsave(&pcr->lock, flags);
375 	if (pcr->trans_result == TRANS_RESULT_FAIL)
376 		err = -EINVAL;
377 	else if (pcr->trans_result == TRANS_RESULT_OK)
378 		err = 0;
379 	else if (pcr->trans_result == TRANS_NO_DEVICE)
380 		err = -ENODEV;
381 	spin_unlock_irqrestore(&pcr->lock, flags);
382 
383 finish_send_cmd:
384 	spin_lock_irqsave(&pcr->lock, flags);
385 	pcr->done = NULL;
386 	spin_unlock_irqrestore(&pcr->lock, flags);
387 
388 	if ((err < 0) && (err != -ENODEV))
389 		rtsx_pci_stop_cmd(pcr);
390 
391 	if (pcr->finish_me)
392 		complete(pcr->finish_me);
393 
394 	return err;
395 }
396 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
397 
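/*
 * Append one 64-bit scatter-gather descriptor: DMA address in the upper
 * 32 bits, length and option flags below.  RTS5261/RTS5228 split lengths
 * larger than 64 KB across two fields.
 */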
398 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
399 		dma_addr_t addr, unsigned int len, int end)
400 {
401 	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
402 	u64 val;
403 	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
404 
405 	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
406 
407 	if (end)
408 		option |= RTSX_SG_END;
409 
410 	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
411 		if (len > 0xFFFF)
412 			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
413 				| (((u64)len >> 16) << 6) | option;
414 		else
415 			val = ((u64)addr << 32) | ((u64)len << 16) | option;
416 	} else {
417 		val = ((u64)addr << 32) | ((u64)len << 12) | option;
418 	}
419 	put_unaligned_le64(val, ptr);
420 	pcr->sgi++;
421 }
422 
423 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
424 		int num_sg, bool read, int timeout)
425 {
426 	int err = 0, count;
427 
428 	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
429 	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
430 	if (count < 1)
431 		return -EINVAL;
432 	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
433 
434 	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
435 
436 	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
437 
438 	return err;
439 }
440 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
441 
442 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
443 		int num_sg, bool read)
444 {
445 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
446 
447 	if (pcr->remove_pci)
448 		return -EINVAL;
449 
450 	if ((sglist == NULL) || (num_sg <= 0))
451 		return -EINVAL;
452 
453 	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
454 }
455 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
456 
457 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
458 		int num_sg, bool read)
459 {
460 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
461 
462 	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
463 }
464 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
465 
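/*
 * Run an ADMA transfer over an already-mapped scatterlist: build the SG
 * table, program HDBAR/HDBCTLR and wait for the transfer interrupt.  A
 * failed transfer bumps dma_error_count, which rtsx_pci_switch_clock uses
 * to step the card clock down on affected chips.
 */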
466 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
467 		int count, bool read, int timeout)
468 {
469 	struct completion trans_done;
470 	struct scatterlist *sg;
471 	dma_addr_t addr;
472 	long timeleft;
473 	unsigned long flags;
474 	unsigned int len;
475 	int i, err = 0;
476 	u32 val;
477 	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
478 
479 	if (pcr->remove_pci)
480 		return -ENODEV;
481 
482 	if ((sglist == NULL) || (count < 1))
483 		return -EINVAL;
484 
485 	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
486 	pcr->sgi = 0;
487 	for_each_sg(sglist, sg, count, i) {
488 		addr = sg_dma_address(sg);
489 		len = sg_dma_len(sg);
490 		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
491 	}
492 
493 	spin_lock_irqsave(&pcr->lock, flags);
494 
495 	pcr->done = &trans_done;
496 	pcr->trans_result = TRANS_NOT_READY;
497 	init_completion(&trans_done);
498 	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
499 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
500 
501 	spin_unlock_irqrestore(&pcr->lock, flags);
502 
503 	timeleft = wait_for_completion_interruptible_timeout(
504 			&trans_done, msecs_to_jiffies(timeout));
505 	if (timeleft <= 0) {
506 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
507 		err = -ETIMEDOUT;
508 		goto out;
509 	}
510 
511 	spin_lock_irqsave(&pcr->lock, flags);
512 	if (pcr->trans_result == TRANS_RESULT_FAIL) {
513 		err = -EILSEQ;
514 		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
515 			pcr->dma_error_count++;
516 	}
517 
518 	else if (pcr->trans_result == TRANS_NO_DEVICE)
519 		err = -ENODEV;
520 	spin_unlock_irqrestore(&pcr->lock, flags);
521 
522 out:
523 	spin_lock_irqsave(&pcr->lock, flags);
524 	pcr->done = NULL;
525 	spin_unlock_irqrestore(&pcr->lock, flags);
526 
527 	if ((err < 0) && (err != -ENODEV))
528 		rtsx_pci_stop_cmd(pcr);
529 
530 	if (pcr->finish_me)
531 		complete(pcr->finish_me);
532 
533 	return err;
534 }
535 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
536 
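/*
 * Read up to 512 bytes from the chip's ping-pong buffer (PPBUF) using
 * bursts of up to 256 register-read commands.
 */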
537 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
538 {
539 	int err;
540 	int i, j;
541 	u16 reg;
542 	u8 *ptr;
543 
544 	if (buf_len > 512)
545 		buf_len = 512;
546 
547 	ptr = buf;
548 	reg = PPBUF_BASE2;
549 	for (i = 0; i < buf_len / 256; i++) {
550 		rtsx_pci_init_cmd(pcr);
551 
552 		for (j = 0; j < 256; j++)
553 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
554 
555 		err = rtsx_pci_send_cmd(pcr, 250);
556 		if (err < 0)
557 			return err;
558 
559 		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
560 		ptr += 256;
561 	}
562 
563 	if (buf_len % 256) {
564 		rtsx_pci_init_cmd(pcr);
565 
566 		for (j = 0; j < buf_len % 256; j++)
567 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
568 
569 		err = rtsx_pci_send_cmd(pcr, 250);
570 		if (err < 0)
571 			return err;
572 	}
573 
574 	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
575 
576 	return 0;
577 }
578 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
579 
580 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
581 {
582 	int err;
583 	int i, j;
584 	u16 reg;
585 	u8 *ptr;
586 
587 	if (buf_len > 512)
588 		buf_len = 512;
589 
590 	ptr = buf;
591 	reg = PPBUF_BASE2;
592 	for (i = 0; i < buf_len / 256; i++) {
593 		rtsx_pci_init_cmd(pcr);
594 
595 		for (j = 0; j < 256; j++) {
596 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
597 					reg++, 0xFF, *ptr);
598 			ptr++;
599 		}
600 
601 		err = rtsx_pci_send_cmd(pcr, 250);
602 		if (err < 0)
603 			return err;
604 	}
605 
606 	if (buf_len % 256) {
607 		rtsx_pci_init_cmd(pcr);
608 
609 		for (j = 0; j < buf_len % 256; j++) {
610 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
611 					reg++, 0xFF, *ptr);
612 			ptr++;
613 		}
614 
615 		err = rtsx_pci_send_cmd(pcr, 250);
616 		if (err < 0)
617 			return err;
618 	}
619 
620 	return 0;
621 }
622 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
623 
624 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
625 {
626 	rtsx_pci_init_cmd(pcr);
627 
628 	while (*tbl & 0xFFFF0000) {
629 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
630 				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
631 		tbl++;
632 	}
633 
634 	return rtsx_pci_send_cmd(pcr, 100);
635 }
636 
637 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
638 {
639 	const u32 *tbl;
640 
641 	if (card == RTSX_SD_CARD)
642 		tbl = pcr->sd_pull_ctl_enable_tbl;
643 	else if (card == RTSX_MS_CARD)
644 		tbl = pcr->ms_pull_ctl_enable_tbl;
645 	else
646 		return -EINVAL;
647 
648 	return rtsx_pci_set_pull_ctl(pcr, tbl);
649 }
650 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
651 
652 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
653 {
654 	const u32 *tbl;
655 
656 	if (card == RTSX_SD_CARD)
657 		tbl = pcr->sd_pull_ctl_disable_tbl;
658 	else if (card == RTSX_MS_CARD)
659 		tbl = pcr->ms_pull_ctl_disable_tbl;
660 	else
661 		return -EINVAL;
662 
663 	return rtsx_pci_set_pull_ctl(pcr, tbl);
664 }
665 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
666 
667 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
668 {
669 	struct rtsx_hw_param *hw_param = &pcr->hw_param;
670 
671 	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
672 		| hw_param->interrupt_en;
673 
674 	if (pcr->num_slots > 1)
675 		pcr->bier |= MS_INT_EN;
676 
677 	/* Enable Bus Interrupt */
678 	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
679 
680 	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
681 }
682 
683 static inline u8 double_ssc_depth(u8 depth)
684 {
685 	return ((depth > 1) ? (depth - 1) : depth);
686 }
687 
688 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
689 {
690 	if (div > CLK_DIV_1) {
691 		if (ssc_depth > (div - 1))
692 			ssc_depth -= (div - 1);
693 		else
694 			ssc_depth = SSC_DEPTH_4M;
695 	}
696 
697 	return ssc_depth;
698 }
699 
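/*
 * Program the internal SSC PLL (div_n, divider stage, SSC depth) and the
 * SD clock divider to reach the requested card clock.  RTS5261 and
 * RTS5228 are handled by chip-specific helpers.
 */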
700 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
701 		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
702 {
703 	int err, clk;
704 	u8 n, clk_divider, mcu_cnt, div;
705 	static const u8 depth[] = {
706 		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
707 		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
708 		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
709 		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
710 		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
711 	};
712 
713 	if (PCI_PID(pcr) == PID_5261)
714 		return rts5261_pci_switch_clock(pcr, card_clock,
715 				ssc_depth, initial_mode, double_clk, vpclk);
716 	if (PCI_PID(pcr) == PID_5228)
717 		return rts5228_pci_switch_clock(pcr, card_clock,
718 				ssc_depth, initial_mode, double_clk, vpclk);
719 
720 	if (initial_mode) {
721 		/* Use roughly 250 kHz in the initial stage */
722 		clk_divider = SD_CLK_DIVIDE_128;
723 		card_clock = 30000000;
724 	} else {
725 		clk_divider = SD_CLK_DIVIDE_0;
726 	}
727 	err = rtsx_pci_write_register(pcr, SD_CFG1,
728 			SD_CLK_DIVIDE_MASK, clk_divider);
729 	if (err < 0)
730 		return err;
731 
732 	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
733 	if (card_clock == UHS_SDR104_MAX_DTR &&
734 	    pcr->dma_error_count &&
735 	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
736 		card_clock = UHS_SDR104_MAX_DTR -
737 			(pcr->dma_error_count * 20000000);
738 
739 	card_clock /= 1000000;
740 	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
741 
742 	clk = card_clock;
743 	if (!initial_mode && double_clk)
744 		clk = card_clock * 2;
745 	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
746 		clk, pcr->cur_clock);
747 
748 	if (clk == pcr->cur_clock)
749 		return 0;
750 
751 	if (pcr->ops->conv_clk_and_div_n)
752 		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
753 	else
754 		n = (u8)(clk - 2);
755 	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
756 		return -EINVAL;
757 
758 	mcu_cnt = (u8)(125/clk + 3);
759 	if (mcu_cnt > 15)
760 		mcu_cnt = 15;
761 
762 	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
763 	div = CLK_DIV_1;
764 	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
765 		if (pcr->ops->conv_clk_and_div_n) {
766 			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
767 					DIV_N_TO_CLK) * 2;
768 			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
769 					CLK_TO_DIV_N);
770 		} else {
771 			n = (n + 2) * 2 - 2;
772 		}
773 		div++;
774 	}
775 	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
776 
777 	ssc_depth = depth[ssc_depth];
778 	if (double_clk)
779 		ssc_depth = double_ssc_depth(ssc_depth);
780 
781 	ssc_depth = revise_ssc_depth(ssc_depth, div);
782 	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
783 
784 	rtsx_pci_init_cmd(pcr);
785 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
786 			CLK_LOW_FREQ, CLK_LOW_FREQ);
787 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
788 			0xFF, (div << 4) | mcu_cnt);
789 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
790 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
791 			SSC_DEPTH_MASK, ssc_depth);
792 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
793 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
794 	if (vpclk) {
795 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
796 				PHASE_NOT_RESET, 0);
797 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
798 				PHASE_NOT_RESET, PHASE_NOT_RESET);
799 	}
800 
801 	err = rtsx_pci_send_cmd(pcr, 2000);
802 	if (err < 0)
803 		return err;
804 
805 	/* Wait for the SSC clock to stabilize */
806 	udelay(SSC_CLOCK_STABLE_WAIT);
807 	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
808 	if (err < 0)
809 		return err;
810 
811 	pcr->cur_clock = clk;
812 	return 0;
813 }
814 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
815 
816 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
817 {
818 	if (pcr->ops->card_power_on)
819 		return pcr->ops->card_power_on(pcr, card);
820 
821 	return 0;
822 }
823 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
824 
825 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
826 {
827 	if (pcr->ops->card_power_off)
828 		return pcr->ops->card_power_off(pcr, card);
829 
830 	return 0;
831 }
832 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
833 
834 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
835 {
836 	static const unsigned int cd_mask[] = {
837 		[RTSX_SD_CARD] = SD_EXIST,
838 		[RTSX_MS_CARD] = MS_EXIST
839 	};
840 
841 	if (!(pcr->flags & PCR_MS_PMOS)) {
842 		/* When using single PMOS, accessing card is not permitted
843 		 * if the existing card is not the designated one.
844 		 */
845 		if (pcr->card_exist & (~cd_mask[card]))
846 			return -EIO;
847 	}
848 
849 	return 0;
850 }
851 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
852 
853 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
854 {
855 	if (pcr->ops->switch_output_voltage)
856 		return pcr->ops->switch_output_voltage(pcr, voltage);
857 
858 	return 0;
859 }
860 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
861 
862 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
863 {
864 	unsigned int val;
865 
866 	val = rtsx_pci_readl(pcr, RTSX_BIPR);
867 	if (pcr->ops->cd_deglitch)
868 		val = pcr->ops->cd_deglitch(pcr);
869 
870 	return val;
871 }
872 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
873 
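/*
 * Abort an in-flight command or DMA transfer: wake the waiter, stop the
 * host command/DMA engines, then wait briefly on finish_me for the
 * transfer path to wind down.
 */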
874 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
875 {
876 	struct completion finish;
877 
878 	pcr->finish_me = &finish;
879 	init_completion(&finish);
880 
881 	if (pcr->done)
882 		complete(pcr->done);
883 
884 	if (!pcr->remove_pci)
885 		rtsx_pci_stop_cmd(pcr);
886 
887 	wait_for_completion_interruptible_timeout(&finish,
888 			msecs_to_jiffies(2));
889 	pcr->finish_me = NULL;
890 }
891 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
892 
893 static void rtsx_pci_card_detect(struct work_struct *work)
894 {
895 	struct delayed_work *dwork;
896 	struct rtsx_pcr *pcr;
897 	unsigned long flags;
898 	unsigned int card_detect = 0, card_inserted, card_removed;
899 	u32 irq_status;
900 
901 	dwork = to_delayed_work(work);
902 	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
903 
904 	pcr_dbg(pcr, "--> %s\n", __func__);
905 
906 	mutex_lock(&pcr->pcr_mutex);
907 	spin_lock_irqsave(&pcr->lock, flags);
908 
909 	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
910 	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
911 
912 	irq_status &= CARD_EXIST;
913 	card_inserted = pcr->card_inserted & irq_status;
914 	card_removed = pcr->card_removed;
915 	pcr->card_inserted = 0;
916 	pcr->card_removed = 0;
917 
918 	spin_unlock_irqrestore(&pcr->lock, flags);
919 
920 	if (card_inserted || card_removed) {
921 		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
922 			card_inserted, card_removed);
923 
924 		if (pcr->ops->cd_deglitch)
925 			card_inserted = pcr->ops->cd_deglitch(pcr);
926 
927 		card_detect = card_inserted | card_removed;
928 
929 		pcr->card_exist |= card_inserted;
930 		pcr->card_exist &= ~card_removed;
931 	}
932 
933 	mutex_unlock(&pcr->pcr_mutex);
934 
935 	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
936 		pcr->slots[RTSX_SD_CARD].card_event(
937 				pcr->slots[RTSX_SD_CARD].p_dev);
938 	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
939 		pcr->slots[RTSX_MS_CARD].card_event(
940 				pcr->slots[RTSX_MS_CARD].p_dev);
941 }
942 
943 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
944 {
945 	if (pcr->ops->process_ocp) {
946 		pcr->ops->process_ocp(pcr);
947 	} else {
948 		if (!pcr->option.ocp_en)
949 			return;
950 		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
951 		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
952 			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
953 			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
954 			rtsx_pci_clear_ocpstat(pcr);
955 			pcr->ocp_stat = 0;
956 		}
957 	}
958 }
959 
960 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
961 {
962 	if (pcr->option.ocp_en)
963 		rtsx_pci_process_ocp(pcr);
964 
965 	return 0;
966 }
967 
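/*
 * Interrupt handler: read and clear RTSX_BIPR, latch card insert/remove
 * events, complete any pending command or DMA transfer, and schedule the
 * card-detect worker when a card event was seen.
 */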
968 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
969 {
970 	struct rtsx_pcr *pcr = dev_id;
971 	u32 int_reg;
972 
973 	if (!pcr)
974 		return IRQ_NONE;
975 
976 	spin_lock(&pcr->lock);
977 
978 	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
979 	/* Clear interrupt flag */
980 	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
981 	if ((int_reg & pcr->bier) == 0) {
982 		spin_unlock(&pcr->lock);
983 		return IRQ_NONE;
984 	}
985 	if (int_reg == 0xFFFFFFFF) {
986 		spin_unlock(&pcr->lock);
987 		return IRQ_HANDLED;
988 	}
989 
990 	int_reg &= (pcr->bier | 0x7FFFFF);
991 
992 	if (int_reg & SD_OC_INT)
993 		rtsx_pci_process_ocp_interrupt(pcr);
994 
995 	if (int_reg & SD_INT) {
996 		if (int_reg & SD_EXIST) {
997 			pcr->card_inserted |= SD_EXIST;
998 		} else {
999 			pcr->card_removed |= SD_EXIST;
1000 			pcr->card_inserted &= ~SD_EXIST;
1001 			if (PCI_PID(pcr) == PID_5261) {
1002 				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1003 					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1004 				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1005 			}
1006 		}
1007 		pcr->dma_error_count = 0;
1008 	}
1009 
1010 	if (int_reg & MS_INT) {
1011 		if (int_reg & MS_EXIST) {
1012 			pcr->card_inserted |= MS_EXIST;
1013 		} else {
1014 			pcr->card_removed |= MS_EXIST;
1015 			pcr->card_inserted &= ~MS_EXIST;
1016 		}
1017 	}
1018 
1019 	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1020 		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1021 			pcr->trans_result = TRANS_RESULT_FAIL;
1022 			if (pcr->done)
1023 				complete(pcr->done);
1024 		} else if (int_reg & TRANS_OK_INT) {
1025 			pcr->trans_result = TRANS_RESULT_OK;
1026 			if (pcr->done)
1027 				complete(pcr->done);
1028 		}
1029 	}
1030 
1031 	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1032 		schedule_delayed_work(&pcr->carddet_work,
1033 				msecs_to_jiffies(200));
1034 
1035 	spin_unlock(&pcr->lock);
1036 	return IRQ_HANDLED;
1037 }
1038 
1039 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1040 {
1041 	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1042 			__func__, pcr->msi_en, pcr->pci->irq);
1043 
1044 	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1045 			pcr->msi_en ? 0 : IRQF_SHARED,
1046 			DRV_NAME_RTSX_PCI, pcr)) {
1047 		dev_err(&(pcr->pci->dev),
1048 			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1049 			pcr->pci->irq);
1050 		return -1;
1051 	}
1052 
1053 	pcr->irq = pcr->pci->irq;
1054 	pci_intx(pcr->pci, !pcr->msi_en);
1055 
1056 	return 0;
1057 }
1058 
1059 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1060 {
1061 	if (pcr->ops->set_aspm)
1062 		pcr->ops->set_aspm(pcr, true);
1063 	else
1064 		rtsx_comm_set_aspm(pcr, true);
1065 }
1066 
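/*
 * Power-saving path used from the idle worker: apply the L1-off LTR
 * latency, switch the L1 sub-state config to its non-active setting and
 * re-enable ASPM.
 */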
1067 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1068 {
1069 	struct rtsx_cr_option *option = &pcr->option;
1070 
1071 	if (option->ltr_enabled) {
1072 		u32 latency = option->ltr_l1off_latency;
1073 
1074 		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1075 			mdelay(option->l1_snooze_delay);
1076 
1077 		rtsx_set_ltr_latency(pcr, latency);
1078 	}
1079 
1080 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1081 		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1082 
1083 	rtsx_enable_aspm(pcr);
1084 }
1085 
1086 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1087 {
1088 	rtsx_comm_pm_power_saving(pcr);
1089 }
1090 
1091 static void rtsx_pci_rtd3_work(struct work_struct *work)
1092 {
1093 	struct delayed_work *dwork = to_delayed_work(work);
1094 	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);
1095 
1096 	pcr_dbg(pcr, "--> %s\n", __func__);
1097 	if (!pcr->is_runtime_suspended)
1098 		pm_runtime_put(&(pcr->pci->dev));
1099 }
1100 
1101 static void rtsx_pci_idle_work(struct work_struct *work)
1102 {
1103 	struct delayed_work *dwork = to_delayed_work(work);
1104 	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1105 
1106 	pcr_dbg(pcr, "--> %s\n", __func__);
1107 
1108 	mutex_lock(&pcr->pcr_mutex);
1109 
1110 	pcr->state = PDEV_STAT_IDLE;
1111 
1112 	if (pcr->ops->disable_auto_blink)
1113 		pcr->ops->disable_auto_blink(pcr);
1114 	if (pcr->ops->turn_off_led)
1115 		pcr->ops->turn_off_led(pcr);
1116 
1117 	rtsx_pm_power_saving(pcr);
1118 
1119 	mutex_unlock(&pcr->pcr_mutex);
1120 
1121 	if (pcr->rtd3_en)
1122 		mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
1123 }
1124 
1125 static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
1126 {
1127 	/* Set relink_time to 0 */
1128 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1129 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1130 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1131 			RELINK_TIME_MASK, 0);
1132 
1133 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1134 			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1135 
1136 	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1137 }
1138 
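/*
 * Common power-off path for suspend, shutdown and runtime suspend: mask
 * all interrupt enables, set the host sleep state and force the chip
 * power-down (through the chip-specific hook when one is provided).
 */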
1139 static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1140 {
1141 	if (pcr->ops->turn_off_led)
1142 		pcr->ops->turn_off_led(pcr);
1143 
1144 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1145 	pcr->bier = 0;
1146 
1147 	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1148 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1149 
1150 	if (pcr->ops->force_power_down)
1151 		pcr->ops->force_power_down(pcr, pm_state);
1152 	else
1153 		rtsx_base_force_power_down(pcr, pm_state);
1154 }
1155 
1156 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1157 {
1158 	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1159 
1160 	if (pcr->ops->enable_ocp) {
1161 		pcr->ops->enable_ocp(pcr);
1162 	} else {
1163 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1164 		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1165 	}
1166 
1167 }
1168 
1169 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1170 {
1171 	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1172 
1173 	if (pcr->ops->disable_ocp) {
1174 		pcr->ops->disable_ocp(pcr);
1175 	} else {
1176 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1177 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1178 				OC_POWER_DOWN);
1179 	}
1180 }
1181 
1182 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1183 {
1184 	if (pcr->ops->init_ocp) {
1185 		pcr->ops->init_ocp(pcr);
1186 	} else {
1187 		struct rtsx_cr_option *option = &(pcr->option);
1188 
1189 		if (option->ocp_en) {
1190 			u8 val = option->sd_800mA_ocp_thd;
1191 
1192 			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1193 			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1194 				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1195 			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1196 				SD_OCP_THD_MASK, val);
1197 			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1198 				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1199 			rtsx_pci_enable_ocp(pcr);
1200 		}
1201 	}
1202 }
1203 
1204 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1205 {
1206 	if (pcr->ops->get_ocpstat)
1207 		return pcr->ops->get_ocpstat(pcr, val);
1208 	else
1209 		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1210 }
1211 
1212 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1213 {
1214 	if (pcr->ops->clear_ocpstat) {
1215 		pcr->ops->clear_ocpstat(pcr);
1216 	} else {
1217 		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1218 		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1219 
1220 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1221 		udelay(100);
1222 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1223 	}
1224 }
1225 
1226 void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1227 {
1228 	u16 val;
1229 
1230 	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1231 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1232 		val |= 1<<9;
1233 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1234 	}
1235 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1236 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1237 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1238 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1239 
1240 }
1241 
1242 void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1243 {
1244 	u16 val;
1245 
1246 	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1247 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1248 		val &= ~(1<<9);
1249 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1250 	}
1251 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1252 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1253 
1254 }
1255 
1256 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1257 {
1258 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1259 		MS_CLK_EN | SD40_CLK_EN, 0);
1260 	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1261 	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1262 
1263 	msleep(50);
1264 
1265 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1266 
1267 	return 0;
1268 }
1269 
1270 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1271 {
1272 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1273 		MS_CLK_EN | SD40_CLK_EN, 0);
1274 
1275 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1276 
1277 	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1278 	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1279 
1280 	return 0;
1281 }
1282 
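/*
 * Bring the controller to its working state (used at probe and resume):
 * power up the SSC clock, program clock/link/interrupt defaults through
 * the command queue, run chip-specific extra init and snapshot the
 * current card-present bits.
 */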
1283 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1284 {
1285 	struct pci_dev *pdev = pcr->pci;
1286 	int err;
1287 
1288 	if (PCI_PID(pcr) == PID_5228)
1289 		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1290 				RTS5228_LDO1_SR_0_5);
1291 
1292 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1293 
1294 	rtsx_pci_enable_bus_int(pcr);
1295 
1296 	/* Power on SSC */
1297 	if (PCI_PID(pcr) == PID_5261) {
1298 		/* Gating real mcu clock */
1299 		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1300 			RTS5261_MCU_CLOCK_GATING, 0);
1301 		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1302 			SSC_POWER_DOWN, 0);
1303 	} else {
1304 		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1305 	}
1306 	if (err < 0)
1307 		return err;
1308 
1309 	/* Wait for SSC power to stabilize */
1310 	udelay(200);
1311 
1312 	rtsx_disable_aspm(pcr);
1313 	if (pcr->ops->optimize_phy) {
1314 		err = pcr->ops->optimize_phy(pcr);
1315 		if (err < 0)
1316 			return err;
1317 	}
1318 
1319 	rtsx_pci_init_cmd(pcr);
1320 
1321 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1322 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1323 
1324 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1325 	/* Disable card clock */
1326 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1327 	/* Reset delink mode */
1328 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1329 	/* Card driving select */
1330 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1331 			0xFF, pcr->card_drive_sel);
1332 	/* Enable SSC Clock */
1333 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1334 			0xFF, SSC_8X_EN | SSC_SEL_4M);
1335 	if (PCI_PID(pcr) == PID_5261)
1336 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1337 			RTS5261_SSC_DEPTH_2M);
1338 	else if (PCI_PID(pcr) == PID_5228)
1339 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1340 			RTS5228_SSC_DEPTH_2M);
1341 	else
1342 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1343 
1344 	/* Disable cd_pwr_save */
1345 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1346 	/* Clear Link Ready Interrupt */
1347 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1348 			LINK_RDY_INT, LINK_RDY_INT);
1349 	/* Enlarge the estimation window of PERST# glitch
1350 	 * to reduce the chance of invalid card interrupt
1351 	 */
1352 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1353 	/* Update RC oscillator to 400k
1354 	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1355 	 *                1: 2M  0: 400k
1356 	 */
1357 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1358 	/* Set interrupt write clear
1359 	 * bit 1: U_elbi_if_rd_clr_en
1360 	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1361 	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1362 	 */
1363 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1364 
1365 	err = rtsx_pci_send_cmd(pcr, 100);
1366 	if (err < 0)
1367 		return err;
1368 
1369 	switch (PCI_PID(pcr)) {
1370 	case PID_5250:
1371 	case PID_524A:
1372 	case PID_525A:
1373 	case PID_5260:
1374 	case PID_5261:
1375 	case PID_5228:
1376 		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1377 		break;
1378 	default:
1379 		break;
1380 	}
1381 
1382 	/* Init OCP (over-current protection) */
1383 	rtsx_pci_init_ocp(pcr);
1384 
1385 	/* Enable clk_request_n to enable clock power management */
1386 	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1387 					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1388 	/* Enter L1 when host tx idle */
1389 	pci_write_config_byte(pdev, 0x70F, 0x5B);
1390 
1391 	if (pcr->ops->extra_init_hw) {
1392 		err = pcr->ops->extra_init_hw(pcr);
1393 		if (err < 0)
1394 			return err;
1395 	}
1396 
1397 	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1398 
1399 	/* No CD interrupt if probing driver with card inserted.
1400 	 * So we need to initialize pcr->card_exist here.
1401 	 */
1402 	if (pcr->ops->cd_deglitch)
1403 		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1404 	else
1405 		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1406 
1407 	return 0;
1408 }
1409 
1410 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1411 {
1412 	int err;
1413 
1414 	spin_lock_init(&pcr->lock);
1415 	mutex_init(&pcr->pcr_mutex);
1416 
1417 	switch (PCI_PID(pcr)) {
1418 	default:
1419 	case 0x5209:
1420 		rts5209_init_params(pcr);
1421 		break;
1422 
1423 	case 0x5229:
1424 		rts5229_init_params(pcr);
1425 		break;
1426 
1427 	case 0x5289:
1428 		rtl8411_init_params(pcr);
1429 		break;
1430 
1431 	case 0x5227:
1432 		rts5227_init_params(pcr);
1433 		break;
1434 
1435 	case 0x522A:
1436 		rts522a_init_params(pcr);
1437 		break;
1438 
1439 	case 0x5249:
1440 		rts5249_init_params(pcr);
1441 		break;
1442 
1443 	case 0x524A:
1444 		rts524a_init_params(pcr);
1445 		break;
1446 
1447 	case 0x525A:
1448 		rts525a_init_params(pcr);
1449 		break;
1450 
1451 	case 0x5287:
1452 		rtl8411b_init_params(pcr);
1453 		break;
1454 
1455 	case 0x5286:
1456 		rtl8402_init_params(pcr);
1457 		break;
1458 
1459 	case 0x5260:
1460 		rts5260_init_params(pcr);
1461 		break;
1462 
1463 	case 0x5261:
1464 		rts5261_init_params(pcr);
1465 		break;
1466 
1467 	case 0x5228:
1468 		rts5228_init_params(pcr);
1469 		break;
1470 	}
1471 
1472 	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1473 			PCI_PID(pcr), pcr->ic_version);
1474 
1475 	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1476 			GFP_KERNEL);
1477 	if (!pcr->slots)
1478 		return -ENOMEM;
1479 
1480 	if (pcr->ops->fetch_vendor_settings)
1481 		pcr->ops->fetch_vendor_settings(pcr);
1482 
1483 	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1484 	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1485 			pcr->sd30_drive_sel_1v8);
1486 	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1487 			pcr->sd30_drive_sel_3v3);
1488 	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1489 			pcr->card_drive_sel);
1490 	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1491 
1492 	pcr->state = PDEV_STAT_IDLE;
1493 	err = rtsx_pci_init_hw(pcr);
1494 	if (err < 0) {
1495 		kfree(pcr->slots);
1496 		return err;
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 static int rtsx_pci_probe(struct pci_dev *pcidev,
1503 			  const struct pci_device_id *id)
1504 {
1505 	struct rtsx_pcr *pcr;
1506 	struct pcr_handle *handle;
1507 	u32 base, len;
1508 	int ret, i, bar = 0;
1509 	u8 val;
1510 
1511 	dev_dbg(&(pcidev->dev),
1512 		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1513 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1514 		(int)pcidev->revision);
1515 
1516 	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1517 	if (ret < 0)
1518 		return ret;
1519 
1520 	ret = pci_enable_device(pcidev);
1521 	if (ret)
1522 		return ret;
1523 
1524 	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1525 	if (ret)
1526 		goto disable;
1527 
1528 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1529 	if (!pcr) {
1530 		ret = -ENOMEM;
1531 		goto release_pci;
1532 	}
1533 
1534 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1535 	if (!handle) {
1536 		ret = -ENOMEM;
1537 		goto free_pcr;
1538 	}
1539 	handle->pcr = pcr;
1540 
1541 	idr_preload(GFP_KERNEL);
1542 	spin_lock(&rtsx_pci_lock);
1543 	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1544 	if (ret >= 0)
1545 		pcr->id = ret;
1546 	spin_unlock(&rtsx_pci_lock);
1547 	idr_preload_end();
1548 	if (ret < 0)
1549 		goto free_handle;
1550 
1551 	pcr->pci = pcidev;
1552 	dev_set_drvdata(&pcidev->dev, handle);
1553 
1554 	if (CHK_PCI_PID(pcr, 0x525A))
1555 		bar = 1;
1556 	len = pci_resource_len(pcidev, bar);
1557 	base = pci_resource_start(pcidev, bar);
1558 	pcr->remap_addr = ioremap(base, len);
1559 	if (!pcr->remap_addr) {
1560 		ret = -ENOMEM;
1561 		goto free_handle;
1562 	}
1563 
1564 	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1565 			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1566 			GFP_KERNEL);
1567 	if (pcr->rtsx_resv_buf == NULL) {
1568 		ret = -ENXIO;
1569 		goto unmap;
1570 	}
1571 	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1572 	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1573 	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1574 	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1575 	rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1576 	if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1577 		pcr->aspm_enabled = false;
1578 	else
1579 		pcr->aspm_enabled = true;
1580 	pcr->card_inserted = 0;
1581 	pcr->card_removed = 0;
1582 	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1583 	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1584 
1585 	pcr->msi_en = msi_en;
1586 	if (pcr->msi_en) {
1587 		ret = pci_enable_msi(pcidev);
1588 		if (ret)
1589 			pcr->msi_en = false;
1590 	}
1591 
1592 	ret = rtsx_pci_acquire_irq(pcr);
1593 	if (ret < 0)
1594 		goto disable_msi;
1595 
1596 	pci_set_master(pcidev);
1597 	synchronize_irq(pcr->irq);
1598 
1599 	ret = rtsx_pci_init_chip(pcr);
1600 	if (ret < 0)
1601 		goto disable_irq;
1602 
1603 	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1604 		rtsx_pcr_cells[i].platform_data = handle;
1605 		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1606 	}
1607 
1608 	if (pcr->rtd3_en) {
1609 		INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
1610 		pm_runtime_allow(&pcidev->dev);
1611 		pm_runtime_enable(&pcidev->dev);
1612 		pcr->is_runtime_suspended = false;
1613 	}
1614 
1615 
1616 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1617 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1618 	if (ret < 0)
1619 		goto free_slots;
1620 
1621 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1622 
1623 	return 0;
1624 
1625 free_slots:
1626 	kfree(pcr->slots);
1627 disable_irq:
1628 	free_irq(pcr->irq, (void *)pcr);
1629 disable_msi:
1630 	if (pcr->msi_en)
1631 		pci_disable_msi(pcr->pci);
1632 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1633 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1634 unmap:
1635 	iounmap(pcr->remap_addr);
1636 free_handle:
1637 	kfree(handle);
1638 free_pcr:
1639 	kfree(pcr);
1640 release_pci:
1641 	pci_release_regions(pcidev);
1642 disable:
1643 	pci_disable_device(pcidev);
1644 
1645 	return ret;
1646 }
1647 
1648 static void rtsx_pci_remove(struct pci_dev *pcidev)
1649 {
1650 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1651 	struct rtsx_pcr *pcr = handle->pcr;
1652 
1653 	if (pcr->rtd3_en)
1654 		pm_runtime_get_noresume(&pcr->pci->dev);
1655 
1656 	pcr->remove_pci = true;
1657 
1658 	/* Disable interrupts at the pcr level */
1659 	spin_lock_irq(&pcr->lock);
1660 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1661 	pcr->bier = 0;
1662 	spin_unlock_irq(&pcr->lock);
1663 
1664 	cancel_delayed_work_sync(&pcr->carddet_work);
1665 	cancel_delayed_work_sync(&pcr->idle_work);
1666 	if (pcr->rtd3_en)
1667 		cancel_delayed_work_sync(&pcr->rtd3_work);
1668 
1669 	mfd_remove_devices(&pcidev->dev);
1670 
1671 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1672 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1673 	free_irq(pcr->irq, (void *)pcr);
1674 	if (pcr->msi_en)
1675 		pci_disable_msi(pcr->pci);
1676 	iounmap(pcr->remap_addr);
1677 
1678 	pci_release_regions(pcidev);
1679 	pci_disable_device(pcidev);
1680 
1681 	spin_lock(&rtsx_pci_lock);
1682 	idr_remove(&rtsx_pci_idr, pcr->id);
1683 	spin_unlock(&rtsx_pci_lock);
1684 
1685 	if (pcr->rtd3_en) {
1686 		pm_runtime_disable(&pcr->pci->dev);
1687 		pm_runtime_put_noidle(&pcr->pci->dev);
1688 	}
1689 
1690 	kfree(pcr->slots);
1691 	kfree(pcr);
1692 	kfree(handle);
1693 
1694 	dev_dbg(&(pcidev->dev),
1695 		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1696 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1697 }
1698 
1699 static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1700 {
1701 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1702 	struct pcr_handle *handle;
1703 	struct rtsx_pcr *pcr;
1704 
1705 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1706 
1707 	handle = pci_get_drvdata(pcidev);
1708 	pcr = handle->pcr;
1709 
1710 	cancel_delayed_work(&pcr->carddet_work);
1711 	cancel_delayed_work(&pcr->idle_work);
1712 
1713 	mutex_lock(&pcr->pcr_mutex);
1714 
1715 	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1716 
1717 	device_wakeup_disable(dev_d);
1718 
1719 	mutex_unlock(&pcr->pcr_mutex);
1720 	return 0;
1721 }
1722 
1723 static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1724 {
1725 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1726 	struct pcr_handle *handle;
1727 	struct rtsx_pcr *pcr;
1728 	int ret = 0;
1729 
1730 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1731 
1732 	handle = pci_get_drvdata(pcidev);
1733 	pcr = handle->pcr;
1734 
1735 	mutex_lock(&pcr->pcr_mutex);
1736 
1737 	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1738 	if (ret)
1739 		goto out;
1740 
1741 	ret = rtsx_pci_init_hw(pcr);
1742 	if (ret)
1743 		goto out;
1744 
1745 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1746 
1747 out:
1748 	mutex_unlock(&pcr->pcr_mutex);
1749 	return ret;
1750 }
1751 
1752 #ifdef CONFIG_PM
1753 
1754 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1755 {
1756 	struct pcr_handle *handle;
1757 	struct rtsx_pcr *pcr;
1758 
1759 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1760 
1761 	handle = pci_get_drvdata(pcidev);
1762 	pcr = handle->pcr;
1763 	rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1764 
1765 	pci_disable_device(pcidev);
1766 	free_irq(pcr->irq, (void *)pcr);
1767 	if (pcr->msi_en)
1768 		pci_disable_msi(pcr->pci);
1769 }
1770 
1771 static int rtsx_pci_runtime_suspend(struct device *device)
1772 {
1773 	struct pci_dev *pcidev = to_pci_dev(device);
1774 	struct pcr_handle *handle;
1775 	struct rtsx_pcr *pcr;
1776 
1777 	handle = pci_get_drvdata(pcidev);
1778 	pcr = handle->pcr;
1779 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1780 
1781 	cancel_delayed_work(&pcr->carddet_work);
1782 	cancel_delayed_work(&pcr->rtd3_work);
1783 	cancel_delayed_work(&pcr->idle_work);
1784 
1785 	mutex_lock(&pcr->pcr_mutex);
1786 	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1787 
1788 	free_irq(pcr->irq, (void *)pcr);
1789 
1790 	mutex_unlock(&pcr->pcr_mutex);
1791 
1792 	pcr->is_runtime_suspended = true;
1793 
1794 	return 0;
1795 }
1796 
1797 static int rtsx_pci_runtime_resume(struct device *device)
1798 {
1799 	struct pci_dev *pcidev = to_pci_dev(device);
1800 	struct pcr_handle *handle;
1801 	struct rtsx_pcr *pcr;
1802 
1803 	handle = pci_get_drvdata(pcidev);
1804 	pcr = handle->pcr;
1805 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1806 
1807 	mutex_lock(&pcr->pcr_mutex);
1808 
1809 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1810 	rtsx_pci_acquire_irq(pcr);
1811 	synchronize_irq(pcr->irq);
1812 
1813 	if (pcr->ops->fetch_vendor_settings)
1814 		pcr->ops->fetch_vendor_settings(pcr);
1815 
1816 	rtsx_pci_init_hw(pcr);
1817 
1818 	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1819 		pcr->slots[RTSX_SD_CARD].card_event(
1820 				pcr->slots[RTSX_SD_CARD].p_dev);
1821 	}
1822 
1823 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1824 
1825 	mutex_unlock(&pcr->pcr_mutex);
1826 	return 0;
1827 }
1828 
1829 #else /* CONFIG_PM */
1830 
1831 #define rtsx_pci_shutdown NULL
1832 #define rtsx_pci_runtime_suspend NULL
1833 #define rtsx_pci_runtime_resume NULL
1834 
1835 #endif /* CONFIG_PM */
1836 
1837 static const struct dev_pm_ops rtsx_pci_pm_ops = {
1838 	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1839 	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
1840 };
1841 
1842 static struct pci_driver rtsx_pci_driver = {
1843 	.name = DRV_NAME_RTSX_PCI,
1844 	.id_table = rtsx_pci_ids,
1845 	.probe = rtsx_pci_probe,
1846 	.remove = rtsx_pci_remove,
1847 	.driver.pm = &rtsx_pci_pm_ops,
1848 	.shutdown = rtsx_pci_shutdown,
1849 };
1850 module_pci_driver(rtsx_pci_driver);
1851 
1852 MODULE_LICENSE("GPL");
1853 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1854 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1855