1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Enable PCIe link L0s/L1 state and Clock Power Management
4 *
5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/pci_regs.h>
15 #include <linux/errno.h>
16 #include <linux/pm.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/jiffies.h>
20 #include <linux/delay.h>
21 #include <linux/pci-aspm.h>
22 #include "../pci.h"
23
24 #ifdef MODULE_PARAM_PREFIX
25 #undef MODULE_PARAM_PREFIX
26 #endif
27 #define MODULE_PARAM_PREFIX "pcie_aspm."
28
/*
 * Note: those are not register definitions.  They are internal bitmask
 * values used in the aspm_support/enabled/capable/default/disable fields
 * of struct pcie_link_state below.
 */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)
44
/* Exit latencies for one direction of a link, in nanoseconds */
struct aspm_latency {
	u32 l0s;			/* L0s latency (nsec) */
	u32 l1;				/* L1 latency (nsec) */
};
49
/*
 * Per-link ASPM bookkeeping.  One instance is allocated for the upstream
 * component of each PCIe link (see alloc_pcie_link_state()).
 */
struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */
	struct list_head children;	/* list of child link states */
	struct list_head link;		/* node in parent's children list */

	/* ASPM state; each field holds a mask of ASPM_STATE_* bits */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */

	/* Exit latencies */
	struct aspm_latency latency_up;	/* Upstream direction exit latency */
	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
	/*
	 * Endpoint acceptable latencies. A pcie downstream port only
	 * has one slot under it, so at most there are 8 functions.
	 */
	struct aspm_latency acceptable[8];

	/* L1 PM Substate info */
	struct {
		u32 up_cap_ptr;		/* L1SS cap ptr in upstream dev */
		u32 dw_cap_ptr;		/* L1SS cap ptr in downstream dev */
		u32 ctl1;		/* value to be programmed in ctl1 */
		u32 ctl2;		/* value to be programmed in ctl2 */
	} l1ss;
};
89
/* Module-wide switches; written outside this chunk (parameter handling) */
static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
/* aspm_lock serializes link-state setup; link_list holds every link state */
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
99
/* Boot-time default policy comes from Kconfig; otherwise POLICY_DEFAULT (0) */
#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

/* Human-readable policy names, indexed by POLICY_* */
static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

/* How long pcie_wait_for_retrain() polls: one second worth of jiffies */
#define LINK_RETRAIN_TIMEOUT HZ
118
policy_to_aspm_state(struct pcie_link_state * link)119 static int policy_to_aspm_state(struct pcie_link_state *link)
120 {
121 switch (aspm_policy) {
122 case POLICY_PERFORMANCE:
123 /* Disable ASPM and Clock PM */
124 return 0;
125 case POLICY_POWERSAVE:
126 /* Enable ASPM L0s/L1 */
127 return (ASPM_STATE_L0S | ASPM_STATE_L1);
128 case POLICY_POWER_SUPERSAVE:
129 /* Enable Everything */
130 return ASPM_STATE_ALL;
131 case POLICY_DEFAULT:
132 return link->aspm_default;
133 }
134 return 0;
135 }
136
policy_to_clkpm_state(struct pcie_link_state * link)137 static int policy_to_clkpm_state(struct pcie_link_state *link)
138 {
139 switch (aspm_policy) {
140 case POLICY_PERFORMANCE:
141 /* Disable ASPM and Clock PM */
142 return 0;
143 case POLICY_POWERSAVE:
144 case POLICY_POWER_SUPERSAVE:
145 /* Enable Clock PM */
146 return 1;
147 case POLICY_DEFAULT:
148 return link->clkpm_default;
149 }
150 return 0;
151 }
152
pcie_set_clkpm_nocheck(struct pcie_link_state * link,int enable)153 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
154 {
155 struct pci_dev *child;
156 struct pci_bus *linkbus = link->pdev->subordinate;
157 u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
158
159 list_for_each_entry(child, &linkbus->devices, bus_list)
160 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
161 PCI_EXP_LNKCTL_CLKREQ_EN,
162 val);
163 link->clkpm_enabled = !!enable;
164 }
165
pcie_set_clkpm(struct pcie_link_state * link,int enable)166 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
167 {
168 /*
169 * Don't enable Clock PM if the link is not Clock PM capable
170 * or Clock PM is disabled
171 */
172 if (!link->clkpm_capable || link->clkpm_disable)
173 enable = 0;
174 /* Need nothing if the specified equals to current state */
175 if (link->clkpm_enabled == enable)
176 return;
177 pcie_set_clkpm_nocheck(link, enable);
178 }
179
/*
 * Probe Clock PM capability and current enable state of every function
 * below @link and record the worst case in the link state.  @blacklist
 * forces Clock PM to stay disabled on this link.
 */
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			/* One incapable function makes the whole link incapable */
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;	/* remember the BIOS setting */
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}
205
pcie_wait_for_retrain(struct pci_dev * pdev)206 static int pcie_wait_for_retrain(struct pci_dev *pdev)
207 {
208 unsigned long end_jiffies;
209 u16 reg16;
210
211 /* Wait for Link Training to be cleared by hardware */
212 end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
213 do {
214 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, ®16);
215 if (!(reg16 & PCI_EXP_LNKSTA_LT))
216 return 0;
217 msleep(1);
218 } while (time_before(jiffies, end_jiffies));
219
220 return -ETIMEDOUT;
221 }
222
pcie_retrain_link(struct pcie_link_state * link)223 static int pcie_retrain_link(struct pcie_link_state *link)
224 {
225 struct pci_dev *parent = link->pdev;
226 int rc;
227 u16 reg16;
228
229 /*
230 * Ensure the updated LNKCTL parameters are used during link
231 * training by checking that there is no ongoing link training to
232 * avoid LTSSM race as recommended in Implementation Note at the
233 * end of PCIe r6.0.1 sec 7.5.3.7.
234 */
235 rc = pcie_wait_for_retrain(parent);
236 if (rc)
237 return rc;
238
239 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16);
240 reg16 |= PCI_EXP_LNKCTL_RL;
241 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
242 if (parent->clear_retrain_link) {
243 /*
244 * Due to an erratum in some devices the Retrain Link bit
245 * needs to be cleared again manually to allow the link
246 * training to succeed.
247 */
248 reg16 &= ~PCI_EXP_LNKCTL_RL;
249 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
250 }
251
252 return pcie_wait_for_retrain(parent);
253 }
254
255 /*
256 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
257 * could use common clock. If they are, configure them to use the
258 * common clock. That will reduce the ASPM state exit latency.
259 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;	/* saved for rollback */
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		/* Verify every downstream function agrees with the parent */
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
	}

	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		/* Save each function's old CCC bit in case retraining fails */
		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, ccc);
	}

	/* Configure upstream component */
	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CCC, ccc);

	if (pcie_retrain_link(link)) {

		/* Training failed. Restore common clock configurations */
		pci_err(parent, "ASPM: Could not configure common clock\n");
		list_for_each_entry(child, &linkbus->devices, bus_list)
			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
							   PCI_EXP_LNKCTL_CCC,
							   child_old_ccc[PCI_FUNC(child->devfn)]);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
	}
}
327
328 /* Convert L0s latency encoding to ns */
calc_l0s_latency(u32 encoding)329 static u32 calc_l0s_latency(u32 encoding)
330 {
331 if (encoding == 0x7)
332 return (5 * 1000); /* > 4us */
333 return (64 << encoding);
334 }
335
336 /* Convert L0s acceptable latency encoding to ns */
calc_l0s_acceptable(u32 encoding)337 static u32 calc_l0s_acceptable(u32 encoding)
338 {
339 if (encoding == 0x7)
340 return -1U;
341 return (64 << encoding);
342 }
343
344 /* Convert L1 latency encoding to ns */
calc_l1_latency(u32 encoding)345 static u32 calc_l1_latency(u32 encoding)
346 {
347 if (encoding == 0x7)
348 return (65 * 1000); /* > 64us */
349 return (1000 << encoding);
350 }
351
352 /* Convert L1 acceptable latency encoding to ns */
calc_l1_acceptable(u32 encoding)353 static u32 calc_l1_acceptable(u32 encoding)
354 {
355 if (encoding == 0x7)
356 return -1U;
357 return (1000 << encoding);
358 }
359
360 /* Convert L1SS T_pwr encoding to usec */
calc_l1ss_pwron(struct pci_dev * pdev,u32 scale,u32 val)361 static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
362 {
363 switch (scale) {
364 case 0:
365 return val * 2;
366 case 1:
367 return val * 10;
368 case 2:
369 return val * 100;
370 }
371 pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
372 return 0;
373 }
374
/*
 * Encode @threshold_us as an LTR_L1.2_THRESHOLD (value, scale) pair for
 * PCI_L1SS_CTL1.  The hardware field is a 10-bit value with a 3-bit scale
 * where each scale step multiplies the value by 32.  See PCIe r3.1,
 * sec 7.33.3 and sec 6.18.
 *
 * Round the encoded value *up* so the threshold actually programmed is
 * never below the requested one; the previous truncating right shift
 * could under-encode it (e.g. 1100 ns became 34 * 32 = 1088 ns).  The
 * full 10-bit value range (0x3ff) is used at each scale.
 */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u64 threshold_ns = (u64) threshold_us * 1000;

	if (threshold_ns <= 0x3ff * 1) {
		*scale = 0;		/* Value times 1ns */
		*value = threshold_ns;
	} else if (threshold_ns <= 0x3ff * 32) {
		*scale = 1;		/* Value times 32ns */
		*value = roundup(threshold_ns, 32) >> 5;
	} else if (threshold_ns <= 0x3ff * 1024) {
		*scale = 2;		/* Value times 1024ns */
		*value = roundup(threshold_ns, 1024) >> 10;
	} else if (threshold_ns <= 0x3ff * 32768) {
		*scale = 3;		/* Value times 32768ns */
		*value = roundup(threshold_ns, 32768) >> 15;
	} else if (threshold_ns <= 0x3ff * 1048576) {
		*scale = 4;		/* Value times 1048576ns */
		*value = roundup(threshold_ns, 1048576) >> 20;
	} else if (threshold_ns <= 0x3ff * (u64) 33554432) {
		*scale = 5;		/* Value times 33554432ns */
		*value = roundup(threshold_ns, (u64) 33554432) >> 25;
	} else {
		*scale = 5;
		*value = 0x3ff;		/* Max representable value */
	}
}
400
/* Snapshot of one port's ASPM-related config registers (pcie_get_aspm_reg) */
struct aspm_register_info {
	u32 support:2;			/* LNKCAP ASPM Support field */
	u32 enabled:2;			/* LNKCTL ASPM Control field */
	u32 latency_encoding_l0s;	/* LNKCAP L0s Exit Latency encoding */
	u32 latency_encoding_l1;	/* LNKCAP L1 Exit Latency encoding */

	/* L1 substates */
	u32 l1ss_cap_ptr;		/* offset of L1SS ext capability, 0 if none */
	u32 l1ss_cap;			/* PCI_L1SS_CAP contents */
	u32 l1ss_ctl1;			/* PCI_L1SS_CTL1 contents */
	u32 l1ss_ctl2;			/* PCI_L1SS_CTL2 contents */
};
413
/*
 * Read @pdev's ASPM capability/control registers into @info: the LNKCAP
 * support and exit-latency encodings, the current LNKCTL ASPM enables,
 * and, when present, the L1 PM Substates extended capability registers.
 */
static void pcie_get_aspm_reg(struct pci_dev *pdev,
			      struct aspm_register_info *info)
{
	u16 reg16;
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	/* Shift counts match the PCI_EXP_LNKCAP_* field positions */
	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
	info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
	info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;

	/* Read L1 PM substate capabilities */
	info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
	info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!info->l1ss_cap_ptr)
		return;
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
			      &info->l1ss_cap);
	/* Ignore the capability unless some L1 PM substate is supported */
	if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
		info->l1ss_cap = 0;
		return;
	}

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!pdev->ltr_path)
		info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
			      &info->l1ss_ctl1);
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
			      &info->l1ss_ctl2);
}
452
/*
 * Walk from @endpoint's link up toward the root, clearing from each
 * link's aspm_capable any state whose exit latency (plus accumulated
 * switch latency for L1) exceeds the acceptable latency this endpoint
 * advertised in its Device Capabilities register.
 */
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, l1_switch_latency = 0;
	struct aspm_latency *acceptable;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;
	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];

	while (link) {
		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (link->latency_up.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (link->latency_dw.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to root complex need 1
		 * more microsecond for L1. Spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable->l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		/*
		 * Incremented *after* the check: the link closest to the
		 * endpoint has no intervening switch, so its check uses
		 * a zero switch contribution.
		 */
		l1_switch_latency += 1000;

		link = link->parent;
	}
}
499
500 /*
501 * The L1 PM substate capability is only implemented in function 0 in a
502 * multi function device.
503 */
pci_function_0(struct pci_bus * linkbus)504 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
505 {
506 struct pci_dev *child;
507
508 list_for_each_entry(child, &linkbus->devices, bus_list)
509 if (PCI_FUNC(child->devfn) == 0)
510 return child;
511 return NULL;
512 }
513
/*
 * Calculate L1.2 PM substate timing parameters and cache them in
 * link->l1ss.ctl1/ctl2 for later programming by pcie_config_aspm_l1ss().
 * Shift counts below match the PCI_L1SS_CAP_*/CTL1 field positions.
 */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				struct aspm_register_info *upreg,
				struct aspm_register_info *dwreg)
{
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;

	link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
	link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
	link->l1ss.ctl1 = link->l1ss.ctl2 = 0;

	/* Nothing to compute unless some form of L1.2 is supported */
	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	if (calc_l1ss_pwron(link->pdev, scale1, val1) >
	    calc_l1ss_pwron(link->downstream, scale2, val2)) {
		/* ctl2 gets the winning (scale, value) pair for T_POWER_ON */
		link->l1ss.ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
	} else {
		link->l1ss.ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
}
563
/*
 * Probe the ASPM and L1 PM Substates capabilities of both ends of @link,
 * record supported/enabled/default states and exit latencies, then prune
 * aspm_capable against each endpoint's acceptable latencies.  @blacklist
 * marks every state enabled+disabled so it gets turned off later.
 */
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	struct aspm_register_info upreg, dwreg;

	if (blacklist) {
		/* Set enabled/disable so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/* Get upstream/downstream components' register state */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	if (!(upreg.support & dwreg.support))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state
	 * after clock configuration
	 */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
		link->aspm_support |= ASPM_STATE_L0S;
	if (dwreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (upreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;
	link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
	link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);

	/* Setup L1 state */
	if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
		link->aspm_support |= ASPM_STATE_L1;
	if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
		link->aspm_enabled |= ASPM_STATE_L1;
	link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
	link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);

	/* Setup L1 substate: supported only when both ends support it */
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	/* A substate counts as enabled only when enabled at both ends */
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, &upreg, &dwreg);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		u32 reg32, encoding;
		struct aspm_latency *acceptable =
			&link->acceptable[PCI_FUNC(child->devfn)];

		/* Only endpoints advertise acceptable latencies */
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		/* Calculate endpoint L0s acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
		acceptable->l0s = calc_l0s_acceptable(encoding);
		/* Calculate endpoint L1 acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
		acceptable->l1 = calc_l1_acceptable(encoding);

		pcie_aspm_check_latency(child);
	}
}
671
/*
 * Read-modify-write a config dword at @pos: clear the @clear bits, then
 * set the @set bits.
 */
static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
				    u32 clear, u32 set)
{
	u32 dword;

	pci_read_config_dword(pdev, pos, &dword);
	dword = (dword & ~clear) | set;
	pci_write_config_dword(pdev, pos, dword);
}
682
/*
 * Configure the ASPM L1 substates: program @state (a mask of ASPM_STATE_*
 * L1SS bits) into the L1SS CTL registers of both ends of @link, honoring
 * the spec-mandated ordering between parent and child.
 */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 up_cap_ptr = link->l1ss.up_cap_ptr;
	u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;

	/* Bits being newly turned on relative to the current state */
	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable bit at parent first, then at child
	 * - When disabling L1.x, disable bit at child first, then at parent
	 * - When enabling ASPM L1.x, need to disable L1
	 *   (at child followed by parent).
	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1, and it gets enabled later
	 * in pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	if (enable_req & ASPM_STATE_L1_2_MASK) {

		/* Program T_POWER_ON times in both ports */
		pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);
		pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);

		/* Program Common_Mode_Restore_Time in upstream device */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_CM_RESTORE_TIME,
					link->l1ss.ctl1);

		/* Program LTR_L1.2_THRESHOLD time in both ports */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
		pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
	}

	/* Translate the requested state mask to L1SS CTL1 enable bits */
	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable */
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}
762
/* Program the LNKCTL ASPM Control field of a single device to @val */
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}
768
/*
 * Apply ASPM state mask @state to @link, filtered by what the link is
 * capable of and what has been administratively disabled, then program
 * the L1SS and LNKCTL registers at both ends in the spec-mandated order.
 */
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates*/
	/*
	 * NOTE(review): child comes from pci_function_0() via link->downstream
	 * and could in principle be NULL if no function 0 exists — presumably
	 * every link reaching here has one; verify against alloc path.
	 */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured the
	 * same setting for ASPM. Enabling ASPM L1 should be done in
	 * upstream component first and then downstream, and vice
	 * versa for disabling ASPM L1. Spec doesn't mention L0S.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}
819
pcie_config_aspm_path(struct pcie_link_state * link)820 static void pcie_config_aspm_path(struct pcie_link_state *link)
821 {
822 while (link) {
823 pcie_config_aspm_link(link, policy_to_aspm_state(link));
824 link = link->parent;
825 }
826 }
827
/* Detach the link state from its owning device and release its memory */
static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}
833
/*
 * Decide whether ASPM may be enabled below @pdev.  Returns 0 when the
 * slot looks safe, -EINVAL when it should be blacklisted (non-PCIe
 * function present, or a pre-1.1 device without 'pcie_aspm=force').
 */
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * very strange. Disable ASPM for the whole slot
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
		 * RBER bit to determine if a function is 1.1 version device
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}
868
/*
 * Allocate and initialize a pcie_link_state for @pdev (the upstream end
 * of a link), wire it into the parent/root/sibling lists, and attach it
 * to pdev->link_state.  Returns NULL on allocation failure or when the
 * parent link has no state.  Called under aspm_lock.
 */
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	INIT_LIST_HEAD(&link->children);
	INIT_LIST_HEAD(&link->link);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			/* No upstream link state to chain onto; bail out */
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
		list_add(&link->link, &parent->children);
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}
912
/*
 * pcie_aspm_init_link_state: Initiate PCI express link state.
 * It is called after the pcie and its children devices are scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	/* A failed sanity check blacklists the whole slot */
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	/* Already initialized for this device */
	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device has a
	 * Link on its secondary side.
	 */
	if (!pdev->has_secondary_link)
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state. Note that we need to configure
	 * upstream links also because capable state of them can be
	 * update through pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting. Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now. If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}
979
980 /* Recheck latencies and update aspm_capable for links under the root */
pcie_update_aspm_capable(struct pcie_link_state * root)981 static void pcie_update_aspm_capable(struct pcie_link_state *root)
982 {
983 struct pcie_link_state *link;
984 BUG_ON(root->parent);
985 list_for_each_entry(link, &link_list, sibling) {
986 if (link->root != root)
987 continue;
988 link->aspm_capable = link->aspm_support;
989 }
990 list_for_each_entry(link, &link_list, sibling) {
991 struct pci_dev *child;
992 struct pci_bus *linkbus = link->pdev->subordinate;
993 if (link->root != root)
994 continue;
995 list_for_each_entry(child, &linkbus->devices, bus_list) {
996 if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
997 (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
998 continue;
999 pcie_aspm_check_latency(child);
1000 }
1001 }
1002 }
1003
1004 /* @pdev: the endpoint device */
/*
 * pcie_aspm_exit_link_state - tear down ASPM state when a device is removed
 * @pdev: the endpoint device
 *
 * Called as an endpoint function goes away.  Only when the last function
 * on the bus is gone is the upstream port's link state disabled, unlinked
 * and freed; upstream links are then re-evaluated since latency budgets
 * may have changed.
 */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	/* The upstream bridge owns the link state; nothing to do without one */
	if (!parent || !parent->link_state)
		return;

	/* Same lock ordering as init: pci_bus_sem (read), then aspm_lock */
	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	/*
	 * All PCIe functions are in one slot, remove one function will remove
	 * the whole slot, so just wait until we are the last function left.
	 */
	if (!list_empty(&parent->subordinate->devices))
		goto out;

	/* Cache pointers before the link state is freed below */
	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/* All functions are removed, so just disable ASPM for the link */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	list_del(&link->link);
	/* Clock PM is for endpoint device */
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}
out:
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}
1042
1043 /* @pdev: the root port or switch downstream port */
pcie_aspm_pm_state_change(struct pci_dev * pdev)1044 void pcie_aspm_pm_state_change(struct pci_dev *pdev)
1045 {
1046 struct pcie_link_state *link = pdev->link_state;
1047
1048 if (aspm_disabled || !link)
1049 return;
1050 /*
1051 * Devices changed PM state, we should recheck if latency
1052 * meets all functions' requirement
1053 */
1054 down_read(&pci_bus_sem);
1055 mutex_lock(&aspm_lock);
1056 pcie_update_aspm_capable(link->root);
1057 pcie_config_aspm_path(link);
1058 mutex_unlock(&aspm_lock);
1059 up_read(&pci_bus_sem);
1060 }
1061
pcie_aspm_powersave_config_link(struct pci_dev * pdev)1062 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1063 {
1064 struct pcie_link_state *link = pdev->link_state;
1065
1066 if (aspm_disabled || !link)
1067 return;
1068
1069 if (aspm_policy != POLICY_POWERSAVE &&
1070 aspm_policy != POLICY_POWER_SUPERSAVE)
1071 return;
1072
1073 down_read(&pci_bus_sem);
1074 mutex_lock(&aspm_lock);
1075 pcie_config_aspm_path(link);
1076 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1077 mutex_unlock(&aspm_lock);
1078 up_read(&pci_bus_sem);
1079 }
1080
/*
 * __pci_disable_link_state - OR states into the link's disable masks and
 * reconfigure the link accordingly.
 * @pdev: PCI device on the link (endpoint, or the port owning the link)
 * @state: mask of PCIE_LINK_STATE_{L0S,L1,CLKPM} to disable
 * @sem: take pci_bus_sem here (false when the caller already holds it)
 */
static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link;

	if (!pci_is_pcie(pdev))
		return;

	/* If pdev itself owns a link on its secondary side, act on that */
	if (pdev->has_secondary_link)
		parent = pdev;
	if (!parent || !parent->link_state)
		return;

	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link = parent->link_state;
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		link->aspm_disable |= ASPM_STATE_L1;
	/* Re-apply the policy; the disable mask now limits what it allows */
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);
}
1124
/**
 * pci_disable_link_state_locked - like pci_disable_link_state(), for
 * callers that already hold pci_bus_sem (it is not re-acquired here).
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	__pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
1130
/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states.  Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register.  Takes pci_bus_sem for reading; use
 * pci_disable_link_state_locked() if the caller already holds it.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
	__pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
1145
pcie_aspm_set_policy(const char * val,const struct kernel_param * kp)1146 static int pcie_aspm_set_policy(const char *val,
1147 const struct kernel_param *kp)
1148 {
1149 int i;
1150 struct pcie_link_state *link;
1151
1152 if (aspm_disabled)
1153 return -EPERM;
1154 i = sysfs_match_string(policy_str, val);
1155 if (i < 0)
1156 return i;
1157 if (i == aspm_policy)
1158 return 0;
1159
1160 down_read(&pci_bus_sem);
1161 mutex_lock(&aspm_lock);
1162 aspm_policy = i;
1163 list_for_each_entry(link, &link_list, sibling) {
1164 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1165 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1166 }
1167 mutex_unlock(&aspm_lock);
1168 up_read(&pci_bus_sem);
1169 return 0;
1170 }
1171
pcie_aspm_get_policy(char * buffer,const struct kernel_param * kp)1172 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1173 {
1174 int i, cnt = 0;
1175 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1176 if (i == aspm_policy)
1177 cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1178 else
1179 cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1180 cnt += sprintf(buffer + cnt, "\n");
1181 return cnt;
1182 }
1183
/* Expose the policy as module parameter "pcie_aspm.policy" (root-writable) */
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);
1186
1187 #ifdef CONFIG_PCIEASPM_DEBUG
link_state_show(struct device * dev,struct device_attribute * attr,char * buf)1188 static ssize_t link_state_show(struct device *dev,
1189 struct device_attribute *attr,
1190 char *buf)
1191 {
1192 struct pci_dev *pci_device = to_pci_dev(dev);
1193 struct pcie_link_state *link_state = pci_device->link_state;
1194
1195 return sprintf(buf, "%d\n", link_state->aspm_enabled);
1196 }
1197
/*
 * link_state_store - sysfs write: force an ASPM state mask on every link
 * sharing this device's root, bypassing the global policy.
 * @buf: decimal ASPM_STATE_* bitmask (see the defines at the top of the file)
 *
 * Rejected with -EPERM when the OS doesn't own ASPM control, and -EINVAL
 * for bits outside ASPM_STATE_ALL.
 */
static ssize_t link_state_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t n)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link, *root = pdev->link_state->root;
	u32 state;

	if (aspm_disabled)
		return -EPERM;

	if (kstrtouint(buf, 10, &state))
		return -EINVAL;
	/* Only bits within ASPM_STATE_ALL are meaningful */
	if ((state & ~ASPM_STATE_ALL) != 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	/* Apply to the whole hierarchy under this link's root */
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		pcie_config_aspm_link(link, state);
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return n;
}
1226
clk_ctl_show(struct device * dev,struct device_attribute * attr,char * buf)1227 static ssize_t clk_ctl_show(struct device *dev,
1228 struct device_attribute *attr,
1229 char *buf)
1230 {
1231 struct pci_dev *pci_device = to_pci_dev(dev);
1232 struct pcie_link_state *link_state = pci_device->link_state;
1233
1234 return sprintf(buf, "%d\n", link_state->clkpm_enabled);
1235 }
1236
clk_ctl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t n)1237 static ssize_t clk_ctl_store(struct device *dev,
1238 struct device_attribute *attr,
1239 const char *buf,
1240 size_t n)
1241 {
1242 struct pci_dev *pdev = to_pci_dev(dev);
1243 bool state;
1244
1245 if (strtobool(buf, &state))
1246 return -EINVAL;
1247
1248 down_read(&pci_bus_sem);
1249 mutex_lock(&aspm_lock);
1250 pcie_set_clkpm_nocheck(pdev->link_state, state);
1251 mutex_unlock(&aspm_lock);
1252 up_read(&pci_bus_sem);
1253
1254 return n;
1255 }
1256
static DEVICE_ATTR_RW(link_state);
static DEVICE_ATTR_RW(clk_ctl);

/*
 * sysfs group the ASPM attributes live in.  const: it is only ever passed
 * to sysfs_{add,remove}_file_to_group(), which take const char *, so the
 * string belongs in read-only data.
 */
static const char power_group[] = "power";
pcie_aspm_create_sysfs_dev_files(struct pci_dev * pdev)1261 void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
1262 {
1263 struct pcie_link_state *link_state = pdev->link_state;
1264
1265 if (!link_state)
1266 return;
1267
1268 if (link_state->aspm_support)
1269 sysfs_add_file_to_group(&pdev->dev.kobj,
1270 &dev_attr_link_state.attr, power_group);
1271 if (link_state->clkpm_capable)
1272 sysfs_add_file_to_group(&pdev->dev.kobj,
1273 &dev_attr_clk_ctl.attr, power_group);
1274 }
1275
pcie_aspm_remove_sysfs_dev_files(struct pci_dev * pdev)1276 void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
1277 {
1278 struct pcie_link_state *link_state = pdev->link_state;
1279
1280 if (!link_state)
1281 return;
1282
1283 if (link_state->aspm_support)
1284 sysfs_remove_file_from_group(&pdev->dev.kobj,
1285 &dev_attr_link_state.attr, power_group);
1286 if (link_state->clkpm_capable)
1287 sysfs_remove_file_from_group(&pdev->dev.kobj,
1288 &dev_attr_clk_ctl.attr, power_group);
1289 }
1290 #endif
1291
pcie_aspm_disable(char * str)1292 static int __init pcie_aspm_disable(char *str)
1293 {
1294 if (!strcmp(str, "off")) {
1295 aspm_policy = POLICY_DEFAULT;
1296 aspm_disabled = 1;
1297 aspm_support_enabled = false;
1298 printk(KERN_INFO "PCIe ASPM is disabled\n");
1299 } else if (!strcmp(str, "force")) {
1300 aspm_force = 1;
1301 printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
1302 }
1303 return 1;
1304 }
1305
1306 __setup("pcie_aspm=", pcie_aspm_disable);
1307
pcie_no_aspm(void)1308 void pcie_no_aspm(void)
1309 {
1310 /*
1311 * Disabling ASPM is intended to prevent the kernel from modifying
1312 * existing hardware state, not to clear existing state. To that end:
1313 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1314 * (b) prevent userspace from changing policy
1315 */
1316 if (!aspm_force) {
1317 aspm_policy = POLICY_DEFAULT;
1318 aspm_disabled = 1;
1319 }
1320 }
1321
/*
 * pcie_aspm_support_enabled - report whether ASPM support is active,
 * i.e. it has not been turned off via "pcie_aspm=off".
 */
bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}
EXPORT_SYMBOL(pcie_aspm_support_enabled);
1327