// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
	int res; \
	unsigned long flags; \
	u32 data = 0; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	pci_lock_config(flags); \
	res = bus->ops->read(bus, devfn, pos, len, &data); \
	*value = (type)data; \
	pci_unlock_config(flags); \
	return res; \
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
	int res; \
	unsigned long flags; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	pci_lock_config(flags); \
	res = bus->ops->write(bus, devfn, pos, len, value); \
	pci_unlock_config(flags); \
	return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)
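
/*
 * For reference, PCI_OP_READ(word, u16, 2) above expands to roughly the
 * following (a sketch, modulo whitespace and further expansion of
 * pci_lock_config()/pci_unlock_config()):
 *
 *	int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
 *				     int pos, u16 *value)
 *	{
 *		int res;
 *		unsigned long flags;
 *		u32 data = 0;
 *		if (pos & 1)
 *			return PCIBIOS_BAD_REGISTER_NUMBER;
 *		pci_lock_config(flags);
 *		res = bus->ops->read(bus, devfn, pos, 2, &data);
 *		*value = (u16)data;
 *		pci_unlock_config(flags);
 *		return res;
 *	}
 */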

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
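
/*
 * pci_generic_config_read()/pci_generic_config_write() are meant for host
 * bridge drivers whose config space is memory-mapped and reachable through
 * a ->map_bus() callback.  A minimal sketch of such usage, where
 * foo_map_bus() is a hypothetical driver callback returning the mapped
 * address for a given devfn/offset:
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus	= foo_map_bus,
 *		.read		= pci_generic_config_read,
 *		.write		= pci_generic_config_write,
 *	};
 */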

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant. For example, software may perform a 16-bit
	 * write. If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write. If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
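	/*
	 * Worked example of the read-modify-write below: for a 2-byte write
	 * at where = 0x06 (so where & 0x3 == 2, size == 2):
	 *
	 *	mask = ~(0xffff << 16) = 0x0000ffff
	 *	tmp  = readl(addr) & 0x0000ffff;   keep the low 16 bits
	 *	tmp |= val << 16;                  merge in the new high 16 bits
	 */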
	if (!bus->unsafe_warn) {
		dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			 size, pci_domain_nr(bus), bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn), where);
		bus->unsafe_warn = 1;
	}

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
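
/*
 * A minimal sketch of a caller: since pci_bus_set_ops() returns the previous
 * ops, a caller that interposes on config accesses (e.g. for error injection)
 * typically saves them so it can restore the bus later.  my_intercept_ops is
 * a hypothetical struct pci_ops:
 *
 *	static struct pci_ops *saved_ops;
 *
 *	saved_ops = pci_bus_set_ops(bus, &my_intercept_ops);
 *	...
 *	pci_bus_set_ops(bus, saved_ops);
 */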

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
	__must_hold(&pci_lock)
{
	do {
		raw_spin_unlock_irq(&pci_lock);
		wait_event(pci_cfg_wait, !dev->block_cfg_access);
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
	(struct pci_dev *dev, int pos, type *val) \
{ \
	int ret = PCIBIOS_SUCCESSFUL; \
	u32 data = -1; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->read(dev->bus, dev->devfn, \
				  pos, sizeof(type), &data); \
	raw_spin_unlock_irq(&pci_lock); \
	*val = (type)data; \
	return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
	(struct pci_dev *dev, int pos, type val) \
{ \
	int ret = PCIBIOS_SUCCESSFUL; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->write(dev->bus, dev->devfn, \
				   pos, sizeof(type), val); \
	raw_spin_unlock_irq(&pci_lock); \
	return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
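
/*
 * A minimal sketch of how a driver brackets an operation during which
 * userspace config accesses must be held off (do_unsafe_thing() is a
 * hypothetical placeholder, e.g. a BIST or reset sequence):
 *
 *	pci_cfg_access_lock(pdev);
 *	do_unsafe_thing(pdev);
 *	pci_cfg_access_unlock(pdev);
 */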

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
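
/*
 * A minimal usage sketch of the read-modify-write helpers above, here
 * clearing the ASPM Control field in the Link Control register of the
 * PCI Express Capability (illustrative only, not taken from a particular
 * driver):
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC, 0);
 */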

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
			  u32 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
			   u32 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);