/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

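/*
 * Illustrative usage sketch (not part of the original file): a driver would
 * typically map a memory BAR from its probe routine after enabling the
 * device, and unmap it with iounmap() on teardown.  "priv" is an assumed
 * driver-private structure and BAR 0 is an assumed register BAR.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	priv->regs = regs;
 */
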
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *  %PCI_CAP_ID_SLOTID		Slot Identification
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP		CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX		PCI-X
 *  %PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

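/*
 * Illustrative usage sketch (not part of the original file): locating the
 * power management capability and reading a register relative to the
 * returned offset, which is how callers throughout this file use it.
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */
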
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

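/*
 * Illustrative usage sketch (not part of the original file): checking for
 * the Advanced Error Reporting extended capability before touching its
 * registers.  "status" is an assumed local u32.
 *
 *	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
 *				      &status);
 */
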
/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure whose resources are to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

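/*
 * Illustrative usage sketch (not part of the original file): a legacy-style
 * suspend handler saving state and entering the power state suggested by
 * pci_choose_state() below.  "my_suspend" is an assumed driver callback.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *	}
 */
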
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

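/*
 * Illustrative usage sketch (not part of the original file): the usual
 * pairing of pci_save_state()/pci_restore_state() in a legacy resume
 * callback (the matching suspend side is sketched above near
 * pci_set_power_state()).  "my_resume" is an assumed driver callback.
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */
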
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

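/*
 * Illustrative usage sketch (not part of the original file): a caller such
 * as a device-reset path can snapshot the saved state, perform a disruptive
 * operation, then reload and free the snapshot in one call.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset or reinitialize the device here ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */
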
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

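/*
 * Illustrative usage sketch (not part of the original file): the canonical
 * probe sequence built on pci_enable_device(), with a matching disable in
 * the error path.  "my_probe" and "my_driver" are assumed names.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "my_driver");
 *		if (err) {
 *			pci_disable_device(pdev);
 *			return err;
 *		}
 *		...
 *	}
 */
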
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

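/*
 * Illustrative usage sketch (not part of the original file): with the
 * managed variant, pcim_release() disables the device automatically on
 * driver detach, so the probe error path needs no explicit
 * pci_disable_device().  "my_probe" is an assumed callback name.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		... later failures need no matching disable ...
 *	}
 */
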
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

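/*
 * Illustrative usage sketch (not part of the original file): a network
 * driver arming wake-on-LAN at suspend time.  "priv->wol_enabled" is an
 * assumed piece of driver-private state.
 *
 *	pci_wake_from_d3(pdev, priv->wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */
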
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

1731/**
1732 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1733 * @dev: Device to handle.
1734 *
1735 * Choose the power state appropriate for the device depending on whether
1736 * it can wake up the system and/or is power manageable by the platform
1737 * (PCI_D3hot is the default) and put the device into that state.
1738 */
1739int pci_prepare_to_sleep(struct pci_dev *dev)
1740{
1741 pci_power_t target_state = pci_target_state(dev);
1742 int error;
1743
1744 if (target_state == PCI_POWER_ERROR)
1745 return -EIO;
1746
8efb8c76 1747 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1748
404cc2d8
RW
1749 error = pci_set_power_state(dev, target_state);
1750
1751 if (error)
1752 pci_enable_wake(dev, target_state, false);
1753
1754 return error;
1755}
1756
1757/**
443bd1c4 1758 * pci_back_from_sleep - turn PCI device on during system-wide transition to the working state
404cc2d8
RW
1759 * @dev: Device to handle.
1760 *
88393161 1761 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1762 */
1763int pci_back_from_sleep(struct pci_dev *dev)
1764{
1765 pci_enable_wake(dev, PCI_D0, false);
1766 return pci_set_power_state(dev, PCI_D0);
1767}
1768
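/*
 * Illustrative sketch: a legacy suspend/resume pair built on the two
 * helpers above. Error handling is trimmed and the "example_*" names
 * are hypothetical.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);
}

static int example_resume(struct pci_dev *pdev)
{
	int error = pci_back_from_sleep(pdev);

	pci_restore_state(pdev);
	return error;
}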
6cbf8214
RW
1769/**
1770 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1771 * @dev: PCI device being suspended.
1772 *
1773 * Prepare @dev to generate wake-up events at run time and put it into a low
1774 * power state.
1775 */
1776int pci_finish_runtime_suspend(struct pci_dev *dev)
1777{
1778 pci_power_t target_state = pci_target_state(dev);
1779 int error;
1780
1781 if (target_state == PCI_POWER_ERROR)
1782 return -EIO;
1783
1784 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1785
1786 error = pci_set_power_state(dev, target_state);
1787
1788 if (error)
1789 __pci_enable_wake(dev, target_state, true, false);
1790
1791 return error;
1792}
1793
b67ea761
RW
1794/**
1795 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1796 * @dev: Device to check.
1797 *
1798 * Return true if the device itself is capable of generating wake-up events
1799 * (through the platform or using the native PCIe PME) or if the device supports
1800 * PME and one of its upstream bridges can generate wake-up events.
1801 */
1802bool pci_dev_run_wake(struct pci_dev *dev)
1803{
1804 struct pci_bus *bus = dev->bus;
1805
1806 if (device_run_wake(&dev->dev))
1807 return true;
1808
1809 if (!dev->pme_support)
1810 return false;
1811
1812 while (bus->parent) {
1813 struct pci_dev *bridge = bus->self;
1814
1815 if (device_run_wake(&bridge->dev))
1816 return true;
1817
1818 bus = bus->parent;
1819 }
1820
1821 /* We have reached the root bus. */
1822 if (bus->bridge)
1823 return device_run_wake(bus->bridge);
1824
1825 return false;
1826}
1827EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1828
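/*
 * Illustrative sketch of roughly what the PCI core's runtime-suspend
 * path does with the check above: refuse to enter a low-power state
 * when the device could not wake itself back up.
 */
static int example_runtime_suspend(struct pci_dev *pdev)
{
	if (!pci_dev_run_wake(pdev))
		return -EBUSY;	/* stay in D0; wake-up events would be lost */

	return pci_finish_runtime_suspend(pdev);
}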
eb9d0fe4
RW
1829/**
1830 * pci_pm_init - Initialize PM functions of given PCI device
1831 * @dev: PCI device to handle.
1832 */
1833void pci_pm_init(struct pci_dev *dev)
1834{
1835 int pm;
1836 u16 pmc;
1da177e4 1837
bb910a70 1838 pm_runtime_forbid(&dev->dev);
a1e4d72c 1839 device_enable_async_suspend(&dev->dev);
e80bb09d 1840 dev->wakeup_prepared = false;
bb910a70 1841
337001b6
RW
1842 dev->pm_cap = 0;
1843
eb9d0fe4
RW
1844 /* find PCI PM capability in list */
1845 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1846 if (!pm)
50246dd4 1847 return;
eb9d0fe4
RW
1848 /* Check device's ability to generate PME# */
1849 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1850
eb9d0fe4
RW
1851 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1852 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1853 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1854 return;
eb9d0fe4
RW
1855 }
1856
337001b6 1857 dev->pm_cap = pm;
1ae861e6 1858 dev->d3_delay = PCI_PM_D3_WAIT;
337001b6
RW
1859
1860 dev->d1_support = false;
1861 dev->d2_support = false;
1862 if (!pci_no_d1d2(dev)) {
c9ed77ee 1863 if (pmc & PCI_PM_CAP_D1)
337001b6 1864 dev->d1_support = true;
c9ed77ee 1865 if (pmc & PCI_PM_CAP_D2)
337001b6 1866 dev->d2_support = true;
c9ed77ee
BH
1867
1868 if (dev->d1_support || dev->d2_support)
1869 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1870 dev->d1_support ? " D1" : "",
1871 dev->d2_support ? " D2" : "");
337001b6
RW
1872 }
1873
1874 pmc &= PCI_PM_CAP_PME_MASK;
1875 if (pmc) {
10c3d71d
BH
1876 dev_printk(KERN_DEBUG, &dev->dev,
1877 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1878 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1879 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1880 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1881 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1882 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1883 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1884 dev->pme_poll = true;
eb9d0fe4
RW
1885 /*
1886 * Make device's PM flags reflect the wake-up capability, but
1887 * let the user space enable it to wake up the system as needed.
1888 */
1889 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1890 /* Disable the PME# generation functionality */
337001b6
RW
1891 pci_pme_active(dev, false);
1892 } else {
1893 dev->pme_support = 0;
eb9d0fe4 1894 }
1da177e4
LT
1895}
1896
eb9c39d0
JB
1897/**
1898 * platform_pci_wakeup_init - init platform wakeup if present
1899 * @dev: PCI device
1900 *
1901 * Some devices don't have PCI PM caps but can still generate wakeup
1902 * events through platform methods (like ACPI events). If @dev supports
1903 * platform wakeup events, set the device flag to indicate as much. This
1904 * may be redundant if the device also supports PCI PM caps, but double
1905 * initialization should be safe in that case.
1906 */
1907void platform_pci_wakeup_init(struct pci_dev *dev)
1908{
1909 if (!platform_pci_can_wakeup(dev))
1910 return;
1911
1912 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1913 platform_pci_sleep_wake(dev, false);
1914}
1915
34a4876e
YL
1916static void pci_add_saved_cap(struct pci_dev *pci_dev,
1917 struct pci_cap_saved_state *new_cap)
1918{
1919 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1920}
1921
63f4898a
RW
1922/**
1923 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1924 * @dev: the PCI device
1925 * @cap: the capability to allocate the buffer for
1926 * @size: requested size of the buffer
1927 */
1928static int pci_add_cap_save_buffer(
1929 struct pci_dev *dev, char cap, unsigned int size)
1930{
1931 int pos;
1932 struct pci_cap_saved_state *save_state;
1933
1934 pos = pci_find_capability(dev, cap);
1935 if (pos <= 0)
1936 return 0;
1937
1938 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1939 if (!save_state)
1940 return -ENOMEM;
1941
24a4742f
AW
1942 save_state->cap.cap_nr = cap;
1943 save_state->cap.size = size;
63f4898a
RW
1944 pci_add_saved_cap(dev, save_state);
1945
1946 return 0;
1947}
1948
1949/**
1950 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1951 * @dev: the PCI device
1952 */
1953void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1954{
1955 int error;
1956
89858517
YZ
1957 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1958 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1959 if (error)
1960 dev_err(&dev->dev,
1961 "unable to preallocate PCI Express save buffer\n");
1962
1963 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1964 if (error)
1965 dev_err(&dev->dev,
1966 "unable to preallocate PCI-X save buffer\n");
1967}
1968
f796841e
YL
1969void pci_free_cap_save_buffers(struct pci_dev *dev)
1970{
1971 struct pci_cap_saved_state *tmp;
1972 struct hlist_node *pos, *n;
1973
1974 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1975 kfree(tmp);
1976}
1977
58c3a727
YZ
1978/**
1979 * pci_enable_ari - enable ARI forwarding if hardware supports it
1980 * @dev: the PCI device
1981 */
1982void pci_enable_ari(struct pci_dev *dev)
1983{
1984 int pos;
1985 u32 cap;
864d296c 1986 u16 flags, ctrl;
8113587c 1987 struct pci_dev *bridge;
58c3a727 1988
6748dcc2 1989 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
1990 return;
1991
8113587c
ZY
1992 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1993 if (!pos)
58c3a727
YZ
1994 return;
1995
8113587c 1996 bridge = dev->bus->self;
5f4d91a1 1997 if (!bridge || !pci_is_pcie(bridge))
8113587c
ZY
1998 return;
1999
06a1cbaf 2000 pos = pci_pcie_cap(bridge);
58c3a727
YZ
2001 if (!pos)
2002 return;
2003
864d296c
CW
2004 /* ARI is a PCIe v2 feature */
2005 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2006 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2007 return;
2008
8113587c 2009 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2010 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2011 return;
2012
8113587c 2013 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 2014 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 2015 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 2016
8113587c 2017 bridge->ari_enabled = 1;
58c3a727
YZ
2018}
2019
b48d4425
JB
2020/**
2021 * pci_enable_ido - enable ID-based ordering on a device
2022 * @dev: the PCI device
2023 * @type: which types of IDO to enable
2024 *
2025 * Enable ID-based ordering on @dev. @type can contain the bits
2026 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2027 * which types of transactions are allowed to be re-ordered.
2028 */
2029void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2030{
2031 int pos;
2032 u16 ctrl;
2033
2034 pos = pci_pcie_cap(dev);
2035 if (!pos)
2036 return;
2037
2038 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2039 if (type & PCI_EXP_IDO_REQUEST)
2040 ctrl |= PCI_EXP_IDO_REQ_EN;
2041 if (type & PCI_EXP_IDO_COMPLETION)
2042 ctrl |= PCI_EXP_IDO_CMP_EN;
2043 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2044}
2045EXPORT_SYMBOL(pci_enable_ido);
2046
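/*
 * Illustrative: a latency-sensitive endpoint driver might opt in to
 * ID-based ordering for both transaction types at probe time. The
 * "example_enable_ido" helper is hypothetical.
 */
static void example_enable_ido(struct pci_dev *pdev)
{
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}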
2047/**
2048 * pci_disable_ido - disable ID-based ordering on a device
2049 * @dev: the PCI device
2050 * @type: which types of IDO to disable
2051 */
2052void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2053{
2054 int pos;
2055 u16 ctrl;
2056
2057 if (!pci_is_pcie(dev))
2058 return;
2059
2060 pos = pci_pcie_cap(dev);
2061 if (!pos)
2062 return;
2063
2064 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2065 if (type & PCI_EXP_IDO_REQUEST)
2066 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2067 if (type & PCI_EXP_IDO_COMPLETION)
2068 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2069 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2070}
2071EXPORT_SYMBOL(pci_disable_ido);
2072
48a92a81
JB
2073/**
2074 * pci_enable_obff - enable optimized buffer flush/fill
2075 * @dev: PCI device
2076 * @type: type of signaling to use
2077 *
2078 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2079 * signaling if possible, falling back to message signaling only if
2080 * WAKE# isn't supported. @type should indicate whether the PCIe link
2081 * should be brought out of L0s or L1 to send the message. It should be
2082 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2083 *
2084 * If your device can benefit from receiving all messages, even at the
2085 * power cost of bringing the link back up from a low power state, use
2086 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2087 * preferred type).
2088 *
2089 * RETURNS:
2090 * Zero on success, appropriate error number on failure.
2091 */
2092int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2093{
2094 int pos;
2095 u32 cap;
2096 u16 ctrl;
2097 int ret;
2098
2099 if (!pci_is_pcie(dev))
2100 return -ENOTSUPP;
2101
2102 pos = pci_pcie_cap(dev);
2103 if (!pos)
2104 return -ENOTSUPP;
2105
2106 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2107 if (!(cap & PCI_EXP_OBFF_MASK))
2108 return -ENOTSUPP; /* no OBFF support at all */
2109
2110 /* Make sure the topology supports OBFF as well */
2111 if (dev->bus && dev->bus->self) {
2112 ret = pci_enable_obff(dev->bus->self, type);
2113 if (ret)
2114 return ret;
2115 }
2116
2117 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2118 if (cap & PCI_EXP_OBFF_WAKE)
2119 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2120 else {
2121 switch (type) {
2122 case PCI_EXP_OBFF_SIGNAL_L0:
2123 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2124 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2125 break;
2126 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2127 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2128 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2129 break;
2130 default:
2131 WARN(1, "bad OBFF signal type\n");
2132 return -ENOTSUPP;
2133 }
2134 }
2135 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2136
2137 return 0;
2138}
2139EXPORT_SYMBOL(pci_enable_obff);
2140
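/*
 * Illustrative: request the power-friendly L0 signaling and carry on
 * without OBFF when the capability is missing somewhere in the path.
 * "example_setup_obff" is a hypothetical helper.
 */
static void example_setup_obff(struct pci_dev *pdev)
{
	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
		dev_info(&pdev->dev, "OBFF unavailable, continuing without it\n");
}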
2141/**
2142 * pci_disable_obff - disable optimized buffer flush/fill
2143 * @dev: PCI device
2144 *
2145 * Disable OBFF on @dev.
2146 */
2147void pci_disable_obff(struct pci_dev *dev)
2148{
2149 int pos;
2150 u16 ctrl;
2151
2152 if (!pci_is_pcie(dev))
2153 return;
2154
2155 pos = pci_pcie_cap(dev);
2156 if (!pos)
2157 return;
2158
2159 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2160 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2161 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2162}
2163EXPORT_SYMBOL(pci_disable_obff);
2164
51c2e0a7
JB
2165/**
2166 * pci_ltr_supported - check whether a device supports LTR
2167 * @dev: PCI device
2168 *
2169 * RETURNS:
2170 * True if @dev supports latency tolerance reporting, false otherwise.
2171 */
2172bool pci_ltr_supported(struct pci_dev *dev)
2173{
2174 int pos;
2175 u32 cap;
2176
2177 if (!pci_is_pcie(dev))
2178 return false;
2179
2180 pos = pci_pcie_cap(dev);
2181 if (!pos)
2182 return false;
2183
2184 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2185
2186 return cap & PCI_EXP_DEVCAP2_LTR;
2187}
2188EXPORT_SYMBOL(pci_ltr_supported);
2189
2190/**
2191 * pci_enable_ltr - enable latency tolerance reporting
2192 * @dev: PCI device
2193 *
2194 * Enable LTR on @dev if possible, which means enabling it first on
2195 * upstream ports.
2196 *
2197 * RETURNS:
2198 * Zero on success, errno on failure.
2199 */
2200int pci_enable_ltr(struct pci_dev *dev)
2201{
2202 int pos;
2203 u16 ctrl;
2204 int ret;
2205
2206 if (!pci_ltr_supported(dev))
2207 return -ENOTSUPP;
2208
2209 pos = pci_pcie_cap(dev);
2210 if (!pos)
2211 return -ENOTSUPP;
2212
2213 /* Only primary function can enable/disable LTR */
2214 if (PCI_FUNC(dev->devfn) != 0)
2215 return -EINVAL;
2216
2217 /* Enable upstream ports first */
2218 if (dev->bus && dev->bus->self) {
2219 ret = pci_enable_ltr(dev->bus->self);
2220 if (ret)
2221 return ret;
2222 }
2223
2224 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2225 ctrl |= PCI_EXP_LTR_EN;
2226 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2227
2228 return 0;
2229}
2230EXPORT_SYMBOL(pci_enable_ltr);
2231
2232/**
2233 * pci_disable_ltr - disable latency tolerance reporting
2234 * @dev: PCI device
2235 */
2236void pci_disable_ltr(struct pci_dev *dev)
2237{
2238 int pos;
2239 u16 ctrl;
2240
2241 if (!pci_ltr_supported(dev))
2242 return;
2243
2244 pos = pci_pcie_cap(dev);
2245 if (!pos)
2246 return;
2247
2248 /* Only primary function can enable/disable LTR */
2249 if (PCI_FUNC(dev->devfn) != 0)
2250 return;
2251
2252 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2253 ctrl &= ~PCI_EXP_LTR_EN;
2254 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2255}
2256EXPORT_SYMBOL(pci_disable_ltr);
2257
2258static int __pci_ltr_scale(int *val)
2259{
2260 int scale = 0;
2261
2262 while (*val > 1023) {
2263 *val = (*val + 31) / 32;
2264 scale++;
2265 }
2266 return scale;
2267}
2268
2269/**
2270 * pci_set_ltr - set LTR latency values
2271 * @dev: PCI device
2272 * @snoop_lat_ns: snoop latency in nanoseconds
2273 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2274 *
2275 * Figure out the scale and set the LTR values accordingly.
2276 */
2277int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2278{
2279 int pos, ret, snoop_scale, nosnoop_scale;
2280 u16 val;
2281
2282 if (!pci_ltr_supported(dev))
2283 return -ENOTSUPP;
2284
2285 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2286 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2287
2288 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2289 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2290 return -EINVAL;
2291
2292 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2293 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2294 return -EINVAL;
2295
2296 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2297 if (!pos)
2298 return -ENOTSUPP;
2299
2300 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2301 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2302 if (ret)
2303 return -EIO;
2304
2305 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2306 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2307 if (ret)
2308 return -EIO;
2309
2310 return 0;
2311}
2312EXPORT_SYMBOL(pci_set_ltr);
2313
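/*
 * Illustrative: enable LTR up the hierarchy and advertise a tolerance
 * of roughly 70us. __pci_ltr_scale() encodes 70000ns as value 69 with
 * scale 2 (69 * 32^2 = 70656ns); the encoding rounds up in powers of 32.
 * "example_setup_ltr" is a hypothetical helper.
 */
static void example_setup_ltr(struct pci_dev *pdev)
{
	if (!pci_enable_ltr(pdev))
		pci_set_ltr(pdev, 70000, 70000);
}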
5d990b62
CW
2314static int pci_acs_enable;
2315
2316/**
2317 * pci_request_acs - ask for ACS to be enabled if supported
2318 */
2319void pci_request_acs(void)
2320{
2321 pci_acs_enable = 1;
2322}
2323
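/*
 * Illustrative: an IOMMU driver that needs isolation would call this
 * once during early init, before the PCI core scans devices and applies
 * pci_enable_acs() to each of them. "example_iommu_init" is hypothetical.
 */
static int __init example_iommu_init(void)
{
	pci_request_acs();
	return 0;
}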
ae21ee65
AK
2324/**
2325 * pci_enable_acs - enable ACS if hardware supports it
2326 * @dev: the PCI device
2327 */
2328void pci_enable_acs(struct pci_dev *dev)
2329{
2330 int pos;
2331 u16 cap;
2332 u16 ctrl;
2333
5d990b62
CW
2334 if (!pci_acs_enable)
2335 return;
2336
5f4d91a1 2337 if (!pci_is_pcie(dev))
ae21ee65
AK
2338 return;
2339
2340 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2341 if (!pos)
2342 return;
2343
2344 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2345 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2346
2347 /* Source Validation */
2348 ctrl |= (cap & PCI_ACS_SV);
2349
2350 /* P2P Request Redirect */
2351 ctrl |= (cap & PCI_ACS_RR);
2352
2353 /* P2P Completion Redirect */
2354 ctrl |= (cap & PCI_ACS_CR);
2355
2356 /* Upstream Forwarding */
2357 ctrl |= (cap & PCI_ACS_UF);
2358
2359 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2360}
2361
57c2cf71
BH
2362/**
2363 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2364 * @dev: the PCI device
2365 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2366 *
2367 * Perform INTx swizzling for a device behind one level of bridge. This is
2368 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2369 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2370 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2371 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2372 */
3df425f3 2373u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2374{
46b952a3
MW
2375 int slot;
2376
2377 if (pci_ari_enabled(dev->bus))
2378 slot = 0;
2379 else
2380 slot = PCI_SLOT(dev->devfn);
2381
2382 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2383}
2384
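/*
 * Worked example: a device in slot 3 asserting INTB (pin 2) is seen one
 * bridge up as pin ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA.
 */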
1da177e4
LT
2385int
2386pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2387{
2388 u8 pin;
2389
514d207d 2390 pin = dev->pin;
1da177e4
LT
2391 if (!pin)
2392 return -1;
878f2e50 2393
8784fd4d 2394 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2395 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2396 dev = dev->bus->self;
2397 }
2398 *bridge = dev;
2399 return pin;
2400}
2401
68feac87
BH
2402/**
2403 * pci_common_swizzle - swizzle INTx all the way to root bridge
2404 * @dev: the PCI device
2405 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2406 *
2407 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2408 * bridges all the way up to a PCI root bus.
2409 */
2410u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2411{
2412 u8 pin = *pinp;
2413
1eb39487 2414 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2415 pin = pci_swizzle_interrupt_pin(dev, pin);
2416 dev = dev->bus->self;
2417 }
2418 *pinp = pin;
2419 return PCI_SLOT(dev->devfn);
2420}
2421
1da177e4
LT
2422/**
2423 * pci_release_region - Release a PCI BAR
2424 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2425 * @bar: BAR to release
2426 *
2427 * Releases the PCI I/O and memory resources previously reserved by a
2428 * successful call to pci_request_region. Call this function only
2429 * after all use of the PCI regions has ceased.
2430 */
2431void pci_release_region(struct pci_dev *pdev, int bar)
2432{
9ac7849e
TH
2433 struct pci_devres *dr;
2434
1da177e4
LT
2435 if (pci_resource_len(pdev, bar) == 0)
2436 return;
2437 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2438 release_region(pci_resource_start(pdev, bar),
2439 pci_resource_len(pdev, bar));
2440 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2441 release_mem_region(pci_resource_start(pdev, bar),
2442 pci_resource_len(pdev, bar));
9ac7849e
TH
2443
2444 dr = find_pci_dr(pdev);
2445 if (dr)
2446 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2447}
2448
2449/**
f5ddcac4 2450 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2451 * @pdev: PCI device whose resources are to be reserved
2452 * @bar: BAR to be reserved
2453 * @res_name: Name to be associated with resource.
f5ddcac4 2454 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2455 *
2456 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2457 * being reserved by owner @res_name. Do not access any
2458 * address inside the PCI regions unless this call returns
2459 * successfully.
2460 *
f5ddcac4
RD
2461 * If @exclusive is set, then the region is marked so that userspace
2462 * is explicitly not allowed to map the resource via /dev/mem or
2463 * sysfs MMIO access.
2464 *
1da177e4
LT
2465 * Returns 0 on success, or %EBUSY on error. A warning
2466 * message is also printed on failure.
2467 */
e8de1481
AV
2468static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2469 int exclusive)
1da177e4 2470{
9ac7849e
TH
2471 struct pci_devres *dr;
2472
1da177e4
LT
2473 if (pci_resource_len(pdev, bar) == 0)
2474 return 0;
2475
2476 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2477 if (!request_region(pci_resource_start(pdev, bar),
2478 pci_resource_len(pdev, bar), res_name))
2479 goto err_out;
2480 }
2481 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2482 if (!__request_mem_region(pci_resource_start(pdev, bar),
2483 pci_resource_len(pdev, bar), res_name,
2484 exclusive))
1da177e4
LT
2485 goto err_out;
2486 }
9ac7849e
TH
2487
2488 dr = find_pci_dr(pdev);
2489 if (dr)
2490 dr->region_mask |= 1 << bar;
2491
1da177e4
LT
2492 return 0;
2493
2494err_out:
c7dabef8 2495 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2496 &pdev->resource[bar]);
1da177e4
LT
2497 return -EBUSY;
2498}
2499
e8de1481 2500/**
f5ddcac4 2501 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2502 * @pdev: PCI device whose resources are to be reserved
2503 * @bar: BAR to be reserved
f5ddcac4 2504 * @res_name: Name to be associated with resource
e8de1481 2505 *
f5ddcac4 2506 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2507 * being reserved by owner @res_name. Do not access any
2508 * address inside the PCI regions unless this call returns
2509 * successfully.
2510 *
2511 * Returns 0 on success, or %EBUSY on error. A warning
2512 * message is also printed on failure.
2513 */
2514int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2515{
2516 return __pci_request_region(pdev, bar, res_name, 0);
2517}
2518
2519/**
2520 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2521 * @pdev: PCI device whose resources are to be reserved
2522 * @bar: BAR to be reserved
2523 * @res_name: Name to be associated with resource.
2524 *
2525 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2526 * being reserved by owner @res_name. Do not access any
2527 * address inside the PCI regions unless this call returns
2528 * successfully.
2529 *
2530 * Returns 0 on success, or %EBUSY on error. A warning
2531 * message is also printed on failure.
2532 *
2533 * The key difference that _exclusive makes is that userspace is
2534 * explicitly not allowed to map the resource via /dev/mem or
2535 * sysfs.
2536 */
2537int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2538{
2539 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2540}
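
/*
 * Illustrative sketch of the usual probe-time pattern: claim a BAR by
 * name before touching it and release it when done. The "example" names
 * are hypothetical.
 */
static int example_claim_bar0(struct pci_dev *pdev)
{
	int error = pci_request_region(pdev, 0, "example");

	if (error)
		return error;
	/* ... ioremap and program the BAR here ... */
	pci_release_region(pdev, 0);
	return 0;
}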
c87deff7
HS
2541/**
2542 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2543 * @pdev: PCI device whose resources were previously reserved
2544 * @bars: Bitmask of BARs to be released
2545 *
2546 * Release selected PCI I/O and memory resources previously reserved.
2547 * Call this function only after all use of the PCI regions has ceased.
2548 */
2549void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2550{
2551 int i;
2552
2553 for (i = 0; i < 6; i++)
2554 if (bars & (1 << i))
2555 pci_release_region(pdev, i);
2556}
2557
e8de1481
AV
2558int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2559 const char *res_name, int excl)
c87deff7
HS
2560{
2561 int i;
2562
2563 for (i = 0; i < 6; i++)
2564 if (bars & (1 << i))
e8de1481 2565 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2566 goto err_out;
2567 return 0;
2568
2569err_out:
2570 while (--i >= 0)
2571 if (bars & (1 << i))
2572 pci_release_region(pdev, i);
2573
2574 return -EBUSY;
2575}
1da177e4 2576
e8de1481
AV
2577
2578/**
2579 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2580 * @pdev: PCI device whose resources are to be reserved
2581 * @bars: Bitmask of BARs to be requested
2582 * @res_name: Name to be associated with resource
2583 */
2584int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2585 const char *res_name)
2586{
2587 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2588}
2589
2590int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2591 int bars, const char *res_name)
2592{
2593 return __pci_request_selected_regions(pdev, bars, res_name,
2594 IORESOURCE_EXCLUSIVE);
2595}
2596
1da177e4
LT
2597/**
2598 * pci_release_regions - Release reserved PCI I/O and memory resources
2599 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2600 *
2601 * Releases all PCI I/O and memory resources previously reserved by a
2602 * successful call to pci_request_regions. Call this function only
2603 * after all use of the PCI regions has ceased.
2604 */
2605
2606void pci_release_regions(struct pci_dev *pdev)
2607{
c87deff7 2608 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2609}
2610
2611/**
2612 * pci_request_regions - Reserve PCI I/O and memory resources
2613 * @pdev: PCI device whose resources are to be reserved
2614 * @res_name: Name to be associated with resource.
2615 *
2616 * Mark all PCI regions associated with PCI device @pdev as
2617 * being reserved by owner @res_name. Do not access any
2618 * address inside the PCI regions unless this call returns
2619 * successfully.
2620 *
2621 * Returns 0 on success, or %EBUSY on error. A warning
2622 * message is also printed on failure.
2623 */
3c990e92 2624int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2625{
c87deff7 2626 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2627}
2628
e8de1481
AV
2629/**
2630 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2631 * @pdev: PCI device whose resources are to be reserved
2632 * @res_name: Name to be associated with resource.
2633 *
2634 * Mark all PCI regions associated with PCI device @pdev as
2635 * being reserved by owner @res_name. Do not access any
2636 * address inside the PCI regions unless this call returns
2637 * successfully.
2638 *
2639 * pci_request_regions_exclusive() will mark the region so that
2640 * /dev/mem and the sysfs MMIO access will not be allowed.
2641 *
2642 * Returns 0 on success, or %EBUSY on error. A warning
2643 * message is also printed on failure.
2644 */
2645int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2646{
2647 return pci_request_selected_regions_exclusive(pdev,
2648 ((1 << 6) - 1), res_name);
2649}
2650
6a479079
BH
2651static void __pci_set_master(struct pci_dev *dev, bool enable)
2652{
2653 u16 old_cmd, cmd;
2654
2655 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2656 if (enable)
2657 cmd = old_cmd | PCI_COMMAND_MASTER;
2658 else
2659 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2660 if (cmd != old_cmd) {
2661 dev_dbg(&dev->dev, "%s bus mastering\n",
2662 enable ? "enabling" : "disabling");
2663 pci_write_config_word(dev, PCI_COMMAND, cmd);
2664 }
2665 dev->is_busmaster = enable;
2666}
e8de1481 2667
96c55900
MS
2668/**
2669 * pcibios_set_master - enable PCI bus-mastering for device dev
2670 * @dev: the PCI device to enable
2671 *
2672 * Enables PCI bus-mastering for the device. This is the default
2673 * implementation. Architecture specific implementations can override
2674 * this if necessary.
2675 */
2676void __weak pcibios_set_master(struct pci_dev *dev)
2677{
2678 u8 lat;
2679
f676678f
MS
2680 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2681 if (pci_is_pcie(dev))
2682 return;
2683
96c55900
MS
2684 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2685 if (lat < 16)
2686 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2687 else if (lat > pcibios_max_latency)
2688 lat = pcibios_max_latency;
2689 else
2690 return;
2691 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2692 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2693}
2694
1da177e4
LT
2695/**
2696 * pci_set_master - enables bus-mastering for device dev
2697 * @dev: the PCI device to enable
2698 *
2699 * Enables bus-mastering on the device and calls pcibios_set_master()
2700 * to do the needed arch specific settings.
2701 */
6a479079 2702void pci_set_master(struct pci_dev *dev)
1da177e4 2703{
6a479079 2704 __pci_set_master(dev, true);
1da177e4
LT
2705 pcibios_set_master(dev);
2706}
2707
6a479079
BH
2708/**
2709 * pci_clear_master - disables bus-mastering for device dev
2710 * @dev: the PCI device to disable
2711 */
2712void pci_clear_master(struct pci_dev *dev)
2713{
2714 __pci_set_master(dev, false);
2715}
2716
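/*
 * Illustrative: the canonical bring-up order in a probe routine: enable
 * the device, claim its regions, then let it master the bus. The
 * "example_bring_up" helper is hypothetical.
 */
static int example_bring_up(struct pci_dev *pdev)
{
	int error = pci_enable_device(pdev);

	if (error)
		return error;
	error = pci_request_regions(pdev, "example");
	if (error)
		return error;
	pci_set_master(pdev);
	return 0;
}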
1da177e4 2717/**
edb2d97e
MW
2718 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2719 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2720 *
edb2d97e
MW
2721 * Helper function for pci_set_mwi.
2722 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2723 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2724 *
2725 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2726 */
15ea76d4 2727int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2728{
2729 u8 cacheline_size;
2730
2731 if (!pci_cache_line_size)
15ea76d4 2732 return -EINVAL;
1da177e4
LT
2733
2734 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2735 equal to or multiple of the right value. */
2736 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2737 if (cacheline_size >= pci_cache_line_size &&
2738 (cacheline_size % pci_cache_line_size) == 0)
2739 return 0;
2740
2741 /* Write the correct value. */
2742 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2743 /* Read it back. */
2744 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2745 if (cacheline_size == pci_cache_line_size)
2746 return 0;
2747
80ccba11
BH
2748 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2749 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2750
2751 return -EINVAL;
2752}
15ea76d4
TH
2753EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2754
2755#ifdef PCI_DISABLE_MWI
2756int pci_set_mwi(struct pci_dev *dev)
2757{
2758 return 0;
2759}
2760
2761int pci_try_set_mwi(struct pci_dev *dev)
2762{
2763 return 0;
2764}
2765
2766void pci_clear_mwi(struct pci_dev *dev)
2767{
2768}
2769
2770#else
1da177e4
LT
2771
2772/**
2773 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2774 * @dev: the PCI device for which MWI is enabled
2775 *
694625c0 2776 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2777 *
2778 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2779 */
2780int
2781pci_set_mwi(struct pci_dev *dev)
2782{
2783 int rc;
2784 u16 cmd;
2785
edb2d97e 2786 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2787 if (rc)
2788 return rc;
2789
2790 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2791 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2792 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2793 cmd |= PCI_COMMAND_INVALIDATE;
2794 pci_write_config_word(dev, PCI_COMMAND, cmd);
2795 }
2796
2797 return 0;
2798}
2799
694625c0
RD
2800/**
2801 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2802 * @dev: the PCI device for which MWI is enabled
2803 *
2804 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2805 * Callers are not required to check the return value.
2806 *
2807 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2808 */
2809int pci_try_set_mwi(struct pci_dev *dev)
2810{
2811 int rc = pci_set_mwi(dev);
2812 return rc;
2813}
2814
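/*
 * Illustrative: MWI is purely an optimization, so probe code normally
 * uses the _try_ variant and ignores the result. "example_enable_mwi"
 * is a hypothetical helper.
 */
static void example_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
}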
1da177e4
LT
2815/**
2816 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2817 * @dev: the PCI device to disable
2818 *
2819 * Disables PCI Memory-Write-Invalidate transaction on the device
2820 */
2821void
2822pci_clear_mwi(struct pci_dev *dev)
2823{
2824 u16 cmd;
2825
2826 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2827 if (cmd & PCI_COMMAND_INVALIDATE) {
2828 cmd &= ~PCI_COMMAND_INVALIDATE;
2829 pci_write_config_word(dev, PCI_COMMAND, cmd);
2830 }
2831}
edb2d97e 2832#endif /* ! PCI_DISABLE_MWI */
1da177e4 2833
a04ce0ff
BR
2834/**
2835 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2836 * @pdev: the PCI device to operate on
2837 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2838 *
2839 * Enables/disables PCI INTx for device dev
2840 */
2841void
2842pci_intx(struct pci_dev *pdev, int enable)
2843{
2844 u16 pci_command, new;
2845
2846 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2847
2848 if (enable) {
2849 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2850 } else {
2851 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2852 }
2853
2854 if (new != pci_command) {
9ac7849e
TH
2855 struct pci_devres *dr;
2856
2fd9d74b 2857 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2858
2859 dr = find_pci_dr(pdev);
2860 if (dr && !dr->restore_intx) {
2861 dr->restore_intx = 1;
2862 dr->orig_intx = !enable;
2863 }
a04ce0ff
BR
2864 }
2865}
2866
a2e27787
JK
2867/**
2868 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2869 * @dev: the PCI device to operate on
a2e27787
JK
2870 *
2871 * Check if the device dev supports INTx masking via the config space
2872 * command word.
2873 */
2874bool pci_intx_mask_supported(struct pci_dev *dev)
2875{
2876 bool mask_supported = false;
2877 u16 orig, new;
2878
2879 pci_cfg_access_lock(dev);
2880
2881 pci_read_config_word(dev, PCI_COMMAND, &orig);
2882 pci_write_config_word(dev, PCI_COMMAND,
2883 orig ^ PCI_COMMAND_INTX_DISABLE);
2884 pci_read_config_word(dev, PCI_COMMAND, &new);
2885
2886 /*
2887 * There's no way to protect against hardware bugs or detect them
2888 * reliably, but as long as we know what the value should be, let's
2889 * go ahead and check it.
2890 */
2891 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2892 dev_err(&dev->dev, "Command register changed from "
2893 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2894 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2895 mask_supported = true;
2896 pci_write_config_word(dev, PCI_COMMAND, orig);
2897 }
2898
2899 pci_cfg_access_unlock(dev);
2900 return mask_supported;
2901}
2902EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2903
2904static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2905{
2906 struct pci_bus *bus = dev->bus;
2907 bool mask_updated = true;
2908 u32 cmd_status_dword;
2909 u16 origcmd, newcmd;
2910 unsigned long flags;
2911 bool irq_pending;
2912
2913 /*
2914 * We do a single dword read to retrieve both command and status.
2915 * Document assumptions that make this possible.
2916 */
2917 BUILD_BUG_ON(PCI_COMMAND % 4);
2918 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2919
2920 raw_spin_lock_irqsave(&pci_lock, flags);
2921
2922 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2923
2924 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2925
2926 /*
2927 * Check interrupt status register to see whether our device
2928 * triggered the interrupt (when masking) or the next IRQ is
2929 * already pending (when unmasking).
2930 */
2931 if (mask != irq_pending) {
2932 mask_updated = false;
2933 goto done;
2934 }
2935
2936 origcmd = cmd_status_dword;
2937 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2938 if (mask)
2939 newcmd |= PCI_COMMAND_INTX_DISABLE;
2940 if (newcmd != origcmd)
2941 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2942
2943done:
2944 raw_spin_unlock_irqrestore(&pci_lock, flags);
2945
2946 return mask_updated;
2947}
2948
2949/**
2950 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2951 * @dev: the PCI device to operate on
a2e27787
JK
2952 *
2953 * Check if the device dev has its INTx line asserted, mask it and
2954 * return true in that case. False is returned if no interrupt was
2955 * pending.
2956 */
2957bool pci_check_and_mask_intx(struct pci_dev *dev)
2958{
2959 return pci_check_and_set_intx_mask(dev, true);
2960}
2961EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2962
2963/**
2964 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 2965 * @dev: the PCI device to operate on
a2e27787
JK
2966 *
2967 * Check if the device dev has its INTx line asserted, unmask it if not
2968 * and return true. False is returned and the mask remains active if
2969 * there was still an interrupt pending.
2970 */
2971bool pci_check_and_unmask_intx(struct pci_dev *dev)
2972{
2973 return pci_check_and_set_intx_mask(dev, false);
2974}
2975EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2976
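/*
 * Illustrative sketch: a handler for a shared INTx line, the pattern
 * used by uio_pci_generic-style drivers. Such a driver would first
 * verify pci_intx_mask_supported() at probe time. The device is only
 * treated as the interrupt source when a pending IRQ was just masked.
 * "example_irq" is hypothetical.
 */
static irqreturn_t example_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* another device on the line */

	/* ... service the device, unmask later via a workqueue ... */
	return IRQ_HANDLED;
}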
f5f2b131
EB
2977/**
2978 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 2979 * @dev: the PCI device to operate on
f5f2b131
EB
2980 *
2981 * If you want to use MSI, see pci_enable_msi() and friends.
2982 * This is a lower level primitive that allows us to disable
2983 * msi operation at the device level.
2984 */
2985void pci_msi_off(struct pci_dev *dev)
2986{
2987 int pos;
2988 u16 control;
2989
2990 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2991 if (pos) {
2992 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2993 control &= ~PCI_MSI_FLAGS_ENABLE;
2994 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2995 }
2996 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2997 if (pos) {
2998 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2999 control &= ~PCI_MSIX_FLAGS_ENABLE;
3000 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3001 }
3002}
b03214d5 3003EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3004
4d57cdfa
FT
3005int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3006{
3007 return dma_set_max_seg_size(&dev->dev, size);
3008}
3009EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3010
59fc67de
FT
3011int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3012{
3013 return dma_set_seg_boundary(&dev->dev, mask);
3014}
3015EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3016
8c1c699f 3017static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3018{
8c1c699f
YZ
3019 int i;
3020 int pos;
8dd7f803 3021 u32 cap;
04b55c47 3022 u16 status, control;
8dd7f803 3023
06a1cbaf 3024 pos = pci_pcie_cap(dev);
8c1c699f 3025 if (!pos)
8dd7f803 3026 return -ENOTTY;
8c1c699f
YZ
3027
3028 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3029 if (!(cap & PCI_EXP_DEVCAP_FLR))
3030 return -ENOTTY;
3031
d91cdc74
SY
3032 if (probe)
3033 return 0;
3034
8dd7f803 3035 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3036 for (i = 0; i < 4; i++) {
3037 if (i)
3038 msleep((1 << (i - 1)) * 100);
5fe5db05 3039
8c1c699f
YZ
3040 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3041 if (!(status & PCI_EXP_DEVSTA_TRPND))
3042 goto clear;
3043 }
3044
3045 dev_err(&dev->dev, "transaction is not cleared; "
3046 "proceeding with reset anyway\n");
3047
3048clear:
04b55c47
SR
3049 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3050 control |= PCI_EXP_DEVCTL_BCR_FLR;
3051 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3052
8c1c699f 3053 msleep(100);
8dd7f803 3054
8dd7f803
SY
3055 return 0;
3056}
d91cdc74 3057
8c1c699f 3058static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3059{
8c1c699f
YZ
3060 int i;
3061 int pos;
1ca88797 3062 u8 cap;
8c1c699f 3063 u8 status;
1ca88797 3064
8c1c699f
YZ
3065 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3066 if (!pos)
1ca88797 3067 return -ENOTTY;
8c1c699f
YZ
3068
3069 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3070 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3071 return -ENOTTY;
3072
3073 if (probe)
3074 return 0;
3075
1ca88797 3076 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3077 for (i = 0; i < 4; i++) {
3078 if (i)
3079 msleep((1 << (i - 1)) * 100);
3080
3081 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3082 if (!(status & PCI_AF_STATUS_TP))
3083 goto clear;
3084 }
5fe5db05 3085
8c1c699f
YZ
3086 dev_err(&dev->dev, "transaction is not cleared; "
3087 "proceeding with reset anyway\n");
5fe5db05 3088
8c1c699f
YZ
3089clear:
3090 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3091 msleep(100);
8c1c699f 3092
1ca88797
SY
3093 return 0;
3094}
3095
83d74e03
RW
3096/**
3097 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3098 * @dev: Device to reset.
3099 * @probe: If set, only check if the device can be reset this way.
3100 *
3101 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3102 * unset, it will be reinitialized internally when going from PCI_D3hot to
3103 * PCI_D0. If that's the case and the device is not in a low-power state
3104 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3105 *
3106 * NOTE: This causes the caller to sleep for twice the device power transition
3107 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3108 * by default (i.e. unless the @dev's d3_delay field has a different value).
3109 * Moreover, only devices in D0 can be reset by this function.
3110 */
f85876ba 3111static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3112{
f85876ba
YZ
3113 u16 csr;
3114
3115 if (!dev->pm_cap)
3116 return -ENOTTY;
d91cdc74 3117
f85876ba
YZ
3118 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3119 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3120 return -ENOTTY;
d91cdc74 3121
f85876ba
YZ
3122 if (probe)
3123 return 0;
1ca88797 3124
f85876ba
YZ
3125 if (dev->current_state != PCI_D0)
3126 return -EINVAL;
3127
3128 csr &= ~PCI_PM_CTRL_STATE_MASK;
3129 csr |= PCI_D3hot;
3130 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3131 pci_dev_d3_sleep(dev);
f85876ba
YZ
3132
3133 csr &= ~PCI_PM_CTRL_STATE_MASK;
3134 csr |= PCI_D0;
3135 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3136 pci_dev_d3_sleep(dev);
f85876ba
YZ
3137
3138 return 0;
3139}
3140
c12ff1df
YZ
3141static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3142{
3143 u16 ctrl;
3144 struct pci_dev *pdev;
3145
654b75e0 3146 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3147 return -ENOTTY;
3148
3149 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3150 if (pdev != dev)
3151 return -ENOTTY;
3152
3153 if (probe)
3154 return 0;
3155
3156 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3157 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3158 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3159 msleep(100);
3160
3161 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3162 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3163 msleep(100);
3164
3165 return 0;
3166}
3167
977f857c 3168static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3169{
8c1c699f
YZ
3170 int rc;
3171
3172 might_sleep();
3173
b9c3b266
DC
3174 rc = pci_dev_specific_reset(dev, probe);
3175 if (rc != -ENOTTY)
3176 goto done;
3177
8c1c699f
YZ
3178 rc = pcie_flr(dev, probe);
3179 if (rc != -ENOTTY)
3180 goto done;
d91cdc74 3181
8c1c699f 3182 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3183 if (rc != -ENOTTY)
3184 goto done;
3185
3186 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3187 if (rc != -ENOTTY)
3188 goto done;
3189
3190 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3191done:
977f857c
KRW
3192 return rc;
3193}
3194
3195static int pci_dev_reset(struct pci_dev *dev, int probe)
3196{
3197 int rc;
3198
3199 if (!probe) {
3200 pci_cfg_access_lock(dev);
3201 /* block PM suspend, driver probe, etc. */
3202 device_lock(&dev->dev);
3203 }
3204
3205 rc = __pci_dev_reset(dev, probe);
3206
8c1c699f 3207 if (!probe) {
8e9394ce 3208 device_unlock(&dev->dev);
fb51ccbf 3209 pci_cfg_access_unlock(dev);
8c1c699f 3210 }
8c1c699f 3211 return rc;
d91cdc74 3212}
d91cdc74 3213/**
8c1c699f
YZ
3214 * __pci_reset_function - reset a PCI device function
3215 * @dev: PCI device to reset
d91cdc74
SY
3216 *
3217 * Some devices allow an individual function to be reset without affecting
3218 * other functions in the same device. The PCI device must be responsive
3219 * to PCI config space in order to use this function.
3220 *
3221 * The device function is presumed to be unused when this function is called.
3222 * Resetting the device will make the contents of PCI configuration space
3223 * random, so any caller of this must be prepared to reinitialise the
3224 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3225 * etc.
3226 *
8c1c699f 3227 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3228 * device doesn't support resetting a single function.
3229 */
8c1c699f 3230int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3231{
8c1c699f 3232 return pci_dev_reset(dev, 0);
d91cdc74 3233}
8c1c699f 3234EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3235
6fbf9e7a
KRW
3236/**
3237 * __pci_reset_function_locked - reset a PCI device function while holding
3238 * the @dev mutex lock.
3239 * @dev: PCI device to reset
3240 *
3241 * Some devices allow an individual function to be reset without affecting
3242 * other functions in the same device. The PCI device must be responsive
3243 * to PCI config space in order to use this function.
3244 *
3245 * The device function is presumed to be unused and the caller is holding
3246 * the device mutex lock when this function is called.
3247 * Resetting the device will make the contents of PCI configuration space
3248 * random, so any caller of this must be prepared to reinitialise the
3249 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3250 * etc.
3251 *
3252 * Returns 0 if the device function was successfully reset or negative if the
3253 * device doesn't support resetting a single function.
3254 */
3255int __pci_reset_function_locked(struct pci_dev *dev)
3256{
977f857c 3257 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3258}
3259EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3260
711d5779
MT
3261/**
3262 * pci_probe_reset_function - check whether the device can be safely reset
3263 * @dev: PCI device to reset
3264 *
3265 * Some devices allow an individual function to be reset without affecting
3266 * other functions in the same device. The PCI device must be responsive
3267 * to PCI config space in order to use this function.
3268 *
3269 * Returns 0 if the device function can be reset or negative if the
3270 * device doesn't support resetting a single function.
3271 */
3272int pci_probe_reset_function(struct pci_dev *dev)
3273{
3274 return pci_dev_reset(dev, 1);
3275}
3276
8dd7f803 3277/**
8c1c699f
YZ
3278 * pci_reset_function - quiesce and reset a PCI device function
3279 * @dev: PCI device to reset
8dd7f803
SY
3280 *
3281 * Some devices allow an individual function to be reset without affecting
3282 * other functions in the same device. The PCI device must be responsive
3283 * to PCI config space in order to use this function.
3284 *
3285 * This function does not just reset the PCI portion of a device, but
3286 * clears all the state associated with the device. This function differs
8c1c699f 3287 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3288 * over the reset.
3289 *
8c1c699f 3290 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3291 * device doesn't support resetting a single function.
3292 */
3293int pci_reset_function(struct pci_dev *dev)
3294{
8c1c699f 3295 int rc;
8dd7f803 3296
8c1c699f
YZ
3297 rc = pci_dev_reset(dev, 1);
3298 if (rc)
3299 return rc;
8dd7f803 3300
8dd7f803
SY
3301 pci_save_state(dev);
3302
8c1c699f
YZ
3303 /*
3304 * both INTx and MSI are disabled after the Interrupt Disable bit
3305 * is set and the Bus Master bit is cleared.
3306 */
8dd7f803
SY
3307 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3308
8c1c699f 3309 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3310
3311 pci_restore_state(dev);
8dd7f803 3312
8c1c699f 3313 return rc;
8dd7f803
SY
3314}
3315EXPORT_SYMBOL_GPL(pci_reset_function);
3316
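/*
 * Illustrative: probe for a usable reset method before committing, as a
 * device-assignment path might do. "example_try_reset" is hypothetical.
 */
static int example_try_reset(struct pci_dev *pdev)
{
	if (pci_probe_reset_function(pdev))
		return -ENOTTY;	/* no function-level reset available */

	return pci_reset_function(pdev);
}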
d556ad4b
PO
3317/**
3318 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3319 * @dev: PCI device to query
3320 *
3321 * Returns mmrbc: maximum designed memory read count in bytes
3322 * or appropriate error value.
3323 */
3324int pcix_get_max_mmrbc(struct pci_dev *dev)
3325{
7c9e2b1c 3326 int cap;
d556ad4b
PO
3327 u32 stat;
3328
3329 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3330 if (!cap)
3331 return -EINVAL;
3332
7c9e2b1c 3333 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3334 return -EINVAL;
3335
25daeb55 3336 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3337}
3338EXPORT_SYMBOL(pcix_get_max_mmrbc);
3339
3340/**
3341 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3342 * @dev: PCI device to query
3343 *
3344 * Returns mmrbc: maximum memory read count in bytes
3345 * or appropriate error value.
3346 */
3347int pcix_get_mmrbc(struct pci_dev *dev)
3348{
7c9e2b1c 3349 int cap;
bdc2bda7 3350 u16 cmd;
d556ad4b
PO
3351
3352 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3353 if (!cap)
3354 return -EINVAL;
3355
7c9e2b1c
DN
3356 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3357 return -EINVAL;
d556ad4b 3358
7c9e2b1c 3359 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3360}
3361EXPORT_SYMBOL(pcix_get_mmrbc);
3362
3363/**
3364 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3365 * @dev: PCI device to query
3366 * @mmrbc: maximum memory read count in bytes
3367 * valid values are 512, 1024, 2048, 4096
3368 *
3369 * If possible sets maximum memory read byte count; some bridges have errata
3370 * that prevent this.
3371 */
3372int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3373{
7c9e2b1c 3374 int cap;
bdc2bda7
DN
3375 u32 stat, v, o;
3376 u16 cmd;
d556ad4b 3377
229f5afd 3378 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3379 return -EINVAL;
d556ad4b
PO
3380
3381 v = ffs(mmrbc) - 10;
3382
3383 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3384 if (!cap)
7c9e2b1c 3385 return -EINVAL;
d556ad4b 3386
7c9e2b1c
DN
3387 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3388 return -EINVAL;
d556ad4b
PO
3389
3390 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3391 return -E2BIG;
3392
7c9e2b1c
DN
3393 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3394 return -EINVAL;
d556ad4b
PO
3395
3396 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3397 if (o != v) {
3398 if (v > o && dev->bus &&
3399 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3400 return -EIO;
3401
3402 cmd &= ~PCI_X_CMD_MAX_READ;
3403 cmd |= v << 2;
7c9e2b1c
DN
3404 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3405 return -EIO;
d556ad4b 3406 }
7c9e2b1c 3407 return 0;
d556ad4b
PO
3408}
3409EXPORT_SYMBOL(pcix_set_mmrbc);
3410
3411/**
3412 * pcie_get_readrq - get PCI Express read request size
3413 * @dev: PCI device to query
3414 *
3415 * Returns maximum memory read request in bytes
3416 * or appropriate error value.
3417 */
3418int pcie_get_readrq(struct pci_dev *dev)
3419{
3420 int ret, cap;
3421 u16 ctl;
3422
06a1cbaf 3423 cap = pci_pcie_cap(dev);
d556ad4b
PO
3424 if (!cap)
3425 return -EINVAL;
3426
3427 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3428 if (!ret)
93e75fab 3429 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3430
3431 return ret;
3432}
3433EXPORT_SYMBOL(pcie_get_readrq);
3434
3435/**
3436 * pcie_set_readrq - set PCI Express maximum memory read request
3437 * @dev: PCI device to query
42e61f4a 3438 * @rq: maximum memory read count in bytes
d556ad4b
PO
3439 * valid values are 128, 256, 512, 1024, 2048, 4096
3440 *
c9b378c7 3441 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3442 */
3443int pcie_set_readrq(struct pci_dev *dev, int rq)
3444{
3445 int cap, err = -EINVAL;
3446 u16 ctl, v;
3447
229f5afd 3448 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3449 goto out;
3450
06a1cbaf 3451 cap = pci_pcie_cap(dev);
d556ad4b
PO
3452 if (!cap)
3453 goto out;
3454
3455 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3456 if (err)
3457 goto out;
a1c473aa
BH
3458 /*
3459 * If using the "performance" PCIe config, we clamp the
3460 * read rq size to the max packet size to prevent the
3461 * host bridge generating requests larger than we can
3462 * cope with
3463 */
3464 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3465 int mps = pcie_get_mps(dev);
3466
3467 if (mps < 0)
3468 return mps;
3469 if (mps < rq)
3470 rq = mps;
3471 }
3472
3473 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3474
3475 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3476 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3477 ctl |= v;
c9b378c7 3478 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3479 }
3480
3481out:
3482 return err;
3483}
3484EXPORT_SYMBOL(pcie_set_readrq);
3485
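/*
 * Illustrative: raise the maximum read request size to 4096 bytes when
 * the firmware left it lower, as some high-throughput drivers do.
 * "example_tune_readrq" is a hypothetical helper.
 */
static void example_tune_readrq(struct pci_dev *pdev)
{
	int rq = pcie_get_readrq(pdev);

	if (rq > 0 && rq < 4096)
		pcie_set_readrq(pdev, 4096);
}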
b03e7495
JM
3486/**
3487 * pcie_get_mps - get PCI Express maximum payload size
3488 * @dev: PCI device to query
3489 *
3490 * Returns maximum payload size in bytes
3491 * or appropriate error value.
3492 */
3493int pcie_get_mps(struct pci_dev *dev)
3494{
3495 int ret, cap;
3496 u16 ctl;
3497
3498 cap = pci_pcie_cap(dev);
3499 if (!cap)
3500 return -EINVAL;
3501
3502 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3503 if (!ret)
3504 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3505
3506 return ret;
3507}
3508
3509/**
3510 * pcie_set_mps - set PCI Express maximum payload size
3511 * @dev: PCI device to query
47c08f31 3512 * @mps: maximum payload size in bytes
b03e7495
JM
3513 * valid values are 128, 256, 512, 1024, 2048, 4096
3514 *
3515 * If possible sets maximum payload size
3516 */
3517int pcie_set_mps(struct pci_dev *dev, int mps)
3518{
3519 int cap, err = -EINVAL;
3520 u16 ctl, v;
3521
3522 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3523 goto out;
3524
3525 v = ffs(mps) - 8;
3526 if (v > dev->pcie_mpss)
3527 goto out;
3528 v <<= 5;
3529
3530 cap = pci_pcie_cap(dev);
3531 if (!cap)
3532 goto out;
3533
3534 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3535 if (err)
3536 goto out;
3537
3538 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3539 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3540 ctl |= v;
3541 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3542 }
3543out:
3544 return err;
3545}
3546
c87deff7
HS
3547/**
3548 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3549 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3550 * @flags: resource type mask to be selected
3551 *
3552 * This helper routine makes a BAR mask from the given resource type.
3553 */
3554int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3555{
3556 int i, bars = 0;
3557 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3558 if (pci_resource_flags(dev, i) & flags)
3559 bars |= (1 << i);
3560 return bars;
3561}
3562
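/*
 * Illustrative: combine pci_select_bars() with
 * pci_request_selected_regions() to claim only the memory BARs.
 * "example_claim_mem_bars" is a hypothetical helper.
 */
static int example_claim_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example");
}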
613e7ed6
YZ
3563/**
3564 * pci_resource_bar - get position of the BAR associated with a resource
3565 * @dev: the PCI device
3566 * @resno: the resource number
3567 * @type: the BAR type to be filled in
3568 *
3569 * Returns BAR position in config space, or 0 if the BAR is invalid.
3570 */
3571int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3572{
d1b054da
YZ
3573 int reg;
3574
613e7ed6
YZ
3575 if (resno < PCI_ROM_RESOURCE) {
3576 *type = pci_bar_unknown;
3577 return PCI_BASE_ADDRESS_0 + 4 * resno;
3578 } else if (resno == PCI_ROM_RESOURCE) {
3579 *type = pci_bar_mem32;
3580 return dev->rom_base_reg;
d1b054da
YZ
3581 } else if (resno < PCI_BRIDGE_RESOURCES) {
3582 /* device specific resource */
3583 reg = pci_iov_resource_bar(dev, resno, type);
3584 if (reg)
3585 return reg;
613e7ed6
YZ
3586 }
3587
865df576 3588 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3589 return 0;
3590}
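
/*
 * Worked example (illustrative): for a plain BAR such as resno 3,
 * the function above returns PCI_BASE_ADDRESS_0 + 12, i.e. config
 * offset 0x1c, and sets *type to pci_bar_unknown so that the caller
 * sizes the BAR to discover whether it is an I/O or memory BAR.
 */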

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits, flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
 *	the latter also traverses ancestors and changes bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	/* when changing decodes, only I/O and memory command bits make sense */
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
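
/*
 * Illustrative sketch: a VGA arbiter client routing legacy VGA I/O
 * and memory accesses to one card, updating the upstream bridges as
 * well ("example_enable_vga" is a hypothetical helper):
 */
static inline int example_enable_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}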

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to query
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			   &seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
				   &bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
				       p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
		    bus == dev->bus->number &&
		    slot == PCI_SLOT(dev->devfn) &&
		    func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
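
/*
 * Format example (derived from the parser above): booting with
 *
 *	pci=resource_alignment=12@0000:00:02.0;13@01:00.0
 *
 * requests 2^12 (4K) alignment for device 0000:00:02.0 and 2^13 (8K)
 * for 01:00.0 in domain 0; omitting the "<order>@" prefix falls back
 * to PAGE_SIZE alignment.
 */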

/**
 * pci_is_reassigndev - check if specified PCI device is a reassignment target
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the device is a target for resource reassignment,
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			 "Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		 "Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				 "Rounding up size of resource #%d to %#llx.\n",
				 i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/*
	 * Need to disable the bridge's resource window so that the
	 * kernel can reassign a new resource window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					    const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	 pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
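
/*
 * Usage note (illustrative): the bus attribute created above appears
 * as /sys/bus/pci/resource_alignment, so the same syntax also works
 * at run time, e.g.
 *
 *	# echo 12@0000:00:02.0 > /sys/bus/pci/resource_alignment
 *
 * though it generally only affects devices scanned after the write.
 */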

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
						strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
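
/*
 * Command-line example (illustrative): the options parsed above may
 * be combined, separated by commas, e.g.
 *
 *	pci=nomsi,cbmemsize=32M,pcie_bus_safe
 *
 * Anything unrecognized falls through to the "Unknown option" error.
 */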

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);