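/*
 * Low-level PCI support routines (pcibios_* hooks) for this platform.
 */
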
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>		/* remap_pfn_range(), struct vm_area_struct */
#include <arch/hwregs/intr_vect.h>
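
/* Nothing to fix up per bus on this platform. */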
void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
}
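
/* Architecture hook for "pci=" kernel boot options. */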
char * __devinit pcibios_setup(char *str)
{
	return NULL;
}
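
/* Report the device's latency timer; the value is written back unchanged. */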
void pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
	       pci_name(dev), lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
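
/* Map a PCI resource into user space; vm_pgoff already holds the physical
 * page frame number of the PCI space on this platform.
 */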
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	prot = pgprot_val(vma->vm_page_prot);
	vma->vm_page_prot = __pgprot(prot);

	/* Write-combine setting is ignored, it is changed via the mtrr
	 * interfaces on this platform.
	 */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
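
/* Keep I/O resources out of the 0x100-0x3ff ISA alias range by rounding
 * them up to the next 1K boundary.
 */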
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
		start = (start + 0x3ff) & ~0x3ff;

	return start;
}
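
/* Turn on I/O and/or memory decoding in PCI_COMMAND for the resources
 * selected by @mask.
 */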
int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
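
/* Every PCI device uses the external interrupt vector on this platform. */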
int pcibios_enable_irq(struct pci_dev *dev)
{
	dev->irq = EXT_INTR_VECT;
	return 0;
}
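
/* Enable the device's resources and hook up its interrupt unless MSI is
 * already enabled.
 */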
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pcibios_enable_resources(dev, mask)) < 0)
		return err;
	if (!dev->msi_enabled)
		pcibios_enable_irq(dev);
	return 0;
}
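
/* Assign addresses to any BARs that have a size but no address yet,
 * skipping classless devices and host bridges.
 */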
int pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (!r->start && r->end)
				pci_assign_resource(dev, idx);
		}
	}
	return 0;
}
EXPORT_SYMBOL(pcibios_assign_resources);