// SPDX-License-Identifier: GPL-2.0
/*
 *      linux/arch/alpha/kernel/pci.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 */

/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *           PCI-PCI bridges cleanup
 */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <asm/machvec.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * Some string constants used by the various core logics.
 */

const char *const pci_io_names[] = {
        "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
        "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};

const char *const pci_mem_names[] = {
        "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
        "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};

const char pci_hae0_name[] = "HAE0";

/*
 * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
 * assignments.
 */

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

/*
 * Quirks.
 */

static void quirk_isa_bridge(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);

static void quirk_cypress(struct pci_dev *dev)
{
        /* The Notorious Cy82C693 chip. */

        /* The generic legacy mode IDE fixup in drivers/pci/probe.c
           doesn't work correctly with the Cypress IDE controller as
           it has non-standard register layout. Fix that. */
        if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
                dev->resource[2].start = dev->resource[3].start = 0;
                dev->resource[2].end = dev->resource[3].end = 0;
                dev->resource[2].flags = dev->resource[3].flags = 0;
                if (PCI_FUNC(dev->devfn) == 2) {
                        dev->resource[0].start = 0x170;
                        dev->resource[0].end = 0x177;
                        dev->resource[1].start = 0x376;
                        dev->resource[1].end = 0x376;
                }
        }

        /* The Cypress bridge responds on the PCI bus in the address range
           0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
           way to turn this off. The bridge also supports several extended
           BIOS ranges (disabled after power-up), and some consoles do turn
           them on. So if we use a large direct-map window, or a large SG
           window, we must avoid the entire 0xfff00000-0xffffffff region. */
        if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
                if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
                        __direct_map_size = 0xfff00000UL - __direct_map_base;
                else {
                        struct pci_controller *hose = dev->sysdata;
                        struct pci_iommu_arena *pci = hose->sg_pci;
                        if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
                                pci->size = 0xfff00000UL - pci->dma_base;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);


/* Called for each device after PCI setup is done. */
static void pcibios_fixup_final(struct pci_dev *dev)
{
        unsigned int class = dev->class >> 8;

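        /*
         * Clamp ISA/EISA bridges to the ISA DMA address limit and
         * remember the (last) such bridge found so legacy ISA-space
         * accesses can locate it.
         */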
        if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
                dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
                isa_bridge = dev;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Just declaring that the power-of-ten prefixes are actually the
   power-of-two ones doesn't make it true :) */
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)

resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
                       resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        struct pci_controller *hose = dev->sysdata;
        unsigned long alignto;
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO) {
                /* Make sure we start at our min on all hoses */
                if (start - hose->io_space->start < PCIBIOS_MIN_IO)
                        start = PCIBIOS_MIN_IO + hose->io_space->start;

                /*
                 * Put everything into 0x00-0xff region modulo 0x400
                 */
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;
        }
        else if (res->flags & IORESOURCE_MEM) {
                /* Make sure we start at our min on all hoses */
                if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
                        start = PCIBIOS_MIN_MEM + hose->mem_space->start;

                /*
                 * The following holds at least for the Low Cost
                 * Alpha implementation of the PCI interface:
                 *
                 * In sparse memory address space, the first
                 * octant (16MB) of every 128MB segment is
                 * aliased to the very first 16 MB of the
                 * address space (i.e., it aliases the ISA
                 * memory address space). Thus, we try to
                 * avoid allocating PCI devices in that range.
                 * Can be allocated in 2nd-7th octant only.
                 * Devices that need more than 112MB of
                 * address space must be accessed through
                 * dense memory space only!
                 */

                /* Align to multiple of size of minimum base. */
                alignto = max_t(resource_size_t, 0x1000, align);
                start = ALIGN(start, alignto);
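                /*
                 * Keep the allocation out of the aliased first octant
                 * of each 128MB sparse segment: if the start lands in
                 * octant 0, bump it into the second octant; if the
                 * region would cross a 128MB boundary, move it to the
                 * second octant of the next segment.
                 */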
                if (hose->sparse_mem_base && size <= 7 * 16*MB) {
                        if (((start / (16*MB)) & 0x7) == 0) {
                                start &= ~(128*MB - 1);
                                start += 16*MB;
                                start = ALIGN(start, alignto);
                        }
                        if (start/(128*MB) != (start + size - 1)/(128*MB)) {
                                start &= ~(128*MB - 1);
                                start += (128 + 16)*MB;
                                start = ALIGN(start, alignto);
                        }
                }
        }

        return start;
}
#undef KB
#undef MB
#undef GB

static int __init
pcibios_init(void)
{
        if (alpha_mv.init_pci)
                alpha_mv.init_pci();
        return 0;
}

subsys_initcall(pcibios_init);


#ifdef ALPHA_RESTORE_SRM_SETUP
/* Store PCI device configuration left by SRM here. */
struct pdev_srm_saved_conf
{
        struct pdev_srm_saved_conf *next;
        struct pci_dev *dev;
};

static struct pdev_srm_saved_conf *srm_saved_configs;

static void pdev_save_srm_config(struct pci_dev *dev)
{
        struct pdev_srm_saved_conf *tmp;
        static int printed = 0;

        if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY))
                return;

        if (!printed) {
                printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
                printed = 1;
        }

        tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp) {
                printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
                return;
        }
        tmp->next = srm_saved_configs;
        tmp->dev = dev;

        pci_save_state(dev);

        srm_saved_configs = tmp;
}

void
pci_restore_srm_config(void)
{
        struct pdev_srm_saved_conf *tmp;

        /* No need to restore if probed only. */
        if (pci_has_flag(PCI_PROBE_ONLY))
                return;

        /* Restore SRM config. */
        for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
                pci_restore_state(tmp->dev);
        }
}
#else
#define pdev_save_srm_config(dev) do {} while (0)
#endif


void pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev = bus->self;

        if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
            (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
                pci_read_bridge_bases(bus);
        }

        list_for_each_entry(dev, &bus->devices, bus_list) {
                pdev_save_srm_config(dev);
        }
}

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain firmware forgets to set it properly, as seen
 * on SX164 and LX164 with SRM.
 */
void
pcibios_set_master(struct pci_dev *dev)
{
        u8 lat;
        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
        if (lat >= 16) return;
        printk("PCI: Setting latency timer of device %s to 64\n",
               pci_name(dev));
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
}


void __init
pcibios_claim_one_bus(struct pci_bus *b)
{
        struct pci_dev *dev;
        struct pci_bus *child_bus;

        list_for_each_entry(dev, &b->devices, bus_list) {
                struct resource *r;
                int i;

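                /*
                 * Claim the resources the console firmware has already
                 * programmed: skip anything unassigned, flagless, or
                 * already claimed; if a direct claim fails, let
                 * pci_claim_bridge_resource() try to fix up the bridge
                 * window.
                 */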
                pci_dev_for_each_resource(dev, r, i) {
                        if (r->parent || !r->start || !r->flags)
                                continue;
                        if (pci_has_flag(PCI_PROBE_ONLY) ||
                            (r->flags & IORESOURCE_PCI_FIXED)) {
                                if (pci_claim_resource(dev, i) == 0)
                                        continue;

                                pci_claim_bridge_resource(dev, i);
                        }
                }
        }

        list_for_each_entry(child_bus, &b->children, node)
                pcibios_claim_one_bus(child_bus);
}

static void __init
pcibios_claim_console_setup(void)
{
        struct pci_bus *b;

        list_for_each_entry(b, &pci_root_buses, node)
                pcibios_claim_one_bus(b);
}


void __init
common_init_pci(void)
{
        struct pci_controller *hose;
        struct list_head resources;
        struct pci_host_bridge *bridge;
        struct pci_bus *bus;
        int ret, next_busno;
        int need_domain_info = 0;
        u32 pci_mem_end;
        u32 sg_base;
        unsigned long end;

        /* Scan all of the recorded PCI controllers. */
        for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
                sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;

                /* Adjust hose mem_space limit to prevent PCI allocations
                   in the iommu windows. */
                pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
                end = hose->mem_space->start + pci_mem_end;
                if (hose->mem_space->end > end)
                        hose->mem_space->end = end;

                INIT_LIST_HEAD(&resources);
                pci_add_resource_offset(&resources, hose->io_space,
                                        hose->io_space->start);
                pci_add_resource_offset(&resources, hose->mem_space,
                                        hose->mem_space->start);

                bridge = pci_alloc_host_bridge(0);
                if (!bridge)
                        continue;

                list_splice_init(&resources, &bridge->windows);
                bridge->dev.parent = NULL;
                bridge->sysdata = hose;
                bridge->busnr = next_busno;
                bridge->ops = alpha_mv.pci_ops;
                bridge->swizzle_irq = alpha_mv.pci_swizzle;
                bridge->map_irq = alpha_mv.pci_map_irq;

                ret = pci_scan_root_bus_bridge(bridge);
                if (ret) {
                        pci_free_host_bridge(bridge);
                        continue;
                }

                bus = hose->bus = bridge->bus;
                hose->need_domain_info = need_domain_info;
                next_busno = bus->busn_res.end + 1;
                /* Don't allow 8-bit bus number overflow inside the hose -
                   reserve some space for bridges. */
                if (next_busno > 224) {
                        next_busno = 0;
                        need_domain_info = 1;
                }
        }

        pcibios_claim_console_setup();

        pci_assign_unassigned_resources();
        for (hose = hose_head; hose; hose = hose->next) {
                bus = hose->bus;
                if (bus)
                        pci_bus_add_devices(bus);
        }
}

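/*
 * Boot-time allocators: hoses and their resources are set up from early
 * arch init code, before the regular allocators are available, so they
 * are carved directly out of memblock.
 */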
struct pci_controller * __init
alloc_pci_controller(void)
{
        struct pci_controller *hose;

        hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
        if (!hose)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      sizeof(*hose));

        *hose_tail = hose;
        hose_tail = &hose->next;

        return hose;
}

struct resource * __init
alloc_resource(void)
{
        void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);

        if (!ptr)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      sizeof(struct resource));

        return ptr;
}



/* Provide information on locations of various I/O regions in physical
   memory. Do this on a per-card basis so that we choose the right hose. */

SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
                unsigned long, dfn)
{
        struct pci_controller *hose;
        struct pci_dev *dev;

        /* from hose or from bus.devfn */
        if (which & IOBASE_FROM_HOSE) {
                for (hose = hose_head; hose; hose = hose->next)
                        if (hose->index == bus)
                                break;
                if (!hose)
                        return -ENODEV;
        } else {
                /* Special hook for ISA access. */
                if (bus == 0 && dfn == 0) {
                        hose = pci_isa_hose;
                } else {
                        dev = pci_get_domain_bus_and_slot(0, bus, dfn);
                        if (!dev)
                                return -ENODEV;
                        hose = dev->sysdata;
                        pci_dev_put(dev);
                }
        }

        switch (which & ~IOBASE_FROM_HOSE) {
        case IOBASE_HOSE:
                return hose->index;
        case IOBASE_SPARSE_MEM:
                return hose->sparse_mem_base;
        case IOBASE_DENSE_MEM:
                return hose->dense_mem_base;
        case IOBASE_SPARSE_IO:
                return hose->sparse_io_base;
        case IOBASE_DENSE_IO:
                return hose->dense_io_base;
        case IOBASE_ROOT_BUS:
                return hose->bus->number;
        }

        return -EOPNOTSUPP;
}


/* Destroy an __iomem token. Not copied from lib/iomap.c. */

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
        if (__is_mmio(addr))
                iounmap(addr);
}

EXPORT_SYMBOL(pci_iounmap);

/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);