xref: /linux/drivers/char/agp/sworks-agp.c (revision 2da68a77)
/*
 * Serverworks AGPGART routines.
 */
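
/*
 * The GART on these bridges is a two-level structure: a one-page
 * directory of 1024 dword entries, each pointing at a one-page GATT
 * covering 4 MB of aperture.  Unused directory slots point at a scratch
 * directory, and unused GATT entries point at the bridge's scratch page,
 * so stray AGP cycles always hit a harmless page.
 */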

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include <asm/set_memory.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c


struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}

	set_memory_uc((unsigned long)page_map->real, 1);
	page_map->remapped = page_map->real;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		/* Red Pen: Everyone else does pci posting flush here */

	return 0;
}
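
/*
 * If the posting flush that the "Red Pen" note asks for were added, it
 * would most likely be a single read-back of the last entry written,
 * e.g. (sketch only, not part of the original code):
 *
 *	readl(page_map->remapped + PAGE_SIZE / sizeof(unsigned long) - 1);
 *
 * which forces the preceding writel()s out of any PCI write buffers.
 */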

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	set_memory_wb((unsigned long)page_map->real, 1);
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for (i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0) break;
	}
	/*
	 * Publish the (possibly partial) set before checking for errors so
	 * that serverworks_free_gatt_pages() can tear it down on failure.
	 */
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0) serverworks_free_gatt_pages();

	return retval;
}

#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
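
/*
 * Worked example (values chosen purely for illustration): with
 * gart_bus_addr = 0xf0000000, an aperture address of 0xf0401000 gives
 * GET_PAGE_DIR_OFF(0xf0401000) = 0x3c1, so GET_PAGE_DIR_IDX() = 1
 * (the second GATT page) and GET_GATT_OFF() = 1 (the second entry in
 * that page); SVRWRKS_GET_GATT() then returns the remapped pointer for
 * that GATT page.
 */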

static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for (i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, because it's
	 * used to program the agp master, not the cpu.
	 */

	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}
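
/*
 * After serverworks_create_gatt_table() the directory looks like this:
 * entries 0 .. num_entries/1024 - 1 point at real GATT pages (with the
 * low bit set as the valid bit), and every remaining entry still points
 * at the scratch directory set up above.  page_dir itself is a local,
 * but its page lives on via agp_bridge->gatt_table{,_real} and
 * agp_bridge->gatt_bus_addr.
 */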

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
					SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
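
/*
 * serverworks_fetch_size() uses the standard BAR sizing trick: save the
 * aperture base register, write the all-ones mask (SVWRKS_SIZE_MASK),
 * read back which bits stuck, then restore the original value.  The
 * read-back value selects a row of serverworks_sizes[]; e.g. a read-back
 * of 0xfe000000 matches the 32 MB entry.
 */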

/*
 * This routine could be implemented by taking the addresses written to
 * the GATT and flushing them individually.  However, currently it just
 * flushes the whole table, which is probably more efficient, since
 * agp_memory blocks can span a large number of entries.  (A sketch of
 * the per-entry variant follows the function below.)
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	unsigned long timeout;

	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	timeout = jiffies + 3*HZ;
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			dev_err(&serverworks_private.svrwrks_dev->dev,
				"TLB post flush took more than 3 seconds\n");
			break;
		}
	}

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	timeout = jiffies + 3*HZ;
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			dev_err(&serverworks_private.svrwrks_dev->dev,
				"TLB Dir flush took more than 3 seconds\n");
			break;
		}
	}
}
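
/*
 * A per-entry variant of the flush described above might look roughly
 * like the sketch below.  This is illustrative only: the helper name is
 * hypothetical, and it assumes the (currently unused) SVWRKS_TLBFLUSH
 * register accepts a single aperture address to invalidate, which is
 * not something this file documents.
 *
 *	static void serverworks_tlbflush_range(struct agp_memory *mem,
 *					       off_t pg_start)
 *	{
 *		unsigned long addr;
 *		int i;
 *
 *		for (i = 0; i < mem->page_count; i++) {
 *			addr = (pg_start + i) * PAGE_SIZE +
 *				agp_bridge->gart_bus_addr;
 *			writel(addr, serverworks_private.registers +
 *				SVWRKS_TLBFLUSH);
 *		}
 *	}
 */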

static int serverworks_configure(void)
{
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);

	pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* Agp Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);

	return 0;
}

static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}

static int serverworks_insert_memory(struct agp_memory *mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}

static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}

static const struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

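
/*
 * Each row below is {aperture size in MB, number of 4 KB GATT entries,
 * APSIZE read-back value}; e.g. the 32 MB row maps to 8192 entries and a
 * size-probe value of 0xfe000000, matching serverworks_fetch_size() above.
 */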
static const struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};

static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, false);
}
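
/*
 * The bit manipulation above edits the standard AGP command word: bit 4
 * (fast writes) and bit 3 are cleared and bit 8 (AGP enable) is set
 * before the command is written to the bridge and then propagated to the
 * AGP devices by agp_device_command().
 */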

static const struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static int agp_serverworks_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			dev_err(&pdev->dev, "unsupported Serverworks chipset "
				"[%04x/%04x]\n", pdev->vendor, pdev->device);
		return -ENODEV;
	}

	/* Everything we need is on function 1 here, so hard-code function 1 */
	bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
			(unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		dev_info(&pdev->dev, "can't find secondary device\n");
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit aperture address, "
				 "but top bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit MMIO address, but top "
				 "bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pci_dev_get(pdev);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	pci_dev_put(bridge->dev);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
	pci_dev_put(serverworks_private.svrwrks_dev);
	serverworks_private.svrwrks_dev = NULL;
}

static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");