1 // SPDX-License-Identifier: GPL-2.0+
2 #include <linux/module.h>
3 #include <linux/pci.h>
4 #include <linux/types.h>
5 #include <linux/export.h>
6 #include <linux/slab.h>
7 #include <linux/io.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <linux/mfd/core.h>
10 #include <linux/platform_device.h>
11 #include <linux/ioport.h>
12 #include <linux/uio_driver.h>
13 #include "pcie.h"
14
/* Core (Resource) Table Layout:
 *    one Resource per record (8 bytes)
 *
 *    [63:52]  Core Type                          [up to 4095 types]
 *    [51]     S2C DMA Present
 *    [50:48]  S2C DMA Channel Number             [up to 8 channels]
 *    [47:32]  Register Count (64-bit registers)  [up to 65535 registers]
 *    [31:16]  Core Offset (in 4kB blocks)        [up to 65535 cores]
 *    [15]     C2S DMA Present
 *    [14:12]  C2S DMA Channel Number             [up to 8 channels]
 *    [11:10]  IRQ Count                          [0 to 3 IRQs per core]
 *    [9:3]    IRQ Base Number                    [up to 128 IRQs per card]
 *    [2:0]    Spare
 */
32
/*
 * Legacy DMA-channel encoding handed to child drivers: bit 3 set means
 * "DMA present", bits [2:0] carry the channel number; 0 means no DMA.
 */
#define KPC_OLD_DMA_CH_NUM(present, channel) \
	((present) ? (0x8 | ((channel) & 0x7)) : 0)
#define KPC_OLD_S2C_DMA_CH_NUM(cte) \
	KPC_OLD_DMA_CH_NUM(cte.s2c_dma_present, cte.s2c_dma_channel_num)
#define KPC_OLD_C2S_DMA_CH_NUM(cte) \
	KPC_OLD_DMA_CH_NUM(cte.c2s_dma_present, cte.c2s_dma_channel_num)

/* Well-known core type ids found in the core table. */
#define KP_CORE_ID_INVALID 0
#define KP_CORE_ID_I2C 3
#define KP_CORE_ID_SPI 5
43
/*
 * Decoded form of one 8-byte core table record (see the Core (Resource)
 * Table Layout comment at the top of this file for raw bit positions).
 */
struct core_table_entry {
	u16 type;                /* core type id (KP_CORE_ID_*) */
	u32 offset;              /* register window offset, in bytes (table stores 4kB blocks) */
	u32 length;              /* register window size, in bytes (table stores 64-bit reg count) */
	bool s2c_dma_present;
	u8 s2c_dma_channel_num;  /* valid only when s2c_dma_present */
	bool c2s_dma_present;
	u8 c2s_dma_channel_num;  /* valid only when c2s_dma_present */
	u8 irq_count;            /* 0..3 IRQs used by this core */
	u8 irq_base_num;         /* first card-level IRQ bit for this core */
};
55
56 static
parse_core_table_entry_v0(struct core_table_entry * cte,const u64 read_val)57 void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val)
58 {
59 cte->type = ((read_val & 0xFFF0000000000000UL) >> 52);
60 cte->offset = ((read_val & 0x00000000FFFF0000UL) >> 16) * 4096;
61 cte->length = ((read_val & 0x0000FFFF00000000UL) >> 32) * 8;
62 cte->s2c_dma_present = ((read_val & 0x0008000000000000UL) >> 51);
63 cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000UL) >> 48);
64 cte->c2s_dma_present = ((read_val & 0x0000000000008000UL) >> 15);
65 cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000UL) >> 12);
66 cte->irq_count = ((read_val & 0x0000000000000C00UL) >> 10);
67 cte->irq_base_num = ((read_val & 0x00000000000003F8UL) >> 3);
68 }
69
/*
 * Dump one decoded core table entry via dev_dbg.  Offset and length are
 * shown both in bytes and in raw table units (4kB blocks / 64-bit regs);
 * DMA channel numbers print as -1 when the direction is not present.
 */
static
void dbg_cte(struct kp2000_device *pcard, struct core_table_entry *cte)
{
	dev_dbg(&pcard->pdev->dev,
		"CTE: type:%3d offset:%3d (%3d) length:%3d (%3d) s2c:%d c2s:%d irq_count:%d base_irq:%d\n",
		cte->type,
		cte->offset,
		cte->offset / 4096,
		cte->length,
		cte->length / 8,
		(cte->s2c_dma_present ? cte->s2c_dma_channel_num : -1),
		(cte->c2s_dma_present ? cte->c2s_dma_channel_num : -1),
		cte->irq_count,
		cte->irq_base_num
	);
}
86
87 static
parse_core_table_entry(struct core_table_entry * cte,const u64 read_val,const u8 entry_rev)88 void parse_core_table_entry(struct core_table_entry *cte, const u64 read_val, const u8 entry_rev)
89 {
90 switch (entry_rev) {
91 case 0:
92 parse_core_table_entry_v0(cte, read_val);
93 break;
94 default:
95 cte->type = 0;
96 break;
97 }
98 }
99
probe_core_basic(unsigned int core_num,struct kp2000_device * pcard,char * name,const struct core_table_entry cte)100 static int probe_core_basic(unsigned int core_num, struct kp2000_device *pcard,
101 char *name, const struct core_table_entry cte)
102 {
103 struct mfd_cell cell = { .id = core_num, .name = name };
104 struct resource resources[2];
105
106 struct kpc_core_device_platdata core_pdata = {
107 .card_id = pcard->card_id,
108 .build_version = pcard->build_version,
109 .hardware_revision = pcard->hardware_revision,
110 .ssid = pcard->ssid,
111 .ddna = pcard->ddna,
112 };
113
114 dev_dbg(&pcard->pdev->dev,
115 "Found Basic core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
116 cte.type,
117 KPC_OLD_S2C_DMA_CH_NUM(cte),
118 KPC_OLD_C2S_DMA_CH_NUM(cte),
119 cte.offset,
120 cte.length,
121 cte.length / 8);
122
123 cell.platform_data = &core_pdata;
124 cell.pdata_size = sizeof(struct kpc_core_device_platdata);
125 cell.num_resources = 2;
126
127 memset(&resources, 0, sizeof(resources));
128
129 resources[0].start = cte.offset;
130 resources[0].end = cte.offset + (cte.length - 1);
131 resources[0].flags = IORESOURCE_MEM;
132
133 resources[1].start = pcard->pdev->irq;
134 resources[1].end = pcard->pdev->irq;
135 resources[1].flags = IORESOURCE_IRQ;
136
137 cell.resources = resources;
138
139 return mfd_add_devices(PCARD_TO_DEV(pcard), // parent
140 pcard->card_num * 100, // id
141 &cell, // struct mfd_cell *
142 1, // ndevs
143 &pcard->regs_base_resource,
144 0, // irq_base
145 NULL); // struct irq_domain *
146 }
147
/*
 * Per-core bookkeeping for a UIO-exposed core.  One of these is allocated
 * per UIO core in probe_core_uio() and linked into
 * pcard->uio_devices_list so kp2000_remove_cores() can tear them down.
 */
struct kpc_uio_device {
	struct list_head list;       /* entry in pcard->uio_devices_list */
	struct kp2000_device *pcard; /* owning card */
	struct device *dev;          /* class device created for this core */
	struct uio_info uioinfo;     /* UIO registration data */
	struct core_table_entry cte; /* this core's decoded table entry (copy) */
	u16 core_num;                /* instance number within the core type */
};
156
offset_show(struct device * dev,struct device_attribute * attr,char * buf)157 static ssize_t offset_show(struct device *dev, struct device_attribute *attr,
158 char *buf)
159 {
160 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
161
162 return sprintf(buf, "%u\n", kudev->cte.offset);
163 }
164 static DEVICE_ATTR_RO(offset);
165
size_show(struct device * dev,struct device_attribute * attr,char * buf)166 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
167 char *buf)
168 {
169 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
170
171 return sprintf(buf, "%u\n", kudev->cte.length);
172 }
173 static DEVICE_ATTR_RO(size);
174
type_show(struct device * dev,struct device_attribute * attr,char * buf)175 static ssize_t type_show(struct device *dev, struct device_attribute *attr,
176 char *buf)
177 {
178 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
179
180 return sprintf(buf, "%u\n", kudev->cte.type);
181 }
182 static DEVICE_ATTR_RO(type);
183
s2c_dma_show(struct device * dev,struct device_attribute * attr,char * buf)184 static ssize_t s2c_dma_show(struct device *dev, struct device_attribute *attr,
185 char *buf)
186 {
187 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
188
189 if (!kudev->cte.s2c_dma_present)
190 return sprintf(buf, "%s", "not present\n");
191
192 return sprintf(buf, "%u\n", kudev->cte.s2c_dma_channel_num);
193 }
194 static DEVICE_ATTR_RO(s2c_dma);
195
c2s_dma_show(struct device * dev,struct device_attribute * attr,char * buf)196 static ssize_t c2s_dma_show(struct device *dev, struct device_attribute *attr,
197 char *buf)
198 {
199 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
200
201 if (!kudev->cte.c2s_dma_present)
202 return sprintf(buf, "%s", "not present\n");
203
204 return sprintf(buf, "%u\n", kudev->cte.c2s_dma_channel_num);
205 }
206 static DEVICE_ATTR_RO(c2s_dma);
207
irq_count_show(struct device * dev,struct device_attribute * attr,char * buf)208 static ssize_t irq_count_show(struct device *dev, struct device_attribute *attr,
209 char *buf)
210 {
211 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
212
213 return sprintf(buf, "%u\n", kudev->cte.irq_count);
214 }
215 static DEVICE_ATTR_RO(irq_count);
216
irq_base_num_show(struct device * dev,struct device_attribute * attr,char * buf)217 static ssize_t irq_base_num_show(struct device *dev,
218 struct device_attribute *attr, char *buf)
219 {
220 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
221
222 return sprintf(buf, "%u\n", kudev->cte.irq_base_num);
223 }
224 static DEVICE_ATTR_RO(irq_base_num);
225
core_num_show(struct device * dev,struct device_attribute * attr,char * buf)226 static ssize_t core_num_show(struct device *dev, struct device_attribute *attr,
227 char *buf)
228 {
229 struct kpc_uio_device *kudev = dev_get_drvdata(dev);
230
231 return sprintf(buf, "%u\n", kudev->core_num);
232 }
233 static DEVICE_ATTR_RO(core_num);
234
/*
 * Default sysfs attributes for every kpc_uio class device; they expose
 * the decoded core table entry fields defined by the show functions above.
 * Non-static: referenced by the class definition elsewhere in the driver.
 */
struct attribute *kpc_uio_class_attrs[] = {
	&dev_attr_offset.attr,
	&dev_attr_size.attr,
	&dev_attr_type.attr,
	&dev_attr_s2c_dma.attr,
	&dev_attr_c2s_dma.attr,
	&dev_attr_irq_count.attr,
	&dev_attr_irq_base_num.attr,
	&dev_attr_core_num.attr,
	NULL,
};
246
247 static
kp2000_check_uio_irq(struct kp2000_device * pcard,u32 irq_num)248 int kp2000_check_uio_irq(struct kp2000_device *pcard, u32 irq_num)
249 {
250 u64 interrupt_active = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
251 u64 interrupt_mask_inv = ~readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
252 u64 irq_check_mask = BIT_ULL(irq_num);
253
254 if (interrupt_active & irq_check_mask) { // if it's active (interrupt pending)
255 if (interrupt_mask_inv & irq_check_mask) { // and if it's not masked off
256 return 1;
257 }
258 }
259 return 0;
260 }
261
262 static
kuio_handler(int irq,struct uio_info * uioinfo)263 irqreturn_t kuio_handler(int irq, struct uio_info *uioinfo)
264 {
265 struct kpc_uio_device *kudev = uioinfo->priv;
266
267 if (irq != kudev->pcard->pdev->irq)
268 return IRQ_NONE;
269
270 if (kp2000_check_uio_irq(kudev->pcard, kudev->cte.irq_base_num)) {
271 /* Clear the active flag */
272 writeq(BIT_ULL(kudev->cte.irq_base_num),
273 kudev->pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
274 return IRQ_HANDLED;
275 }
276 return IRQ_NONE;
277 }
278
279 static
kuio_irqcontrol(struct uio_info * uioinfo,s32 irq_on)280 int kuio_irqcontrol(struct uio_info *uioinfo, s32 irq_on)
281 {
282 struct kpc_uio_device *kudev = uioinfo->priv;
283 struct kp2000_device *pcard = kudev->pcard;
284 u64 mask;
285
286 mutex_lock(&pcard->sem);
287 mask = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
288 if (irq_on)
289 mask &= ~(BIT_ULL(kudev->cte.irq_base_num));
290 else
291 mask |= BIT_ULL(kudev->cte.irq_base_num);
292 writeq(mask, pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
293 mutex_unlock(&pcard->sem);
294
295 return 0;
296 }
297
/*
 * Register a UIO child device exposing one core's register window to
 * user space.
 *
 * The window is published as a single UIO_MEM_PHYS mapping (size rounded
 * up to a page).  When the core has interrupts, the card's PCI IRQ is
 * registered IRQF_SHARED and demultiplexed by kuio_handler() using the
 * core's irq_base_num bit.
 *
 * Returns 0 on success or a negative errno; on failure the kudev
 * allocation is freed before returning.
 */
static int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard,
			  char *name, const struct core_table_entry cte)
{
	struct kpc_uio_device *kudev;
	int rv;

	dev_dbg(&pcard->pdev->dev,
		"Found UIO core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
		cte.type,
		KPC_OLD_S2C_DMA_CH_NUM(cte),
		KPC_OLD_C2S_DMA_CH_NUM(cte),
		cte.offset,
		cte.length,
		cte.length / 8);

	kudev = kzalloc(sizeof(*kudev), GFP_KERNEL);
	if (!kudev)
		return -ENOMEM;

	INIT_LIST_HEAD(&kudev->list);
	kudev->pcard = pcard;
	kudev->cte = cte;	/* struct copy: kudev keeps its own entry */
	kudev->core_num = core_num;

	kudev->uioinfo.priv = kudev;
	kudev->uioinfo.name = name;
	kudev->uioinfo.version = "0.0";
	if (cte.irq_count > 0) {
		kudev->uioinfo.irq_flags = IRQF_SHARED;
		kudev->uioinfo.irq = pcard->pdev->irq;
		kudev->uioinfo.handler = kuio_handler;
		kudev->uioinfo.irqcontrol = kuio_irqcontrol;
	} else {
		kudev->uioinfo.irq = 0;	/* no interrupt support for this core */
	}

	kudev->uioinfo.mem[0].name = "uiomap";
	kudev->uioinfo.mem[0].addr = pci_resource_start(pcard->pdev, REG_BAR) + cte.offset;

	// Round up to nearest PAGE_SIZE boundary
	kudev->uioinfo.mem[0].size = (cte.length + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	kudev->uioinfo.mem[0].memtype = UIO_MEM_PHYS;

	/* Device name: <uio name>.<card>.<core type>.<instance>. */
	kudev->dev = device_create(kpc_uio_class,
				   &pcard->pdev->dev, MKDEV(0, 0), kudev, "%s.%d.%d.%d",
				   kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
	if (IS_ERR(kudev->dev)) {
		dev_err(&pcard->pdev->dev, "%s: device_create failed!\n",
			__func__);
		kfree(kudev);
		return -ENODEV;
	}
	dev_set_drvdata(kudev->dev, kudev);

	rv = uio_register_device(kudev->dev, &kudev->uioinfo);
	if (rv) {
		dev_err(&pcard->pdev->dev, "%s: failed uio_register_device: %d\n",
			__func__, rv);
		/* NOTE(review): this path drops only a reference, while
		 * kp2000_remove_cores() uses device_unregister(); confirm
		 * put_device() alone fully unwinds device_create() here. */
		put_device(kudev->dev);
		kfree(kudev);
		return rv;
	}

	/* Success: track it for teardown in kp2000_remove_cores(). */
	list_add_tail(&kudev->list, &pcard->uio_devices_list);

	return 0;
}
365
/*
 * Register one kpc_dma_controller MFD child for the DMA engine whose
 * registers start at engine_regs_offset (relative to dma_base_resource).
 * Returns 0 or a negative errno from mfd_add_devices(), which copies the
 * cell and resources, so stack storage is fine.
 */
static int create_dma_engine_core(struct kp2000_device *pcard,
				  size_t engine_regs_offset,
				  int engine_num, int irq_num)
{
	struct resource resources[2] = {
		{
			.start = engine_regs_offset,
			.end   = engine_regs_offset + (KPC_DMA_ENGINE_SIZE - 1),
			.flags = IORESOURCE_MEM,
		},
		{
			.start = irq_num,
			.end   = irq_num,
			.flags = IORESOURCE_IRQ,
		},
	};
	struct mfd_cell cell = {
		.id = engine_num,
		.name = KP_DRIVER_NAME_DMA_CONTROLLER,
		.platform_data = NULL,
		.pdata_size = 0,
		.resources = resources,
		.num_resources = ARRAY_SIZE(resources),
	};

	return mfd_add_devices(PCARD_TO_DEV(pcard),	// parent
			       pcard->card_num * 100,	// id
			       &cell,			// struct mfd_cell *
			       1,			// ndevs
			       &pcard->dma_base_resource,
			       0,			// irq_base
			       NULL);			// struct irq_domain *
}
398
kp2000_setup_dma_controller(struct kp2000_device * pcard)399 static int kp2000_setup_dma_controller(struct kp2000_device *pcard)
400 {
401 int err;
402 unsigned int i;
403 u64 capabilities_reg;
404
405 // S2C Engines
406 for (i = 0 ; i < 32 ; i++) {
407 capabilities_reg = readq(pcard->dma_bar_base +
408 KPC_DMA_S2C_BASE_OFFSET +
409 (KPC_DMA_ENGINE_SIZE * i));
410
411 if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
412 err = create_dma_engine_core(pcard, (KPC_DMA_S2C_BASE_OFFSET +
413 (KPC_DMA_ENGINE_SIZE * i)),
414 i, pcard->pdev->irq);
415 if (err)
416 goto err_out;
417 }
418 }
419 // C2S Engines
420 for (i = 0 ; i < 32 ; i++) {
421 capabilities_reg = readq(pcard->dma_bar_base +
422 KPC_DMA_C2S_BASE_OFFSET +
423 (KPC_DMA_ENGINE_SIZE * i));
424
425 if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
426 err = create_dma_engine_core(pcard, (KPC_DMA_C2S_BASE_OFFSET +
427 (KPC_DMA_ENGINE_SIZE * i)),
428 32 + i, pcard->pdev->irq);
429 if (err)
430 goto err_out;
431 }
432 }
433
434 return 0;
435
436 err_out:
437 dev_err(&pcard->pdev->dev, "%s: failed to add a DMA Engine: %d\n",
438 __func__, err);
439 return err;
440 }
441
/*
 * Enumerate the card's core table and instantiate a child device for
 * every core: MFD children for known "basic" cores (I2C, SPI), UIO
 * devices for everything else, plus one UIO device exposing the board
 * info / core table region itself.
 *
 * Core table records are read as u64s from sysinfo_regs_base at
 * (core_table_offset + i) * 8 — presumably a byte offset into the
 * sysinfo window; confirm against the register map in pcie.h.
 *
 * Returns 0 on success; on any failure all cores registered so far are
 * torn down before the error is returned.
 */
int kp2000_probe_cores(struct kp2000_device *pcard)
{
	int err = 0;
	int i;
	int current_type_id;
	u64 read_val;
	unsigned int highest_core_id = 0;
	struct core_table_entry cte;

	err = kp2000_setup_dma_controller(pcard);
	if (err)
		return err;

	INIT_LIST_HEAD(&pcard->uio_devices_list);

	// First, iterate the core table looking for the highest CORE_ID
	for (i = 0 ; i < pcard->core_table_length ; i++) {
		read_val = readq(pcard->sysinfo_regs_base + ((pcard->core_table_offset + i) * 8));
		parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
		dbg_cte(pcard, &cte);
		if (cte.type > highest_core_id)
			highest_core_id = cte.type;
		if (cte.type == KP_CORE_ID_INVALID)
			dev_info(&pcard->pdev->dev, "Found Invalid core: %016llx\n", read_val);
	}
	// Then, iterate over the possible core types.
	for (current_type_id = 1 ; current_type_id <= highest_core_id ; current_type_id++) {
		/* Instance counter: cores of the same type get ids 0, 1, ... */
		unsigned int core_num = 0;
		/*
		 * Foreach core type, iterate the whole table and instantiate
		 * subdevices for each core.
		 * Yes, this is O(n*m) but the actual runtime is small enough
		 * that it's an acceptable tradeoff.
		 */
		for (i = 0 ; i < pcard->core_table_length ; i++) {
			read_val = readq(pcard->sysinfo_regs_base +
					 ((pcard->core_table_offset + i) * 8));
			parse_core_table_entry(&cte, read_val, pcard->core_table_rev);

			if (cte.type != current_type_id)
				continue;

			switch (cte.type) {
			case KP_CORE_ID_I2C:
				err = probe_core_basic(core_num, pcard,
						       KP_DRIVER_NAME_I2C, cte);
				break;

			case KP_CORE_ID_SPI:
				err = probe_core_basic(core_num, pcard,
						       KP_DRIVER_NAME_SPI, cte);
				break;

			default:
				/* Unknown cores are handed to user space via UIO. */
				err = probe_core_uio(core_num, pcard, "kpc_uio", cte);
				break;
			}
			if (err) {
				dev_err(&pcard->pdev->dev,
					"%s: failed to add core %d: %d\n",
					__func__, i, err);
				goto error;
			}
			core_num++;
		}
	}

	// Finally, instantiate a UIO device for the core_table.
	/* Hand-built entry: 512 u64 registers at the start of the BAR. */
	cte.type = 0; // CORE_ID_BOARD_INFO
	cte.offset = 0; // board info is always at the beginning
	cte.length = 512 * 8;
	cte.s2c_dma_present = false;
	cte.s2c_dma_channel_num = 0;
	cte.c2s_dma_present = false;
	cte.c2s_dma_channel_num = 0;
	cte.irq_count = 0;
	cte.irq_base_num = 0;
	err = probe_core_uio(0, pcard, "kpc_uio", cte);
	if (err) {
		dev_err(&pcard->pdev->dev, "%s: failed to add board_info core: %d\n",
			__func__, err);
		goto error;
	}

	return 0;

error:
	/* Unwind everything registered above (UIO and MFD children). */
	kp2000_remove_cores(pcard);
	mfd_remove_devices(PCARD_TO_DEV(pcard));
	return err;
}
533
kp2000_remove_cores(struct kp2000_device * pcard)534 void kp2000_remove_cores(struct kp2000_device *pcard)
535 {
536 struct list_head *ptr;
537 struct list_head *next;
538
539 list_for_each_safe(ptr, next, &pcard->uio_devices_list) {
540 struct kpc_uio_device *kudev = list_entry(ptr, struct kpc_uio_device, list);
541
542 uio_unregister_device(&kudev->uioinfo);
543 device_unregister(kudev->dev);
544 list_del(&kudev->list);
545 kfree(kudev);
546 }
547 }
548
549