/* $OpenBSD: qcsmem.c,v 1.1 2023/05/19 21:13:49 patrick Exp $ */
/*
 * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/atomic.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/fdt.h>

#define QCSMEM_ITEM_FIXED	8
#define QCSMEM_ITEM_COUNT	512
#define QCSMEM_HOST_COUNT	15
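
/*
 * Layout of the shared memory region as interpreted by this driver: the
 * partition table lives in the last page of the region, the legacy global
 * header (with its TOC of global entries) sits at the start, and each
 * partition described by the table begins with a partition header followed
 * by private entries, uncached entries growing from the front and cached
 * entries from the back.
 */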

struct qcsmem_proc_comm {
	uint32_t command;
	uint32_t status;
	uint32_t params[2];
};

struct qcsmem_global_entry {
	uint32_t allocated;
	uint32_t offset;
	uint32_t size;
	uint32_t aux_base;
#define QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK	0xfffffffc
};

struct qcsmem_header {
	struct qcsmem_proc_comm proc_comm[4];
	uint32_t version[32];
#define QCSMEM_HEADER_VERSION_MASTER_SBL_IDX	7
#define QCSMEM_HEADER_VERSION_GLOBAL_HEAP	11
#define QCSMEM_HEADER_VERSION_GLOBAL_PART	12
	uint32_t initialized;
	uint32_t free_offset;
	uint32_t available;
	uint32_t reserved;
	struct qcsmem_global_entry toc[QCSMEM_ITEM_COUNT];
};

struct qcsmem_ptable_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host[2];
#define QCSMEM_LOCAL_HOST	0
#define QCSMEM_GLOBAL_HOST	0xfffe
	uint32_t cacheline;
	uint32_t reserved[7];
};

struct qcsmem_ptable {
	uint32_t magic;
#define QCSMEM_PTABLE_MAGIC	0x434f5424
	uint32_t version;
#define QCSMEM_PTABLE_VERSION	1
	uint32_t num_entries;
	uint32_t reserved[5];
	struct qcsmem_ptable_entry entry[];
};

struct qcsmem_partition_header {
	uint32_t magic;
#define QCSMEM_PART_HDR_MAGIC	0x54525024
	uint16_t host[2];
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct qcsmem_partition {
	struct qcsmem_partition_header *phdr;
	size_t cacheline;
	size_t size;
};

struct qcsmem_private_entry {
	uint16_t canary;
#define QCSMEM_PRIV_ENTRY_CANARY	0xa5a5
	uint16_t item;
	uint32_t size;
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved;
};

struct qcsmem_info {
	uint32_t magic;
#define QCSMEM_INFO_MAGIC	0x49494953
	uint32_t size;
	uint32_t base_addr;
	uint32_t reserved;
	uint32_t num_items;
};

struct qcsmem_softc {
	struct device sc_dev;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	int sc_node;

	bus_addr_t sc_aux_base;
	bus_size_t sc_aux_size;

	int sc_item_count;
	struct qcsmem_partition sc_global_partition;
	struct qcsmem_partition sc_partitions[QCSMEM_HOST_COUNT];
};

struct qcsmem_softc *qcsmem_sc;

int	qcsmem_match(struct device *, void *, void *);
void	qcsmem_attach(struct device *, struct device *, void *);

const struct cfattach qcsmem_ca = {
	sizeof (struct qcsmem_softc), qcsmem_match, qcsmem_attach
};

struct cfdriver qcsmem_cd = {
	NULL, "qcsmem", DV_DULL
};

int
qcsmem_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "qcom,smem");
}

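/*
 * Map the SMEM region, sanity check the partition table and header
 * version, and record the global and per-host partitions for later
 * lookups.
 */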
void
qcsmem_attach(struct device *parent, struct device *self, void *aux)
{
	struct qcsmem_softc *sc = (struct qcsmem_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct qcsmem_header *header;
	struct qcsmem_ptable *ptable;
	struct qcsmem_ptable_entry *pte;
	struct qcsmem_info *info;
	struct qcsmem_partition *part;
	struct qcsmem_partition_header *phdr;
	uint32_t version;
	int i;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_aux_base = faa->fa_reg[0].addr;
	sc->sc_aux_size = faa->fa_reg[0].size;

	ptable = bus_space_vaddr(sc->sc_iot, sc->sc_ioh) +
	    faa->fa_reg[0].size - PAGE_SIZE;
	if (ptable->magic != QCSMEM_PTABLE_MAGIC ||
	    ptable->version != QCSMEM_PTABLE_VERSION) {
		printf(": unsupported ptable 0x%x/0x%x\n",
		    ptable->magic, ptable->version);
		bus_space_unmap(sc->sc_iot, sc->sc_ioh,
		    faa->fa_reg[0].size);
		return;
	}

	header = bus_space_vaddr(sc->sc_iot, sc->sc_ioh);
	version = header->version[QCSMEM_HEADER_VERSION_MASTER_SBL_IDX] >> 16;
	if (version != QCSMEM_HEADER_VERSION_GLOBAL_PART) {
		printf(": unsupported header 0x%x\n", version);
		return;
	}

	for (i = 0; i < ptable->num_entries; i++) {
		pte = &ptable->entry[i];
		if (!pte->offset || !pte->size)
			continue;
		if (pte->host[0] == QCSMEM_GLOBAL_HOST &&
		    pte->host[1] == QCSMEM_GLOBAL_HOST)
			part = &sc->sc_global_partition;
		else if (pte->host[0] == QCSMEM_LOCAL_HOST &&
		    pte->host[1] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[1]];
		else if (pte->host[1] == QCSMEM_LOCAL_HOST &&
		    pte->host[0] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[0]];
		else
			continue;
		if (part->phdr != NULL)
			continue;
		phdr = bus_space_vaddr(sc->sc_iot, sc->sc_ioh) +
		    pte->offset;
		if (phdr->magic != QCSMEM_PART_HDR_MAGIC) {
			printf(": unsupported partition 0x%x\n",
			    phdr->magic);
			return;
		}
		if (pte->host[0] != phdr->host[0] ||
		    pte->host[1] != phdr->host[1]) {
			printf(": bad hosts 0x%x/0x%x+0x%x/0x%x\n",
			    pte->host[0], phdr->host[0],
			    pte->host[1], phdr->host[1]);
			return;
		}
		if (pte->size != phdr->size) {
			printf(": bad size 0x%x/0x%x\n",
			    pte->size, phdr->size);
			return;
		}
		if (phdr->offset_free_uncached > phdr->size) {
			printf(": bad size 0x%x > 0x%x\n",
			    phdr->offset_free_uncached, phdr->size);
			return;
		}
		part->phdr = phdr;
		part->size = pte->size;
		part->cacheline = pte->cacheline;
	}
	if (sc->sc_global_partition.phdr == NULL) {
		printf(": could not find global partition\n");
		return;
	}

	sc->sc_item_count = QCSMEM_ITEM_COUNT;
	info = (struct qcsmem_info *)&ptable->entry[ptable->num_entries];
	if (info->magic == QCSMEM_INFO_MAGIC)
		sc->sc_item_count = info->num_items;

	printf("\n");

	qcsmem_sc = sc;
}

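/*
 * Allocate an uncached private entry for "item" in the given partition.
 * Returns 0 if the item already exists or was created, EINVAL if the
 * partition looks corrupt or there is no room left before the cached area.
 */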
int
qcsmem_alloc_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;

	entry = (void *)&phdr[1];
	last = (void *)phdr + phdr->offset_free_uncached;

	if ((void *)last > (void *)phdr + part->size)
		return EINVAL;

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			printf("%s: invalid canary\n", sc->sc_dev.dv_xname);
			return EINVAL;
		}

		if (entry->item == item)
			return 0;

		entry = (void *)&entry[1] + entry->padding_hdr +
		    entry->size;
	}

	if ((void *)entry > (void *)phdr + part->size)
		return EINVAL;

	if ((void *)&entry[1] + roundup(size, 8) >
	    (void *)phdr + phdr->offset_free_cached)
		return EINVAL;

	entry->canary = QCSMEM_PRIV_ENTRY_CANARY;
	entry->item = item;
	entry->size = roundup(size, 8);
	entry->padding_data = entry->size - size;
	entry->padding_hdr = 0;
	membar_producer();

	phdr->offset_free_uncached += sizeof(*entry) + entry->size;

	return 0;
}

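/*
 * Allocate "item" from the legacy global TOC at the start of the region.
 */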
int
qcsmem_alloc_global(struct qcsmem_softc *sc, int item, int size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;

	header = bus_space_vaddr(sc->sc_iot, sc->sc_ioh);
	entry = &header->toc[item];
	if (entry->allocated)
		return 0;

	size = roundup(size, 8);
	if (size > header->available)
		return EINVAL;

	entry->offset = header->free_offset;
	entry->size = size;
	membar_producer();
	entry->allocated = 1;

	header->free_offset += size;
	header->available -= size;

	return 0;
}

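/*
 * Allocate an SMEM item of "size" bytes on behalf of "host".  The hardware
 * spinlock serializes concurrent updates to the shared region; allocation
 * goes to the per-host partition if one exists, otherwise to the global
 * partition, and as a last resort to the global TOC.
 */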
int
qcsmem_alloc(int host, int item, int size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	int ret;

	if (sc == NULL)
		return ENXIO;

	if (item < QCSMEM_ITEM_FIXED)
		return EPERM;

	if (item >= sc->sc_item_count)
		return ENXIO;

	ret = hwlock_lock_idx_timeout(sc->sc_node, 0, 1000);
	if (ret)
		return ret;

	if (host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else {
		ret = qcsmem_alloc_global(sc, item, size);
	}

	hwlock_unlock_idx(sc->sc_node, 0);
	return ret;
}

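/*
 * Look up "item" in a private partition: walk the uncached entries forward
 * from the partition header, then the cached entries backward from the end
 * of the partition.  Returns a pointer to the item's data and its usable
 * size, or NULL if it is not found or the partition is corrupt.
 */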
void *
qcsmem_get_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int *size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;

	entry = (void *)&phdr[1];
	last = (void *)phdr + phdr->offset_free_uncached;

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			printf("%s: invalid canary\n", sc->sc_dev.dv_xname);
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)&entry[1] + entry->padding_hdr;
		}

		entry = (void *)&entry[1] + entry->padding_hdr +
		    entry->size;
	}

	if ((void *)entry > (void *)phdr + part->size)
		return NULL;

	entry = (void *)phdr + phdr->size -
	    roundup(sizeof(*entry), part->cacheline);
	last = (void *)phdr + phdr->offset_free_cached;

	if ((void *)entry < (void *)phdr ||
	    (void *)last > (void *)phdr + part->size)
		return NULL;

	while (entry > last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			printf("%s: invalid canary\n", sc->sc_dev.dv_xname);
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)entry - entry->size;
		}

		entry = (void *)entry - entry->size -
		    roundup(sizeof(*entry), part->cacheline);
	}

	if ((void *)entry < (void *)phdr)
		return NULL;

	return NULL;
}

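/*
 * Look up "item" in the global TOC and return a pointer into the mapped
 * region, after checking that the entry stays within it.
 */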
void *
qcsmem_get_global(struct qcsmem_softc *sc, int item, int *size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;
	uint32_t aux_base;

	header = bus_space_vaddr(sc->sc_iot, sc->sc_ioh);
	entry = &header->toc[item];
	if (!entry->allocated)
		return NULL;

	aux_base = entry->aux_base & QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK;
	if (aux_base != 0 && aux_base != sc->sc_aux_base)
		return NULL;

	if (entry->size + entry->offset > sc->sc_aux_size)
		return NULL;

	if (size != NULL)
		*size = entry->size;

	return bus_space_vaddr(sc->sc_iot, sc->sc_ioh) + entry->offset;
}

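/*
 * Return a pointer to SMEM item "item" shared with "host" (and its size
 * via "size"), or NULL if it does not exist.  The same partition selection
 * as in qcsmem_alloc() applies.
 */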
void *
qcsmem_get(int host, int item, int *size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	void *p = NULL;
	int ret;

	if (sc == NULL)
		return NULL;

	if (item >= sc->sc_item_count)
		return NULL;

	ret = hwlock_lock_idx_timeout(sc->sc_node, 0, 1000);
	if (ret)
		return NULL;

	if (host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		p = qcsmem_get_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		p = qcsmem_get_private(sc, part, item, size);
	} else {
		p = qcsmem_get_global(sc, item, size);
	}

	hwlock_unlock_idx(sc->sc_node, 0);
	return p;
}