/*	$OpenBSD: ofw_misc.c,v 1.43 2023/05/17 23:25:45 patrick Exp $	*/
/*
 * Copyright (c) 2017-2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_regulator.h>

/*
 * Register maps.
 */

struct regmap {
	int			rm_node;
	uint32_t		rm_phandle;
	bus_space_tag_t		rm_tag;
	bus_space_handle_t	rm_handle;
	bus_size_t		rm_size;

	LIST_ENTRY(regmap)	rm_list;
};

LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmap);

void
regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	struct regmap *rm;

	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
	rm->rm_node = node;
	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
	rm->rm_tag = tag;
	rm->rm_handle = handle;
	rm->rm_size = size;
	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
}

struct regmap *
regmap_bycompatible(char *compatible)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (OF_is_compatible(rm->rm_node, compatible))
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_bynode(int node)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_node == node)
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_byphandle(uint32_t phandle)
{
	struct regmap *rm;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_phandle == phandle)
			return rm;
	}

	return NULL;
}

void
regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
}

uint32_t
regmap_read_4(struct regmap *rm, bus_size_t offset)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
}
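
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver holding a phandle reference to a registered syscon node can
 * look up its regmap and do a read-modify-write on a register.  The
 * "example,syscon" property name is made up for illustration.
 *
 *	struct regmap *rm;
 *
 *	rm = regmap_byphandle(OF_getpropint(node, "example,syscon", 0));
 *	if (rm != NULL)
 *		regmap_write_4(rm, 0x10, regmap_read_4(rm, 0x10) | 0x1);
 */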

/*
 * Network interface support.
 */

LIST_HEAD(, if_device) if_devices =
    LIST_HEAD_INITIALIZER(if_devices);

void
if_register(struct if_device *ifd)
{
	ifd->if_phandle = OF_getpropint(ifd->if_node, "phandle", 0);

	LIST_INSERT_HEAD(&if_devices, ifd, if_list);
}

struct ifnet *
if_bynode(int node)
{
	struct if_device *ifd;

	LIST_FOREACH(ifd, &if_devices, if_list) {
		if (ifd->if_node == node)
			return (ifd->if_ifp);
	}

	return (NULL);
}

struct ifnet *
if_byphandle(uint32_t phandle)
{
	struct if_device *ifd;

	if (phandle == 0)
		return (NULL);

	LIST_FOREACH(ifd, &if_devices, if_list) {
		if (ifd->if_phandle == phandle)
			return (ifd->if_ifp);
	}

	return (NULL);
}
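
/*
 * Usage sketch (hypothetical, not part of this file): a network driver
 * that wants its interface to be discoverable through the device tree
 * embeds a struct if_device in its softc and registers it once the
 * interface has attached.
 *
 *	sc->sc_ifd.if_node = faa->fa_node;
 *	sc->sc_ifd.if_ifp = ifp;
 *	if_register(&sc->sc_ifd);
 */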

/*
 * PHY support.
 */

LIST_HEAD(, phy_device) phy_devices =
    LIST_HEAD_INITIALIZER(phy_devices);

void
phy_register(struct phy_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
}

int
phy_usb_nop_enable(int node)
{
	uint32_t vcc_supply;
	uint32_t *gpio;
	int len;

	vcc_supply = OF_getpropint(node, "vcc-supply", 0);
	if (vcc_supply)
		regulator_enable(vcc_supply);

	len = OF_getproplen(node, "reset-gpios");
	if (len <= 0)
		return 0;

	/* There should only be a single GPIO pin. */
	gpio = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "reset-gpios", gpio, len);

	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, 1);
	delay(10000);
	gpio_controller_set_pin(gpio, 0);

	free(gpio, M_TEMP, len);

	return 0;
}

int
phy_enable_cells(uint32_t *cells)
{
	struct phy_device *pd;
	uint32_t phandle = cells[0];
	int node;

	LIST_FOREACH(pd, &phy_devices, pd_list) {
		if (pd->pd_phandle == phandle)
			break;
	}

	if (pd && pd->pd_enable)
		return pd->pd_enable(pd->pd_cookie, &cells[1]);

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return ENXIO;

	if (OF_is_compatible(node, "usb-nop-xceiv"))
		return phy_usb_nop_enable(node);

	return ENXIO;
}

uint32_t *
phy_next_phy(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#phy-cells", 0);
	return cells + ncells + 1;
}

int
phy_enable_prop_idx(int node, char *prop, int idx)
{
	uint32_t *phys;
	uint32_t *phy;
	int rv = -1;
	int len;

	len = OF_getproplen(node, prop);
	if (len <= 0)
		return -1;

	phys = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, prop, phys, len);

	phy = phys;
	while (phy && phy < phys + (len / sizeof(uint32_t))) {
		if (idx <= 0)
			rv = phy_enable_cells(phy);
		if (idx == 0)
			break;
		phy = phy_next_phy(phy);
		idx--;
	}

	free(phys, M_TEMP, len);
	return rv;
}

int
phy_enable_idx(int node, int idx)
{
	return (phy_enable_prop_idx(node, "phys", idx));
}

int
phy_enable(int node, const char *name)
{
	int idx;

	idx = OF_getindex(node, name, "phy-names");
	if (idx == -1)
		return -1;

	return phy_enable_idx(node, idx);
}
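
/*
 * Usage sketch (hypothetical, not part of this file): a USB host
 * controller driver would typically enable the named PHY from its node
 * during attach.
 *
 *	if (phy_enable(faa->fa_node, "usb") != 0)
 *		printf("%s: can't enable PHY\n", sc->sc_dev.dv_xname);
 */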

/*
 * I2C support.
 */

LIST_HEAD(, i2c_bus) i2c_busses =
    LIST_HEAD_INITIALIZER(i2c_bus);

void
i2c_register(struct i2c_bus *ib)
{
	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
	if (ib->ib_phandle == 0)
		return;

	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
}

struct i2c_controller *
i2c_bynode(int node)
{
	struct i2c_bus *ib;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_node == node)
			return ib->ib_ic;
	}

	return NULL;
}

struct i2c_controller *
i2c_byphandle(uint32_t phandle)
{
	struct i2c_bus *ib;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_phandle == phandle)
			return ib->ib_ic;
	}

	return NULL;
}
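
/*
 * Usage sketch (hypothetical, not part of this file): a driver whose
 * node carries an "i2c-bus" phandle property can resolve it to a
 * controller and then issue transfers through the usual i2c_* calls.
 *
 *	struct i2c_controller *ic;
 *
 *	ic = i2c_byphandle(OF_getpropint(node, "i2c-bus", 0));
 *	if (ic == NULL)
 *		return;
 */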

/*
 * SFP support.
 */

LIST_HEAD(, sfp_device) sfp_devices =
    LIST_HEAD_INITIALIZER(sfp_devices);

void
sfp_register(struct sfp_device *sd)
{
	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
	if (sd->sd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
}

int
sfp_do_enable(uint32_t phandle, int enable)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_enable(sd->sd_cookie, enable);
	}

	return ENXIO;
}

int
sfp_enable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 1);
}

int
sfp_disable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 0);
}

int
sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_get_sffpage(sd->sd_cookie, sff);
	}

	return ENXIO;
}

#define SFF8472_TCC_XCC		3	/* 10G Ethernet Compliance Codes */
#define  SFF8472_TCC_XCC_10G_SR		(1 << 4)
#define  SFF8472_TCC_XCC_10G_LR		(1 << 5)
#define  SFF8472_TCC_XCC_10G_LRM	(1 << 6)
#define  SFF8472_TCC_XCC_10G_ER		(1 << 7)
#define SFF8472_TCC_ECC		6	/* Ethernet Compliance Codes */
#define  SFF8472_TCC_ECC_1000_SX	(1 << 0)
#define  SFF8472_TCC_ECC_1000_LX	(1 << 1)
#define  SFF8472_TCC_ECC_1000_CX	(1 << 2)
#define  SFF8472_TCC_ECC_1000_T		(1 << 3)
#define SFF8472_TCC_SCT		8	/* SFP+ Cable Technology */
#define  SFF8472_TCC_SCT_PASSIVE	(1 << 2)
#define  SFF8472_TCC_SCT_ACTIVE		(1 << 3)

int
sfp_add_media(uint32_t phandle, struct mii_data *mii)
{
	struct if_sffpage sff;
	int error;

	memset(&sff, 0, sizeof(sff));
	sff.sff_addr = IFSFF_ADDR_EEPROM;
	sff.sff_page = 0;

	error = sfp_get_sffpage(phandle, &sff);
	if (error)
		return error;

	/* SFP */
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_SX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_SX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_LX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_LX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_CX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_CX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_T) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}

	/* SFP+ */
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_SR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LRM) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LRM | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_ER) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_ER, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_ER | IFM_FDX;
	}

	/* SFP+ DAC */
	if (sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_PASSIVE ||
	    sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_ACTIVE) {
		ifmedia_add(&mii->mii_media,
		    IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SFP_CU | IFM_FDX;
	}

	return 0;
}
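
/*
 * Usage sketch (hypothetical, not part of this file): a MAC driver with
 * an "sfp" phandle property would power the cage up and derive its
 * media list from the module EEPROM.
 *
 *	sc->sc_sfp = OF_getpropint(node, "sfp", 0);
 *	sfp_enable(sc->sc_sfp);
 *	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
 */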

/*
 * PWM support.
 */

LIST_HEAD(, pwm_device) pwm_devices =
    LIST_HEAD_INITIALIZER(pwm_devices);

void
pwm_register(struct pwm_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
}

int
pwm_init_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0]) {
			memset(ps, 0, sizeof(struct pwm_state));
			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
			ps->ps_pulse_width = 0;
			if (pd->pd_cells >= 2)
				ps->ps_period = cells[2];
			if (pd->pd_cells >= 3)
				ps->ps_flags = cells[3];
			return 0;
		}
	}

	return ENXIO;
}

int
pwm_get_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}

int
pwm_set_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}
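
/*
 * Usage sketch (hypothetical, not part of this file): a backlight
 * driver could program a 50% duty cycle from the cells of its "pwms"
 * property.
 *
 *	struct pwm_state ps;
 *
 *	if (pwm_init_state(cells, &ps) == 0) {
 *		ps.ps_pulse_width = ps.ps_period / 2;
 *		ps.ps_enabled = 1;
 *		pwm_set_state(cells, &ps);
 *	}
 */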

/*
 * Non-volatile memory support.
 */

LIST_HEAD(, nvmem_device) nvmem_devices =
    LIST_HEAD_INITIALIZER(nvmem_devices);

struct nvmem_cell {
	uint32_t		nc_phandle;
	struct nvmem_device	*nc_nd;
	bus_addr_t		nc_addr;
	bus_size_t		nc_size;
	uint32_t		nc_offset;
	uint32_t		nc_bitlen;

	LIST_ENTRY(nvmem_cell)	nc_list;
};

LIST_HEAD(, nvmem_cell) nvmem_cells =
    LIST_HEAD_INITIALIZER(nvmem_cells);

void
nvmem_register_child(int node, struct nvmem_device *nd)
{
	struct nvmem_cell *nc;
	uint32_t phandle;
	uint32_t reg[2], bits[2] = {};

	phandle = OF_getpropint(node, "phandle", 0);
	if (phandle == 0)
		return;

	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		return;

	OF_getpropintarray(node, "bits", bits, sizeof(bits));

	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
	nc->nc_phandle = phandle;
	nc->nc_nd = nd;
	nc->nc_addr = reg[0];
	nc->nc_size = reg[1];
	nc->nc_offset = bits[0];
	nc->nc_bitlen = bits[1];
	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
}

void
nvmem_register(struct nvmem_device *nd)
{
	int node;

	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
	if (nd->nd_phandle)
		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);

	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
		nvmem_register_child(node, nd);
}

int
nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
{
	struct nvmem_device *nd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
		if (nd->nd_phandle == phandle)
			return nd->nd_read(nd->nd_cookie, addr, data, size);
	}

	return ENXIO;
}

int
nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	uint8_t *p = data;
	bus_addr_t addr;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_read == NULL)
		return EACCES;

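	/*
	 * Cells with a "bits" property are not byte aligned: nc_offset
	 * is the bit offset of the cell within its first byte and
	 * nc_bitlen its width in bits.  Read one byte at a time and
	 * shift the bit field down into the output buffer.
	 */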
	first = 1;
	addr = nc->nc_addr + (nc->nc_offset / 8);
	offset = nc->nc_offset % 8;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, addr++, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		if (!first) {
			*p++ |= (tmp << (8 - offset)) & (mask << (8 - offset));
			bitlen -= MIN(offset, bitlen);
			mask >>= offset;
			size--;
		}

		if (bitlen > 0 && size > 0) {
			*p = (tmp >> offset) & mask;
			bitlen -= MIN(8 - offset, bitlen);
		}

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
}

int
nvmem_write_cell(int node, const char *name, const void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	const uint8_t *p = data;
	bus_addr_t addr;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_write == NULL)
		return EACCES;

	first = 1;
	addr = nc->nc_addr + (nc->nc_offset / 8);
	offset = nc->nc_offset % 8;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, addr, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		tmp &= ~(mask << offset);
		tmp |= (*p++ << offset) & (mask << offset);
		bitlen -= MIN(8 - offset, bitlen);
		size--;

		if (!first && bitlen > 0 && size > 0) {
			tmp &= ~(mask >> (8 - offset));
			tmp |= (*p >> (8 - offset)) & (mask >> (8 - offset));
			bitlen -= MIN(offset, bitlen);
		}

		error = nd->nd_write(nd->nd_cookie, addr++, &tmp, 1);
		if (error)
			return error;

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_write(nd->nd_cookie, nc->nc_addr, data, size);
}
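
/*
 * Usage sketch (hypothetical, not part of this file): an Ethernet
 * driver whose node names a "mac-address" cell could recover the
 * station address from NVMEM like this.
 *
 *	uint8_t enaddr[ETHER_ADDR_LEN];
 *
 *	if (nvmem_read_cell(node, "mac-address", enaddr,
 *	    sizeof(enaddr)) == 0)
 *		memcpy(sc->sc_lladdr, enaddr, sizeof(enaddr));
 */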

/* Port/endpoint interface support */

LIST_HEAD(, endpoint) endpoints =
    LIST_HEAD_INITIALIZER(endpoints);

void
endpoint_register(int node, struct device_port *dp, enum endpoint_type type)
{
	struct endpoint *ep;

	ep = malloc(sizeof(*ep), M_DEVBUF, M_WAITOK);
	ep->ep_node = node;
	ep->ep_phandle = OF_getpropint(node, "phandle", 0);
	ep->ep_reg = OF_getpropint(node, "reg", -1);
	ep->ep_port = dp;
	ep->ep_type = type;

	LIST_INSERT_HEAD(&endpoints, ep, ep_list);
	LIST_INSERT_HEAD(&dp->dp_endpoints, ep, ep_plist);
}

void
device_port_register(int node, struct device_ports *ports,
    enum endpoint_type type)
{
	struct device_port *dp;

	dp = malloc(sizeof(*dp), M_DEVBUF, M_WAITOK);
	dp->dp_node = node;
	dp->dp_phandle = OF_getpropint(node, "phandle", 0);
	dp->dp_reg = OF_getpropint(node, "reg", -1);
	dp->dp_ports = ports;
	LIST_INIT(&dp->dp_endpoints);
	for (node = OF_child(node); node; node = OF_peer(node))
		endpoint_register(node, dp, type);

	LIST_INSERT_HEAD(&ports->dp_ports, dp, dp_list);
}

void
device_ports_register(struct device_ports *ports,
    enum endpoint_type type)
{
	int node;

	LIST_INIT(&ports->dp_ports);

	node = OF_getnodebyname(ports->dp_node, "ports");
	if (node == 0) {
		node = OF_getnodebyname(ports->dp_node, "port");
		if (node == 0)
			return;

		device_port_register(node, ports, type);
		return;
	}

	for (node = OF_child(node); node; node = OF_peer(node))
		device_port_register(node, ports, type);
}

struct device_ports *
device_ports_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle)
			return ep->ep_port->dp_ports;
	}

	return NULL;
}

struct endpoint *
endpoint_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_phandle == phandle)
			return ep;
	}

	return NULL;
}

struct endpoint *
endpoint_byreg(struct device_ports *ports, uint32_t dp_reg, uint32_t ep_reg)
{
	struct device_port *dp;
	struct endpoint *ep;

	LIST_FOREACH(dp, &ports->dp_ports, dp_list) {
		if (dp->dp_reg != dp_reg)
			continue;
		LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
			if (ep->ep_reg != ep_reg)
				continue;
			return ep;
		}
	}

	return NULL;
}

struct endpoint *
endpoint_remote(struct endpoint *ep)
{
	struct endpoint *rep;
	int phandle;

	phandle = OF_getpropint(ep->ep_node, "remote-endpoint", 0);
	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rep, &endpoints, ep_list) {
		if (rep->ep_phandle == phandle)
			return rep;
	}

	return NULL;
}

int
endpoint_activate(struct endpoint *ep, void *arg)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_activate(ports->dp_cookie, ep, arg);
}

void *
endpoint_get_cookie(struct endpoint *ep)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_get_cookie(ports->dp_cookie, ep);
}

int
device_port_activate(uint32_t phandle, void *arg)
{
	struct device_port *dp = NULL;
	struct endpoint *ep, *rep;
	int count;
	int error;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle) {
			dp = ep->ep_port;
			break;
		}
	}
	if (dp == NULL)
		return ENXIO;

	count = 0;
	LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
		rep = endpoint_remote(ep);
		if (rep == NULL)
			continue;

		error = endpoint_activate(ep, arg);
		if (error)
			continue;
		error = endpoint_activate(rep, arg);
		if (error)
			continue;
		count++;
	}

	return count ? 0 : ENXIO;
}
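
/*
 * Usage sketch (hypothetical, not part of this file): a driver exposes
 * its OF graph by filling in a struct device_ports and registering it;
 * the example_* callbacks are made up, and the endpoint type
 * EP_DAI_DEVICE shown here is the one dai_register() below uses.
 *
 *	sc->sc_ports.dp_node = faa->fa_node;
 *	sc->sc_ports.dp_cookie = sc;
 *	sc->sc_ports.dp_ep_activate = example_ep_activate;
 *	sc->sc_ports.dp_ep_get_cookie = example_ep_get_cookie;
 *	device_ports_register(&sc->sc_ports, EP_DAI_DEVICE);
 */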

/* Digital audio interface support */

LIST_HEAD(, dai_device) dai_devices =
    LIST_HEAD_INITIALIZER(dai_devices);

void *
dai_ep_get_cookie(void *cookie, struct endpoint *ep)
{
	return cookie;
}

void
dai_register(struct dai_device *dd)
{
	dd->dd_phandle = OF_getpropint(dd->dd_node, "phandle", 0);
	if (dd->dd_phandle != 0)
		LIST_INSERT_HEAD(&dai_devices, dd, dd_list);

	dd->dd_ports.dp_node = dd->dd_node;
	dd->dd_ports.dp_cookie = dd;
	dd->dd_ports.dp_ep_get_cookie = dai_ep_get_cookie;
	device_ports_register(&dd->dd_ports, EP_DAI_DEVICE);
}

struct dai_device *
dai_byphandle(uint32_t phandle)
{
	struct dai_device *dd;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(dd, &dai_devices, dd_list) {
		if (dd->dd_phandle == phandle)
			return dd;
	}

	return NULL;
}
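
/*
 * Usage sketch (hypothetical, not part of this file): an audio
 * controller with an "audio-codec" phandle property could find the
 * codec DAI registered above.
 *
 *	struct dai_device *dd;
 *
 *	dd = dai_byphandle(OF_getpropint(node, "audio-codec", 0));
 */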

/* MII support */

LIST_HEAD(, mii_bus) mii_busses =
    LIST_HEAD_INITIALIZER(mii_busses);

void
mii_register(struct mii_bus *md)
{
	LIST_INSERT_HEAD(&mii_busses, md, md_list);
}

struct mii_bus *
mii_bynode(int node)
{
	struct mii_bus *md;

	LIST_FOREACH(md, &mii_busses, md_list) {
		if (md->md_node == node)
			return md;
	}

	return NULL;
}

struct mii_bus *
mii_byphandle(uint32_t phandle)
{
	int node;

	if (phandle == 0)
		return NULL;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

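	/*
	 * The phandle names a PHY node; the MDIO bus the PHY hangs off
	 * is its parent node.
	 */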
	node = OF_parent(node);
	if (node == 0)
		return NULL;

	return mii_bynode(node);
}

/* IOMMU support */

LIST_HEAD(, iommu_device) iommu_devices =
    LIST_HEAD_INITIALIZER(iommu_devices);

void
iommu_device_register(struct iommu_device *id)
{
	id->id_phandle = OF_getpropint(id->id_node, "phandle", 0);
	if (id->id_phandle == 0)
		return;

	LIST_INSERT_HEAD(&iommu_devices, id, id_list);
}

bus_dma_tag_t
iommu_device_do_map(uint32_t phandle, uint32_t *cells, bus_dma_tag_t dmat)
{
	struct iommu_device *id;

	if (phandle == 0)
		return dmat;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle)
			return id->id_map(id->id_cookie, cells, dmat);
	}

	return dmat;
}

int
iommu_device_lookup(int node, uint32_t *phandle, uint32_t *cells)
{
	uint32_t *cell;
	uint32_t *map;
	int len, icells, ncells;
	int ret = 1;
	int i;

	len = OF_getproplen(node, "iommus");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommus", map, len);

	cell = map;
	ncells = len / sizeof(uint32_t);
	while (ncells > 1) {
		node = OF_getnodebyphandle(cell[0]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 1)
			goto out;

		KASSERT(icells <= 2);

		*phandle = cell[0];
		for (i = 0; i < icells; i++)
			cells[i] = cell[1 + i];
		ret = 0;
		break;

		cell += (1 + icells);
		ncells -= (1 + icells);
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

int
iommu_device_lookup_pci(int node, uint32_t rid, uint32_t *phandle,
    uint32_t *cells)
{
	uint32_t sid_base;
	uint32_t *cell;
	uint32_t *map;
	uint32_t mask, rid_base;
	int len, length, icells, ncells;
	int ret = 1;

	len = OF_getproplen(node, "iommu-map");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommu-map", map, len);

	mask = OF_getpropint(node, "iommu-map-mask", 0xffff);
	rid = rid & mask;

	cell = map;
	ncells = len / sizeof(uint32_t);
	while (ncells > 1) {
		node = OF_getnodebyphandle(cell[1]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 3)
			goto out;

		KASSERT(icells == 1);

		rid_base = cell[0];
		sid_base = cell[2];
		length = cell[3];
		if (rid >= rid_base && rid < rid_base + length) {
			cells[0] = sid_base + (rid - rid_base);
			*phandle = cell[1];
			ret = 0;
			break;
		}

		cell += 4;
		ncells -= 4;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

bus_dma_tag_t
iommu_device_map(int node, bus_dma_tag_t dmat)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup(node, &phandle, &cells[0]))
		return dmat;

	return iommu_device_do_map(phandle, &cells[0], dmat);
}

bus_dma_tag_t
iommu_device_map_pci(int node, uint32_t rid, bus_dma_tag_t dmat)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup_pci(node, rid, &phandle, &cells[0]))
		return dmat;

	return iommu_device_do_map(phandle, &cells[0], dmat);
}
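
/*
 * Usage sketch (hypothetical, not part of this file): a device driver
 * wraps its DMA tag during attach so that an "iommus" property, if
 * present, transparently routes its DMA through the IOMMU.
 *
 *	sc->sc_dmat = iommu_device_map(faa->fa_node, faa->fa_dmat);
 */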

void
iommu_device_do_reserve(uint32_t phandle, uint32_t *cells, bus_addr_t addr,
    bus_size_t size)
{
	struct iommu_device *id;

	if (phandle == 0)
		return;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle) {
			id->id_reserve(id->id_cookie, cells, addr, size);
			break;
		}
	}
}

void
iommu_reserve_region_pci(int node, uint32_t rid, bus_addr_t addr,
    bus_size_t size)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup_pci(node, rid, &phandle, &cells[0]))
		return;

	iommu_device_do_reserve(phandle, &cells[0], addr, size);
}

/*
 * Mailbox support.
 */

struct mbox_channel {
	struct mbox_device	*mc_md;
	void			*mc_cookie;
};

LIST_HEAD(, mbox_device) mbox_devices =
    LIST_HEAD_INITIALIZER(mbox_devices);

void
mbox_register(struct mbox_device *md)
{
	md->md_cells = OF_getpropint(md->md_node, "#mbox-cells", 0);
	md->md_phandle = OF_getpropint(md->md_node, "phandle", 0);
	if (md->md_phandle == 0)
		return;

	LIST_INSERT_HEAD(&mbox_devices, md, md_list);
}

struct mbox_channel *
mbox_channel_cells(uint32_t *cells, struct mbox_client *client)
{
	struct mbox_device *md;
	struct mbox_channel *mc;
	uint32_t phandle = cells[0];
	void *cookie;

	LIST_FOREACH(md, &mbox_devices, md_list) {
		if (md->md_phandle == phandle)
			break;
	}

	if (md && md->md_channel) {
		cookie = md->md_channel(md->md_cookie, &cells[1], client);
		if (cookie) {
			mc = malloc(sizeof(*mc), M_DEVBUF, M_WAITOK);
			mc->mc_md = md;
			mc->mc_cookie = cookie;
			return mc;
		}
	}

	return NULL;
}

uint32_t *
mbox_next_mbox(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#mbox-cells", 0);
	return cells + ncells + 1;
}

struct mbox_channel *
mbox_channel_idx(int node, int idx, struct mbox_client *client)
{
	struct mbox_channel *mc = NULL;
	uint32_t *mboxes;
	uint32_t *mbox;
	int len;

	len = OF_getproplen(node, "mboxes");
	if (len <= 0)
		return NULL;

	mboxes = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "mboxes", mboxes, len);

	mbox = mboxes;
	while (mbox && mbox < mboxes + (len / sizeof(uint32_t))) {
		if (idx == 0) {
			mc = mbox_channel_cells(mbox, client);
			break;
		}
		mbox = mbox_next_mbox(mbox);
		idx--;
	}

	free(mboxes, M_TEMP, len);
	return mc;
}

struct mbox_channel *
mbox_channel(int node, const char *name, struct mbox_client *client)
{
	int idx;

	idx = OF_getindex(node, name, "mbox-names");
	if (idx == -1)
		return NULL;

	return mbox_channel_idx(node, idx, client);
}

int
mbox_send(struct mbox_channel *mc, const void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_send)
		return md->md_send(mc->mc_cookie, data, len);

	return ENXIO;
}

int
mbox_recv(struct mbox_channel *mc, void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_recv)
		return md->md_recv(mc->mc_cookie, data, len);

	return ENXIO;
}
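
/*
 * Usage sketch (hypothetical, not part of this file): a driver talking
 * to a coprocessor could open the channel named "tx" in its
 * "mbox-names" property and post a message; the message layout is
 * whatever the remote firmware expects, and passing a NULL client is
 * an assumption of this sketch.
 *
 *	struct mbox_channel *mc;
 *	uint32_t msg = 1;
 *
 *	mc = mbox_channel(faa->fa_node, "tx", NULL);
 *	if (mc != NULL)
 *		mbox_send(mc, &msg, sizeof(msg));
 */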

/* hwlock support */

LIST_HEAD(, hwlock_device) hwlock_devices =
    LIST_HEAD_INITIALIZER(hwlock_devices);

void
hwlock_register(struct hwlock_device *hd)
{
	hd->hd_cells = OF_getpropint(hd->hd_node, "#hwlock-cells", 0);
	hd->hd_phandle = OF_getpropint(hd->hd_node, "phandle", 0);
	if (hd->hd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&hwlock_devices, hd, hd_list);
}

int
hwlock_lock_cells(uint32_t *cells, int lock)
{
	struct hwlock_device *hd;
	uint32_t phandle = cells[0];

	LIST_FOREACH(hd, &hwlock_devices, hd_list) {
		if (hd->hd_phandle == phandle)
			break;
	}

	if (hd && hd->hd_lock)
		return hd->hd_lock(hd->hd_cookie, &cells[1], lock);

	return ENXIO;
}

uint32_t *
hwlock_next_hwlock(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#hwlock-cells", 0);
	return cells + ncells + 1;
}

int
hwlock_do_lock_idx(int node, int idx, int lock)
{
	uint32_t *hwlocks;
	uint32_t *hwlock;
	int rv = -1;
	int len;

	len = OF_getproplen(node, "hwlocks");
	if (len <= 0)
		return -1;

	hwlocks = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "hwlocks", hwlocks, len);

	hwlock = hwlocks;
	while (hwlock && hwlock < hwlocks + (len / sizeof(uint32_t))) {
		if (idx <= 0)
			rv = hwlock_lock_cells(hwlock, lock);
		if (idx == 0)
			break;
		hwlock = hwlock_next_hwlock(hwlock);
		idx--;
	}

	free(hwlocks, M_TEMP, len);
	return rv;
}

int
hwlock_lock_idx(int node, int idx)
{
	return hwlock_do_lock_idx(node, idx, 1);
}

int
hwlock_lock_idx_timeout(int node, int idx, int ms)
{
	int i, ret = ENXIO;

	for (i = 0; i <= ms; i++) {
		ret = hwlock_do_lock_idx(node, idx, 1);
		if (ret == EAGAIN) {
			delay(1000);
			continue;
		}
		break;
	}

	return ret;
}

int
hwlock_unlock_idx(int node, int idx)
{
	return hwlock_do_lock_idx(node, idx, 0);
}
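
/*
 * Usage sketch (hypothetical, not part of this file): a driver sharing
 * hardware with coprocessor firmware could take the first hwlock in
 * its "hwlocks" property, waiting up to 100ms, and release it when
 * done; success is assumed to be reported as 0.
 *
 *	if (hwlock_lock_idx_timeout(node, 0, 100) == 0) {
 *		... touch the shared hardware ...
 *		hwlock_unlock_idx(node, 0);
 *	}
 */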