/*
 * Copyright © 2020 Inria.  All rights reserved.
 * See COPYING in top-level directory.
 */

#include "private/autogen/config.h"
#include "hwloc.h"
#include "private/private.h"


/*****************************
 * Attributes
 */

static __hwloc_inline
hwloc_uint64_t hwloc__memattr_get_convenience_value(hwloc_memattr_id_t id,
                                                    hwloc_obj_t node)
{
  if (id == HWLOC_MEMATTR_ID_CAPACITY)
    return node->attr->numanode.local_memory;
  else if (id == HWLOC_MEMATTR_ID_LOCALITY)
    return hwloc_bitmap_weight(node->cpuset);
  else
    assert(0);
  return 0; /* shut up the compiler */
}

void
hwloc_internal_memattrs_init(struct hwloc_topology *topology)
{
  topology->nr_memattrs = 0;
  topology->memattrs = NULL;
}

static void
hwloc__setup_memattr(struct hwloc_internal_memattr_s *imattr,
                     char *name,
                     unsigned long flags,
                     unsigned long iflags)
{
  imattr->name = name;
  imattr->flags = flags;
  imattr->iflags = iflags;

  imattr->nr_targets = 0;
  imattr->targets = NULL;
}

void
hwloc_internal_memattrs_prepare(struct hwloc_topology *topology)
{
#define NR_DEFAULT_MEMATTRS 4
  topology->memattrs = malloc(NR_DEFAULT_MEMATTRS * sizeof(*topology->memattrs));
  if (!topology->memattrs)
    return;

  assert(HWLOC_MEMATTR_ID_CAPACITY < NR_DEFAULT_MEMATTRS);
  hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_CAPACITY],
                       (char *) "Capacity",
                       HWLOC_MEMATTR_FLAG_HIGHER_FIRST,
                       HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);

  assert(HWLOC_MEMATTR_ID_LOCALITY < NR_DEFAULT_MEMATTRS);
  hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LOCALITY],
                       (char *) "Locality",
                       HWLOC_MEMATTR_FLAG_LOWER_FIRST,
                       HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);

  assert(HWLOC_MEMATTR_ID_BANDWIDTH < NR_DEFAULT_MEMATTRS);
  hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_BANDWIDTH],
                       (char *) "Bandwidth",
                       HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
                       HWLOC_IMATTR_FLAG_STATIC_NAME);

  assert(HWLOC_MEMATTR_ID_LATENCY < NR_DEFAULT_MEMATTRS);
  hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LATENCY],
                       (char *) "Latency",
                       HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
                       HWLOC_IMATTR_FLAG_STATIC_NAME);

  topology->nr_memattrs = NR_DEFAULT_MEMATTRS;
}
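
/*
 * Usage sketch (illustrative only, not part of the library): the predefined
 * attributes above are addressable directly through their public IDs, e.g.:
 *
 *   hwloc_uint64_t capacity;
 *   hwloc_obj_t node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
 *   if (node && !hwloc_memattr_get_value(topology, HWLOC_MEMATTR_ID_CAPACITY,
 *                                        node, NULL, 0, &capacity))
 *     { ... capacity == node->attr->numanode.local_memory ... }
 *
 * Capacity and Locality are convenience attributes: their values are computed
 * on the fly from the node itself and never stored in the target arrays.
 */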

static void
hwloc__imi_destroy(struct hwloc_internal_memattr_initiator_s *imi)
{
  if (imi->initiator.type == HWLOC_LOCATION_TYPE_CPUSET)
    hwloc_bitmap_free(imi->initiator.location.cpuset);
}

static void
hwloc__imtg_destroy(struct hwloc_internal_memattr_s *imattr,
                    struct hwloc_internal_memattr_target_s *imtg)
{
  if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
    /* only attributes with initiators may have something to free() in the array */
    unsigned k;
    for(k=0; k<imtg->nr_initiators; k++)
      hwloc__imi_destroy(&imtg->initiators[k]);
  }
  free(imtg->initiators);
}

void
hwloc_internal_memattrs_destroy(struct hwloc_topology *topology)
{
  unsigned id;
  for(id=0; id<topology->nr_memattrs; id++) {
    struct hwloc_internal_memattr_s *imattr = &topology->memattrs[id];
    unsigned j;
    for(j=0; j<imattr->nr_targets; j++)
      hwloc__imtg_destroy(imattr, &imattr->targets[j]);
    free(imattr->targets);
    if (!(imattr->iflags & HWLOC_IMATTR_FLAG_STATIC_NAME))
      free(imattr->name);
  }
  free(topology->memattrs);

  topology->memattrs = NULL;
  topology->nr_memattrs = 0;
}

int
hwloc_internal_memattrs_dup(struct hwloc_topology *new, struct hwloc_topology *old)
{
  struct hwloc_tma *tma = new->tma;
  struct hwloc_internal_memattr_s *imattrs;
  hwloc_memattr_id_t id;

  imattrs = hwloc_tma_malloc(tma, old->nr_memattrs * sizeof(*imattrs));
  if (!imattrs)
    return -1;
  new->memattrs = imattrs;
  new->nr_memattrs = old->nr_memattrs;
  memcpy(imattrs, old->memattrs, old->nr_memattrs * sizeof(*imattrs));

  for(id=0; id<old->nr_memattrs; id++) {
    struct hwloc_internal_memattr_s *oimattr = &old->memattrs[id];
    struct hwloc_internal_memattr_s *nimattr = &imattrs[id];
    unsigned j;

    assert(oimattr->name);
    nimattr->name = hwloc_tma_strdup(tma, oimattr->name);
    if (!nimattr->name) {
      assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */
      new->nr_memattrs = id;
      goto failed;
    }
    nimattr->iflags &= ~HWLOC_IMATTR_FLAG_STATIC_NAME;
    nimattr->iflags &= ~HWLOC_IMATTR_FLAG_CACHE_VALID; /* cache will need refresh */

    if (!oimattr->nr_targets)
      continue;

    nimattr->targets = hwloc_tma_malloc(tma, oimattr->nr_targets * sizeof(*nimattr->targets));
    if (!nimattr->targets) {
      free(nimattr->name);
      new->nr_memattrs = id;
      goto failed;
    }
    memcpy(nimattr->targets, oimattr->targets, oimattr->nr_targets * sizeof(*nimattr->targets));

    for(j=0; j<oimattr->nr_targets; j++) {
      struct hwloc_internal_memattr_target_s *oimtg = &oimattr->targets[j];
      struct hwloc_internal_memattr_target_s *nimtg = &nimattr->targets[j];
      unsigned k;

      nimtg->obj = NULL; /* cache will need refresh */

      if (!oimtg->nr_initiators)
        continue;

      nimtg->initiators = hwloc_tma_malloc(tma, oimtg->nr_initiators * sizeof(*nimtg->initiators));
      if (!nimtg->initiators) {
        nimattr->nr_targets = j;
        new->nr_memattrs = id+1;
        goto failed;
      }
      memcpy(nimtg->initiators, oimtg->initiators, oimtg->nr_initiators * sizeof(*nimtg->initiators));

      for(k=0; k<oimtg->nr_initiators; k++) {
        struct hwloc_internal_memattr_initiator_s *oimi = &oimtg->initiators[k];
        struct hwloc_internal_memattr_initiator_s *nimi = &nimtg->initiators[k];
        if (oimi->initiator.type == HWLOC_LOCATION_TYPE_CPUSET) {
          nimi->initiator.location.cpuset = hwloc_bitmap_tma_dup(tma, oimi->initiator.location.cpuset);
          if (!nimi->initiator.location.cpuset) {
            nimtg->nr_initiators = k;
            nimattr->nr_targets = j+1;
            new->nr_memattrs = id+1;
            goto failed;
          }
        } else if (oimi->initiator.type == HWLOC_LOCATION_TYPE_OBJECT) {
          nimi->initiator.location.object.obj = NULL; /* cache will need refresh */
        }
      }
    }
  }
  return 0;

 failed:
  hwloc_internal_memattrs_destroy(new);
  return -1;
}

int
hwloc_memattr_get_by_name(hwloc_topology_t topology,
                          const char *name,
                          hwloc_memattr_id_t *idp)
{
  unsigned id;
  for(id=0; id<topology->nr_memattrs; id++) {
    if (!strcmp(topology->memattrs[id].name, name)) {
      *idp = id;
      return 0;
    }
  }
  errno = EINVAL;
  return -1;
}

int
hwloc_memattr_get_name(hwloc_topology_t topology,
                       hwloc_memattr_id_t id,
                       const char **namep)
{
  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  *namep = topology->memattrs[id].name;
  return 0;
}

int
hwloc_memattr_get_flags(hwloc_topology_t topology,
                        hwloc_memattr_id_t id,
                        unsigned long *flagsp)
{
  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  *flagsp = topology->memattrs[id].flags;
  return 0;
}

int
hwloc_memattr_register(hwloc_topology_t topology,
                       const char *_name,
                       unsigned long flags,
                       hwloc_memattr_id_t *id)
{
  struct hwloc_internal_memattr_s *newattrs;
  char *name;
  unsigned i;

  /* check flags */
  if (flags & ~(HWLOC_MEMATTR_FLAG_NEED_INITIATOR|HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_HIGHER_FIRST)) {
    errno = EINVAL;
    return -1;
  }
  if (!(flags & (HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_HIGHER_FIRST))) {
    errno = EINVAL;
    return -1;
  }
  if ((flags & (HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_HIGHER_FIRST))
      == (HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_HIGHER_FIRST)) {
    errno = EINVAL;
    return -1;
  }

  if (!_name) {
    errno = EINVAL;
    return -1;
  }

  /* check that the name isn't already used */
  for(i=0; i<topology->nr_memattrs; i++) {
    if (!strcmp(_name, topology->memattrs[i].name)) {
      errno = EBUSY;
      return -1;
    }
  }

  name = strdup(_name);
  if (!name)
    return -1;

  newattrs = realloc(topology->memattrs, (topology->nr_memattrs + 1) * sizeof(*topology->memattrs));
  if (!newattrs) {
    free(name);
    return -1;
  }

  hwloc__setup_memattr(&newattrs[topology->nr_memattrs],
                       name, flags, 0);

  /* a just-created memattr has nothing to cache, hence its cache is valid */
  newattrs[topology->nr_memattrs].iflags |= HWLOC_IMATTR_FLAG_CACHE_VALID;

  *id = topology->nr_memattrs;
  topology->nr_memattrs++;
  topology->memattrs = newattrs;
  return 0;
}
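
/*
 * Usage sketch (illustrative only): registering a custom attribute where
 * higher values are better and an initiator is required. The name
 * "MyBandwidth" is hypothetical; any name not already taken is accepted:
 *
 *   hwloc_memattr_id_t myid;
 *   if (hwloc_memattr_register(topology, "MyBandwidth",
 *                              HWLOC_MEMATTR_FLAG_HIGHER_FIRST
 *                              | HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
 *                              &myid) < 0)
 *     { ... errno is EINVAL for bad flags/name, EBUSY for a duplicate name ... }
 */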


/***************************
 * Internal Locations
 */

/* return 1 if the cpuset/obj matches the existing initiator location,
 * for instance if the queried cpuset is included in the cpuset of the existing initiator
 */
static int
match_internal_location(struct hwloc_internal_location_s *iloc,
                        struct hwloc_internal_memattr_initiator_s *imi)
{
  if (iloc->type != imi->initiator.type)
    return 0;
  switch (iloc->type) {
  case HWLOC_LOCATION_TYPE_CPUSET:
    return hwloc_bitmap_isincluded(iloc->location.cpuset, imi->initiator.location.cpuset);
  case HWLOC_LOCATION_TYPE_OBJECT:
    return iloc->location.object.type == imi->initiator.location.object.type
      && iloc->location.object.gp_index == imi->initiator.location.object.gp_index;
  default:
    return 0;
  }
}

static int
to_internal_location(struct hwloc_internal_location_s *iloc,
                     struct hwloc_location *location)
{
  iloc->type = location->type;

  switch (location->type) {
  case HWLOC_LOCATION_TYPE_CPUSET:
    if (!location->location.cpuset || hwloc_bitmap_iszero(location->location.cpuset)) {
      errno = EINVAL;
      return -1;
    }
    iloc->location.cpuset = location->location.cpuset;
    return 0;
  case HWLOC_LOCATION_TYPE_OBJECT:
    if (!location->location.object) {
      errno = EINVAL;
      return -1;
    }
    iloc->location.object.gp_index = location->location.object->gp_index;
    iloc->location.object.type = location->location.object->type;
    return 0;
  default:
    errno = EINVAL;
    return -1;
  }
}

static int
from_internal_location(struct hwloc_internal_location_s *iloc,
                       struct hwloc_location *location)
{
  location->type = iloc->type;

  switch (iloc->type) {
  case HWLOC_LOCATION_TYPE_CPUSET:
    location->location.cpuset = iloc->location.cpuset;
    return 0;
  case HWLOC_LOCATION_TYPE_OBJECT:
    /* requires the cache to be refreshed */
    location->location.object = iloc->location.object.obj;
    if (!location->location.object)
      return -1;
    return 0;
  default:
    errno = EINVAL;
    return -1;
  }
}


/************************
 * Refreshing
 */

static int
hwloc__imi_refresh(struct hwloc_topology *topology,
                   struct hwloc_internal_memattr_initiator_s *imi)
{
  switch (imi->initiator.type) {
  case HWLOC_LOCATION_TYPE_CPUSET: {
    hwloc_bitmap_and(imi->initiator.location.cpuset, imi->initiator.location.cpuset, topology->levels[0][0]->cpuset);
    if (hwloc_bitmap_iszero(imi->initiator.location.cpuset)) {
      hwloc__imi_destroy(imi);
      return -1;
    }
    return 0;
  }
  case HWLOC_LOCATION_TYPE_OBJECT: {
    hwloc_obj_t obj = hwloc_get_obj_by_type_and_gp_index(topology,
                                                         imi->initiator.location.object.type,
                                                         imi->initiator.location.object.gp_index);
    if (!obj) {
      hwloc__imi_destroy(imi);
      return -1;
    }
    imi->initiator.location.object.obj = obj;
    return 0;
  }
  default:
    assert(0);
  }
  return -1;
}

static int
hwloc__imtg_refresh(struct hwloc_topology *topology,
                    struct hwloc_internal_memattr_s *imattr,
                    struct hwloc_internal_memattr_target_s *imtg)
{
  hwloc_obj_t node;

  /* no need to refresh convenience memattrs */
  assert(!(imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE));

  /* check the target object */
  if (imtg->gp_index == (hwloc_uint64_t) -1) {
    /* only NUMA and PU may work with os_index, and only NUMA is currently used internally */
    if (imtg->type == HWLOC_OBJ_NUMANODE)
      node = hwloc_get_numanode_obj_by_os_index(topology, imtg->os_index);
    else if (imtg->type == HWLOC_OBJ_PU)
      node = hwloc_get_pu_obj_by_os_index(topology, imtg->os_index);
    else
      node = NULL;
  } else {
    node = hwloc_get_obj_by_type_and_gp_index(topology, imtg->type, imtg->gp_index);
  }
  if (!node) {
    hwloc__imtg_destroy(imattr, imtg);
    return -1;
  }

  /* save the gp_index in case it wasn't initialized yet */
  imtg->gp_index = node->gp_index;
  /* cache the object */
  imtg->obj = node;

  if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
    /* check the initiators */
    unsigned k, l;
    for(k=0, l=0; k<imtg->nr_initiators; k++) {
      int err = hwloc__imi_refresh(topology, &imtg->initiators[k]);
      if (err < 0)
        continue;
      if (k != l)
        memcpy(&imtg->initiators[l], &imtg->initiators[k], sizeof(*imtg->initiators));
      l++;
    }
    imtg->nr_initiators = l;
    if (!imtg->nr_initiators) {
      hwloc__imtg_destroy(imattr, imtg);
      return -1;
    }
  }
  return 0;
}

static void
hwloc__imattr_refresh(struct hwloc_topology *topology,
                      struct hwloc_internal_memattr_s *imattr)
{
  unsigned j, k;
  for(j=0, k=0; j<imattr->nr_targets; j++) {
    int ret = hwloc__imtg_refresh(topology, imattr, &imattr->targets[j]);
    if (!ret) {
      /* target still valid, move it if some former targets were removed */
      if (j != k)
        memcpy(&imattr->targets[k], &imattr->targets[j], sizeof(*imattr->targets));
      k++;
    }
  }
  imattr->nr_targets = k;
  imattr->iflags |= HWLOC_IMATTR_FLAG_CACHE_VALID;
}

void
hwloc_internal_memattrs_refresh(struct hwloc_topology *topology)
{
  unsigned id;
  for(id=0; id<topology->nr_memattrs; id++) {
    struct hwloc_internal_memattr_s *imattr = &topology->memattrs[id];
    if (imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID)
      /* nothing to refresh */
      continue;
    hwloc__imattr_refresh(topology, imattr);
  }
}

void
hwloc_internal_memattrs_need_refresh(struct hwloc_topology *topology)
{
  unsigned id;
  for(id=0; id<topology->nr_memattrs; id++) {
    struct hwloc_internal_memattr_s *imattr = &topology->memattrs[id];
    if (imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE)
      /* no need to refresh convenience memattrs */
      continue;
    imattr->iflags &= ~HWLOC_IMATTR_FLAG_CACHE_VALID;
  }
}
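
/*
 * Note on the cache lifecycle: imtg->obj and object-initiator pointers are
 * caches that go stale whenever the topology changes.
 * hwloc_internal_memattrs_need_refresh() only clears CACHE_VALID above;
 * the actual re-resolution is performed lazily by hwloc__imattr_refresh()
 * the next time an accessor needs the attribute. Convenience attributes
 * cache nothing, hence they are skipped.
 */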


/********************************
 * Targets
 */

static struct hwloc_internal_memattr_target_s *
hwloc__memattr_get_target(struct hwloc_internal_memattr_s *imattr,
                          hwloc_obj_type_t target_type,
                          hwloc_uint64_t target_gp_index,
                          unsigned target_os_index,
                          int create)
{
  struct hwloc_internal_memattr_target_s *news, *new;
  unsigned j;

  for(j=0; j<imattr->nr_targets; j++) {
    if (target_type == imattr->targets[j].type)
      if ((target_gp_index != (hwloc_uint64_t)-1 && target_gp_index == imattr->targets[j].gp_index)
          || (target_os_index != (unsigned)-1 && target_os_index == imattr->targets[j].os_index))
        return &imattr->targets[j];
  }
  if (!create)
    return NULL;

  news = realloc(imattr->targets, (imattr->nr_targets+1)*sizeof(*imattr->targets));
  if (!news)
    return NULL;
  imattr->targets = news;

  /* FIXME: sort targets? by logical index at the end of load? */

  new = &news[imattr->nr_targets];
  new->type = target_type;
  new->gp_index = target_gp_index;
  new->os_index = target_os_index;

  /* the cached object pointer will be refreshed later, on actual access */
  new->obj = NULL;
  imattr->iflags &= ~HWLOC_IMATTR_FLAG_CACHE_VALID;
  /* When setting a value after load(), the caller already has the target object
   * (and the initiator object, unless it's a cpuset), so we could avoid
   * invalidating the cache here. However, the overhead of the imattr-wide
   * refresh has been low enough so far that it doesn't justify making the
   * cache management more complex.
   */

  new->nr_initiators = 0;
  new->initiators = NULL;
  new->noinitiator_value = 0;
  imattr->nr_targets++;
  return new;
}

static struct hwloc_internal_memattr_initiator_s *
hwloc__memattr_get_initiator_from_location(struct hwloc_internal_memattr_s *imattr,
                                           struct hwloc_internal_memattr_target_s *imtg,
                                           struct hwloc_location *location);

int
hwloc_memattr_get_targets(hwloc_topology_t topology,
                          hwloc_memattr_id_t id,
                          struct hwloc_location *initiator,
                          unsigned long flags,
                          unsigned *nrp, hwloc_obj_t *targets, hwloc_uint64_t *values)
{
  struct hwloc_internal_memattr_s *imattr;
  unsigned i, found = 0, max;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (!nrp || (*nrp && !targets)) {
    errno = EINVAL;
    return -1;
  }
  max = *nrp;

  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];

  if (imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE) {
    /* convenience attributes */
    for(i=0; ; i++) {
      hwloc_obj_t node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, i);
      if (!node)
        break;
      if (found<max) {
        targets[found] = node;
        if (values)
          values[found] = hwloc__memattr_get_convenience_value(id, node);
      }
      found++;
    }
    goto done;
  }

  /* normal attributes */

  if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    hwloc__imattr_refresh(topology, imattr);

  for(i=0; i<imattr->nr_targets; i++) {
    struct hwloc_internal_memattr_target_s *imtg = &imattr->targets[i];
    hwloc_uint64_t value = 0;

    if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
      if (initiator) {
        /* find a matching initiator */
        struct hwloc_internal_memattr_initiator_s *imi = hwloc__memattr_get_initiator_from_location(imattr, imtg, initiator);
        if (!imi)
          continue;
        value = imi->value;
      }
    } else {
      value = imtg->noinitiator_value;
    }

    if (found<max) {
      targets[found] = imtg->obj;
      if (values)
        values[found] = value;
    }
    found++;
  }

 done:
  *nrp = found;
  return 0;
}
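
/*
 * Usage sketch (illustrative only): the usual two-call pattern, first
 * querying the number of targets, then fetching them:
 *
 *   unsigned nr = 0;
 *   hwloc_memattr_get_targets(topology, id, NULL, 0, &nr, NULL, NULL);
 *   hwloc_obj_t *targets = malloc(nr * sizeof(*targets));
 *   hwloc_uint64_t *values = malloc(nr * sizeof(*values));
 *   if (targets && values)
 *     hwloc_memattr_get_targets(topology, id, NULL, 0, &nr, targets, values);
 *
 * For attributes that need an initiator, a NULL initiator still lists all
 * targets but reports zero values, as coded in the loop above.
 */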


/************************
 * Initiators
 */

static struct hwloc_internal_memattr_initiator_s *
hwloc__memattr_target_get_initiator(struct hwloc_internal_memattr_target_s *imtg,
                                    struct hwloc_internal_location_s *iloc,
                                    int create)
{
  struct hwloc_internal_memattr_initiator_s *news, *new;
  unsigned k;

  for(k=0; k<imtg->nr_initiators; k++) {
    struct hwloc_internal_memattr_initiator_s *imi = &imtg->initiators[k];
    if (match_internal_location(iloc, imi)) {
      return imi;
    }
  }

  if (!create)
    return NULL;

  news = realloc(imtg->initiators, (imtg->nr_initiators+1)*sizeof(*imtg->initiators));
  if (!news)
    return NULL;
  new = &news[imtg->nr_initiators];

  new->initiator = *iloc;
  if (iloc->type == HWLOC_LOCATION_TYPE_CPUSET) {
    new->initiator.location.cpuset = hwloc_bitmap_dup(iloc->location.cpuset);
    if (!new->initiator.location.cpuset)
      goto out_with_realloc;
  }

  imtg->nr_initiators++;
  imtg->initiators = news;
  return new;

 out_with_realloc:
  /* keep the enlarged array, only the new entry is dropped */
  imtg->initiators = news;
  return NULL;
}

static struct hwloc_internal_memattr_initiator_s *
hwloc__memattr_get_initiator_from_location(struct hwloc_internal_memattr_s *imattr,
                                           struct hwloc_internal_memattr_target_s *imtg,
                                           struct hwloc_location *location)
{
  struct hwloc_internal_memattr_initiator_s *imi;
  struct hwloc_internal_location_s iloc;

  assert(imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR);

  /* an initiator location is mandatory for this attribute */
  if (!location) {
    errno = EINVAL;
    return NULL;
  }

  if (to_internal_location(&iloc, location) < 0) {
    errno = EINVAL;
    return NULL;
  }

  imi = hwloc__memattr_target_get_initiator(imtg, &iloc, 0);
  if (!imi) {
    errno = EINVAL;
    return NULL;
  }

  return imi;
}

int
hwloc_memattr_get_initiators(hwloc_topology_t topology,
                             hwloc_memattr_id_t id,
                             hwloc_obj_t target_node,
                             unsigned long flags,
                             unsigned *nrp, struct hwloc_location *initiators, hwloc_uint64_t *values)
{
  struct hwloc_internal_memattr_s *imattr;
  struct hwloc_internal_memattr_target_s *imtg;
  unsigned i, max;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (!nrp || (*nrp && !initiators)) {
    errno = EINVAL;
    return -1;
  }
  max = *nrp;

  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];
  if (!(imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR)) {
    *nrp = 0;
    return 0;
  }

  /* all convenience attributes have no initiators */
  assert(!(imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE));

  if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    hwloc__imattr_refresh(topology, imattr);

  imtg = hwloc__memattr_get_target(imattr, target_node->type, target_node->gp_index, target_node->os_index, 0);
  if (!imtg) {
    errno = EINVAL;
    return -1;
  }

  for(i=0; i<imtg->nr_initiators && i<max; i++) {
    struct hwloc_internal_memattr_initiator_s *imi = &imtg->initiators[i];
    int err = from_internal_location(&imi->initiator, &initiators[i]);
    assert(!err);
    if (values)
      /* no need to handle capacity/locality special cases here, those are initiator-less attributes */
      values[i] = imi->value;
  }

  *nrp = imtg->nr_initiators;
  return 0;
}


/**************************
 * Values
 */

int
hwloc_memattr_get_value(hwloc_topology_t topology,
                        hwloc_memattr_id_t id,
                        hwloc_obj_t target_node,
                        struct hwloc_location *initiator,
                        unsigned long flags,
                        hwloc_uint64_t *valuep)
{
  struct hwloc_internal_memattr_s *imattr;
  struct hwloc_internal_memattr_target_s *imtg;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];

  if (imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE) {
    /* convenience attributes */
    *valuep = hwloc__memattr_get_convenience_value(id, target_node);
    return 0;
  }

  /* normal attributes */

  if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    hwloc__imattr_refresh(topology, imattr);

  imtg = hwloc__memattr_get_target(imattr, target_node->type, target_node->gp_index, target_node->os_index, 0);
  if (!imtg) {
    errno = EINVAL;
    return -1;
  }

  if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
    /* find the initiator and return its value */
    struct hwloc_internal_memattr_initiator_s *imi = hwloc__memattr_get_initiator_from_location(imattr, imtg, initiator);
    if (!imi)
      return -1;
    *valuep = imi->value;
  } else {
    /* get the no-initiator value */
    *valuep = imtg->noinitiator_value;
  }
  return 0;
}
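
/*
 * Usage sketch (illustrative only): reading the bandwidth seen from a given
 * cpuset ("set" and "node" are hypothetical variables, and the Bandwidth
 * attribute must have been filled by a backend or by the application):
 *
 *   struct hwloc_location where;
 *   hwloc_uint64_t bw;
 *   where.type = HWLOC_LOCATION_TYPE_CPUSET;
 *   where.location.cpuset = set;
 *   if (!hwloc_memattr_get_value(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
 *                                node, &where, 0, &bw))
 *     { ... bw is the value stored for an initiator containing "set" ... }
 *
 * The lookup succeeds when the queried cpuset is included in a stored
 * initiator cpuset, see match_internal_location().
 */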

static int
hwloc__internal_memattr_set_value(hwloc_topology_t topology,
                                  hwloc_memattr_id_t id,
                                  hwloc_obj_type_t target_type,
                                  hwloc_uint64_t target_gp_index,
                                  unsigned target_os_index,
                                  struct hwloc_internal_location_s *initiator,
                                  hwloc_uint64_t value)
{
  struct hwloc_internal_memattr_s *imattr;
  struct hwloc_internal_memattr_target_s *imtg;

  if (id >= topology->nr_memattrs) {
    /* something bad happened during init */
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];

  if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
    /* check the given initiator */
    if (!initiator) {
      errno = EINVAL;
      return -1;
    }
  }

  if (imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE) {
    /* convenience attributes are read-only */
    errno = EINVAL;
    return -1;
  }

  /* don't refresh while adding values during load (some nodes might not be
   * ready yet); we'll refresh later
   */
  if (topology->is_loaded && !(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    hwloc__imattr_refresh(topology, imattr);

  imtg = hwloc__memattr_get_target(imattr, target_type, target_gp_index, target_os_index, 1);
  if (!imtg)
    return -1;

  if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
    /* find/add the initiator and set its value */
    /* FIXME: what if the cpuset is larger than an existing one? */
    struct hwloc_internal_memattr_initiator_s *imi = hwloc__memattr_target_get_initiator(imtg, initiator, 1);
    if (!imi)
      return -1;
    imi->value = value;

  } else {
    /* set the no-initiator value */
    imtg->noinitiator_value = value;
  }

  return 0;
}

int
hwloc_internal_memattr_set_value(hwloc_topology_t topology,
                                 hwloc_memattr_id_t id,
                                 hwloc_obj_type_t target_type,
                                 hwloc_uint64_t target_gp_index,
                                 unsigned target_os_index,
                                 struct hwloc_internal_location_s *initiator,
                                 hwloc_uint64_t value)
{
  assert(id != HWLOC_MEMATTR_ID_CAPACITY);
  assert(id != HWLOC_MEMATTR_ID_LOCALITY);

  return hwloc__internal_memattr_set_value(topology, id, target_type, target_gp_index, target_os_index, initiator, value);
}

int
hwloc_memattr_set_value(hwloc_topology_t topology,
                        hwloc_memattr_id_t id,
                        hwloc_obj_t target_node,
                        struct hwloc_location *initiator,
                        unsigned long flags,
                        hwloc_uint64_t value)
{
  struct hwloc_internal_location_s iloc, *ilocp;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (initiator) {
    if (to_internal_location(&iloc, initiator) < 0) {
      errno = EINVAL;
      return -1;
    }
    ilocp = &iloc;
  } else {
    ilocp = NULL;
  }

  return hwloc__internal_memattr_set_value(topology, id, target_node->type, target_node->gp_index, target_node->os_index, ilocp, value);
}
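
/*
 * Usage sketch (illustrative only): storing a value for NUMA node "node"
 * as seen from cpuset "set" (both hypothetical variables), for an
 * attribute "myid" registered with HWLOC_MEMATTR_FLAG_NEED_INITIATOR:
 *
 *   struct hwloc_location init;
 *   init.type = HWLOC_LOCATION_TYPE_CPUSET;
 *   init.location.cpuset = set;
 *   hwloc_memattr_set_value(topology, myid, node, &init, 0, 12345);
 *
 * The cpuset is duplicated internally (see
 * hwloc__memattr_target_get_initiator()), so "set" may be freed afterwards.
 */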


/**********************
 * Best target
 */

static void
hwloc__update_best_target(hwloc_obj_t *best_obj, hwloc_uint64_t *best_value, int *found,
                          hwloc_obj_t new_obj, hwloc_uint64_t new_value,
                          int keep_highest)
{
  if (*found) {
    if (keep_highest) {
      if (new_value <= *best_value)
        return;
    } else {
      if (new_value >= *best_value)
        return;
    }
  }

  *best_obj = new_obj;
  *best_value = new_value;
  *found = 1;
}

int
hwloc_memattr_get_best_target(hwloc_topology_t topology,
                              hwloc_memattr_id_t id,
                              struct hwloc_location *initiator,
                              unsigned long flags,
                              hwloc_obj_t *bestp, hwloc_uint64_t *valuep)
{
  struct hwloc_internal_memattr_s *imattr;
  hwloc_uint64_t best_value = 0; /* shut up the compiler */
  hwloc_obj_t best = NULL;
  int found = 0;
  unsigned j;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];

  if (imattr->iflags & HWLOC_IMATTR_FLAG_CONVENIENCE) {
    /* convenience attributes */
    for(j=0; ; j++) {
      hwloc_obj_t node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, j);
      hwloc_uint64_t value;
      if (!node)
        break;
      value = hwloc__memattr_get_convenience_value(id, node);
      hwloc__update_best_target(&best, &best_value, &found,
                                node, value,
                                imattr->flags & HWLOC_MEMATTR_FLAG_HIGHER_FIRST);
    }
    goto done;
  }

  /* normal attributes */

  if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    /* not strictly needed */
    hwloc__imattr_refresh(topology, imattr);

  for(j=0; j<imattr->nr_targets; j++) {
    struct hwloc_internal_memattr_target_s *imtg = &imattr->targets[j];
    hwloc_uint64_t value;
    if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
      /* find the initiator and use its value */
      struct hwloc_internal_memattr_initiator_s *imi = hwloc__memattr_get_initiator_from_location(imattr, imtg, initiator);
      if (!imi)
        continue;
      value = imi->value;
    } else {
      /* get the no-initiator value */
      value = imtg->noinitiator_value;
    }
    hwloc__update_best_target(&best, &best_value, &found,
                              imtg->obj, value,
                              imattr->flags & HWLOC_MEMATTR_FLAG_HIGHER_FIRST);
  }

 done:
  if (found) {
    assert(best);
    *bestp = best;
    if (valuep)
      *valuep = best_value;
    return 0;
  } else {
    errno = ENOENT;
    return -1;
  }
}
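
/*
 * Usage sketch (illustrative only): picking the target with the highest
 * bandwidth from a given initiator cpuset ("set" is hypothetical):
 *
 *   hwloc_obj_t best;
 *   hwloc_uint64_t value;
 *   struct hwloc_location init;
 *   init.type = HWLOC_LOCATION_TYPE_CPUSET;
 *   init.location.cpuset = set;
 *   if (!hwloc_memattr_get_best_target(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
 *                                      &init, 0, &best, &value))
 *     { ... best is the highest-bandwidth node for this initiator ... }
 *
 * ENOENT is returned when no target has a value for this initiator.
 */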

/**********************
 * Best initiators
 */

static void
hwloc__update_best_initiator(struct hwloc_internal_location_s *best_initiator, hwloc_uint64_t *best_value, int *found,
                             struct hwloc_internal_location_s *new_initiator, hwloc_uint64_t new_value,
                             int keep_highest)
{
  if (*found) {
    if (keep_highest) {
      if (new_value <= *best_value)
        return;
    } else {
      if (new_value >= *best_value)
        return;
    }
  }

  *best_initiator = *new_initiator;
  *best_value = new_value;
  *found = 1;
}

int
hwloc_memattr_get_best_initiator(hwloc_topology_t topology,
                                 hwloc_memattr_id_t id,
                                 hwloc_obj_t target_node,
                                 unsigned long flags,
                                 struct hwloc_location *bestp, hwloc_uint64_t *valuep)
{
  struct hwloc_internal_memattr_s *imattr;
  struct hwloc_internal_memattr_target_s *imtg;
  struct hwloc_internal_location_s best_initiator;
  hwloc_uint64_t best_value;
  int found;
  unsigned i;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  if (id >= topology->nr_memattrs) {
    errno = EINVAL;
    return -1;
  }
  imattr = &topology->memattrs[id];

  if (!(imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR)) {
    errno = EINVAL;
    return -1;
  }

  if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
    /* not strictly needed */
    hwloc__imattr_refresh(topology, imattr);

  imtg = hwloc__memattr_get_target(imattr, target_node->type, target_node->gp_index, target_node->os_index, 0);
  if (!imtg) {
    errno = EINVAL;
    return -1;
  }

  found = 0;
  for(i=0; i<imtg->nr_initiators; i++) {
    struct hwloc_internal_memattr_initiator_s *imi = &imtg->initiators[i];
    hwloc__update_best_initiator(&best_initiator, &best_value, &found,
                                 &imi->initiator, imi->value,
                                 imattr->flags & HWLOC_MEMATTR_FLAG_HIGHER_FIRST);
  }

  if (found) {
    if (valuep)
      *valuep = best_value;
    return from_internal_location(&best_initiator, bestp);
  } else {
    errno = ENOENT;
    return -1;
  }
}

/****************************
 * Listing local nodes
 */

static __hwloc_inline int
match_local_obj_cpuset(hwloc_obj_t node, hwloc_cpuset_t cpuset, unsigned long flags)
{
  if (flags & HWLOC_LOCAL_NUMANODE_FLAG_ALL)
    return 1;
  if ((flags & HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY)
      && hwloc_bitmap_isincluded(cpuset, node->cpuset))
    return 1;
  if ((flags & HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY)
      && hwloc_bitmap_isincluded(node->cpuset, cpuset))
    return 1;
  return hwloc_bitmap_isequal(node->cpuset, cpuset);
}

int
hwloc_get_local_numanode_objs(hwloc_topology_t topology,
                              struct hwloc_location *location,
                              unsigned *nrp,
                              hwloc_obj_t *nodes,
                              unsigned long flags)
{
  hwloc_cpuset_t cpuset;
  hwloc_obj_t node;
  unsigned i;

  if (flags & ~(HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY
                | HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY
                | HWLOC_LOCAL_NUMANODE_FLAG_ALL)) {
    errno = EINVAL;
    return -1;
  }

  if (!nrp || (*nrp && !nodes)) {
    errno = EINVAL;
    return -1;
  }

  if (!location) {
    if (!(flags & HWLOC_LOCAL_NUMANODE_FLAG_ALL)) {
      errno = EINVAL;
      return -1;
    }
    cpuset = NULL; /* unused */

  } else {
    if (location->type == HWLOC_LOCATION_TYPE_CPUSET) {
      cpuset = location->location.cpuset;
    } else if (location->type == HWLOC_LOCATION_TYPE_OBJECT) {
      hwloc_obj_t obj = location->location.object;
      while (!obj->cpuset)
        obj = obj->parent;
      cpuset = obj->cpuset;
    } else {
      errno = EINVAL;
      return -1;
    }
  }

  i = 0;
  for(node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
      node;
      node = node->next_cousin) {
    if (!match_local_obj_cpuset(node, cpuset, flags))
      continue;
    if (i < *nrp)
      nodes[i] = node;
    i++;
  }

  *nrp = i;
  return 0;
}
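
/*
 * Usage sketch (illustrative only): listing the NUMA nodes whose locality
 * contains a given cpuset ("set" is hypothetical), with the usual
 * two-call pattern:
 *
 *   struct hwloc_location where;
 *   unsigned nr = 0;
 *   where.type = HWLOC_LOCATION_TYPE_CPUSET;
 *   where.location.cpuset = set;
 *   hwloc_get_local_numanode_objs(topology, &where, &nr, NULL,
 *                                 HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY);
 *   hwloc_obj_t *nodes = malloc(nr * sizeof(*nodes));
 *   if (nodes)
 *     hwloc_get_local_numanode_objs(topology, &where, &nr, nodes,
 *                                   HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY);
 *
 * With LARGER_LOCALITY, nodes whose cpuset contains "set" also match,
 * e.g. a node covering the whole machine when "set" is a single core.
 */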