xref: /qemu/hw/ppc/spapr_numa.c (revision d370f9cf)
/*
 * QEMU PowerPC pSeries Logical Partition NUMA associativity handling
 *
 * Copyright IBM Corp. 2020
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/ppc/spapr_numa.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/fdt.h"

/* Moved from hw/ppc/spapr_pci_nvlink2.c */
#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))

void spapr_numa_associativity_init(SpaprMachineState *spapr,
                                   MachineState *machine)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int i, j, max_nodes_with_gpus;

    /*
     * For all associativity arrays: first position is the size,
     * position MAX_DISTANCE_REF_POINTS is always the numa_id,
     * represented by the index 'i'.
     *
     * This will break on sparse NUMA setups, when/if QEMU starts
     * to support them, because there will no longer be a guarantee
     * that 'i' is a valid node_id set by the user.
     */
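    /*
     * Illustrative sketch, not taken from the code: assuming
     * MAX_DISTANCE_REF_POINTS is 4 as defined in spapr.h, node 1 of a
     * two-node guest ends up as { 4, 0, 0, 0, 1 }, each cell stored
     * big-endian. The middle positions stay zeroed for regular nodes
     * and only get distinct values for the GPU nodes set up below.
     */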
    for (i = 0; i < nb_numa_nodes; i++) {
        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
    }

    /*
     * Initialize NVLink GPU associativity arrays. We know that
     * the first GPU will take the first available NUMA id, and
     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
     * At this point we're not sure if there are GPUs or not, but
     * let's initialize the associativity arrays and allow NVLink
     * GPUs to be handled like regular NUMA nodes later on.
     */
    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;

    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);

        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
            spapr->numa_assoc_array[i][j] = gpu_assoc;
        }

        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
    }
}

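/*
 * Sketch of the output, not taken from the code: for nodeid 1 as laid out
 * above, the "ibm,associativity" property written here would be the five
 * big-endian cells { 4, 0, 0, 0, 1 }, assuming numa_assoc_array rows are
 * NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1) uint32_t wide, as declared
 * in spapr.h.
 */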
void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                       int offset, int nodeid)
{
    _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                      spapr->numa_assoc_array[nodeid],
                      sizeof(spapr->numa_assoc_array[nodeid]))));
}

static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu)
{
    uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE);
    int index = spapr_get_vcpu_id(cpu);

    /*
     * VCPUs have an extra 'cpu_id' value in ibm,associativity
     * compared to other resources. Increment the size at index
     * 0, put cpu_id last, then copy the remaining associativity
     * domains.
     */
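    /*
     * Sketch, not taken from the code: with MAX_DISTANCE_REF_POINTS
     * assumed to be 4 (so VCPU_ASSOC_SIZE is 6), vcpu 8 on node 1 would
     * get { 5, 0, 0, 0, 1, 8 }: the node's array with the size cell
     * bumped to 5 and the vcpu id appended, all cells big-endian.
     */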
    vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
    vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
    memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
           (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));

    return vcpu_assoc;
}

int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
                            int offset, PowerPCCPU *cpu)
{
    g_autofree uint32_t *vcpu_assoc = NULL;

    vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
                       VCPU_ASSOC_SIZE * sizeof(uint32_t));
}

int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
                                         int offset)
{
    MachineState *machine = MACHINE(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    uint32_t *int_buf, *cur_index, buf_len;
    int ret, i;

    /* ibm,associativity-lookup-arrays */
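    /*
     * Layout, per LoPAPR: the first cell is the number of lookup arrays
     * (M), the second the number of entries per array (N), followed by
     * M arrays of N cells each. Here N is MAX_DISTANCE_REF_POINTS and
     * each array is a node's associativity list minus its leading size
     * cell.
     */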
    buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_nodes);
    /* Number of entries per associativity list */
    int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        /*
         * For the lookup-array we use the ibm,associativity array
         * from numa_assoc_array, without the first element (size).
         */
        uint32_t *associativity = spapr->numa_assoc_array[i];
        memcpy(cur_index, ++associativity,
               sizeof(uint32_t) * MAX_DISTANCE_REF_POINTS);
        cur_index += MAX_DISTANCE_REF_POINTS;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
    g_free(int_buf);

    return ret;
}

/*
 * Helper that writes ibm,associativity-reference-points and
 * ibm,max-associativity-domains in the RTAS node pointed to by
 * @rtas in the DT @fdt.
 */
void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
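    /*
     * Note: reference points are 1-based indexes into the
     * "ibm,associativity" arrays, telling the guest which positions
     * mark meaningful NUMA domain boundaries. Pre-5.1 machine types
     * advertised only the first two entries, hence the trimming below.
     */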
    uint32_t refpoints[] = {
        cpu_to_be32(0x4),
        cpu_to_be32(0x4),
        cpu_to_be32(0x2),
    };
    uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
    uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0);
    uint32_t maxdomains[] = {
        cpu_to_be32(4),
        maxdomain,
        maxdomain,
        maxdomain,
        cpu_to_be32(spapr->gpu_numa_id),
    };
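    /*
     * Layout note, per LoPAPR: the first cell of
     * "ibm,max-associativity-domains" is the number of levels described
     * (4 here), followed by the maximum domain number the guest should
     * expect at each level; the last level tracks spapr->gpu_numa_id so
     * NVLink GPU nodes fit in.
     */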

    if (smc->pre_5_1_assoc_refpoints) {
        nr_refpoints = 2;
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, nr_refpoints * sizeof(refpoints[0])));

    _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
                     maxdomains, sizeof(maxdomains)));
}

static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    PowerPCCPU *tcpu;
    int idx;

    /* only support procno from H_REGISTER_VPA */
    if (flags != 0x1) {
        return H_FUNCTION;
    }

    tcpu = spapr_find_cpu(procno);
    if (tcpu == NULL) {
        return H_P2;
    }

    /* sequence is the same as in the "ibm,associativity" property */
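    /*
     * Return convention sketch: the hcall hands results back in R4-R9
     * (args[0..5] here), each register packing two 32-bit associativity
     * values; unused slots are filled with -1 below.
     */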

    idx = 0;
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))
    args[idx++] = ASSOCIATIVITY(0, 0);
    args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
    args[idx++] = ASSOCIATIVITY(procno, -1);
    for ( ; idx < 6; idx++) {
        args[idx] = -1;
    }
#undef ASSOCIATIVITY

    return H_SUCCESS;
}

static void spapr_numa_register_types(void)
{
    /* Virtual Processor Home Node */
    spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
                             h_home_node_associativity);
}

type_init(spapr_numa_register_types)