xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c (revision d642ef71)
1 /*
2  * Copyright 2021 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 #include <subdev/gsp.h>
24 
25 #include <nvfw/acr.h>
26 
27 static int
28 ga102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
29 {
30 	struct wpr_header_v2 hdr;
31 	struct lsb_header_v2 *lsb;
32 	struct nvkm_acr_lsfw *lsfw;
33 	u32 offset = 0;
34 
35 	lsb = kvmalloc(sizeof(*lsb), GFP_KERNEL);
36 	if (!lsb)
37 		return -ENOMEM;
38 
39 	do {
40 		nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
41 		wpr_header_v2_dump(&acr->subdev, &hdr);
42 
43 		list_for_each_entry(lsfw, &acr->lsfw, head) {
44 			if (lsfw->id != hdr.wpr.falcon_id)
45 				continue;
46 
47 			nvkm_robj(acr->wpr, hdr.wpr.lsb_offset, lsb, sizeof(*lsb));
48 			lsb_header_v2_dump(&acr->subdev, lsb);
49 
50 			lsfw->func->bld_patch(acr, lsb->bl_data_off, adjust);
51 			break;
52 		}
53 
54 		offset += sizeof(hdr);
55 	} while (hdr.wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
56 
57 	kvfree(lsb);
58 	return 0;
59 }
60 
61 static int
62 ga102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
63 {
64 	struct lsb_header_v2 *hdr;
65 	int ret = 0;
66 
67 	if (WARN_ON(lsfw->sig->size != sizeof(hdr->signature)))
68 		return -EINVAL;
69 
70 	hdr = kvzalloc(sizeof(*hdr), GFP_KERNEL);
71 	if (!hdr)
72 		return -ENOMEM;
73 
74 	hdr->hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_LSB_HEADER;
75 	hdr->hdr.version = 2;
76 	hdr->hdr.size = sizeof(*hdr);
77 
78 	memcpy(&hdr->signature, lsfw->sig->data, lsfw->sig->size);
79 	hdr->ucode_off = lsfw->offset.img;
80 	hdr->ucode_size = lsfw->ucode_size;
81 	hdr->data_size = lsfw->data_size;
82 	hdr->bl_code_size = lsfw->bootloader_size;
83 	hdr->bl_imem_off = lsfw->bootloader_imem_offset;
84 	hdr->bl_data_off = lsfw->offset.bld;
85 	hdr->bl_data_size = lsfw->bl_data_size;
86 	hdr->app_code_off = lsfw->app_start_offset + lsfw->app_resident_code_offset;
87 	hdr->app_code_size = ALIGN(lsfw->app_resident_code_size, 0x100);
88 	hdr->app_data_off = lsfw->app_start_offset + lsfw->app_resident_data_offset;
89 	hdr->app_data_size = ALIGN(lsfw->app_resident_data_size, 0x100);
90 	hdr->app_imem_offset = lsfw->app_imem_offset;
91 	hdr->app_dmem_offset = lsfw->app_dmem_offset;
92 	hdr->flags = lsfw->func->flags;
93 	hdr->monitor_code_offset = 0;
94 	hdr->monitor_data_offset = 0;
95 	hdr->manifest_offset = 0;
96 
97 	if (lsfw->secure_bootloader) {
98 		struct nvkm_falcon_fw fw = {
99 			.fw.img = hdr->hs_fmc_params.pkc_signature,
100 			.fw.name = "LSFW",
101 			.func = &(const struct nvkm_falcon_fw_func) {
102 				.signature = ga100_flcn_fw_signature,
103 			},
104 			.sig_size = lsfw->sig_size,
105 			.sig_nr = lsfw->sig_nr,
106 			.sigs = lsfw->sigs,
107 			.fuse_ver = lsfw->fuse_ver,
108 			.engine_id = lsfw->engine_id,
109 			.ucode_id = lsfw->ucode_id,
110 			.falcon = lsfw->falcon,
111 
112 		};
113 
114 		ret = nvkm_falcon_get(fw.falcon, &acr->subdev);
115 		if (ret == 0) {
116 			hdr->hs_fmc_params.hs_fmc = 1;
117 			hdr->hs_fmc_params.pkc_algo = 0;
118 			hdr->hs_fmc_params.pkc_algo_version = 1;
119 			hdr->hs_fmc_params.engid_mask = lsfw->engine_id;
120 			hdr->hs_fmc_params.ucode_id = lsfw->ucode_id;
121 			hdr->hs_fmc_params.fuse_ver = lsfw->fuse_ver;
122 			ret = nvkm_falcon_fw_patch(&fw);
123 			nvkm_falcon_put(fw.falcon, &acr->subdev);
124 		}
125 	}
126 
127 	nvkm_wobj(acr->wpr, lsfw->offset.lsb, hdr, sizeof(*hdr));
128 	kvfree(hdr);
129 	return ret;
130 }
131 
132 static int
133 ga102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
134 {
135 	struct nvkm_acr_lsfw *lsfw;
136 	struct wpr_header_v2 hdr;
137 	u32 offset = 0;
138 	int ret;
139 
140 	/*XXX: shared sub-WPR headers, fill terminator for now. */
141 	nvkm_wo32(acr->wpr, 0x300, (2 << 16) | WPR_GENERIC_HEADER_ID_LSF_SHARED_SUB_WPR);
142 	nvkm_wo32(acr->wpr, 0x304, 0x14);
143 	nvkm_wo32(acr->wpr, 0x308, 0xffffffff);
144 	nvkm_wo32(acr->wpr, 0x30c, 0);
145 	nvkm_wo32(acr->wpr, 0x310, 0);
146 
147 	/* Fill per-LSF structures. */
148 	list_for_each_entry(lsfw, &acr->lsfw, head) {
149 		struct lsf_signature_v2 *sig = (void *)lsfw->sig->data;
150 
151 		hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
152 		hdr.hdr.version = 2;
153 		hdr.hdr.size = sizeof(hdr);
154 		hdr.wpr.falcon_id = lsfw->id;
155 		hdr.wpr.lsb_offset = lsfw->offset.lsb;
156 		hdr.wpr.bootstrap_owner = NVKM_ACR_LSF_GSPLITE;
157 		hdr.wpr.lazy_bootstrap = 1;
158 		hdr.wpr.bin_version = sig->ls_ucode_version;
159 		hdr.wpr.status = WPR_HEADER_V1_STATUS_COPY;
160 
161 		/* Write WPR header. */
162 		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
163 		offset += sizeof(hdr);
164 
165 		/* Write LSB header. */
166 		ret = ga102_acr_wpr_build_lsb(acr, lsfw);
167 		if (ret)
168 			return ret;
169 
170 		/* Write ucode image. */
171 		nvkm_wobj(acr->wpr, lsfw->offset.img,
172 				    lsfw->img.data,
173 				    lsfw->img.size);
174 
175 		/* Write bootloader data. */
176 		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
177 	}
178 
179 	/* Finalise WPR. */
180 	hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
181 	hdr.hdr.version = 2;
182 	hdr.hdr.size = sizeof(hdr);
183 	hdr.wpr.falcon_id = WPR_HEADER_V1_FALCON_ID_INVALID;
184 	nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
185 	return 0;
186 }
187 
188 static u32
189 ga102_acr_wpr_layout(struct nvkm_acr *acr)
190 {
191 	struct nvkm_acr_lsfw *lsfw;
192 	u32 wpr = 0;
193 
194 	wpr += 21 /* MAX_LSF */ * sizeof(struct wpr_header_v2);
195 	wpr  = ALIGN(wpr, 256);
196 
197 	wpr += 0x100; /* Shared sub-WPR headers. */
198 
199 	list_for_each_entry(lsfw, &acr->lsfw, head) {
200 		wpr  = ALIGN(wpr, 256);
201 		lsfw->offset.lsb = wpr;
202 		wpr += sizeof(struct lsb_header_v2);
203 
204 		wpr  = ALIGN(wpr, 4096);
205 		lsfw->offset.img = wpr;
206 		wpr += lsfw->img.size;
207 
208 		wpr  = ALIGN(wpr, 256);
209 		lsfw->offset.bld = wpr;
210 		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
211 		wpr += lsfw->bl_data_size;
212 	}
213 
214 	return wpr;
215 }
216 
217 static int
218 ga102_acr_wpr_parse(struct nvkm_acr *acr)
219 {
220 	const struct wpr_header_v2 *hdr = (void *)acr->wpr_fw->data;
221 
222 	while (hdr->wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
223 		wpr_header_v2_dump(&acr->subdev, hdr);
224 		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->wpr.falcon_id))
225 			return -ENOMEM;
226 	}
227 
228 	return 0;
229 }
230 
/* HS "unload" firmware, run on the SEC2 falcon. */
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
ga102_acr_unload_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_SEC2 },
	{}
};
242 
/* HS "ASB" firmware, run on the GSP falcon. */
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_asb.bin");

static const struct nvkm_acr_hsf_fwif
ga102_acr_asb_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_GSP },
	{}
};
254 
/* Falcon ops for the AHESASC HS firmware; uses gp102's load-time setup
 * with ga100-style signature selection and ga102 load/boot paths.
 */
static const struct nvkm_falcon_fw_func
ga102_acr_ahesasc_0 = {
	.signature = ga100_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.setup = gp102_acr_load_setup,
	.load = ga102_flcn_fw_load,
	.boot = ga102_flcn_fw_boot,
};

/* HS "AHESASC" firmware, run on the SEC2 falcon. */
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_ahesasc.bin");

static const struct nvkm_acr_hsf_fwif
ga102_acr_ahesasc_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_acr_ahesasc_0, NVKM_ACR_HSF_SEC2 },
	{}
};
275 
/* GA102 ACR implementation: v2 WPR/LSB header layout defined in this file,
 * with WPR allocation/check/init reused from earlier generations.
 */
static const struct nvkm_acr_func
ga102_acr = {
	.ahesasc = ga102_acr_ahesasc_fwif,
	.asb = ga102_acr_asb_fwif,
	.unload = ga102_acr_unload_fwif,
	.wpr_parse = ga102_acr_wpr_parse,
	.wpr_layout = ga102_acr_wpr_layout,
	.wpr_alloc = gp102_acr_wpr_alloc,
	.wpr_patch = ga102_acr_wpr_patch,
	.wpr_build = ga102_acr_wpr_build,
	.wpr_check = ga100_acr_wpr_check,
	.init = tu102_acr_init,
};
289 
290 static int
291 ga102_acr_load(struct nvkm_acr *acr, int version,
292 	       const struct nvkm_acr_fwif *fwif)
293 {
294 	struct nvkm_subdev *subdev = &acr->subdev;
295 	const struct nvkm_acr_hsf_fwif *hsfwif;
296 
297 	hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
298 				    acr, NULL, "acr/ucode_ahesasc", "AHESASC");
299 	if (IS_ERR(hsfwif))
300 		return PTR_ERR(hsfwif);
301 
302 	hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
303 				    acr, NULL, "acr/ucode_asb", "ASB");
304 	if (IS_ERR(hsfwif))
305 		return PTR_ERR(hsfwif);
306 
307 	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
308 				    acr, NULL, "acr/ucode_unload", "unload");
309 	if (IS_ERR(hsfwif))
310 		return PTR_ERR(hsfwif);
311 
312 	return 0;
313 }
314 
/* Firmware-version table: entry 0 uses signed firmware, -1 falls back
 * to the no-firmware gm200 path.
 */
static const struct nvkm_acr_fwif
ga102_acr_fwif[] = {
	{  0, ga102_acr_load, &ga102_acr },
	{ -1, gm200_acr_nofw, &gm200_acr },
	{}
};
321 
322 int
323 ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
324 	      struct nvkm_acr **pacr)
325 {
326 	if (nvkm_gsp_rm(device->gsp))
327 		return -ENODEV;
328 
329 	return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
330 }
331