/* Copyright 2013-2014 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <skiboot.h>
#include <fsp.h>
#include <opal.h>
#include <lock.h>
#include <device.h>
#include <errorlog.h>

/*
 * The FSP NVRAM API operates in "blocks" of 4K. It is entirely exposed
 * to the OS via the OPAL APIs.
 *
 * In order to avoid dealing with complicated read/modify/write state
 * machines (and added issues related to FSP failover in the middle)
 * we keep a memory copy of the entire nvram which we load at boot
 * time. We save only modified blocks.
 *
 * To limit the amount of memory used by the nvram image, we limit
 * how much nvram we support to NVRAM_SIZE. Additionally, this limit
 * of 1M is the maximum that the CHRP/PAPR nvram partition format
 * supports for a partition entry.
 *
 * (Q: should we save the whole thing in case of FSP failover ?)
 *
 * The nvram is expected to comply with the CHRP/PAPR defined format,
 * and specifically contain a System partition (ID 0x70) named "common"
 * with configuration variables for the bootloader and a FW private
 * partition for future use by skiboot.
 *
 * If the partition layout appears broken or lacks one of the above
 * partitions, we reformat the entire nvram at boot time.
 *
 * We do not exploit the ability of the FSP to store a checksum. This
 * is documented as possibly going away. The CHRP format for nvram
 * that Linux uses has its own (though weak) checksum mechanism already.
 */
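
/*
 * For reference, a sketch of the 16-byte CHRP partition header the
 * above format implies (names are illustrative; skiboot's generic
 * nvram format code holds the real definitions):
 *
 *	uint8_t  sig       - partition signature, 0x70 for System
 *	uint8_t  cksum     - weak checksum over this header
 *	uint16_t len       - partition length in 16-byte units,
 *	                     including this header
 *	char     name[12]  - NUL-padded name, e.g. "common"
 */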

#define NVRAM_BLKSIZE	0x1000

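/*
 * Descriptor for FSP_CMD_WRITE_VNVRAM, filled in by
 * fsp_nvram_send_write() below: dma_addr is a TCE address into the
 * image body, and blk_offset/blk_count select the dirty 4K blocks.
 * The triplet itself is TCE-mapped at PSI_DMA_NVRAM_TRIPL so that the
 * FSP can fetch it.
 */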
struct nvram_triplet {
	uint64_t	dma_addr;
	uint32_t	blk_offset;
	uint32_t	blk_count;
} __packed;

#define NVRAM_FLAG_CLEAR_WPEND	0x80000000

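/*
 * Simple state machine, driven by the open/read/write completions
 * below: CLOSED -> OPENING when the open request is queued, then OPEN
 * once the open (and, the first time around, the initial read)
 * completes. Errors and FSP resets drop us back to CLOSED so the next
 * access retries; BROKEN means the FSP reported a bad size, and ABSENT
 * means there is no FSP at all.
 */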
enum nvram_state {
	NVRAM_STATE_CLOSED,
	NVRAM_STATE_OPENING,
	NVRAM_STATE_BROKEN,
	NVRAM_STATE_OPEN,
	NVRAM_STATE_ABSENT,
};

static void *fsp_nvram_image;
static uint32_t fsp_nvram_size;
static struct lock fsp_nvram_lock = LOCK_UNLOCKED;
static struct fsp_msg *fsp_nvram_msg;
static uint32_t fsp_nvram_dirty_start;
static uint32_t fsp_nvram_dirty_end;
static bool fsp_nvram_was_read;
static struct nvram_triplet fsp_nvram_triplet __align(0x1000);
static enum nvram_state fsp_nvram_state = NVRAM_STATE_CLOSED;

DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
		OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_OPEN, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
		OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_SIZE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
		OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_READ, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
		OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
		OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

static void fsp_nvram_send_write(void);

static void fsp_nvram_wr_complete(struct fsp_msg *msg)
{
	struct fsp_msg *resp = msg->resp;
	uint8_t rc;

	lock(&fsp_nvram_lock);
	fsp_nvram_msg = NULL;

	/* Check for various errors. If an error occurred, we generally
	 * assume the nvram is completely dirty but we won't trigger a
	 * new write until we get either a new attempt at writing, or an
	 * FSP reset/reload (TODO)
	 */
	if (!resp || resp->state != fsp_msg_response)
		goto fail_dirty;
	rc = (msg->word1 >> 8) & 0xff;
	switch (rc) {
	case 0:
	case 0x44:
		/* Sync to secondary required... XXX */
	case 0x45:
		break;
	case 0xef:
		/* Sync to secondary failed, let's ignore that for now,
		 * maybe when (if) we handle redundant FSPs ...
		 */
		prerror("FSP: NVRAM sync to secondary failed\n");
		break;
	default:
		log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
			"FSP: NVRAM write returned error 0x%02x\n", rc);
		goto fail_dirty;
	}
	fsp_freemsg(msg);
	if (fsp_nvram_dirty_start <= fsp_nvram_dirty_end)
		fsp_nvram_send_write();
	unlock(&fsp_nvram_lock);
	return;
 fail_dirty:
	fsp_nvram_dirty_start = 0;
	fsp_nvram_dirty_end = fsp_nvram_size - 1;
	fsp_freemsg(msg);
	unlock(&fsp_nvram_lock);
}

static void fsp_nvram_send_write(void)
{
	uint32_t start = fsp_nvram_dirty_start;
	uint32_t end = fsp_nvram_dirty_end;
	uint32_t count;

	if (start > end || fsp_nvram_state != NVRAM_STATE_OPEN)
		return;
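	/*
	 * Illustrative numbers: with start 0x1000 and end 0x2000 this
	 * computes blk_offset 1 and blk_count 2, i.e. the two 4K blocks
	 * covering the dirty window (the +1 accounts for the block
	 * containing 'end').
	 */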
	count = (end - start) / NVRAM_BLKSIZE + 1;
	fsp_nvram_triplet.dma_addr = PSI_DMA_NVRAM_BODY + start;
	fsp_nvram_triplet.blk_offset = start / NVRAM_BLKSIZE;
	fsp_nvram_triplet.blk_count = count;
	fsp_nvram_msg = fsp_mkmsg(FSP_CMD_WRITE_VNVRAM, 6,
				  0, PSI_DMA_NVRAM_TRIPL, 1,
				  NVRAM_FLAG_CLEAR_WPEND, 0, 0);
	if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_wr_complete)) {
		fsp_freemsg(fsp_nvram_msg);
		fsp_nvram_msg = NULL;
		log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
				"FSP: Error queueing nvram update\n");
		return;
	}
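	/*
	 * Mark the image clean: start > end is the "empty window"
	 * convention checked by fsp_nvram_wr_complete(), which will
	 * kick off another write if new blocks got dirtied while this
	 * one was in flight.
	 */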
	fsp_nvram_dirty_start = fsp_nvram_size;
	fsp_nvram_dirty_end = 0;
}

static void fsp_nvram_rd_complete(struct fsp_msg *msg)
{
	int64_t rc;

	lock(&fsp_nvram_lock);

	/* Read complete, check status. What to do if the read fails ?
	 *
	 * Well, there could be various reasons such as an FSP reboot
	 * at the wrong time, but there is really not much we can do
	 * so for now I'll just mark the nvram as closed, and we'll
	 * attempt a re-open and re-read whenever the OS tries to
	 * access it
	 */
	rc = (msg->resp->word1 >> 8) & 0xff;
	fsp_nvram_msg = NULL;
	fsp_freemsg(msg);
	if (rc) {
		prerror("FSP: NVRAM read failed, will try again later\n");
		fsp_nvram_state = NVRAM_STATE_CLOSED;
	} else {
		/* nvram was read once, no need to do it ever again */
		fsp_nvram_was_read = true;
		fsp_nvram_state = NVRAM_STATE_OPEN;

		/* XXX Here we should look for nvram settings that concern
		 * us such as guest kernel arguments etc...
		 */
	}
	unlock(&fsp_nvram_lock);
	nvram_read_complete(fsp_nvram_state == NVRAM_STATE_OPEN);
	if (fsp_nvram_state != NVRAM_STATE_OPEN)
		log_simple_error(&e_info(OPAL_RC_NVRAM_INIT),
			"FSP: NVRAM not read, skipping init\n");
}

static void fsp_nvram_send_read(void)
{
	fsp_nvram_msg = fsp_mkmsg(FSP_CMD_READ_VNVRAM, 4,
				  0, PSI_DMA_NVRAM_BODY, 0,
				  fsp_nvram_size / NVRAM_BLKSIZE);
	if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_rd_complete)) {
		/* If the nvram read fails to queue, we mark ourselves
		 * closed. Shouldn't have happened anyway. Not much else
		 * we can do.
		 */
		fsp_nvram_state = NVRAM_STATE_CLOSED;
		fsp_freemsg(fsp_nvram_msg);
		fsp_nvram_msg = NULL;
		log_simple_error(&e_info(OPAL_RC_NVRAM_READ),
				"FSP: Error queueing nvram read\n");
		return;
	}
}

static void fsp_nvram_open_complete(struct fsp_msg *msg)
{
	int8_t rc;

	lock(&fsp_nvram_lock);

	/* Open complete, check status */
	rc = (msg->resp->word1 >> 8) & 0xff;
	fsp_nvram_msg = NULL;
	fsp_freemsg(msg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_NVRAM_OPEN),
			"FSP: NVRAM open failed, FSP error 0x%02x\n", rc);
		goto failed;
	}
	if (fsp_nvram_was_read)
		fsp_nvram_state = NVRAM_STATE_OPEN;
	else
		fsp_nvram_send_read();
	unlock(&fsp_nvram_lock);
	return;
 failed:
	fsp_nvram_state = NVRAM_STATE_CLOSED;
	unlock(&fsp_nvram_lock);
}

static void fsp_nvram_send_open(void)
{
	printf("FSP NVRAM: Opening nvram...\n");
	fsp_nvram_msg = fsp_mkmsg(FSP_CMD_OPEN_VNVRAM, 1, fsp_nvram_size);
	assert(fsp_nvram_msg);
	fsp_nvram_state = NVRAM_STATE_OPENING;
	if (!fsp_queue_msg(fsp_nvram_msg, fsp_nvram_open_complete))
		return;

	prerror("FSP NVRAM: Failed to queue nvram open message\n");
	fsp_freemsg(fsp_nvram_msg);
	fsp_nvram_msg = NULL;
	fsp_nvram_state = NVRAM_STATE_CLOSED;
}

static bool fsp_nvram_get_size(uint32_t *out_size)
{
	struct fsp_msg *msg;
	int rc, size;

	msg = fsp_mkmsg(FSP_CMD_GET_VNVRAM_SIZE, 0);
	assert(msg);

	rc = fsp_sync_msg(msg, false);
	size = msg->resp ? msg->resp->data.words[0] : 0;
	fsp_freemsg(msg);
	if (rc || size == 0) {
		log_simple_error(&e_info(OPAL_RC_NVRAM_SIZE),
			"FSP: Error %d, nvram size reported is %d\n", rc, size);
		fsp_nvram_state = NVRAM_STATE_BROKEN;
		return false;
	}
	printf("FSP: NVRAM file size from FSP is %d bytes\n", size);
	*out_size = size;
	return true;
}

static bool fsp_nvram_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
{
	assert(msg == NULL);

	switch (cmd_sub_mod) {
	case FSP_RESET_START:
		printf("FSP: Closing NVRAM on account of FSP Reset\n");
		fsp_nvram_state = NVRAM_STATE_CLOSED;
		return true;
	case FSP_RELOAD_COMPLETE:
		printf("FSP: Reopening NVRAM on FSP Reload complete\n");
		lock(&fsp_nvram_lock);
		fsp_nvram_send_open();
		unlock(&fsp_nvram_lock);
		return true;
	}
	return false;
}

static struct fsp_client fsp_nvram_client_rr = {
	.message = fsp_nvram_msg_rr,
};

static bool fsp_vnvram_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
{
	u32 cmd;
	struct fsp_msg *resp;

	assert(msg == NULL);
	switch (cmd_sub_mod) {
	case FSP_CMD_GET_VNV_STATS:
		prlog(PR_DEBUG,
		      "FSP NVRAM: Get vNVRAM statistics not supported\n");
		cmd = FSP_RSP_GET_VNV_STATS | FSP_STATUS_INVALID_SUBCMD;
		break;
	case FSP_CMD_FREE_VNV_STATS:
		prlog(PR_DEBUG,
		      "FSP NVRAM: Free vNVRAM statistics buffer not supported\n");
		cmd = FSP_RSP_FREE_VNV_STATS | FSP_STATUS_INVALID_SUBCMD;
		break;
	default:
		return false;
	}

	resp = fsp_mkmsg(cmd, 0);
	if (!resp) {
		prerror("FSP NVRAM: Failed to allocate resp message\n");
		return false;
	}
	if (fsp_queue_msg(resp, fsp_freemsg)) {
		prerror("FSP NVRAM: Failed to queue resp message\n");
		fsp_freemsg(resp);
		return false;
	}
	return true;
}

static struct fsp_client fsp_vnvram_client = {
	.message = fsp_vnvram_msg,
};

int fsp_nvram_info(uint32_t *total_size)
{
	if (!fsp_present()) {
		fsp_nvram_state = NVRAM_STATE_ABSENT;
		return OPAL_HARDWARE;
	}

	if (!fsp_nvram_get_size(total_size))
		return OPAL_HARDWARE;
	return OPAL_SUCCESS;
}

int fsp_nvram_start_read(void *dst, uint32_t src, uint32_t len)
{
	/* We are currently limited to fully aligned transfers */
	assert((((uint64_t)dst) & 0xfff) == 0);
	assert(dst);

	/* Currently don't support src != 0 */
	assert(src == 0);

	if (!fsp_present())
		return -ENODEV;

	op_display(OP_LOG, OP_MOD_INIT, 0x0007);

	lock(&fsp_nvram_lock);

	/* Store image info */
	fsp_nvram_image = dst;
	fsp_nvram_size = len;

	/* Mark nvram as not dirty */
	fsp_nvram_dirty_start = len;
	fsp_nvram_dirty_end = 0;

	/* Map TCEs */
	fsp_tce_map(PSI_DMA_NVRAM_TRIPL, &fsp_nvram_triplet,
		    PSI_DMA_NVRAM_TRIPL_SZ);
	fsp_tce_map(PSI_DMA_NVRAM_BODY, dst, PSI_DMA_NVRAM_BODY_SZ);

	/* Register for the reset/reload event */
	fsp_register_client(&fsp_nvram_client_rr, FSP_MCLASS_RR_EVENT);

	/* Register for virtual NVRAM interface events */
	fsp_register_client(&fsp_vnvram_client, FSP_MCLASS_VIRTUAL_NVRAM);

	/* Open and load the nvram from the FSP */
	fsp_nvram_send_open();

	unlock(&fsp_nvram_lock);

	return 0;
}
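
/*
 * Note on the calling convention below: the in-memory image is the
 * source of truth, so the caller updates fsp_nvram_image directly and
 * then passes a pointer into that same image (anything else is
 * rejected with OPAL_HARDWARE). This routine only widens the dirty
 * window and, if no write is already in flight, kicks one off; e.g.
 * after copying new data to fsp_nvram_image + off, one would call
 * fsp_nvram_write(off, fsp_nvram_image + off, len).
 */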
int fsp_nvram_write(uint32_t offset, void *src, uint32_t size)
{
	uint64_t end = offset + size - 1;

	/* We only support writing from the original image */
	if (src != fsp_nvram_image + offset)
		return OPAL_HARDWARE;

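	/*
	 * Round both ends of the dirty window down to the start of
	 * their containing NVRAM_BLKSIZE block; fsp_nvram_send_write()
	 * then writes whole blocks (its blk_count computation adds one
	 * to cover the block containing 'end').
	 */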
	offset &= ~(NVRAM_BLKSIZE - 1);
	end &= ~(NVRAM_BLKSIZE - 1);

	lock(&fsp_nvram_lock);
	/* If the nvram is closed, try re-opening */
	if (fsp_nvram_state == NVRAM_STATE_CLOSED)
		fsp_nvram_send_open();
	if (fsp_nvram_dirty_start > offset)
		fsp_nvram_dirty_start = offset;
	if (fsp_nvram_dirty_end < end)
		fsp_nvram_dirty_end = end;
	if (!fsp_nvram_msg && fsp_nvram_state == NVRAM_STATE_OPEN)
		fsp_nvram_send_write();
	unlock(&fsp_nvram_lock);

	return 0;
}