/* Copyright 2013-2016 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define pr_fmt(fmt) "IPMI: " fmt
#include <ccan/list/list.h>
#include <ccan/str/str.h>
#include <compiler.h>
#include <errno.h>
#include <skiboot.h>
#include <stdlib.h>
#include <string.h>
#include <ipmi.h>
#include <device.h>
#include <opal.h>
#include <lock.h>
#include <errorlog.h>
#include <pel.h>
#include <opal-msg.h>
#include <debug_descriptor.h>
#include <occ.h>

/* OEM SEL fields */
#define SEL_OEM_ID_0		0x55
#define SEL_OEM_ID_1		0x55
#define SEL_RECORD_TYPE_OEM	0xC0
#define SEL_RECORD_TYPE_EVENT	0x02

#define SEL_NETFN_IBM		0x3a

/* OEM SEL Commands */
/* TODO: Move these to their respective source files */
#define CMD_AMI_POWER		0x04
#define CMD_AMI_PNOR_ACCESS	0x07
#define CMD_AMI_OCC_RESET	0x0e

/* XXX: Listed here for completeness, registered in libflash/ipmi-flash.c */
#define CMD_OP_HIOMAP_EVENT	0x0f

#define SOFT_OFF	        0x00
#define SOFT_REBOOT	        0x01

#define RELEASE_PNOR		0x00
#define REQUEST_PNOR		0x01

/* 32.1 SEL Event Records type */
#define SEL_REC_TYPE_SYS_EVENT	0x02
#define SEL_REC_TYPE_AMI_ESEL	0xDF

/* OEM SEL generator ID for AMI */
#define SEL_GENERATOR_ID_AMI	0x2000

/* IPMI SEL version */
#define SEL_EVM_VER_1		0x03
#define SEL_EVM_VER_2		0x04

/*
 * Sensor type for System events
 *
 * Sensor information (type, number, etc.) is passed to us via the
 * device tree. Currently we are using the System Event type to
 * log OPAL events.
 */
#define SENSOR_TYPE_SYS_EVENT	0x12

/*
 * 42.1 Event/Reading Type Codes
 *
 * Note that device hotplug and availability related events
 * are not defined as we are not using those event types.
 */
#define SEL_EVENT_DIR_TYPE_UNSPECIFIED	0x00
#define SEL_EVENT_DIR_TYPE_THRESHOLD	0x01
#define SEL_EVENT_DIR_TYPE_STATE	0x03
#define SEL_EVENT_DIR_TYPE_PREDICTIVE	0x04
#define SEL_EVENT_DIR_TYPE_LIMIT	0x05
#define SEL_EVENT_DIR_TYPE_PERFORMANCE	0x06
#define SEL_EVENT_DIR_TYPE_TRANSITION	0x07
#define SEL_EVENT_DIR_TYPE_OEM		0x70

/*
 * 42.1 Event/Reading Type Codes
 */
#define SEL_DATA1_AMI			0xAA
#define SEL_DATA1_DEASSERTED		0x00
#define SEL_DATA1_ASSERTED		0x01
#define SEL_DATA1_OK			0x00
#define SEL_DATA1_NON_CRIT_FROM_OK	0x01
#define SEL_DATA1_CRIT_FROM_LESS_SEV	0x02
#define SEL_DATA1_NON_REC_FROM_LESS_SEV	0x03
#define SEL_DATA1_NON_CRIT		0x04
#define SEL_DATA1_CRITICAL		0x05
#define SEL_DATA1_NON_RECOVERABLE	0x06
#define SEL_DATA1_MONITOR		0x07
#define SEL_DATA1_INFORMATIONAL		0x08

/* SEL Record Entry */
struct sel_record {
	le16		record_id;
	uint8_t		record_type;
	le32		timestamp;
	le16		generator_id;
	uint8_t		evm_ver;
	uint8_t		sensor_type;
	uint8_t		sensor_number;
	uint8_t		event_dir_type;
	uint8_t		event_data1;
	uint8_t		event_data2;
	uint8_t		event_data3;
} __packed;

static struct sel_record sel_record;

struct oem_sel {
	/* SEL header */
	uint8_t id[2];
	uint8_t type;
	uint8_t timestamp[4];
	uint8_t manuf_id[3];
	/* OEM SEL data (6 bytes) follows */
	uint8_t netfun;
	uint8_t cmd;
	uint8_t data[4];
};

#define ESEL_HDR_SIZE 7

/* Used for sending PANIC events, e.g. from the abort() path */
struct ipmi_sel_panic_msg {
	bool		busy;
	struct ipmi_msg	*msg;
	struct lock	lock;
};
static struct ipmi_sel_panic_msg ipmi_sel_panic_msg;

static LIST_HEAD(sel_handlers);

/* Forward declaration */
static void ipmi_elog_poll(struct ipmi_msg *msg);

/*
 * Allocate an IPMI message:
 * for a normal event, allocate memory using ipmi_mkmsg(); for a PANIC
 * event, use the pre-allocated buffer.
 */
static struct ipmi_msg *ipmi_sel_alloc_msg(struct errorlog *elog_buf)
{
	struct ipmi_msg *msg = NULL;

	if (elog_buf->event_severity == OPAL_ERROR_PANIC) {
		/* Called before initialization completes */
		if (ipmi_sel_panic_msg.msg == NULL) {
			ipmi_sel_init();	/* Try to allocate IPMI message */
			if (ipmi_sel_panic_msg.msg == NULL)
				return NULL;
		}

		if (ipmi_sel_panic_msg.busy == true)
			return NULL;

		lock(&ipmi_sel_panic_msg.lock);
		msg = ipmi_sel_panic_msg.msg;
		ipmi_sel_panic_msg.busy = true;
		unlock(&ipmi_sel_panic_msg.lock);

		ipmi_init_msg(msg, IPMI_DEFAULT_INTERFACE, IPMI_RESERVE_SEL,
				ipmi_elog_poll, elog_buf, IPMI_MAX_REQ_SIZE, 2);
	} else {
		msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_RESERVE_SEL,
				ipmi_elog_poll, elog_buf, NULL,
				IPMI_MAX_REQ_SIZE, 2);
	}

	return msg;
}

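/*
 * Free an IPMI message allocated by ipmi_sel_alloc_msg(). The
 * pre-allocated PANIC message is only marked as no longer busy;
 * normal messages are handed back to the IPMI layer.
 */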
static void ipmi_sel_free_msg(struct ipmi_msg *msg)
{
	if (msg == ipmi_sel_panic_msg.msg) {
		lock(&ipmi_sel_panic_msg.lock);
		ipmi_sel_panic_msg.busy = false;
		unlock(&ipmi_sel_panic_msg.lock);
	} else {
		ipmi_free_msg(msg);
	}

	msg = NULL;
}

/* Initialize eSEL record */
static void ipmi_init_esel_record(void)
{
	memset(&sel_record, 0, sizeof(struct sel_record));
	sel_record.record_type = SEL_REC_TYPE_AMI_ESEL;
	sel_record.generator_id = SEL_GENERATOR_ID_AMI;
	sel_record.evm_ver = SEL_EVM_VER_2;
	sel_record.sensor_type	= SENSOR_TYPE_SYS_EVENT;
	sel_record.sensor_number =
		ipmi_get_sensor_number(SENSOR_TYPE_SYS_EVENT);
	sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_OEM;
	sel_record.event_data1 = SEL_DATA1_AMI;
}

/* Update required fields in SEL record */
static void ipmi_update_sel_record(uint8_t event_severity, uint16_t esel_record_id)
{
	sel_record.record_type = SEL_REC_TYPE_SYS_EVENT;
	sel_record.event_data2 = (esel_record_id >> 8) & 0xff;
	sel_record.event_data3 = esel_record_id & 0xff;

	switch (event_severity) {
	case OPAL_ERROR_PANIC:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_TRANSITION;
		sel_record.event_data1 = SEL_DATA1_CRITICAL;
		break;
	case OPAL_UNRECOVERABLE_ERR_GENERAL:	/* Fall through */
	case OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF:
	case OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY:
	case OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY_PERF:
	case OPAL_UNRECOVERABLE_ERR_LOSS_OF_FUNCTION:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_TRANSITION;
		sel_record.event_data1 = SEL_DATA1_NON_RECOVERABLE;
		break;
	case OPAL_PREDICTIVE_ERR_GENERAL:	/* Fall through */
	case OPAL_PREDICTIVE_ERR_DEGRADED_PERF:
	case OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT:
	case OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_BOOT_DEGRADE_PERF:
	case OPAL_PREDICTIVE_ERR_LOSS_OF_REDUNDANCY:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_PREDICTIVE;
		sel_record.event_data1 = SEL_DATA1_NON_CRIT_FROM_OK;
		break;
	case OPAL_RECOVERED_ERR_GENERAL:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_TRANSITION;
		sel_record.event_data1 = SEL_DATA1_OK;
		break;
	case OPAL_INFO:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_TRANSITION;
		sel_record.event_data1 = SEL_DATA1_INFORMATIONAL;
		break;
	default:
		sel_record.event_dir_type = SEL_EVENT_DIR_TYPE_STATE;
		sel_record.event_data1 = SEL_DATA1_ASSERTED;
		break;
	}
}

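/*
 * Error callback for the eSEL partial-add messages: retry on lost
 * arbitration (e.g. a SEL erase in progress), otherwise give up on
 * this error log.
 */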
static void ipmi_elog_error(struct ipmi_msg *msg)
{
	if (msg->cc == IPMI_LOST_ARBITRATION_ERR)
		/* Retry due to SEL erase */
		ipmi_queue_msg(msg);
	else {
		opal_elog_complete(msg->user_data, false);
		ipmi_sel_free_msg(msg);
	}
}

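/* Error callback for the "Add SEL Entry" message */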
static void ipmi_log_sel_event_error(struct ipmi_msg *msg)
{
	if (msg->cc != IPMI_CC_NO_ERROR)
		prlog(PR_INFO, "SEL: Failed to log SEL event\n");

	ipmi_sel_free_msg(msg);
}

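/* Completion callback: the BMC returns the record ID of the new SEL entry */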
static void ipmi_log_sel_event_complete(struct ipmi_msg *msg)
{
	prlog(PR_INFO, "SEL: New event logged [ID : %x%x]\n", msg->data[1],
		msg->data[0]);

	ipmi_sel_free_msg(msg);
}

/* Log SEL event with eSEL record ID */
static void ipmi_log_sel_event(struct ipmi_msg *msg, uint8_t event_severity,
				uint16_t esel_record_id)
{
	/* Fill required SEL event fields */
	ipmi_update_sel_record(event_severity, esel_record_id);

	/* Fill IPMI message */
	ipmi_init_msg(msg, IPMI_DEFAULT_INTERFACE, IPMI_ADD_SEL_EVENT,
		      ipmi_log_sel_event_complete, NULL,
		      sizeof(struct sel_record), 2);

	/* Copy SEL data */
	memcpy(msg->data, &sel_record, sizeof(struct sel_record));

	msg->error = ipmi_log_sel_event_error;
	ipmi_queue_msg_head(msg);
}

/* Goes through the required steps to add a complete eSEL:
 *
 *  1. Get a reservation
 *  2. Add eSEL header
 *  3. Partially add data to the SEL
 *
 * Because a reservation is needed we need to ensure eSELs are added
 * as a single transaction, as concurrent/interleaved adds would cancel
 * the reservation. We guarantee this by always adding our messages to
 * the head of the transmission queue, blocking any other messages
 * from being sent until we have completed sending this message.
 *
 * There is still a very small chance that we will accidentally
 * interleave a message if there is another one waiting at the head of
 * the ipmi queue and another CPU calls the ipmi poller before we
 * complete. However this should just cause a reservation cancelled
 * error, which we have to deal with anyway (e.g. because there may be
 * a SEL erase in progress), so it shouldn't cause any problems.
 */
static void ipmi_elog_poll(struct ipmi_msg *msg)
{
	static bool first = false;
	static char pel_buf[IPMI_MAX_PEL_SIZE];
	static size_t pel_size;
	static size_t esel_size;
	static int esel_index = 0;
	int pel_index;
	static unsigned int reservation_id = 0;
	static unsigned int record_id = 0;
	struct errorlog *elog_buf = (struct errorlog *) msg->user_data;
	size_t req_size;

	if (bmc_platform->sw->ipmi_oem_partial_add_esel == 0) {
		prlog(PR_WARNING, "Dropped eSEL: BMC code is buggy/missing\n");
		return;
	}

	ipmi_init_esel_record();
	if (msg->cmd == IPMI_CMD(IPMI_RESERVE_SEL)) {
		first = true;
		reservation_id = msg->data[0];
		reservation_id |= msg->data[1] << 8;
		if (!reservation_id) {
			/*
			 * According to the specification we should never
			 * get here, but just in case we do we cancel
			 * sending the message.
			 */
			prerror("Invalid reservation id");
			opal_elog_complete(elog_buf, false);
			ipmi_sel_free_msg(msg);
			return;
		}

		pel_size = create_pel_log(elog_buf, pel_buf, IPMI_MAX_PEL_SIZE);
		esel_size = pel_size + sizeof(struct sel_record);
		esel_index = 0;
		record_id = 0;
	} else {
		record_id = msg->data[0];
		record_id |= msg->data[1] << 8;
	}

	/* Start or continue the IPMI_PARTIAL_ADD_SEL */
	if (esel_index >= esel_size) {
		/*
		 * We're all done. Invalidate the reservation id to
		 * ensure we get an error if we cut in on another eSEL
		 * message.
		 */
		reservation_id = 0;
		esel_index = 0;

		/* Log SEL event and free ipmi message */
		ipmi_log_sel_event(msg, elog_buf->event_severity, record_id);

		opal_elog_complete(elog_buf, true);
		return;
	}

	if ((esel_size - esel_index) <= (IPMI_MAX_REQ_SIZE - ESEL_HDR_SIZE)) {
		/* Last data to send */
		msg->data[6] = 1;
		req_size = esel_size - esel_index + ESEL_HDR_SIZE;
	} else {
		msg->data[6] = 0;
		req_size = IPMI_MAX_REQ_SIZE;
	}

	ipmi_init_msg(msg, IPMI_DEFAULT_INTERFACE,
		      bmc_platform->sw->ipmi_oem_partial_add_esel,
		      ipmi_elog_poll, elog_buf, req_size, 2);

	msg->data[0] = reservation_id & 0xff;
	msg->data[1] = (reservation_id >> 8) & 0xff;
	msg->data[2] = record_id & 0xff;
	msg->data[3] = (record_id >> 8) & 0xff;
	msg->data[4] = esel_index & 0xff;
	msg->data[5] = (esel_index >> 8) & 0xff;

	if (first) {
		first = false;
		memcpy(&msg->data[ESEL_HDR_SIZE], &sel_record,
			sizeof(struct sel_record));
		esel_index = sizeof(struct sel_record);
		msg->req_size = esel_index + ESEL_HDR_SIZE;
	} else {
		pel_index = esel_index - sizeof(struct sel_record);
		memcpy(&msg->data[ESEL_HDR_SIZE], &pel_buf[pel_index],
			msg->req_size - ESEL_HDR_SIZE);
		esel_index += msg->req_size - ESEL_HDR_SIZE;
	}

	ipmi_queue_msg_head(msg);
	return;
}

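/*
 * Commit an OPAL error log to the BMC as an eSEL. Events below the
 * predictive severity threshold (and non-OPAL logs) are dropped;
 * PANIC events are sent synchronously using the pre-allocated message.
 */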
int ipmi_elog_commit(struct errorlog *elog_buf)
{
	struct ipmi_msg *msg;

	/* Only log events that need attention */
	if (elog_buf->event_severity <
			OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT ||
			elog_buf->elog_origin != ORG_SAPPHIRE) {
		prlog(PR_INFO, "dropping non severe PEL event\n");
		opal_elog_complete(elog_buf, true);
		return 0;
	}

	/*
	 * We pass a large request size in to mkmsg so that we have a
	 * large enough allocation to reuse the message to pass the
	 * PEL data via a series of partial add commands.
	 */
	msg = ipmi_sel_alloc_msg(elog_buf);
	if (!msg) {
		opal_elog_complete(elog_buf, false);
		return OPAL_RESOURCE;
	}

	msg->error = ipmi_elog_error;
	msg->req_size = 0;
	if (elog_buf->event_severity == OPAL_ERROR_PANIC)
		ipmi_queue_msg_sync(msg);
	else
		ipmi_queue_msg(msg);

	return 0;
}

#define ACCESS_DENIED	0x00
#define ACCESS_GRANTED	0x01

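/*
 * Handle a PNOR access request/release from the BMC: on a request,
 * try to reserve the flash and acknowledge whether access was
 * granted; on a release, hand the flash back to the host.
 */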
static void sel_pnor(uint8_t access, void *context __unused)
{
	struct ipmi_msg *msg;
	uint8_t granted = ACCESS_GRANTED;

	switch (access) {
	case REQUEST_PNOR:
		prlog(PR_NOTICE, "PNOR access requested\n");
		if (bmc_platform->sw->ipmi_oem_pnor_access_status == 0) {
			/**
			 * @fwts-label PNORAccessYeahButNoBut
			 * @fwts-advice OPAL doesn't know that the BMC supports
			 * PNOR access commands. This will be a bug in the OPAL
			 * support for this BMC.
			 */
			prlog(PR_ERR, "PNOR BUG: access requested but BMC doesn't support request\n");
			break;
		}

		granted = flash_reserve();
		if (granted)
			occ_pnor_set_owner(PNOR_OWNER_EXTERNAL);
		/* Ack the request */
		msg = ipmi_mkmsg_simple(bmc_platform->sw->ipmi_oem_pnor_access_status, &granted, 1);
		ipmi_queue_msg(msg);
		break;
	case RELEASE_PNOR:
		prlog(PR_NOTICE, "PNOR access released\n");
		flash_release();
		occ_pnor_set_owner(PNOR_OWNER_HOST);
		break;
	default:
		/**
		 * @fwts-label InvalidPNORAccessRequest
		 * @fwts-advice In negotiating PNOR access with BMC, we
		 * got an odd/invalid request from the BMC. Likely a bug
		 * in OPAL/BMC interaction.
		 */
		prlog(PR_ERR, "invalid PNOR access requested: %02x\n",
		      access);
	}
}

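/*
 * Handle a soft power-off/reboot request from the BMC. If the host is
 * not yet running, the request is acted on immediately; otherwise it
 * is forwarded to the OS as an OPAL_MSG_SHUTDOWN message.
 */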
static void sel_power(uint8_t power, void *context __unused)
{
	switch (power) {
	case SOFT_OFF:
		prlog(PR_NOTICE, "Soft shutdown requested\n");
		if (opal_booting() && platform.cec_power_down) {
			prlog(PR_NOTICE, "Host not up, shutting down now\n");
			platform.cec_power_down(IPMI_CHASSIS_PWR_DOWN);
		} else {
			opal_queue_msg(OPAL_MSG_SHUTDOWN, NULL, NULL, SOFT_OFF);
		}

		break;
	case SOFT_REBOOT:
		prlog(PR_NOTICE, "Soft reboot requested\n");
		if (opal_booting() && platform.cec_reboot) {
			prlog(PR_NOTICE, "Host not up, rebooting now\n");
			platform.cec_reboot();
		} else {
			opal_queue_msg(OPAL_MSG_SHUTDOWN, NULL, NULL, SOFT_REBOOT);
		}

		break;
	default:
		prlog(PR_WARNING, "requested bad power state: %02x\n",
		      power);
	}
}

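/*
 * Map an OCC sensor ID from the BMC to a chip ID using the sensor
 * nodes under the bmc/sensors device tree node. Falls back to chip 0
 * if the sensor cannot be resolved.
 */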
static uint32_t occ_sensor_id_to_chip(uint8_t sensor, uint32_t *chip)
{
	struct dt_node *node, *bmc_node, *sensors_node;

	/* Default chip id */
	*chip = 0;

	bmc_node = dt_find_by_name(dt_root, "bmc");
	if (!bmc_node)
		return 0;

	sensors_node = dt_find_by_name(bmc_node, "sensors");
	if (!sensors_node)
		return 0;

	node = dt_find_by_name_addr(sensors_node, "sensor", sensor);
	if (!node) {
		prlog(PR_DEBUG, "Could not find OCC sensor node. Id : %d\n",
		      (u32)sensor);
		return 0;
	}

	if (!dt_has_node_property(node, "ibm,chip-id", NULL)) {
		prlog(PR_DEBUG, "Could not find chip-id for OCC sensor : %d\n",
		      (u32)sensor);
		return 0;
	}

	*chip = dt_get_chip_id(node);
	return 0;
}

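/* Handle an OCC reset request from the BMC for the given sensor ID */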
static void sel_occ_reset(uint8_t sensor, void *context __unused)
{
	uint32_t chip;
	int rc;

	rc = occ_sensor_id_to_chip(sensor, &chip);
	if (rc) {
		/**
		 * @fwts-label: SELUnknownOCCReset
		 * @fwts-advice: Likely bug in what sent us the OCC reset.
		 */
		prlog(PR_ERR, "SEL message to reset an unknown OCC "
				"(sensor ID 0x%02x)\n", sensor);
		return;
	}

	prd_occ_reset(chip);
}

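/* Registered handler for one OEM SEL command */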
struct ipmi_sel_handler {
	uint8_t oem_cmd;
	void (*fn)(uint8_t data, void *context);
	void *context;
	struct list_node node;
};

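/*
 * Register a handler for an OEM SEL command. Only one handler may be
 * registered per command; duplicates are rejected with -EINVAL.
 */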
int ipmi_sel_register(uint8_t oem_cmd,
		      void (*fn)(uint8_t data, void *context),
		      void *context)
{
	struct ipmi_sel_handler *handler;

	list_for_each(&sel_handlers, handler, node) {
		if (handler->oem_cmd == oem_cmd) {
			prerror("Handler for SEL command 0x%02x already registered\n",
				oem_cmd);
			return -EINVAL;
		}
	}

	handler = malloc(sizeof(*handler));
	if (!handler)
		return -ENOMEM;

	handler->oem_cmd = oem_cmd;
	handler->fn = fn;
	handler->context = context;

	list_add(&sel_handlers, &handler->node);

	return 0;
}

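/*
 * Initialise SEL support: pre-allocate the message used for PANIC
 * events and register the old-style AMI OEM command handlers.
 */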
void ipmi_sel_init(void)
{
	int rc;

	/* Already done */
	if (ipmi_sel_panic_msg.msg != NULL)
		return;

	memset(&ipmi_sel_panic_msg, 0, sizeof(struct ipmi_sel_panic_msg));
	ipmi_sel_panic_msg.msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
					IPMI_RESERVE_SEL, ipmi_elog_poll,
					NULL, NULL, IPMI_MAX_REQ_SIZE, 2);

	/* Hackishly register these old-style handlers here for now */
	/* TODO: Move them to their appropriate source files */
	rc = ipmi_sel_register(CMD_AMI_POWER, sel_power, NULL);
	if (rc < 0) {
		prerror("Failed to register SEL handler for %s",
			stringify(CMD_AMI_POWER));
	}

	rc = ipmi_sel_register(CMD_AMI_OCC_RESET, sel_occ_reset, NULL);
	if (rc < 0) {
		prerror("Failed to register SEL handler for %s",
			stringify(CMD_AMI_OCC_RESET));
	}

	rc = ipmi_sel_register(CMD_AMI_PNOR_ACCESS, sel_pnor, NULL);
	if (rc < 0) {
		prerror("Failed to register SEL handler for %s",
			stringify(CMD_AMI_PNOR_ACCESS));
	}
}

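/*
 * Parse an incoming OEM SEL message from the BMC and dispatch it to
 * the handler registered for its command, if any.
 */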
void ipmi_parse_sel(struct ipmi_msg *msg)
{
	struct ipmi_sel_handler *handler;
	struct oem_sel sel;

	assert(msg->resp_size <= 16);

	memcpy(&sel, msg->data, msg->resp_size);

	/* We do not process system event records */
	if (sel.type == SEL_RECORD_TYPE_EVENT) {
		prlog(PR_INFO, "dropping System Event Record SEL\n");
		return;
	}

	prlog(PR_DEBUG, "SEL received (%d bytes, netfn %d, cmd %d)\n",
			msg->resp_size, sel.netfun, sel.cmd);

	/* Only accept OEM SEL messages */
	if (sel.id[0] != SEL_OEM_ID_0 || sel.id[1] != SEL_OEM_ID_1 ||
		sel.type != SEL_RECORD_TYPE_OEM) {
		prlog(PR_WARNING, "unknown SEL %02x%02x (type %02x)\n",
		      sel.id[0], sel.id[1], sel.type);
		return;
	}

	list_for_each(&sel_handlers, handler, node) {
		if (handler->oem_cmd == sel.cmd) {
			handler->fn(sel.data[0], handler->context);
			return;
		}
	}

	prlog(PR_WARNING, "unknown OEM SEL command %02x received\n", sel.cmd);
}