/* Copyright 2013-2019 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define pr_fmt(fmt) "BT: " fmt

#include <skiboot.h>
#include <lpc.h>
#include <lock.h>
#include <device.h>
#include <timebase.h>
#include <ipmi.h>
#include <bt.h>
#include <timer.h>
#include <chip.h>
#include <interrupts.h>

/* BT registers */
#define BT_CTRL			0
#define BT_CTRL_B_BUSY		0x80
#define BT_CTRL_H_BUSY		0x40
#define BT_CTRL_OEM0		0x20
#define BT_CTRL_SMS_ATN		0x10
#define BT_CTRL_B2H_ATN		0x08
#define BT_CTRL_H2B_ATN		0x04
#define BT_CTRL_CLR_RD_PTR	0x02
#define BT_CTRL_CLR_WR_PTR	0x01
#define BT_HOST2BMC		1
#define BT_INTMASK		2
#define BT_INTMASK_B2H_IRQEN	0x01
#define BT_INTMASK_B2H_IRQ	0x02
#define BT_INTMASK_BMC_HWRST	0x80
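
/*
 * Note on BT_CTRL semantics (per the IPMI BT interface spec): bits are
 * only ever changed by writing a 1 to them. CLR_RD_PTR/CLR_WR_PTR are
 * write-only strobes that reset the FIFO pointers, the *_ATN bits are
 * set by one side and cleared (by writing 1) by the other, H_BUSY
 * toggles on every write of 1, and B_BUSY is read-only from the host
 * side. This is why the helpers below write individual bits rather
 * than doing a read-modify-write.
 */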

/* Maximum size of the HW FIFO */
#define BT_FIFO_LEN		64

/* Default poll interval before interrupts are working */
#define BT_DEFAULT_POLL_MS	200

/*
 * Minimum size of an IPMI request/response including
 * mandatory headers.
 */
#define BT_MIN_REQ_LEN		3
#define BT_MIN_RESP_LEN		4

/* How long (in us) to poll for new IPMI data. */
#define POLL_TIMEOUT		10000

/* Maximum number of outstanding messages to allow in the queue. */
#define BT_MAX_QUEUE_LEN	10

/* How long (in seconds) before a message is timed out. */
#define BT_MSG_TIMEOUT		3

/* Maximum number of times to attempt sending a message before giving up. */
#define BT_MAX_RETRIES		1

/* Macro to enable printing BT message queue for debug */
#define BT_QUEUE_DEBUG		0

/* BT message logging macros */
#define _BT_Q_LOG(level, msg, fmt, args...) \
	do { if (msg) \
			prlog(level, "seq 0x%02x netfn 0x%02x cmd 0x%02x: " fmt "\n", \
			(msg)->seq, ((msg)->ipmi_msg.netfn >> 2), (msg)->ipmi_msg.cmd, ##args); \
		else \
			prlog(level, "seq 0x?? netfn 0x?? cmd 0x??: " fmt "\n", ##args); \
	} while (0)

#define BT_Q_ERR(msg, fmt, args...) \
	_BT_Q_LOG(PR_ERR, msg, fmt, ##args)

#define BT_Q_DBG(msg, fmt, args...) \
	_BT_Q_LOG(PR_DEBUG, msg, fmt, ##args)

#define BT_Q_TRACE(msg, fmt, args...) \
	_BT_Q_LOG(PR_TRACE, msg, fmt, ##args)

struct bt_msg {
	struct list_node link;
	unsigned long tb;
	uint8_t seq;
	uint8_t send_count;
	bool disable_retry;
	struct ipmi_msg ipmi_msg;
};

struct bt_caps {
	uint8_t num_requests;
	uint16_t input_buf_len;
	uint16_t output_buf_len;
	uint8_t msg_timeout;
	uint8_t max_retries;
};

struct bt {
	uint32_t base_addr;
	struct lock lock;
	struct list_head msgq;
	struct list_head msgq_sync; /* separate list for synchronous messages */
	struct timer poller;
	bool irq_ok;
	int queue_len;
	struct bt_caps caps;
};

static struct bt bt;
static struct bt_msg *inflight_bt_msg; /* Holds the in-flight message */

static int ipmi_seq;

static inline uint8_t bt_inb(uint32_t reg)
{
	return lpc_inb(bt.base_addr + reg);
}

static inline void bt_outb(uint8_t data, uint32_t reg)
{
	lpc_outb(data, bt.base_addr + reg);
}

static inline void bt_set_h_busy(bool value)
{
	uint8_t rval;

	rval = bt_inb(BT_CTRL);
	if (value != !!(rval & BT_CTRL_H_BUSY))
		bt_outb(BT_CTRL_H_BUSY, BT_CTRL);
}

static inline void bt_assert_h_busy(void)
{
	uint8_t rval;
	rval = bt_inb(BT_CTRL);
	assert(rval & BT_CTRL_H_BUSY);
}

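/*
 * Completion handler for the Get BT Interface Capabilities request.
 * The response bytes are: number of outstanding requests supported,
 * input buffer size, output buffer size, BMC request-to-response time
 * (in seconds) and recommended retries. The completion code has
 * already been consumed, so data[0] is the first capability byte.
 */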
static void get_bt_caps_complete(struct ipmi_msg *msg)
{
	/* Ignore errors, we'll fall back to using the defaults, no big deal */
	if (msg->data[0] == 0) {
		prlog(PR_DEBUG, "Got illegal BMC BT capability\n");
		goto out;
	}

	if (msg->data[1] != BT_FIFO_LEN) {
		prlog(PR_DEBUG, "Got an input buffer len (%u) cap which differs from the default\n",
				msg->data[1]);
	}

	if (msg->data[2] != BT_FIFO_LEN) {
		prlog(PR_DEBUG, "Got an output buffer len (%u) cap which differs from the default\n",
				msg->data[2]);
	}

	/*
	 * The IPMI spec says the buffer size values are
	 * "the largest value allowed in first byte",
	 * so we add one to what we get.
	 */
	bt.caps.num_requests = msg->data[0];
	bt.caps.input_buf_len = msg->data[1] + 1;
	bt.caps.output_buf_len = msg->data[2] + 1;
	bt.caps.msg_timeout = msg->data[3];
	bt.caps.max_retries = msg->data[4];
	prlog(PR_DEBUG, "BMC BT capabilities received:\n");
	prlog(PR_DEBUG, "buffer sizes: %d input %d output\n",
			bt.caps.input_buf_len, bt.caps.output_buf_len);
	prlog(PR_DEBUG, "number of requests: %d\n", bt.caps.num_requests);
	prlog(PR_DEBUG, "msg timeout: %d max retries: %d\n",
			bt.caps.msg_timeout, bt.caps.max_retries);

out:
	ipmi_free_msg(msg);
}

static void get_bt_caps(void)
{
	struct ipmi_msg *bmc_caps;

	/*
	 * We haven't sent a message yet, so now is a good time to ask
	 * the BMC for its capabilities.
	 */
	bmc_caps = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_GET_BT_CAPS,
			get_bt_caps_complete, NULL, NULL, 0, sizeof(struct bt_caps));
	if (!bmc_caps)
		prerror("Couldn't create BMC BT capabilities msg\n");

	if (bmc_caps && ipmi_queue_msg(bmc_caps))
		prerror("Couldn't enqueue request for BMC BT capabilities\n");

	/* Ignore errors, we'll fall back to using the defaults, no big deal */
}

static inline bool bt_idle(void)
{
	uint8_t bt_ctrl = bt_inb(BT_CTRL);

	return !(bt_ctrl & BT_CTRL_B_BUSY) && !(bt_ctrl & BT_CTRL_H2B_ATN);
}

/* Must be called with bt.lock held */
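/*
 * Completes the message back to the IPMI layer with IPMI_TIMEOUT_ERR,
 * dropping bt.lock around the ipmi_cmd_done() callback.
 */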
static void bt_msg_del(struct bt_msg *bt_msg)
{
	list_del(&bt_msg->link);
	bt.queue_len--;
	unlock(&bt.lock);
	ipmi_cmd_done(bt_msg->ipmi_msg.cmd,
		      IPMI_NETFN_RETURN_CODE(bt_msg->ipmi_msg.netfn),
		      IPMI_TIMEOUT_ERR, &bt_msg->ipmi_msg);
	lock(&bt.lock);
}

static void bt_init_interface(void)
{
	/* Clear interrupt condition & enable irq */
	bt_outb(BT_INTMASK_B2H_IRQ | BT_INTMASK_B2H_IRQEN, BT_INTMASK);

	/* Take care of a stale H_BUSY if any */
	bt_set_h_busy(false);
}

static void bt_reset_interface(void)
{
	bt_outb(BT_INTMASK_BMC_HWRST, BT_INTMASK);
	bt_init_interface();
}

/*
 * Try to send a message from the message queue. The caller must hold
 * bt.lock and ensure the message queue is not empty.
 */
static void bt_send_msg(struct bt_msg *bt_msg)
{
	int i;
	struct ipmi_msg *ipmi_msg;

	ipmi_msg = &bt_msg->ipmi_msg;

	/* Send the message */
	bt_outb(BT_CTRL_CLR_WR_PTR, BT_CTRL);

	/* Byte 1 - Length */
	bt_outb(ipmi_msg->req_size + BT_MIN_REQ_LEN, BT_HOST2BMC);

	/* Byte 2 - NetFn/LUN */
	bt_outb(ipmi_msg->netfn, BT_HOST2BMC);

	/* Byte 3 - Seq */
	bt_outb(bt_msg->seq, BT_HOST2BMC);

	/* Byte 4 - Cmd */
	bt_outb(ipmi_msg->cmd, BT_HOST2BMC);

	/* Byte 5:N - Data */
	for (i = 0; i < ipmi_msg->req_size; i++)
		bt_outb(ipmi_msg->data[i], BT_HOST2BMC);

	BT_Q_TRACE(bt_msg, "Message sent to BMC");
	bt_msg->send_count++;

	bt_outb(BT_CTRL_H2B_ATN, BT_CTRL);

	return;
}

static void bt_clear_fifo(void)
{
	int i;

	for (i = 0; i < bt.caps.input_buf_len; i++)
		bt_outb(0xff, BT_HOST2BMC);
}

static void bt_flush_msg(void)
{
	bt_assert_h_busy();
	bt_outb(BT_CTRL_B2H_ATN | BT_CTRL_CLR_RD_PTR | BT_CTRL_CLR_WR_PTR, BT_CTRL);
	bt_clear_fifo();
	/* Can't hurt to clear the write pointer again, just to be sure */
	bt_outb(BT_CTRL_CLR_WR_PTR, BT_CTRL);
	bt_set_h_busy(false);
}

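/*
 * Read a B2H response from the FIFO. The framing is:
 *   byte 1: length (NetFn/LUN + seq + cmd + cc + data)
 *   byte 2: NetFn/LUN
 *   byte 3: sequence number (echoed from the request)
 *   byte 4: command
 *   byte 5: completion code
 *   byte 6..N: data
 * The sequence number is matched against the in-flight message; a
 * response we aren't expecting is simply flushed.
 */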
static void bt_get_resp(void)
{
	int i;
	struct ipmi_msg *ipmi_msg;
	uint8_t resp_len, netfn, seq, cmd;
	uint8_t cc = IPMI_CC_NO_ERROR;

	/* Indicate to the BMC that we are busy */
	bt_set_h_busy(true);

	/* Clear B2H_ATN and read pointer */
	bt_outb(BT_CTRL_B2H_ATN, BT_CTRL);
	bt_outb(BT_CTRL_CLR_RD_PTR, BT_CTRL);

	/* Read the response */
	/* Byte 1 - Length (includes header size) */
	resp_len = bt_inb(BT_HOST2BMC) - BT_MIN_RESP_LEN;

	/* Byte 2 - NetFn/LUN */
	netfn = bt_inb(BT_HOST2BMC);

	/* Byte 3 - Seq */
	seq = bt_inb(BT_HOST2BMC);

	/* Byte 4 - Cmd */
	cmd = bt_inb(BT_HOST2BMC);

	/* Byte 5 - Completion Code */
	cc = bt_inb(BT_HOST2BMC);

	/* Find the corresponding message */
	if (inflight_bt_msg == NULL || inflight_bt_msg->seq != seq) {
		/* A response to a message we no longer care about. */
		prlog(PR_INFO, "Nobody cared about a response to a BT/IPMI message "
		       "(seq 0x%02x netfn 0x%02x cmd 0x%02x)\n", seq, (netfn >> 2), cmd);
		bt_flush_msg();
		return;
	}

	ipmi_msg = &inflight_bt_msg->ipmi_msg;

	/*
	 * Make sure we have enough room to store the response. As all values
	 * are unsigned we will also trigger this error if
	 * bt_inb(BT_HOST2BMC) < BT_MIN_RESP_LEN (which should never occur).
	 */
	if (resp_len > ipmi_msg->resp_size) {
		BT_Q_ERR(inflight_bt_msg, "Invalid resp_len %d", resp_len);
		resp_len = ipmi_msg->resp_size;
		cc = IPMI_ERR_MSG_TRUNCATED;
	}
	ipmi_msg->resp_size = resp_len;

	/* Byte 6:N - Data */
	for (i = 0; i < resp_len; i++)
		ipmi_msg->data[i] = bt_inb(BT_HOST2BMC);
	bt_set_h_busy(false);

	BT_Q_TRACE(inflight_bt_msg, "IPMI MSG done");

	list_del(&inflight_bt_msg->link);
	/* Ready to send next message */
	inflight_bt_msg = NULL;
	bt.queue_len--;
	unlock(&bt.lock);

	/* Call IPMI layer to finish processing the message. */
	ipmi_cmd_done(cmd, netfn, cc, ipmi_msg);
	lock(&bt.lock);

	return;
}

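/*
 * Time out the in-flight message if it has been waiting longer than
 * bt.caps.msg_timeout seconds, measured from when it reached the head
 * of the queue. If retries remain, either send the message now (if it
 * was never actually sent) or just re-raise H2B_ATN, since the request
 * data is still in the FIFO. Otherwise complete it as a timeout and
 * reset the interface.
 */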
static void bt_expire_old_msg(uint64_t tb)
{
	struct bt_msg *bt_msg = inflight_bt_msg;

	if (bt_msg && bt_msg->tb > 0 && !chip_quirk(QUIRK_SIMICS) &&
	    (tb_compare(tb, bt_msg->tb +
			secs_to_tb(bt.caps.msg_timeout)) == TB_AAFTERB)) {
		if (bt_msg->send_count <= bt.caps.max_retries &&
		    !bt_msg->disable_retry) {
			/* A message timeout is usually due to the BMC
			 * clearing the H2B_ATN flag without actually
			 * doing anything. The data will still be in the
			 * FIFO so just reset the flag. */
			BT_Q_ERR(bt_msg, "Retry sending message");

			/* A send_count of zero means the message timeout
			 * started but the message was never sent to the
			 * BMC because the interface wasn't free, so send
			 * it now.
			 */
			if (bt_msg->send_count == 0)
				bt_send_msg(bt_msg);
			else
				bt_outb(BT_CTRL_H2B_ATN, BT_CTRL);

			bt_msg->send_count++;
			bt_msg->tb = tb;
		} else {
			BT_Q_ERR(bt_msg, "Timeout sending message");
			bt_msg_del(bt_msg);

			/* Ready to send next message */
			inflight_bt_msg = NULL;

			/*
			 * Timing out a message is inherently racy as the BMC
			 * may start writing just as we decide to kill the
			 * message. Hopefully resetting the interface is
			 * sufficient to guard against such things.
			 */
			bt_reset_interface();
		}
	}
}

#if BT_QUEUE_DEBUG
static void print_debug_queue_info(void)
{
	struct bt_msg *msg;
	static bool printed;

	if (!list_empty(&bt.msgq_sync) || !list_empty(&bt.msgq)) {
		printed = false;
		prlog(PR_DEBUG, "-------- BT Sync Msg Queue -------\n");
		list_for_each(&bt.msgq_sync, msg, link) {
			BT_Q_DBG(msg, "[ sent %d ]", msg->send_count);
		}
		prlog(PR_DEBUG, "---------- BT Msg Queue ----------\n");
		list_for_each(&bt.msgq, msg, link) {
			BT_Q_DBG(msg, "[ sent %d ]", msg->send_count);
		}
		prlog(PR_DEBUG, "----------------------------------\n");
	} else if (!printed) {
		printed = true;
		prlog(PR_DEBUG, "------- BT Msg Queue Empty -------\n");
	}
}
#endif

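/*
 * Pick the next message to send (the synchronous queue has priority),
 * start its timeout clock once it reaches the head of the queue, and
 * send it if the interface is idle and it hasn't been sent already.
 * Always drops bt.lock before returning.
 */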
static void bt_send_and_unlock(void)
{
	/* Busy? */
	if (inflight_bt_msg)
		goto out_unlock;

	if (!lpc_ok())
		goto out_unlock;

	/* Synchronous messages get priority over normal messages */
	if (!list_empty(&bt.msgq_sync))
		inflight_bt_msg = list_top(&bt.msgq_sync, struct bt_msg, link);
	else if (!list_empty(&bt.msgq))
		inflight_bt_msg = list_top(&bt.msgq, struct bt_msg, link);
	else
		goto out_unlock;

	assert(inflight_bt_msg);
	/*
	 * Start the message timeout once it gets to the top
	 * of the queue. This ensures we time out messages even
	 * when the BT interface is broken, e.g. when the BMC is
	 * not responding to any IPMI messages.
	 */
	if (inflight_bt_msg->tb == 0)
		inflight_bt_msg->tb = mftb();

	/*
	 * Only send it if we haven't already.
	 * Timeouts and retries happen in bt_expire_old_msg()
	 * called from bt_poll().
	 */
	if (bt_idle() && inflight_bt_msg->send_count == 0)
		bt_send_msg(inflight_bt_msg);

out_unlock:
	unlock(&bt.lock);
}

static void bt_poll(struct timer *t __unused, void *data __unused,
		    uint64_t now)
{
	uint8_t bt_ctrl;

	/* Don't do anything if the LPC bus is offline */
	if (!lpc_ok())
		return;

	/*
	 * Take the lock and process any pending response, timeouts and
	 * queued messages.
	 */
	lock(&bt.lock);

#if BT_QUEUE_DEBUG
	print_debug_queue_info();
#endif

	bt_ctrl = bt_inb(BT_CTRL);

	/* Is there a response waiting for us? */
	if (bt_ctrl & BT_CTRL_B2H_ATN)
		bt_get_resp();

	bt_expire_old_msg(now);

	/* Check for sms_atn */
	if (bt_inb(BT_CTRL) & BT_CTRL_SMS_ATN) {
		bt_outb(BT_CTRL_SMS_ATN, BT_CTRL);
		unlock(&bt.lock);
		ipmi_sms_attention();
		lock(&bt.lock);
	}

	/*
	 * Send messages if we can. If the BMC was really quick we
	 * could loop back to the start and check for a response
	 * instead of unlocking, but testing shows the BMC isn't that
	 * fast so we will wait for the IRQ or a call to the pollers instead.
	 */
	bt_send_and_unlock();

	schedule_timer(&bt.poller,
		       bt.irq_ok ? TIMER_POLL : msecs_to_tb(BT_DEFAULT_POLL_MS));
}

static void bt_ipmi_poll(void)
{
	bt_poll(NULL, NULL, mftb());
}

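/*
 * Must be called with bt.lock held. Assigns the sequence number and
 * enforces BT_MAX_QUEUE_LEN by dropping (and completing with a timeout
 * error) a queued message when the queue grows too long.
 */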
static void bt_add_msg(struct bt_msg *bt_msg)
{
	bt_msg->tb = 0;
	bt_msg->seq = ipmi_seq++;
	bt_msg->send_count = 0;
	bt.queue_len++;
	if (bt.queue_len > BT_MAX_QUEUE_LEN) {
		/* Maximum queue length exceeded; drop a queued message to make room. */
		BT_Q_ERR(bt_msg, "Maximum queue length exceeded");
		/* First try to remove a message from the normal queue */
		if (!list_empty(&bt.msgq))
			bt_msg = list_tail(&bt.msgq, struct bt_msg, link);
		else if (!list_empty(&bt.msgq_sync))
			bt_msg = list_tail(&bt.msgq_sync, struct bt_msg, link);
		assert(bt_msg);
		BT_Q_ERR(bt_msg, "Removed from queue");
		bt_msg_del(bt_msg);
	}
}

/* Add message to synchronous message list */
static int bt_add_ipmi_msg_head(struct ipmi_msg *ipmi_msg)
{
	struct bt_msg *bt_msg = container_of(ipmi_msg, struct bt_msg, ipmi_msg);

	lock(&bt.lock);
	bt_add_msg(bt_msg);
	list_add_tail(&bt.msgq_sync, &bt_msg->link);
	bt_send_and_unlock();

	return 0;
}

static int bt_add_ipmi_msg(struct ipmi_msg *ipmi_msg)
{
	struct bt_msg *bt_msg = container_of(ipmi_msg, struct bt_msg, ipmi_msg);

	lock(&bt.lock);
	bt_add_msg(bt_msg);
	list_add_tail(&bt.msgq, &bt_msg->link);
	bt_send_and_unlock();

	return 0;
}

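/*
 * LPC SerIRQ handler. Acknowledge the B2H interrupt (while leaving it
 * enabled) and run the poller to pick up the pending response. Seeing
 * an interrupt also tells us the IRQ path works, so the poller can
 * back off to a background timer.
 */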
static void bt_irq(uint32_t chip_id __unused, uint32_t irq_mask __unused)
{
	uint8_t ireg;

	ireg = bt_inb(BT_INTMASK);

	bt.irq_ok = true;
	if (ireg & BT_INTMASK_B2H_IRQ) {
		bt_outb(BT_INTMASK_B2H_IRQ | BT_INTMASK_B2H_IRQEN, BT_INTMASK);
		bt_poll(NULL, NULL, mftb());
	}
}

/*
 * Allocate an ipmi message and bt container and return the ipmi
 * message struct. Allocates enough space for the request and response
 * data.
 */
static struct ipmi_msg *bt_alloc_ipmi_msg(size_t request_size, size_t response_size)
{
	struct bt_msg *bt_msg;

	bt_msg = zalloc(sizeof(struct bt_msg) + MAX(request_size, response_size));
	if (!bt_msg)
		return NULL;

	bt_msg->ipmi_msg.req_size = request_size;
	bt_msg->ipmi_msg.resp_size = response_size;
	bt_msg->ipmi_msg.data = (uint8_t *) (bt_msg + 1);

	return &bt_msg->ipmi_msg;
}

/*
 * Free a previously allocated ipmi message.
 */
static void bt_free_ipmi_msg(struct ipmi_msg *ipmi_msg)
{
	struct bt_msg *bt_msg = container_of(ipmi_msg, struct bt_msg, ipmi_msg);

	free(bt_msg);
}

/*
 * Do not resend IPMI messages to BMC.
 */
static void bt_disable_ipmi_msg_retry(struct ipmi_msg *ipmi_msg)
{
	struct bt_msg *bt_msg = container_of(ipmi_msg, struct bt_msg, ipmi_msg);

	bt_msg->disable_retry = true;
}

/*
 * Remove a message from the queue. The memory allocated for the ipmi message
 * will need to be freed by the caller with bt_free_ipmi_msg() as it will no
 * longer be in the queue of messages.
 */
static int bt_del_ipmi_msg(struct ipmi_msg *ipmi_msg)
{
	struct bt_msg *bt_msg = container_of(ipmi_msg, struct bt_msg, ipmi_msg);

	lock(&bt.lock);
	list_del(&bt_msg->link);
	bt.queue_len--;
	bt_send_and_unlock();
	return 0;
}

static struct ipmi_backend bt_backend = {
	.alloc_msg = bt_alloc_ipmi_msg,
	.free_msg = bt_free_ipmi_msg,
	.queue_msg = bt_add_ipmi_msg,
	.queue_msg_head = bt_add_ipmi_msg_head,
	.dequeue_msg = bt_del_ipmi_msg,
	.disable_retry = bt_disable_ipmi_msg_retry,
	.poll = bt_ipmi_poll,
};

static struct lpc_client bt_lpc_client = {
	.interrupt = bt_irq,
};

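/*
 * Probe the device tree for an "ipmi-bt" compatible node, read the LPC
 * IO base from "reg" and the SerIRQ from "interrupts", then register
 * the backend with the IPMI layer and start polling.
 */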
void bt_init(void)
{
	struct dt_node *n;
	const struct dt_property *prop;
	uint32_t irq;

	/* Set sane capability defaults */
	bt.caps.num_requests = 1;
	bt.caps.input_buf_len = BT_FIFO_LEN;
	bt.caps.output_buf_len = BT_FIFO_LEN;
	bt.caps.msg_timeout = BT_MSG_TIMEOUT;
	bt.caps.max_retries = BT_MAX_RETRIES;

	/* We support only one */
	n = dt_find_compatible_node(dt_root, NULL, "ipmi-bt");
	if (!n) {
		prerror("No BT device\n");
		return;
	}

	/* Get IO base */
	prop = dt_find_property(n, "reg");
	if (!prop) {
		prerror("Can't find reg property\n");
		return;
	}
	if (dt_property_get_cell(prop, 0) != OPAL_LPC_IO) {
		prerror("Only supports IO addresses\n");
		return;
	}
	bt.base_addr = dt_property_get_cell(prop, 1);
	init_timer(&bt.poller, bt_poll, NULL);

	bt_init_interface();
	init_lock(&bt.lock);

	/*
	 * The iBT interface comes up in the busy state until the daemon has
	 * initialised it.
	 */
	list_head_init(&bt.msgq);
	list_head_init(&bt.msgq_sync);
	inflight_bt_msg = NULL;
	bt.queue_len = 0;

	prlog(PR_INFO, "Interface initialized, IO 0x%04x\n", bt.base_addr);

	ipmi_register_backend(&bt_backend);

	/*
	 * We initially schedule the poller as a relatively fast timer, at
	 * least until the first interrupt arrives, at which point we turn
	 * it into a background poller.
	 */
	schedule_timer(&bt.poller, msecs_to_tb(BT_DEFAULT_POLL_MS));

	irq = dt_prop_get_u32(n, "interrupts");
	bt_lpc_client.interrupts = LPC_IRQ(irq);
	lpc_register_client(dt_get_chip_id(n), &bt_lpc_client,
			    IRQ_ATTR_TARGET_OPAL);

	/* Enqueue an IPMI message to ask the BMC about its BT capabilities */
	get_bt_caps();

	prlog(PR_DEBUG, "Using LPC IRQ %d\n", irq);
}