1 // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
2 /*
3 * Real Time Clock (RTC) attached to FSP
4 *
5 * Copyright 2013-2017 IBM Corp.
6 */
7
8 #include <skiboot.h>
9 #include <fsp.h>
10 #include <lock.h>
11 #include <timebase.h>
12 #include <time.h>
13 #include <time-utils.h>
14 #include <opal-api.h>
15 #include <opal-msg.h>
16 #include <errorlog.h>
17 #include <device.h>
18
19 /*
20 * Note on how those operate:
21 *
22 * Because the RTC calls can be pretty slow, these functions will shoot
23 * an asynchronous request to the FSP (if none is already pending)
24 *
25 * The requests will return OPAL_BUSY_EVENT as long as the event has
26 * not been completed.
27 *
28 * WARNING: An attempt at doing an RTC write while one is already pending
29 * will simply ignore the new arguments and continue returning
30 * OPAL_BUSY_EVENT. This is to be compatible with existing Linux code.
31 *
32 * Completion of the request will result in an event OPAL_EVENT_RTC
33 * being signaled, which will remain raised until a corresponding call
34 * to opal_rtc_read() or opal_rtc_write() finally returns OPAL_SUCCESS,
35 * at which point the operation is complete and the event cleared.
36 *
 * If we end up taking longer than rtc_read_timeout_ms milliseconds waiting
38 * for the response from a read request, we simply return a cached value (plus
39 * an offset calculated from the timebase. When the read request finally
40 * returns, we update our cache value accordingly.
41 *
 * There are two separate sets of state for reads and writes. If both are
43 * attempted at the same time, the event bit will remain set as long as either
44 * of the two has a pending event to signal.
45 */
46
47 #include <rtc.h>
48
/* All of the below state is protected by rtc_lock.
 * It should be held for the shortest amount of time possible.
 * Certainly not across calls to FSP.
 */
static struct lock rtc_lock;

/* Health of the cached time-of-day value */
static enum {
	RTC_TOD_VALID,
	RTC_TOD_INVALID,
	RTC_TOD_PERMANENT_ERROR,
} rtc_tod_state = RTC_TOD_INVALID;

/* State machine for getting an RTC request.
 * RTC_{READ/WRITE}_NO_REQUEST -> RTC_{READ/WRITE}_PENDING_REQUEST (one in flight)
 * RTC_{READ/WRITE}_PENDING_REQUEST -> RTC_{READ/WRITE}_REQUEST_AVAILABLE,
 * when FSP responds
 * RTC_{READ/WRITE}_REQUEST_AVAILABLE -> RTC_{READ/WRITE}_NO_REQUEST,
 * when OS retrieves it
 */
static enum {
	RTC_READ_NO_REQUEST,
	RTC_READ_PENDING_REQUEST,
	RTC_READ_REQUEST_AVAILABLE,
} rtc_read_request_state = RTC_READ_NO_REQUEST;

static enum {
	RTC_WRITE_NO_REQUEST,
	RTC_WRITE_PENDING_REQUEST,
	RTC_WRITE_REQUEST_AVAILABLE,
} rtc_write_request_state = RTC_WRITE_NO_REQUEST;

/* Set when the TOD cache is updated while the FSP is in reset/reload;
 * the cached value is flushed back to the FSP once it returns. */
static bool rtc_tod_cache_dirty = false;

/* Per-request context for the asynchronous TPO (timed power-on) calls */
struct opal_tpo_data {
	uint64_t tpo_async_token;	/* token echoed back to the OS on completion */
	__be32 *year_month_day;		/* caller buffer for the date (reads only) */
	__be32 *hour_min;		/* caller buffer for the time (reads only) */
};

/* Timebase value when we last initiated a RTC read request */
static unsigned long read_req_tb;

/* If a RTC read takes longer than this, we return a value generated
 * from the cache + timebase */
static const int rtc_read_timeout_ms = 1500;

DEFINE_LOG_ENTRY(OPAL_RC_RTC_TOD, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
		 OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_RTC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
		 OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA);
100
fsp_tpo_req_complete(struct fsp_msg * read_resp)101 static void fsp_tpo_req_complete(struct fsp_msg *read_resp)
102 {
103 struct opal_tpo_data *attr = read_resp->user_data;
104 int val;
105 int rc;
106
107 val = (read_resp->resp->word1 >> 8) & 0xff;
108 switch (val) {
109 case FSP_STATUS_TOD_RESET:
110 log_simple_error(&e_info(OPAL_RC_RTC_TOD),
111 "RTC TPO in invalid state\n");
112 rc = OPAL_INTERNAL_ERROR;
113 break;
114
115 case FSP_STATUS_TOD_PERMANENT_ERROR:
116 log_simple_error(&e_info(OPAL_RC_RTC_TOD),
117 "RTC TPO in permanent error state\n");
118 rc = OPAL_INTERNAL_ERROR;
119 break;
120 case FSP_STATUS_INVALID_DATA:
121 log_simple_error(&e_info(OPAL_RC_RTC_TOD),
122 "RTC TPO: Invalid data\n");
123 rc = OPAL_PARAMETER;
124 break;
125 case FSP_STATUS_SUCCESS:
126 /* Save the read TPO value in our cache */
127 if (attr->year_month_day)
128 *attr->year_month_day = cpu_to_be32(fsp_msg_get_data_word(read_resp->resp, 0));
129 if (attr->hour_min)
130 *attr->hour_min = cpu_to_be32(fsp_msg_get_data_word(read_resp->resp, 1));
131 rc = OPAL_SUCCESS;
132 break;
133
134 default:
135 log_simple_error(&e_info(OPAL_RC_RTC_TOD),
136 "TPO read failed: %d\n", val);
137 rc = OPAL_INTERNAL_ERROR;
138 break;
139 }
140 opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
141 cpu_to_be64(attr->tpo_async_token),
142 cpu_to_be64(rc));
143 free(attr);
144 fsp_freemsg(read_resp);
145 }
146
/*
 * Decode the FSP response to a TOD read and refresh the cached time.
 *
 * On success the two returned data words (date, time) are converted and
 * stored in the RTC cache; on error the cache is flagged invalid or
 * permanently bad. Always advances the read state machine to
 * REQUEST_AVAILABLE so the next opal_rtc_read() call can collect the
 * result. Caller must hold rtc_lock with a read request pending.
 */
static void fsp_rtc_process_read(struct fsp_msg *read_resp)
{
	int val = (read_resp->word1 >> 8) & 0xff;	/* FSP status byte */
	struct tm tm;

	assert(lock_held_by_me(&rtc_lock));

	assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST);

	switch (val) {
	case FSP_STATUS_TOD_RESET:
		log_simple_error(&e_info(OPAL_RC_RTC_TOD),
				"RTC TOD in invalid state\n");
		rtc_tod_state = RTC_TOD_INVALID;
		break;

	case FSP_STATUS_TOD_PERMANENT_ERROR:
		log_simple_error(&e_info(OPAL_RC_RTC_TOD),
			"RTC TOD in permanent error state\n");
		rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
		break;

	case FSP_STATUS_SUCCESS:
		/* Save the read RTC value in our cache */
		rtc_tod_state = RTC_TOD_VALID;
		/* Word 0 is the date, word 1 the packed time (upper 32 bits) */
		datetime_to_tm(fsp_msg_get_data_word(read_resp, 0),
			       (u64)fsp_msg_get_data_word(read_resp, 1) << 32, &tm);
		rtc_cache_update(&tm);
		prlog(PR_TRACE, "FSP-RTC Got time: %d-%d-%d %d:%d:%d\n",
		      tm.tm_year, tm.tm_mon, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
		break;

	default:
		log_simple_error(&e_info(OPAL_RC_RTC_TOD),
				"RTC TOD read failed: %d\n", val);
		rtc_tod_state = RTC_TOD_INVALID;
	}
	rtc_read_request_state = RTC_READ_REQUEST_AVAILABLE;
}
187
/*
 * Recompute the OPAL_EVENT_RTC bit for the side selected by
 * @read_write (true = read state machine, false = write state
 * machine). The event is raised while that side has a completed
 * request waiting for the OS to collect. Caller must hold rtc_lock.
 */
static void opal_rtc_eval_events(bool read_write)
{
	bool pending;

	assert(lock_held_by_me(&rtc_lock));

	if (read_write)
		pending = rtc_read_request_state == RTC_READ_REQUEST_AVAILABLE;
	else
		pending = rtc_write_request_state == RTC_WRITE_REQUEST_AVAILABLE;

	opal_update_pending_evt(OPAL_EVENT_RTC, pending ? OPAL_EVENT_RTC : 0);
}
203
fsp_rtc_req_complete(struct fsp_msg * msg)204 static void fsp_rtc_req_complete(struct fsp_msg *msg)
205 {
206 lock(&rtc_lock);
207 prlog(PR_TRACE, "RTC completion %p\n", msg);
208
209 if (fsp_msg_cmd(msg) == (FSP_CMD_READ_TOD & 0xffffff)) {
210 fsp_rtc_process_read(msg->resp);
211 opal_rtc_eval_events(true);
212 } else {
213 assert(rtc_write_request_state == RTC_WRITE_PENDING_REQUEST);
214 rtc_write_request_state = RTC_WRITE_REQUEST_AVAILABLE;
215 opal_rtc_eval_events(false);
216 }
217
218 unlock(&rtc_lock);
219 fsp_freemsg(msg);
220 }
221
/*
 * Fire off an asynchronous TOD read to the FSP.
 *
 * Moves the read state machine to PENDING_REQUEST and records the
 * current timebase so opal_rtc_read() can detect a timed-out read.
 * Caller must hold rtc_lock and there must be no read in flight.
 *
 * Returns OPAL_BUSY_EVENT on success (the OS should poll again once
 * OPAL_EVENT_RTC fires) or OPAL_INTERNAL_ERROR if the request could
 * not be allocated or queued.
 */
static int64_t fsp_rtc_send_read_request(void)
{
	struct fsp_msg *msg;
	int rc;

	assert(lock_held_by_me(&rtc_lock));
	assert(rtc_read_request_state == RTC_READ_NO_REQUEST);

	msg = fsp_mkmsg(FSP_CMD_READ_TOD, 0);
	if (!msg) {
		log_simple_error(&e_info(OPAL_RC_RTC_READ),
			"RTC: failed to allocate read message\n");
		return OPAL_INTERNAL_ERROR;
	}

	rc = fsp_queue_msg(msg, fsp_rtc_req_complete);
	if (rc) {
		fsp_freemsg(msg);
		log_simple_error(&e_info(OPAL_RC_RTC_READ),
			"RTC: failed to queue read message: %d\n", rc);
		return OPAL_INTERNAL_ERROR;
	}

	rtc_read_request_state = RTC_READ_PENDING_REQUEST;

	/* Timestamp for the opal_rtc_read() timeout fallback */
	read_req_tb = mftb();

	return OPAL_BUSY_EVENT;
}
251
/*
 * OPAL_RTC_READ implementation.
 *
 * Asynchronous under the hood: the first call kicks off an FSP read and
 * returns OPAL_BUSY_EVENT; once the response arrives (signaled via
 * OPAL_EVENT_RTC) a subsequent call collects the cached date/time. If
 * the FSP takes longer than rtc_read_timeout_ms, we answer from the
 * cache while leaving the request pending so it still refreshes the
 * cache later. The big-endian results are stored through @__ymd and
 * @__hmsm only when OPAL_SUCCESS is returned.
 */
static int64_t fsp_opal_rtc_read(__be32 *__ymd, __be64 *__hmsm)
{
	int64_t rc;
	uint32_t ymd;
	uint64_t hmsm;

	if (!__ymd || !__hmsm)
		return OPAL_PARAMETER;

	lock(&rtc_lock);

	if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
		rc = OPAL_HARDWARE;
		goto out;
	}

	/* During R/R of FSP, read cached TOD */
	if (fsp_in_rr()) {
		if (rtc_tod_state == RTC_TOD_VALID) {
			rtc_cache_get_datetime(&ymd, &hmsm);
			rc = OPAL_SUCCESS;
		} else {
			rc = OPAL_INTERNAL_ERROR;
		}
		goto out;
	}

	/* If we don't have a read pending already, fire off a request and
	 * return */
	if (rtc_read_request_state == RTC_READ_NO_REQUEST) {
		prlog(PR_TRACE, "Sending new RTC read request\n");
		rc = fsp_rtc_send_read_request();
	/* If our pending read is done, clear events and return the time
	 * from the cache */
	} else if (rtc_read_request_state == RTC_READ_REQUEST_AVAILABLE) {
		prlog(PR_TRACE, "RTC read complete, state %d\n", rtc_tod_state);
		rtc_read_request_state = RTC_READ_NO_REQUEST;

		opal_rtc_eval_events(true);

		if (rtc_tod_state == RTC_TOD_VALID) {
			rtc_cache_get_datetime(&ymd, &hmsm);
			prlog(PR_TRACE,"FSP-RTC Cached datetime: %x %llx\n",
			      ymd, hmsm);
			rc = OPAL_SUCCESS;
		} else {
			rc = OPAL_INTERNAL_ERROR;
		}

	/* Timeout: return our cached value (updated from tb), but leave the
	 * read request pending so it will update the cache later */
	} else if (mftb() > read_req_tb + msecs_to_tb(rtc_read_timeout_ms)) {
		prlog(PR_TRACE, "RTC read timed out\n");

		if (rtc_tod_state == RTC_TOD_VALID) {
			rtc_cache_get_datetime(&ymd, &hmsm);
			rc = OPAL_SUCCESS;
		} else {
			rc = OPAL_INTERNAL_ERROR;
		}
	/* Otherwise, we're still waiting on the read to complete */
	} else {
		assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST);
		rc = OPAL_BUSY_EVENT;
	}
out:
	unlock(&rtc_lock);

	/* Only write through the caller's pointers on success */
	if (rc == OPAL_SUCCESS) {
		*__ymd = cpu_to_be32(ymd);
		*__hmsm = cpu_to_be64(hmsm);
	}

	return rc;
}
327
fsp_rtc_send_write_request(uint32_t year_month_day,uint64_t hour_minute_second_millisecond)328 static int64_t fsp_rtc_send_write_request(uint32_t year_month_day,
329 uint64_t hour_minute_second_millisecond)
330 {
331 struct fsp_msg *msg;
332 uint32_t w0, w1, w2;
333
334 assert(lock_held_by_me(&rtc_lock));
335 assert(rtc_write_request_state == RTC_WRITE_NO_REQUEST);
336
337 /* Create a request and send it. Just like for read, we ignore
338 * the "millisecond" field which is probably supposed to be
339 * microseconds and which Linux ignores as well anyway
340 */
341 w0 = year_month_day;
342 w1 = (hour_minute_second_millisecond >> 32) & 0xffffff00;
343 w2 = 0;
344
345 msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, w0, w1, w2);
346 if (!msg) {
347 prlog(PR_TRACE, " -> allocation failed !\n");
348 return OPAL_INTERNAL_ERROR;
349 }
350 prlog(PR_TRACE, " -> req at %p\n", msg);
351
352 if (fsp_queue_msg(msg, fsp_rtc_req_complete)) {
353 prlog(PR_TRACE, " -> queueing failed !\n");
354 fsp_freemsg(msg);
355 return OPAL_INTERNAL_ERROR;
356 }
357
358 rtc_write_request_state = RTC_WRITE_PENDING_REQUEST;
359
360 return OPAL_BUSY_EVENT;
361 }
362
/*
 * OPAL_RTC_WRITE implementation.
 *
 * Mirrors the read path's async state machine: the first call queues a
 * write and returns OPAL_BUSY_EVENT; once the FSP acks (OPAL_EVENT_RTC
 * raised) a later call returns OPAL_SUCCESS and clears the event.
 * During FSP reset/reload, the value is only stored in the local cache
 * and marked dirty for flushing once the FSP is back.
 */
static int64_t fsp_opal_rtc_write(uint32_t year_month_day,
				  uint64_t hour_minute_second_millisecond)
{
	int rc;
	struct tm tm;

	lock(&rtc_lock);
	if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
		rc = OPAL_HARDWARE;
		goto out;
	}

	/* FSP is away: update the cache and flush it later (see
	 * rtc_flush_cached_tod() from the R/R handler) */
	if (fsp_in_rr()) {
		datetime_to_tm(year_month_day,
			       hour_minute_second_millisecond, &tm);
		rtc_cache_update(&tm);
		rtc_tod_cache_dirty = true;
		rc = OPAL_SUCCESS;
		goto out;
	}

	if (rtc_write_request_state == RTC_WRITE_NO_REQUEST) {
		prlog(PR_TRACE, "Sending new RTC write request\n");
		rc = fsp_rtc_send_write_request(year_month_day,
					hour_minute_second_millisecond);
	} else if (rtc_write_request_state == RTC_WRITE_PENDING_REQUEST) {
		/* Per the WARNING above: new arguments are dropped while a
		 * write is already in flight */
		rc = OPAL_BUSY_EVENT;
	} else {
		assert(rtc_write_request_state == RTC_WRITE_REQUEST_AVAILABLE);
		rtc_write_request_state = RTC_WRITE_NO_REQUEST;

		opal_rtc_eval_events(false);
		rc = OPAL_SUCCESS;
	}

out:
	unlock(&rtc_lock);
	return rc;
}
402
403 /* Set timed power on values to fsp */
fsp_opal_tpo_write(uint64_t async_token,uint32_t y_m_d,uint32_t hr_min)404 static int64_t fsp_opal_tpo_write(uint64_t async_token, uint32_t y_m_d,
405 uint32_t hr_min)
406 {
407 static struct opal_tpo_data *attr;
408 struct fsp_msg *msg;
409
410 if (!fsp_present())
411 return OPAL_HARDWARE;
412
413 attr = zalloc(sizeof(struct opal_tpo_data));
414 if (!attr)
415 return OPAL_NO_MEM;
416
417 /* Create a request and send it.*/
418 attr->tpo_async_token = async_token;
419
420 /* check if this is a disable tpo request */
421 if (y_m_d == 0 && hr_min == 0) {
422 prlog(PR_TRACE, "Sending TPO disable request...\n");
423 msg = fsp_mkmsg(FSP_CMD_TPO_DISABLE, 0);
424 } else {
425 prlog(PR_TRACE, "Sending TPO write request...\n");
426 msg = fsp_mkmsg(FSP_CMD_TPO_WRITE, 2, y_m_d, hr_min);
427 }
428
429 if (!msg) {
430 prerror("TPO: Failed to create message for WRITE to FSP\n");
431 free(attr);
432 return OPAL_INTERNAL_ERROR;
433 }
434 msg->user_data = attr;
435 if (fsp_queue_msg(msg, fsp_tpo_req_complete)) {
436 free(attr);
437 fsp_freemsg(msg);
438 return OPAL_INTERNAL_ERROR;
439 }
440 return OPAL_ASYNC_COMPLETION;
441 }
442
443 /* Read Timed power on (TPO) from FSP */
fsp_opal_tpo_read(uint64_t async_token,__be32 * y_m_d,__be32 * hr_min)444 static int64_t fsp_opal_tpo_read(uint64_t async_token, __be32 *y_m_d,
445 __be32 *hr_min)
446 {
447 static struct opal_tpo_data *attr;
448 struct fsp_msg *msg;
449 int64_t rc;
450
451 if (!fsp_present())
452 return OPAL_HARDWARE;
453
454 if (!y_m_d || !hr_min)
455 return OPAL_PARAMETER;
456
457 attr = zalloc(sizeof(*attr));
458 if (!attr)
459 return OPAL_NO_MEM;
460
461 /* Send read requet to FSP */
462 attr->tpo_async_token = async_token;
463 attr->year_month_day = y_m_d;
464 attr->hour_min = hr_min;
465
466 prlog(PR_TRACE, "Sending new TPO read request\n");
467 msg = fsp_mkmsg(FSP_CMD_TPO_READ, 0);
468 if (!msg) {
469 log_simple_error(&e_info(OPAL_RC_RTC_READ),
470 "TPO: failed to allocate read message\n");
471 free(attr);
472 return OPAL_INTERNAL_ERROR;
473 }
474 msg->user_data = attr;
475 rc = fsp_queue_msg(msg, fsp_tpo_req_complete);
476 if (rc) {
477 free(attr);
478 fsp_freemsg(msg);
479 log_simple_error(&e_info(OPAL_RC_RTC_READ),
480 "TPO: failed to queue read message: %lld\n", rc);
481 return OPAL_INTERNAL_ERROR;
482 }
483 return OPAL_ASYNC_COMPLETION;
484 }
485
rtc_flush_cached_tod(void)486 static void rtc_flush_cached_tod(void)
487 {
488 struct fsp_msg *msg;
489 uint64_t h_m_s_m;
490 uint32_t y_m_d;
491
492 if (rtc_cache_get_datetime(&y_m_d, &h_m_s_m))
493 return;
494 msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, y_m_d,
495 (h_m_s_m >> 32) & 0xffffff00, 0);
496 if (!msg) {
497 prerror("TPO: %s : Failed to allocate write TOD message\n",
498 __func__);
499 return;
500 }
501 if (fsp_queue_msg(msg, fsp_freemsg)) {
502 fsp_freemsg(msg);
503 prerror("TPO: %s : Failed to queue WRITE_TOD command\n",
504 __func__);
505 return;
506 }
507 }
508
fsp_rtc_msg_rr(u32 cmd_sub_mod,struct fsp_msg * msg)509 static bool fsp_rtc_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
510 {
511
512 int rc = false;
513 assert(msg == NULL);
514
515 switch (cmd_sub_mod) {
516 case FSP_RESET_START:
517 rc = true;
518 break;
519 case FSP_RELOAD_COMPLETE:
520 lock(&rtc_lock);
521 if (rtc_tod_cache_dirty) {
522 rtc_flush_cached_tod();
523 rtc_tod_cache_dirty = false;
524 }
525 unlock(&rtc_lock);
526 rc = true;
527 break;
528 }
529
530 return rc;
531 }
532
/* FSP client used to receive reset/reload notifications */
static struct fsp_client fsp_rtc_client_rr = {
	.message = fsp_rtc_msg_rr,
};
536
/*
 * Register the FSP-backed RTC: wire up the OPAL RTC/TPO calls, create
 * the "ibm,opal-rtc" device-tree node, register for FSP reset/reload
 * events and kick off an initial asynchronous TOD read to warm the
 * cache. Without an FSP the RTC is marked permanently unavailable.
 */
void fsp_rtc_init(void)
{
	struct dt_node *np;

	if (!fsp_present()) {
		rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
		return;
	}

	opal_register(OPAL_RTC_READ, fsp_opal_rtc_read, 2);
	opal_register(OPAL_RTC_WRITE, fsp_opal_rtc_write, 2);
	opal_register(OPAL_WRITE_TPO, fsp_opal_tpo_write, 3);
	opal_register(OPAL_READ_TPO, fsp_opal_tpo_read, 3);

	np = dt_new(opal_node, "rtc");
	dt_add_property_strings(np, "compatible", "ibm,opal-rtc");
	dt_add_property(np, "has-tpo", NULL, 0);

	/* Register for the reset/reload event */
	fsp_register_client(&fsp_rtc_client_rr, FSP_MCLASS_RR_EVENT);

	prlog(PR_TRACE, "Getting initial RTC TOD\n");

	/* We don't wait for RTC response and this is actually okay as
	 * any OPAL callers will wait correctly and if we ever have
	 * internal users then they should check the state properly
	 */
	lock(&rtc_lock);
	fsp_rtc_send_read_request();
	unlock(&rtc_lock);
}
568