1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Texas Instruments System Control Interface Protocol Driver
4  * Based on drivers/firmware/ti_sci.c from Linux.
5  *
6  * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7  *	Lokesh Vutla <lokeshvutla@ti.com>
8  */
9 
10 #include <common.h>
11 #include <dm.h>
12 #include <errno.h>
13 #include <log.h>
14 #include <mailbox.h>
15 #include <malloc.h>
16 #include <dm/device.h>
17 #include <dm/device_compat.h>
18 #include <dm/devres.h>
19 #include <linux/bitops.h>
20 #include <linux/compat.h>
21 #include <linux/err.h>
22 #include <linux/soc/ti/k3-sec-proxy.h>
23 #include <linux/soc/ti/ti_sci_protocol.h>
24 
25 #include "ti_sci.h"
26 
27 /* List of all TI SCI devices active in system */
28 static LIST_HEAD(ti_sci_list);
29 
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message. Note: the mailbox driver repoints
 *		@tx_message.buf at the received reply during
 *		ti_sci_get_response(), so this also serves as the
 *		receive descriptor.
 * @rx_len:	Receive message length
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
39 
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Purely a static ID-to-ID mapping entry; holds no runtime state.
 *
 * Note: This is used only as a work around for using RM range apis
 *	for AM654 SoC. For future SoCs dev_id will be used as type
 *	for RM range APIs. In order to maintain ABI backward compatibility
 *	type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};
55 
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds);
 *			used by ti_sci_get_response() (converted to us)
 * @max_msgs: Maximum number of messages that can be pending
 *		  simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled;
 *		  bounds both tx and rx sizes in ti_sci_setup_one_xfer()
 *		  and sizes the stack buffer in ti_sci_do_xfer()
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
70 
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel (not used within this file —
 *		 presumably for asynchronous notifications; confirm in probe)
 * @xfer:	xfer info
 * @list:	list head
 * @dev_list:	List of ti_sci_exclusive_dev entries tracking devices
 *		requested with exclusive access
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
98 
/**
 * struct ti_sci_exclusive_dev - Accounting entry for an exclusively
 *				 requested device
 * @id:		TISCI device identifier
 * @count:	Number of outstanding exclusive get requests for this device
 * @list:	Link in the owning ti_sci_info's @dev_list
 */
struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};
104 
/* Resolve the driver-private ti_sci_info from an embedded ti_sci_handle */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
106 
107 /**
108  * ti_sci_setup_one_xfer() - Setup one message type
109  * @info:	Pointer to SCI entity information
110  * @msg_type:	Message type
111  * @msg_flags:	Flag to set for the message
112  * @buf:	Buffer to be send to mailbox channel
113  * @tx_message_size: transmit message size
114  * @rx_message_size: receive message size. may be set to zero for send-only
115  *		     transactions.
116  *
117  * Helper function which is used by various command functions that are
118  * exposed to clients of this driver for allocating a message traffic event.
119  *
120  * Return: Corresponding ti_sci_xfer pointer if all went fine,
121  *	   else appropriate error pointer.
122  */
ti_sci_setup_one_xfer(struct ti_sci_info * info,u16 msg_type,u32 msg_flags,u32 * buf,size_t tx_message_size,size_t rx_message_size)123 static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
124 						 u16 msg_type, u32 msg_flags,
125 						 u32 *buf,
126 						 size_t tx_message_size,
127 						 size_t rx_message_size)
128 {
129 	struct ti_sci_xfer *xfer = &info->xfer;
130 	struct ti_sci_msg_hdr *hdr;
131 
132 	/* Ensure we have sane transfer sizes */
133 	if (rx_message_size > info->desc->max_msg_size ||
134 	    tx_message_size > info->desc->max_msg_size ||
135 	    (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
136 	    tx_message_size < sizeof(*hdr))
137 		return ERR_PTR(-ERANGE);
138 
139 	info->seq = ~info->seq;
140 	xfer->tx_message.buf = buf;
141 	xfer->tx_message.len = tx_message_size;
142 	xfer->rx_len = (u8)rx_message_size;
143 
144 	hdr = (struct ti_sci_msg_hdr *)buf;
145 	hdr->seq = info->seq;
146 	hdr->type = msg_type;
147 	hdr->host = info->host_id;
148 	hdr->flags = msg_flags;
149 
150 	return xfer;
151 }
152 
/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_get_response(struct ti_sci_info *info,
				      struct ti_sci_xfer *xfer,
				      struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response (timeout is ms converted to us) */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	/*
	 * On secure threads the payload is preceded by a secure header;
	 * advance msg->buf past it so the generic header parsing below
	 * sees the TISCI message proper.
	 */
	if (info->is_secure) {
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/*
	 * Sanity check for message response.
	 * NOTE(review): ret is 0 at this point, so a sequence mismatch is
	 * only logged at debug level and still reported as success —
	 * confirm this is intentional.
	 */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		return ret;
	}

	/* An oversized reply cannot be represented; treat as an error */
	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	/* A short reply is logged but deliberately not treated as fatal */
	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}
209 
210 /**
211  * ti_sci_do_xfer() - Do one transfer
212  * @info:	Pointer to SCI entity information
213  * @xfer:	Transfer to initiate and wait for response
214  *
215  * Return: 0 if all went fine, else return appropriate error.
216  */
ti_sci_do_xfer(struct ti_sci_info * info,struct ti_sci_xfer * xfer)217 static inline int ti_sci_do_xfer(struct ti_sci_info *info,
218 				 struct ti_sci_xfer *xfer)
219 {
220 	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
221 	u8 secure_buf[info->desc->max_msg_size];
222 	struct ti_sci_secure_msg_hdr secure_hdr;
223 	int ret;
224 
225 	if (info->is_secure) {
226 		/* ToDo: get checksum of the entire message */
227 		secure_hdr.checksum = 0;
228 		secure_hdr.reserved = 0;
229 		memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
230 		       xfer->tx_message.len);
231 
232 		xfer->tx_message.buf = (u32 *)secure_buf;
233 		xfer->tx_message.len += sizeof(secure_hdr);
234 
235 		if (xfer->rx_len)
236 			xfer->rx_len += sizeof(secure_hdr);
237 	}
238 
239 	/* Send the message */
240 	ret = mbox_send(&info->chan_tx, msg);
241 	if (ret) {
242 		dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
243 			__func__, ret);
244 		return ret;
245 	}
246 
247 	/* Get response if requested */
248 	if (xfer->rx_len)
249 		ret = ti_sci_get_response(info, xfer, &info->chan_rx);
250 
251 	return ret;
252 }
253 
254 /**
255  * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
256  * @handle:	pointer to TI SCI handle
257  *
258  * Updates the SCI information in the internal data structure.
259  *
260  * Return: 0 if all went fine, else return appropriate error.
261  */
ti_sci_cmd_get_revision(struct ti_sci_handle * handle)262 static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
263 {
264 	struct ti_sci_msg_resp_version *rev_info;
265 	struct ti_sci_version_info *ver;
266 	struct ti_sci_msg_hdr hdr;
267 	struct ti_sci_info *info;
268 	struct ti_sci_xfer *xfer;
269 	int ret;
270 
271 	if (IS_ERR(handle))
272 		return PTR_ERR(handle);
273 	if (!handle)
274 		return -EINVAL;
275 
276 	info = handle_to_ti_sci_info(handle);
277 
278 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
279 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
280 				     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
281 				     sizeof(*rev_info));
282 	if (IS_ERR(xfer)) {
283 		ret = PTR_ERR(xfer);
284 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
285 		return ret;
286 	}
287 
288 	ret = ti_sci_do_xfer(info, xfer);
289 	if (ret) {
290 		dev_err(info->dev, "Mbox communication fail %d\n", ret);
291 		return ret;
292 	}
293 
294 	rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
295 
296 	ver = &handle->version;
297 	ver->abi_major = rev_info->abi_major;
298 	ver->abi_minor = rev_info->abi_minor;
299 	ver->firmware_revision = rev_info->firmware_revision;
300 	strncpy(ver->firmware_description, rev_info->firmware_description,
301 		sizeof(ver->firmware_description));
302 
303 	return 0;
304 }
305 
306 /**
307  * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
308  * @r:	pointer to response buffer
309  *
310  * Return: true if the response was an ACK, else returns false.
311  */
ti_sci_is_response_ack(void * r)312 static inline bool ti_sci_is_response_ack(void *r)
313 {
314 	struct ti_sci_msg_hdr *hdr = r;
315 
316 	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
317 }
318 
319 /**
320  * cmd_set_board_config_using_msg() - Common command to send board configuration
321  *                                    message
322  * @handle:	pointer to TI SCI handle
323  * @msg_type:	One of the TISCI message types to set board configuration
324  * @addr:	Address where the board config structure is located
325  * @size:	Size of the board config structure
326  *
327  * Return: 0 if all went well, else returns appropriate error value.
328  */
cmd_set_board_config_using_msg(const struct ti_sci_handle * handle,u16 msg_type,u64 addr,u32 size)329 static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
330 					  u16 msg_type, u64 addr, u32 size)
331 {
332 	struct ti_sci_msg_board_config req;
333 	struct ti_sci_msg_hdr *resp;
334 	struct ti_sci_info *info;
335 	struct ti_sci_xfer *xfer;
336 	int ret = 0;
337 
338 	if (IS_ERR(handle))
339 		return PTR_ERR(handle);
340 	if (!handle)
341 		return -EINVAL;
342 
343 	info = handle_to_ti_sci_info(handle);
344 
345 	xfer = ti_sci_setup_one_xfer(info, msg_type,
346 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
347 				     (u32 *)&req, sizeof(req), sizeof(*resp));
348 	if (IS_ERR(xfer)) {
349 		ret = PTR_ERR(xfer);
350 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
351 		return ret;
352 	}
353 	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
354 	req.boardcfgp_low = addr & 0xffffffff;
355 	req.boardcfg_size = size;
356 
357 	ret = ti_sci_do_xfer(info, xfer);
358 	if (ret) {
359 		dev_err(info->dev, "Mbox send fail %d\n", ret);
360 		return ret;
361 	}
362 
363 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
364 
365 	if (!ti_sci_is_response_ack(resp))
366 		return -ENODEV;
367 
368 	return ret;
369 }
370 
371 /**
372  * ti_sci_cmd_set_board_config() - Command to send board configuration message
373  * @handle:	pointer to TI SCI handle
374  * @addr:	Address where the board config structure is located
375  * @size:	Size of the board config structure
376  *
377  * Return: 0 if all went well, else returns appropriate error value.
378  */
ti_sci_cmd_set_board_config(const struct ti_sci_handle * handle,u64 addr,u32 size)379 static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
380 				       u64 addr, u32 size)
381 {
382 	return cmd_set_board_config_using_msg(handle,
383 					      TI_SCI_MSG_BOARD_CONFIG,
384 					      addr, size);
385 }
386 
387 /**
388  * ti_sci_cmd_set_board_config_rm() - Command to send board resource
389  *				      management configuration
390  * @handle:	pointer to TI SCI handle
391  * @addr:	Address where the board RM config structure is located
392  * @size:	Size of the RM config structure
393  *
394  * Return: 0 if all went well, else returns appropriate error value.
395  */
396 static
ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle * handle,u64 addr,u32 size)397 int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
398 				   u64 addr, u32 size)
399 {
400 	return cmd_set_board_config_using_msg(handle,
401 					      TI_SCI_MSG_BOARD_CONFIG_RM,
402 					      addr, size);
403 }
404 
405 /**
406  * ti_sci_cmd_set_board_config_security() - Command to send board security
407  *					    configuration message
408  * @handle:	pointer to TI SCI handle
409  * @addr:	Address where the board security config structure is located
410  * @size:	Size of the security config structure
411  *
412  * Return: 0 if all went well, else returns appropriate error value.
413  */
414 static
ti_sci_cmd_set_board_config_security(const struct ti_sci_handle * handle,u64 addr,u32 size)415 int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
416 					 u64 addr, u32 size)
417 {
418 	return cmd_set_board_config_using_msg(handle,
419 					      TI_SCI_MSG_BOARD_CONFIG_SECURITY,
420 					      addr, size);
421 }
422 
423 /**
424  * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
425  *				      configuration message
426  * @handle:	pointer to TI SCI handle
427  * @addr:	Address where the board PM config structure is located
428  * @size:	Size of the PM config structure
429  *
430  * Return: 0 if all went well, else returns appropriate error value.
431  */
ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle * handle,u64 addr,u32 size)432 static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
433 					  u64 addr, u32 size)
434 {
435 	return cmd_set_board_config_using_msg(handle,
436 					      TI_SCI_MSG_BOARD_CONFIG_PM,
437 					      addr, size);
438 }
439 
440 static struct ti_sci_exclusive_dev
ti_sci_get_exclusive_dev(struct list_head * dev_list,u32 id)441 *ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
442 {
443 	struct ti_sci_exclusive_dev *dev;
444 
445 	list_for_each_entry(dev, dev_list, list)
446 		if (dev->id == id)
447 			return dev;
448 
449 	return NULL;
450 }
451 
/**
 * ti_sci_add_exclusive_dev() - Record an exclusive get request for a device
 * @info:	Pointer to SCI entity information
 * @id:		TISCI device identifier
 *
 * Bumps the reference count of an existing entry, or allocates a new one
 * with a count of 1 and links it onto @info->dev_list.
 */
static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (dev) {
		dev->count++;
		return;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/* On allocation failure the device simply goes untracked */
	if (!dev)
		return;

	dev->id = id;
	dev->count = 1;
	INIT_LIST_HEAD(&dev->list);
	list_add_tail(&dev->list, &info->dev_list);
}
468 
/**
 * ti_sci_delete_exclusive_dev() - Drop one exclusive reference on a device
 * @info:	Pointer to SCI entity information
 * @id:		TISCI device identifier
 *
 * Decrements the entry's count (never below zero); the entry itself is
 * kept on the list. A no-op when the device was never tracked.
 */
static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (dev && dev->count)
		dev->count--;
}
480 
481 /**
482  * ti_sci_set_device_state() - Set device state helper
483  * @handle:	pointer to TI SCI handle
484  * @id:		Device identifier
485  * @flags:	flags to setup for the device
486  * @state:	State to move the device to
487  *
488  * Return: 0 if all went well, else returns appropriate error value.
489  */
ti_sci_set_device_state(const struct ti_sci_handle * handle,u32 id,u32 flags,u8 state)490 static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
491 				   u32 id, u32 flags, u8 state)
492 {
493 	struct ti_sci_msg_req_set_device_state req;
494 	struct ti_sci_msg_hdr *resp;
495 	struct ti_sci_info *info;
496 	struct ti_sci_xfer *xfer;
497 	int ret = 0;
498 
499 	if (IS_ERR(handle))
500 		return PTR_ERR(handle);
501 	if (!handle)
502 		return -EINVAL;
503 
504 	info = handle_to_ti_sci_info(handle);
505 
506 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
507 				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
508 				     (u32 *)&req, sizeof(req), sizeof(*resp));
509 	if (IS_ERR(xfer)) {
510 		ret = PTR_ERR(xfer);
511 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
512 		return ret;
513 	}
514 	req.id = id;
515 	req.state = state;
516 
517 	ret = ti_sci_do_xfer(info, xfer);
518 	if (ret) {
519 		dev_err(info->dev, "Mbox send fail %d\n", ret);
520 		return ret;
521 	}
522 
523 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
524 
525 	if (!ti_sci_is_response_ack(resp))
526 		return -ENODEV;
527 
528 	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
529 		ti_sci_delete_exclusive_dev(info, id);
530 	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
531 		ti_sci_add_exclusive_dev(info, id);
532 
533 	return ret;
534 }
535 
536 /**
537  * ti_sci_set_device_state_no_wait() - Set device state helper without
538  *				       requesting or waiting for a response.
539  * @handle:	pointer to TI SCI handle
540  * @id:		Device identifier
541  * @flags:	flags to setup for the device
542  * @state:	State to move the device to
543  *
544  * Return: 0 if all went well, else returns appropriate error value.
545  */
ti_sci_set_device_state_no_wait(const struct ti_sci_handle * handle,u32 id,u32 flags,u8 state)546 static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
547 					   u32 id, u32 flags, u8 state)
548 {
549 	struct ti_sci_msg_req_set_device_state req;
550 	struct ti_sci_info *info;
551 	struct ti_sci_xfer *xfer;
552 	int ret = 0;
553 
554 	if (IS_ERR(handle))
555 		return PTR_ERR(handle);
556 	if (!handle)
557 		return -EINVAL;
558 
559 	info = handle_to_ti_sci_info(handle);
560 
561 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
562 				     flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
563 				     (u32 *)&req, sizeof(req), 0);
564 	if (IS_ERR(xfer)) {
565 		ret = PTR_ERR(xfer);
566 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
567 		return ret;
568 	}
569 	req.id = id;
570 	req.state = state;
571 
572 	ret = ti_sci_do_xfer(info, xfer);
573 	if (ret)
574 		dev_err(info->dev, "Mbox send fail %d\n", ret);
575 
576 	return ret;
577 }
578 
579 /**
580  * ti_sci_get_device_state() - Get device state helper
581  * @handle:	Handle to the device
582  * @id:		Device Identifier
583  * @clcnt:	Pointer to Context Loss Count
584  * @resets:	pointer to resets
585  * @p_state:	pointer to p_state
586  * @c_state:	pointer to c_state
587  *
588  * Return: 0 if all went fine, else return appropriate error.
589  */
ti_sci_get_device_state(const struct ti_sci_handle * handle,u32 id,u32 * clcnt,u32 * resets,u8 * p_state,u8 * c_state)590 static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
591 				   u32 id,  u32 *clcnt,  u32 *resets,
592 				   u8 *p_state,  u8 *c_state)
593 {
594 	struct ti_sci_msg_resp_get_device_state *resp;
595 	struct ti_sci_msg_req_get_device_state req;
596 	struct ti_sci_info *info;
597 	struct ti_sci_xfer *xfer;
598 	int ret = 0;
599 
600 	if (IS_ERR(handle))
601 		return PTR_ERR(handle);
602 	if (!handle)
603 		return -EINVAL;
604 
605 	if (!clcnt && !resets && !p_state && !c_state)
606 		return -EINVAL;
607 
608 	info = handle_to_ti_sci_info(handle);
609 
610 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
611 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
612 				     (u32 *)&req, sizeof(req), sizeof(*resp));
613 	if (IS_ERR(xfer)) {
614 		ret = PTR_ERR(xfer);
615 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
616 		return ret;
617 	}
618 	req.id = id;
619 
620 	ret = ti_sci_do_xfer(info, xfer);
621 	if (ret) {
622 		dev_err(info->dev, "Mbox send fail %d\n", ret);
623 		return ret;
624 	}
625 
626 	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
627 	if (!ti_sci_is_response_ack(resp))
628 		return -ENODEV;
629 
630 	if (clcnt)
631 		*clcnt = resp->context_loss_count;
632 	if (resets)
633 		*resets = resp->resets;
634 	if (p_state)
635 		*p_state = resp->programmed_state;
636 	if (c_state)
637 		*c_state = resp->current_state;
638 
639 	return ret;
640 }
641 
642 /**
643  * ti_sci_cmd_get_device() - command to request for device managed by TISCI
644  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
645  * @id:		Device Identifier
646  *
647  * Request for the device - NOTE: the client MUST maintain integrity of
648  * usage count by balancing get_device with put_device. No refcounting is
649  * managed by driver for that purpose.
650  *
651  * NOTE: The request is for exclusive access for the processor.
652  *
653  * Return: 0 if all went fine, else return appropriate error.
654  */
ti_sci_cmd_get_device(const struct ti_sci_handle * handle,u32 id)655 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
656 {
657 	return ti_sci_set_device_state(handle, id, 0,
658 				       MSG_DEVICE_SW_STATE_ON);
659 }
660 
/* Exclusive-access variant of ti_sci_cmd_get_device() */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}
667 
668 /**
669  * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
670  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
671  * @id:		Device Identifier
672  *
673  * Request for the device - NOTE: the client MUST maintain integrity of
674  * usage count by balancing get_device with put_device. No refcounting is
675  * managed by driver for that purpose.
676  *
677  * Return: 0 if all went fine, else return appropriate error.
678  */
ti_sci_cmd_idle_device(const struct ti_sci_handle * handle,u32 id)679 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
680 {
681 	return ti_sci_set_device_state(handle, id,
682 				       0,
683 				       MSG_DEVICE_SW_STATE_RETENTION);
684 }
685 
/* Exclusive-access variant of ti_sci_cmd_idle_device() */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}
692 
693 /**
694  * ti_sci_cmd_put_device() - command to release a device managed by TISCI
695  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
696  * @id:		Device Identifier
697  *
698  * Request for the device - NOTE: the client MUST maintain integrity of
699  * usage count by balancing get_device with put_device. No refcounting is
700  * managed by driver for that purpose.
701  *
702  * Return: 0 if all went fine, else return appropriate error.
703  */
ti_sci_cmd_put_device(const struct ti_sci_handle * handle,u32 id)704 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
705 {
706 	return ti_sci_set_device_state(handle, id, 0,
707 				       MSG_DEVICE_SW_STATE_AUTO_OFF);
708 }
709 
/**
 * ti_sci_cmd_release_exclusive_devices() - Release all exclusively held
 *					    devices on this TI SCI instance
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 *
 * Walks the instance's exclusive-device list and issues one put_device
 * per outstanding get request.
 *
 * Return: always 0.
 */
static
int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
{
	struct ti_sci_exclusive_dev *dev, *tmp;
	struct ti_sci_info *info;
	int i, cnt;

	info = handle_to_ti_sci_info(handle);

	list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
		/*
		 * Snapshot the count before the loop: each successful
		 * put_device decrements dev->count indirectly via
		 * ti_sci_delete_exclusive_dev().
		 */
		cnt = dev->count;
		debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
		for (i = 0; i < cnt; i++)
			ti_sci_cmd_put_device(handle, dev->id);
	}

	return 0;
}
728 
729 /**
730  * ti_sci_cmd_dev_is_valid() - Is the device valid
731  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
732  * @id:		Device Identifier
733  *
734  * Return: 0 if all went fine and the device ID is valid, else return
735  * appropriate error.
736  */
ti_sci_cmd_dev_is_valid(const struct ti_sci_handle * handle,u32 id)737 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
738 {
739 	u8 unused;
740 
741 	/* check the device state which will also tell us if the ID is valid */
742 	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
743 }
744 
745 /**
746  * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
747  * @handle:	Pointer to TISCI handle
748  * @id:		Device Identifier
749  * @count:	Pointer to Context Loss counter to populate
750  *
751  * Return: 0 if all went fine, else return appropriate error.
752  */
ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle * handle,u32 id,u32 * count)753 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
754 				    u32 *count)
755 {
756 	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
757 }
758 
759 /**
760  * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
761  * @handle:	Pointer to TISCI handle
762  * @id:		Device Identifier
763  * @r_state:	true if requested to be idle
764  *
765  * Return: 0 if all went fine, else return appropriate error.
766  */
ti_sci_cmd_dev_is_idle(const struct ti_sci_handle * handle,u32 id,bool * r_state)767 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
768 				  bool *r_state)
769 {
770 	int ret;
771 	u8 state;
772 
773 	if (!r_state)
774 		return -EINVAL;
775 
776 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
777 	if (ret)
778 		return ret;
779 
780 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
781 
782 	return 0;
783 }
784 
785 /**
786  * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
787  * @handle:	Pointer to TISCI handle
788  * @id:		Device Identifier
789  * @r_state:	true if requested to be stopped
790  * @curr_state:	true if currently stopped.
791  *
792  * Return: 0 if all went fine, else return appropriate error.
793  */
ti_sci_cmd_dev_is_stop(const struct ti_sci_handle * handle,u32 id,bool * r_state,bool * curr_state)794 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
795 				  bool *r_state,  bool *curr_state)
796 {
797 	int ret;
798 	u8 p_state, c_state;
799 
800 	if (!r_state && !curr_state)
801 		return -EINVAL;
802 
803 	ret =
804 	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
805 	if (ret)
806 		return ret;
807 
808 	if (r_state)
809 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
810 	if (curr_state)
811 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
812 
813 	return 0;
814 }
815 
816 /**
817  * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
818  * @handle:	Pointer to TISCI handle
819  * @id:		Device Identifier
820  * @r_state:	true if requested to be ON
821  * @curr_state:	true if currently ON and active
822  *
823  * Return: 0 if all went fine, else return appropriate error.
824  */
ti_sci_cmd_dev_is_on(const struct ti_sci_handle * handle,u32 id,bool * r_state,bool * curr_state)825 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
826 				bool *r_state,  bool *curr_state)
827 {
828 	int ret;
829 	u8 p_state, c_state;
830 
831 	if (!r_state && !curr_state)
832 		return -EINVAL;
833 
834 	ret =
835 	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
836 	if (ret)
837 		return ret;
838 
839 	if (r_state)
840 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
841 	if (curr_state)
842 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
843 
844 	return 0;
845 }
846 
847 /**
848  * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
849  * @handle:	Pointer to TISCI handle
850  * @id:		Device Identifier
851  * @curr_state:	true if currently transitioning.
852  *
853  * Return: 0 if all went fine, else return appropriate error.
854  */
ti_sci_cmd_dev_is_trans(const struct ti_sci_handle * handle,u32 id,bool * curr_state)855 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
856 				   bool *curr_state)
857 {
858 	int ret;
859 	u8 state;
860 
861 	if (!curr_state)
862 		return -EINVAL;
863 
864 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
865 	if (ret)
866 		return ret;
867 
868 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
869 
870 	return 0;
871 }
872 
873 /**
874  * ti_sci_cmd_set_device_resets() - command to set resets for device managed
875  *				    by TISCI
876  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
877  * @id:		Device Identifier
878  * @reset_state: Device specific reset bit field
879  *
880  * Return: 0 if all went fine, else return appropriate error.
881  */
ti_sci_cmd_set_device_resets(const struct ti_sci_handle * handle,u32 id,u32 reset_state)882 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
883 					u32 id, u32 reset_state)
884 {
885 	struct ti_sci_msg_req_set_device_resets req;
886 	struct ti_sci_msg_hdr *resp;
887 	struct ti_sci_info *info;
888 	struct ti_sci_xfer *xfer;
889 	int ret = 0;
890 
891 	if (IS_ERR(handle))
892 		return PTR_ERR(handle);
893 	if (!handle)
894 		return -EINVAL;
895 
896 	info = handle_to_ti_sci_info(handle);
897 
898 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
899 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
900 				     (u32 *)&req, sizeof(req), sizeof(*resp));
901 	if (IS_ERR(xfer)) {
902 		ret = PTR_ERR(xfer);
903 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
904 		return ret;
905 	}
906 	req.id = id;
907 	req.resets = reset_state;
908 
909 	ret = ti_sci_do_xfer(info, xfer);
910 	if (ret) {
911 		dev_err(info->dev, "Mbox send fail %d\n", ret);
912 		return ret;
913 	}
914 
915 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
916 
917 	if (!ti_sci_is_response_ack(resp))
918 		return -ENODEV;
919 
920 	return ret;
921 }
922 
923 /**
924  * ti_sci_cmd_get_device_resets() - Get reset state for device managed
925  *				    by TISCI
926  * @handle:		Pointer to TISCI handle
927  * @id:			Device Identifier
928  * @reset_state:	Pointer to reset state to populate
929  *
930  * Return: 0 if all went fine, else return appropriate error.
931  */
ti_sci_cmd_get_device_resets(const struct ti_sci_handle * handle,u32 id,u32 * reset_state)932 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
933 					u32 id, u32 *reset_state)
934 {
935 	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
936 				       NULL);
937 }
938 
939 /**
940  * ti_sci_set_clock_state() - Set clock state helper
941  * @handle:	pointer to TI SCI handle
942  * @dev_id:	Device identifier this request is for
943  * @clk_id:	Clock identifier for the device for this request.
944  *		Each device has it's own set of clock inputs. This indexes
945  *		which clock input to modify.
946  * @flags:	Header flags as needed
947  * @state:	State to request for the clock.
948  *
949  * Return: 0 if all went well, else returns appropriate error value.
950  */
ti_sci_set_clock_state(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u32 flags,u8 state)951 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
952 				  u32 dev_id, u8 clk_id,
953 				  u32 flags, u8 state)
954 {
955 	struct ti_sci_msg_req_set_clock_state req;
956 	struct ti_sci_msg_hdr *resp;
957 	struct ti_sci_info *info;
958 	struct ti_sci_xfer *xfer;
959 	int ret = 0;
960 
961 	if (IS_ERR(handle))
962 		return PTR_ERR(handle);
963 	if (!handle)
964 		return -EINVAL;
965 
966 	info = handle_to_ti_sci_info(handle);
967 
968 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
969 				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
970 				     (u32 *)&req, sizeof(req), sizeof(*resp));
971 	if (IS_ERR(xfer)) {
972 		ret = PTR_ERR(xfer);
973 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
974 		return ret;
975 	}
976 	req.dev_id = dev_id;
977 	req.clk_id = clk_id;
978 	req.request_state = state;
979 
980 	ret = ti_sci_do_xfer(info, xfer);
981 	if (ret) {
982 		dev_err(info->dev, "Mbox send fail %d\n", ret);
983 		return ret;
984 	}
985 
986 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
987 
988 	if (!ti_sci_is_response_ack(resp))
989 		return -ENODEV;
990 
991 	return ret;
992 }
993 
994 /**
995  * ti_sci_cmd_get_clock_state() - Get clock state helper
996  * @handle:	pointer to TI SCI handle
997  * @dev_id:	Device identifier this request is for
998  * @clk_id:	Clock identifier for the device for this request.
999  *		Each device has it's own set of clock inputs. This indexes
1000  *		which clock input to modify.
1001  * @programmed_state:	State requested for clock to move to
1002  * @current_state:	State that the clock is currently in
1003  *
1004  * Return: 0 if all went well, else returns appropriate error value.
1005  */
ti_sci_cmd_get_clock_state(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u8 * programmed_state,u8 * current_state)1006 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1007 				      u32 dev_id, u8 clk_id,
1008 				      u8 *programmed_state, u8 *current_state)
1009 {
1010 	struct ti_sci_msg_resp_get_clock_state *resp;
1011 	struct ti_sci_msg_req_get_clock_state req;
1012 	struct ti_sci_info *info;
1013 	struct ti_sci_xfer *xfer;
1014 	int ret = 0;
1015 
1016 	if (IS_ERR(handle))
1017 		return PTR_ERR(handle);
1018 	if (!handle)
1019 		return -EINVAL;
1020 
1021 	if (!programmed_state && !current_state)
1022 		return -EINVAL;
1023 
1024 	info = handle_to_ti_sci_info(handle);
1025 
1026 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1027 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1028 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1029 	if (IS_ERR(xfer)) {
1030 		ret = PTR_ERR(xfer);
1031 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1032 		return ret;
1033 	}
1034 	req.dev_id = dev_id;
1035 	req.clk_id = clk_id;
1036 
1037 	ret = ti_sci_do_xfer(info, xfer);
1038 	if (ret) {
1039 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1040 		return ret;
1041 	}
1042 
1043 	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
1044 
1045 	if (!ti_sci_is_response_ack(resp))
1046 		return -ENODEV;
1047 
1048 	if (programmed_state)
1049 		*programmed_state = resp->programmed_state;
1050 	if (current_state)
1051 		*current_state = resp->current_state;
1052 
1053 	return ret;
1054 }
1055 
1056 /**
1057  * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1058  * @handle:	pointer to TI SCI handle
1059  * @dev_id:	Device identifier this request is for
1060  * @clk_id:	Clock identifier for the device for this request.
1061  *		Each device has it's own set of clock inputs. This indexes
1062  *		which clock input to modify.
1063  * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1064  * @can_change_freq: 'true' if frequency change is desired, else 'false'
1065  * @enable_input_term: 'true' if input termination is desired, else 'false'
1066  *
1067  * Return: 0 if all went well, else returns appropriate error value.
1068  */
ti_sci_cmd_get_clock(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,bool needs_ssc,bool can_change_freq,bool enable_input_term)1069 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1070 				u8 clk_id, bool needs_ssc, bool can_change_freq,
1071 				bool enable_input_term)
1072 {
1073 	u32 flags = 0;
1074 
1075 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1076 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1077 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1078 
1079 	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1080 				      MSG_CLOCK_SW_STATE_REQ);
1081 }
1082 
1083 /**
1084  * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1085  * @handle:	pointer to TI SCI handle
1086  * @dev_id:	Device identifier this request is for
1087  * @clk_id:	Clock identifier for the device for this request.
1088  *		Each device has it's own set of clock inputs. This indexes
1089  *		which clock input to modify.
1090  *
1091  * NOTE: This clock must have been requested by get_clock previously.
1092  *
1093  * Return: 0 if all went well, else returns appropriate error value.
1094  */
ti_sci_cmd_idle_clock(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id)1095 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1096 				 u32 dev_id, u8 clk_id)
1097 {
1098 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1099 				      MSG_CLOCK_SW_STATE_UNREQ);
1100 }
1101 
1102 /**
1103  * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1104  * @handle:	pointer to TI SCI handle
1105  * @dev_id:	Device identifier this request is for
1106  * @clk_id:	Clock identifier for the device for this request.
1107  *		Each device has it's own set of clock inputs. This indexes
1108  *		which clock input to modify.
1109  *
1110  * NOTE: This clock must have been requested by get_clock previously.
1111  *
1112  * Return: 0 if all went well, else returns appropriate error value.
1113  */
ti_sci_cmd_put_clock(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id)1114 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1115 				u32 dev_id, u8 clk_id)
1116 {
1117 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1118 				      MSG_CLOCK_SW_STATE_AUTO);
1119 }
1120 
1121 /**
1122  * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1123  * @handle:	pointer to TI SCI handle
1124  * @dev_id:	Device identifier this request is for
1125  * @clk_id:	Clock identifier for the device for this request.
1126  *		Each device has it's own set of clock inputs. This indexes
1127  *		which clock input to modify.
1128  * @req_state: state indicating if the clock is auto managed
1129  *
1130  * Return: 0 if all went well, else returns appropriate error value.
1131  */
ti_sci_cmd_clk_is_auto(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,bool * req_state)1132 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1133 				  u32 dev_id, u8 clk_id, bool *req_state)
1134 {
1135 	u8 state = 0;
1136 	int ret;
1137 
1138 	if (!req_state)
1139 		return -EINVAL;
1140 
1141 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1142 	if (ret)
1143 		return ret;
1144 
1145 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1146 	return 0;
1147 }
1148 
1149 /**
1150  * ti_sci_cmd_clk_is_on() - Is the clock ON
1151  * @handle:	pointer to TI SCI handle
1152  * @dev_id:	Device identifier this request is for
1153  * @clk_id:	Clock identifier for the device for this request.
1154  *		Each device has it's own set of clock inputs. This indexes
1155  *		which clock input to modify.
1156  * @req_state: state indicating if the clock is managed by us and enabled
1157  * @curr_state: state indicating if the clock is ready for operation
1158  *
1159  * Return: 0 if all went well, else returns appropriate error value.
1160  */
ti_sci_cmd_clk_is_on(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,bool * req_state,bool * curr_state)1161 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1162 				u8 clk_id, bool *req_state, bool *curr_state)
1163 {
1164 	u8 c_state = 0, r_state = 0;
1165 	int ret;
1166 
1167 	if (!req_state && !curr_state)
1168 		return -EINVAL;
1169 
1170 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1171 					 &r_state, &c_state);
1172 	if (ret)
1173 		return ret;
1174 
1175 	if (req_state)
1176 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1177 	if (curr_state)
1178 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1179 	return 0;
1180 }
1181 
1182 /**
1183  * ti_sci_cmd_clk_is_off() - Is the clock OFF
1184  * @handle:	pointer to TI SCI handle
1185  * @dev_id:	Device identifier this request is for
1186  * @clk_id:	Clock identifier for the device for this request.
1187  *		Each device has it's own set of clock inputs. This indexes
1188  *		which clock input to modify.
1189  * @req_state: state indicating if the clock is managed by us and disabled
1190  * @curr_state: state indicating if the clock is NOT ready for operation
1191  *
1192  * Return: 0 if all went well, else returns appropriate error value.
1193  */
ti_sci_cmd_clk_is_off(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,bool * req_state,bool * curr_state)1194 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1195 				 u8 clk_id, bool *req_state, bool *curr_state)
1196 {
1197 	u8 c_state = 0, r_state = 0;
1198 	int ret;
1199 
1200 	if (!req_state && !curr_state)
1201 		return -EINVAL;
1202 
1203 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1204 					 &r_state, &c_state);
1205 	if (ret)
1206 		return ret;
1207 
1208 	if (req_state)
1209 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1210 	if (curr_state)
1211 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1212 	return 0;
1213 }
1214 
1215 /**
1216  * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1217  * @handle:	pointer to TI SCI handle
1218  * @dev_id:	Device identifier this request is for
1219  * @clk_id:	Clock identifier for the device for this request.
1220  *		Each device has it's own set of clock inputs. This indexes
1221  *		which clock input to modify.
1222  * @parent_id:	Parent clock identifier to set
1223  *
1224  * Return: 0 if all went well, else returns appropriate error value.
1225  */
ti_sci_cmd_clk_set_parent(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u8 parent_id)1226 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1227 				     u32 dev_id, u8 clk_id, u8 parent_id)
1228 {
1229 	struct ti_sci_msg_req_set_clock_parent req;
1230 	struct ti_sci_msg_hdr *resp;
1231 	struct ti_sci_info *info;
1232 	struct ti_sci_xfer *xfer;
1233 	int ret = 0;
1234 
1235 	if (IS_ERR(handle))
1236 		return PTR_ERR(handle);
1237 	if (!handle)
1238 		return -EINVAL;
1239 
1240 	info = handle_to_ti_sci_info(handle);
1241 
1242 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1243 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1244 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1245 	if (IS_ERR(xfer)) {
1246 		ret = PTR_ERR(xfer);
1247 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1248 		return ret;
1249 	}
1250 	req.dev_id = dev_id;
1251 	req.clk_id = clk_id;
1252 	req.parent_id = parent_id;
1253 
1254 	ret = ti_sci_do_xfer(info, xfer);
1255 	if (ret) {
1256 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1257 		return ret;
1258 	}
1259 
1260 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1261 
1262 	if (!ti_sci_is_response_ack(resp))
1263 		return -ENODEV;
1264 
1265 	return ret;
1266 }
1267 
1268 /**
1269  * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1270  * @handle:	pointer to TI SCI handle
1271  * @dev_id:	Device identifier this request is for
1272  * @clk_id:	Clock identifier for the device for this request.
1273  *		Each device has it's own set of clock inputs. This indexes
1274  *		which clock input to modify.
1275  * @parent_id:	Current clock parent
1276  *
1277  * Return: 0 if all went well, else returns appropriate error value.
1278  */
ti_sci_cmd_clk_get_parent(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u8 * parent_id)1279 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1280 				     u32 dev_id, u8 clk_id, u8 *parent_id)
1281 {
1282 	struct ti_sci_msg_resp_get_clock_parent *resp;
1283 	struct ti_sci_msg_req_get_clock_parent req;
1284 	struct ti_sci_info *info;
1285 	struct ti_sci_xfer *xfer;
1286 	int ret = 0;
1287 
1288 	if (IS_ERR(handle))
1289 		return PTR_ERR(handle);
1290 	if (!handle || !parent_id)
1291 		return -EINVAL;
1292 
1293 	info = handle_to_ti_sci_info(handle);
1294 
1295 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1296 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1297 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1298 	if (IS_ERR(xfer)) {
1299 		ret = PTR_ERR(xfer);
1300 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1301 		return ret;
1302 	}
1303 	req.dev_id = dev_id;
1304 	req.clk_id = clk_id;
1305 
1306 	ret = ti_sci_do_xfer(info, xfer);
1307 	if (ret) {
1308 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1309 		return ret;
1310 	}
1311 
1312 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1313 
1314 	if (!ti_sci_is_response_ack(resp))
1315 		ret = -ENODEV;
1316 	else
1317 		*parent_id = resp->parent_id;
1318 
1319 	return ret;
1320 }
1321 
1322 /**
1323  * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1324  * @handle:	pointer to TI SCI handle
1325  * @dev_id:	Device identifier this request is for
1326  * @clk_id:	Clock identifier for the device for this request.
1327  *		Each device has it's own set of clock inputs. This indexes
1328  *		which clock input to modify.
1329  * @num_parents: Returns he number of parents to the current clock.
1330  *
1331  * Return: 0 if all went well, else returns appropriate error value.
1332  */
ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u8 * num_parents)1333 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1334 					  u32 dev_id, u8 clk_id,
1335 					  u8 *num_parents)
1336 {
1337 	struct ti_sci_msg_resp_get_clock_num_parents *resp;
1338 	struct ti_sci_msg_req_get_clock_num_parents req;
1339 	struct ti_sci_info *info;
1340 	struct ti_sci_xfer *xfer;
1341 	int ret = 0;
1342 
1343 	if (IS_ERR(handle))
1344 		return PTR_ERR(handle);
1345 	if (!handle || !num_parents)
1346 		return -EINVAL;
1347 
1348 	info = handle_to_ti_sci_info(handle);
1349 
1350 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1351 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1352 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1353 	if (IS_ERR(xfer)) {
1354 		ret = PTR_ERR(xfer);
1355 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1356 		return ret;
1357 	}
1358 	req.dev_id = dev_id;
1359 	req.clk_id = clk_id;
1360 
1361 	ret = ti_sci_do_xfer(info, xfer);
1362 	if (ret) {
1363 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1364 		return ret;
1365 	}
1366 
1367 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1368 							xfer->tx_message.buf;
1369 
1370 	if (!ti_sci_is_response_ack(resp))
1371 		ret = -ENODEV;
1372 	else
1373 		*num_parents = resp->num_parents;
1374 
1375 	return ret;
1376 }
1377 
1378 /**
1379  * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1380  * @handle:	pointer to TI SCI handle
1381  * @dev_id:	Device identifier this request is for
1382  * @clk_id:	Clock identifier for the device for this request.
1383  *		Each device has it's own set of clock inputs. This indexes
1384  *		which clock input to modify.
1385  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1386  *		allowable programmed frequency and does not account for clock
1387  *		tolerances and jitter.
1388  * @target_freq: The target clock frequency in Hz. A frequency will be
1389  *		processed as close to this target frequency as possible.
1390  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1391  *		allowable programmed frequency and does not account for clock
1392  *		tolerances and jitter.
1393  * @match_freq:	Frequency match in Hz response.
1394  *
1395  * Return: 0 if all went well, else returns appropriate error value.
1396  */
ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u64 min_freq,u64 target_freq,u64 max_freq,u64 * match_freq)1397 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1398 					 u32 dev_id, u8 clk_id, u64 min_freq,
1399 					 u64 target_freq, u64 max_freq,
1400 					 u64 *match_freq)
1401 {
1402 	struct ti_sci_msg_resp_query_clock_freq *resp;
1403 	struct ti_sci_msg_req_query_clock_freq req;
1404 	struct ti_sci_info *info;
1405 	struct ti_sci_xfer *xfer;
1406 	int ret = 0;
1407 
1408 	if (IS_ERR(handle))
1409 		return PTR_ERR(handle);
1410 	if (!handle || !match_freq)
1411 		return -EINVAL;
1412 
1413 	info = handle_to_ti_sci_info(handle);
1414 
1415 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1416 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1417 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1418 	if (IS_ERR(xfer)) {
1419 		ret = PTR_ERR(xfer);
1420 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1421 		return ret;
1422 	}
1423 	req.dev_id = dev_id;
1424 	req.clk_id = clk_id;
1425 	req.min_freq_hz = min_freq;
1426 	req.target_freq_hz = target_freq;
1427 	req.max_freq_hz = max_freq;
1428 
1429 	ret = ti_sci_do_xfer(info, xfer);
1430 	if (ret) {
1431 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1432 		return ret;
1433 	}
1434 
1435 	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1436 
1437 	if (!ti_sci_is_response_ack(resp))
1438 		ret = -ENODEV;
1439 	else
1440 		*match_freq = resp->freq_hz;
1441 
1442 	return ret;
1443 }
1444 
1445 /**
1446  * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1447  * @handle:	pointer to TI SCI handle
1448  * @dev_id:	Device identifier this request is for
1449  * @clk_id:	Clock identifier for the device for this request.
1450  *		Each device has it's own set of clock inputs. This indexes
1451  *		which clock input to modify.
1452  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1453  *		allowable programmed frequency and does not account for clock
1454  *		tolerances and jitter.
1455  * @target_freq: The target clock frequency in Hz. A frequency will be
1456  *		processed as close to this target frequency as possible.
1457  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1458  *		allowable programmed frequency and does not account for clock
1459  *		tolerances and jitter.
1460  *
1461  * Return: 0 if all went well, else returns appropriate error value.
1462  */
ti_sci_cmd_clk_set_freq(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u64 min_freq,u64 target_freq,u64 max_freq)1463 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1464 				   u32 dev_id, u8 clk_id, u64 min_freq,
1465 				   u64 target_freq, u64 max_freq)
1466 {
1467 	struct ti_sci_msg_req_set_clock_freq req;
1468 	struct ti_sci_msg_hdr *resp;
1469 	struct ti_sci_info *info;
1470 	struct ti_sci_xfer *xfer;
1471 	int ret = 0;
1472 
1473 	if (IS_ERR(handle))
1474 		return PTR_ERR(handle);
1475 	if (!handle)
1476 		return -EINVAL;
1477 
1478 	info = handle_to_ti_sci_info(handle);
1479 
1480 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1481 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1482 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1483 	if (IS_ERR(xfer)) {
1484 		ret = PTR_ERR(xfer);
1485 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1486 		return ret;
1487 	}
1488 	req.dev_id = dev_id;
1489 	req.clk_id = clk_id;
1490 	req.min_freq_hz = min_freq;
1491 	req.target_freq_hz = target_freq;
1492 	req.max_freq_hz = max_freq;
1493 
1494 	ret = ti_sci_do_xfer(info, xfer);
1495 	if (ret) {
1496 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1497 		return ret;
1498 	}
1499 
1500 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1501 
1502 	if (!ti_sci_is_response_ack(resp))
1503 		return -ENODEV;
1504 
1505 	return ret;
1506 }
1507 
1508 /**
1509  * ti_sci_cmd_clk_get_freq() - Get current frequency
1510  * @handle:	pointer to TI SCI handle
1511  * @dev_id:	Device identifier this request is for
1512  * @clk_id:	Clock identifier for the device for this request.
1513  *		Each device has it's own set of clock inputs. This indexes
1514  *		which clock input to modify.
1515  * @freq:	Currently frequency in Hz
1516  *
1517  * Return: 0 if all went well, else returns appropriate error value.
1518  */
ti_sci_cmd_clk_get_freq(const struct ti_sci_handle * handle,u32 dev_id,u8 clk_id,u64 * freq)1519 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1520 				   u32 dev_id, u8 clk_id, u64 *freq)
1521 {
1522 	struct ti_sci_msg_resp_get_clock_freq *resp;
1523 	struct ti_sci_msg_req_get_clock_freq req;
1524 	struct ti_sci_info *info;
1525 	struct ti_sci_xfer *xfer;
1526 	int ret = 0;
1527 
1528 	if (IS_ERR(handle))
1529 		return PTR_ERR(handle);
1530 	if (!handle || !freq)
1531 		return -EINVAL;
1532 
1533 	info = handle_to_ti_sci_info(handle);
1534 
1535 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1536 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1537 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1538 	if (IS_ERR(xfer)) {
1539 		ret = PTR_ERR(xfer);
1540 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1541 		return ret;
1542 	}
1543 	req.dev_id = dev_id;
1544 	req.clk_id = clk_id;
1545 
1546 	ret = ti_sci_do_xfer(info, xfer);
1547 	if (ret) {
1548 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1549 		return ret;
1550 	}
1551 
1552 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1553 
1554 	if (!ti_sci_is_response_ack(resp))
1555 		ret = -ENODEV;
1556 	else
1557 		*freq = resp->freq_hz;
1558 
1559 	return ret;
1560 }
1561 
1562 /**
1563  * ti_sci_cmd_core_reboot() - Command to request system reset
1564  * @handle:	pointer to TI SCI handle
1565  *
1566  * Return: 0 if all went well, else returns appropriate error value.
1567  */
ti_sci_cmd_core_reboot(const struct ti_sci_handle * handle)1568 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1569 {
1570 	struct ti_sci_msg_req_reboot req;
1571 	struct ti_sci_msg_hdr *resp;
1572 	struct ti_sci_info *info;
1573 	struct ti_sci_xfer *xfer;
1574 	int ret = 0;
1575 
1576 	if (IS_ERR(handle))
1577 		return PTR_ERR(handle);
1578 	if (!handle)
1579 		return -EINVAL;
1580 
1581 	info = handle_to_ti_sci_info(handle);
1582 
1583 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1584 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1585 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1586 	if (IS_ERR(xfer)) {
1587 		ret = PTR_ERR(xfer);
1588 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1589 		return ret;
1590 	}
1591 	req.domain = 0;
1592 
1593 	ret = ti_sci_do_xfer(info, xfer);
1594 	if (ret) {
1595 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1596 		return ret;
1597 	}
1598 
1599 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1600 
1601 	if (!ti_sci_is_response_ack(resp))
1602 		return -ENODEV;
1603 
1604 	return ret;
1605 }
1606 
1607 /**
1608  * ti_sci_get_resource_range - Helper to get a range of resources assigned
1609  *			       to a host. Resource is uniquely identified by
1610  *			       type and subtype.
1611  * @handle:		Pointer to TISCI handle.
1612  * @dev_id:		TISCI device ID.
1613  * @subtype:		Resource assignment subtype that is being requested
1614  *			from the given device.
1615  * @s_host:		Host processor ID to which the resources are allocated
1616  * @range_start:	Start index of the resource range
1617  * @range_num:		Number of resources in the range
1618  *
1619  * Return: 0 if all went fine, else return appropriate error.
1620  */
ti_sci_get_resource_range(const struct ti_sci_handle * handle,u32 dev_id,u8 subtype,u8 s_host,u16 * range_start,u16 * range_num)1621 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1622 				     u32 dev_id, u8 subtype, u8 s_host,
1623 				     u16 *range_start, u16 *range_num)
1624 {
1625 	struct ti_sci_msg_resp_get_resource_range *resp;
1626 	struct ti_sci_msg_req_get_resource_range req;
1627 	struct ti_sci_xfer *xfer;
1628 	struct ti_sci_info *info;
1629 	int ret = 0;
1630 
1631 	if (IS_ERR(handle))
1632 		return PTR_ERR(handle);
1633 	if (!handle)
1634 		return -EINVAL;
1635 
1636 	info = handle_to_ti_sci_info(handle);
1637 
1638 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1639 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1640 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1641 	if (IS_ERR(xfer)) {
1642 		ret = PTR_ERR(xfer);
1643 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1644 		return ret;
1645 	}
1646 
1647 	req.secondary_host = s_host;
1648 	req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1649 	req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1650 
1651 	ret = ti_sci_do_xfer(info, xfer);
1652 	if (ret) {
1653 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1654 		goto fail;
1655 	}
1656 
1657 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1658 	if (!ti_sci_is_response_ack(resp)) {
1659 		ret = -ENODEV;
1660 	} else if (!resp->range_start && !resp->range_num) {
1661 		ret = -ENODEV;
1662 	} else {
1663 		*range_start = resp->range_start;
1664 		*range_num = resp->range_num;
1665 	};
1666 
1667 fail:
1668 	return ret;
1669 }
1670 
1671 /**
1672  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1673  *				   that is same as ti sci interface host.
1674  * @handle:		Pointer to TISCI handle.
1675  * @dev_id:		TISCI device ID.
1676  * @subtype:		Resource assignment subtype that is being requested
1677  *			from the given device.
1678  * @range_start:	Start index of the resource range
1679  * @range_num:		Number of resources in the range
1680  *
1681  * Return: 0 if all went fine, else return appropriate error.
1682  */
ti_sci_cmd_get_resource_range(const struct ti_sci_handle * handle,u32 dev_id,u8 subtype,u16 * range_start,u16 * range_num)1683 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1684 					 u32 dev_id, u8 subtype,
1685 					 u16 *range_start, u16 *range_num)
1686 {
1687 	return ti_sci_get_resource_range(handle, dev_id, subtype,
1688 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1689 					 range_start, range_num);
1690 }
1691 
1692 /**
1693  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1694  *					      assigned to a specified host.
1695  * @handle:		Pointer to TISCI handle.
1696  * @dev_id:		TISCI device ID.
1697  * @subtype:		Resource assignment subtype that is being requested
1698  *			from the given device.
1699  * @s_host:		Host processor ID to which the resources are allocated
1700  * @range_start:	Start index of the resource range
1701  * @range_num:		Number of resources in the range
1702  *
1703  * Return: 0 if all went fine, else return appropriate error.
1704  */
1705 static
ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle * handle,u32 dev_id,u8 subtype,u8 s_host,u16 * range_start,u16 * range_num)1706 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1707 					     u32 dev_id, u8 subtype, u8 s_host,
1708 					     u16 *range_start, u16 *range_num)
1709 {
1710 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1711 					 range_start, range_num);
1712 }
1713 
1714 /**
1715  * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1716  * @handle:		pointer to TI SCI handle
1717  * @msms_start:		MSMC start as returned by tisci
1718  * @msmc_end:		MSMC end as returned by tisci
1719  *
1720  * Return: 0 if all went well, else returns appropriate error value.
1721  */
ti_sci_cmd_query_msmc(const struct ti_sci_handle * handle,u64 * msmc_start,u64 * msmc_end)1722 static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1723 				 u64 *msmc_start, u64 *msmc_end)
1724 {
1725 	struct ti_sci_msg_resp_query_msmc *resp;
1726 	struct ti_sci_msg_hdr req;
1727 	struct ti_sci_info *info;
1728 	struct ti_sci_xfer *xfer;
1729 	int ret = 0;
1730 
1731 	if (IS_ERR(handle))
1732 		return PTR_ERR(handle);
1733 	if (!handle)
1734 		return -EINVAL;
1735 
1736 	info = handle_to_ti_sci_info(handle);
1737 
1738 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1739 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1740 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1741 	if (IS_ERR(xfer)) {
1742 		ret = PTR_ERR(xfer);
1743 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1744 		return ret;
1745 	}
1746 
1747 	ret = ti_sci_do_xfer(info, xfer);
1748 	if (ret) {
1749 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1750 		return ret;
1751 	}
1752 
1753 	resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1754 
1755 	if (!ti_sci_is_response_ack(resp))
1756 		return -ENODEV;
1757 
1758 	*msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1759 			resp->msmc_start_low;
1760 	*msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1761 			resp->msmc_end_low;
1762 
1763 	return ret;
1764 }
1765 
1766 /**
1767  * ti_sci_cmd_proc_request() - Command to request a physical processor control
1768  * @handle:	Pointer to TI SCI handle
1769  * @proc_id:	Processor ID this request is for
1770  *
1771  * Return: 0 if all went well, else returns appropriate error value.
1772  */
ti_sci_cmd_proc_request(const struct ti_sci_handle * handle,u8 proc_id)1773 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1774 				   u8 proc_id)
1775 {
1776 	struct ti_sci_msg_req_proc_request req;
1777 	struct ti_sci_msg_hdr *resp;
1778 	struct ti_sci_info *info;
1779 	struct ti_sci_xfer *xfer;
1780 	int ret = 0;
1781 
1782 	if (IS_ERR(handle))
1783 		return PTR_ERR(handle);
1784 	if (!handle)
1785 		return -EINVAL;
1786 
1787 	info = handle_to_ti_sci_info(handle);
1788 
1789 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1790 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1791 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1792 	if (IS_ERR(xfer)) {
1793 		ret = PTR_ERR(xfer);
1794 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1795 		return ret;
1796 	}
1797 	req.processor_id = proc_id;
1798 
1799 	ret = ti_sci_do_xfer(info, xfer);
1800 	if (ret) {
1801 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1806 
1807 	if (!ti_sci_is_response_ack(resp))
1808 		ret = -ENODEV;
1809 
1810 	return ret;
1811 }
1812 
1813 /**
1814  * ti_sci_cmd_proc_release() - Command to release a physical processor control
1815  * @handle:	Pointer to TI SCI handle
1816  * @proc_id:	Processor ID this request is for
1817  *
1818  * Return: 0 if all went well, else returns appropriate error value.
1819  */
ti_sci_cmd_proc_release(const struct ti_sci_handle * handle,u8 proc_id)1820 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1821 				   u8 proc_id)
1822 {
1823 	struct ti_sci_msg_req_proc_release req;
1824 	struct ti_sci_msg_hdr *resp;
1825 	struct ti_sci_info *info;
1826 	struct ti_sci_xfer *xfer;
1827 	int ret = 0;
1828 
1829 	if (IS_ERR(handle))
1830 		return PTR_ERR(handle);
1831 	if (!handle)
1832 		return -EINVAL;
1833 
1834 	info = handle_to_ti_sci_info(handle);
1835 
1836 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1837 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1838 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1839 	if (IS_ERR(xfer)) {
1840 		ret = PTR_ERR(xfer);
1841 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1842 		return ret;
1843 	}
1844 	req.processor_id = proc_id;
1845 
1846 	ret = ti_sci_do_xfer(info, xfer);
1847 	if (ret) {
1848 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1849 		return ret;
1850 	}
1851 
1852 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1853 
1854 	if (!ti_sci_is_response_ack(resp))
1855 		ret = -ENODEV;
1856 
1857 	return ret;
1858 }
1859 
1860 /**
1861  * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1862  *				control to a host in the processor's access
1863  *				control list.
1864  * @handle:	Pointer to TI SCI handle
1865  * @proc_id:	Processor ID this request is for
1866  * @host_id:	Host ID to get the control of the processor
1867  *
1868  * Return: 0 if all went well, else returns appropriate error value.
1869  */
ti_sci_cmd_proc_handover(const struct ti_sci_handle * handle,u8 proc_id,u8 host_id)1870 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1871 				    u8 proc_id, u8 host_id)
1872 {
1873 	struct ti_sci_msg_req_proc_handover req;
1874 	struct ti_sci_msg_hdr *resp;
1875 	struct ti_sci_info *info;
1876 	struct ti_sci_xfer *xfer;
1877 	int ret = 0;
1878 
1879 	if (IS_ERR(handle))
1880 		return PTR_ERR(handle);
1881 	if (!handle)
1882 		return -EINVAL;
1883 
1884 	info = handle_to_ti_sci_info(handle);
1885 
1886 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1887 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1888 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1889 	if (IS_ERR(xfer)) {
1890 		ret = PTR_ERR(xfer);
1891 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1892 		return ret;
1893 	}
1894 	req.processor_id = proc_id;
1895 	req.host_id = host_id;
1896 
1897 	ret = ti_sci_do_xfer(info, xfer);
1898 	if (ret) {
1899 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1900 		return ret;
1901 	}
1902 
1903 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1904 
1905 	if (!ti_sci_is_response_ack(resp))
1906 		ret = -ENODEV;
1907 
1908 	return ret;
1909 }
1910 
1911 /**
1912  * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1913  *				    configuration flags
1914  * @handle:		Pointer to TI SCI handle
1915  * @proc_id:		Processor ID this request is for
1916  * @config_flags_set:	Configuration flags to be set
1917  * @config_flags_clear:	Configuration flags to be cleared.
1918  *
1919  * Return: 0 if all went well, else returns appropriate error value.
1920  */
ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle * handle,u8 proc_id,u64 bootvector,u32 config_flags_set,u32 config_flags_clear)1921 static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1922 					u8 proc_id, u64 bootvector,
1923 					u32 config_flags_set,
1924 					u32 config_flags_clear)
1925 {
1926 	struct ti_sci_msg_req_set_proc_boot_config req;
1927 	struct ti_sci_msg_hdr *resp;
1928 	struct ti_sci_info *info;
1929 	struct ti_sci_xfer *xfer;
1930 	int ret = 0;
1931 
1932 	if (IS_ERR(handle))
1933 		return PTR_ERR(handle);
1934 	if (!handle)
1935 		return -EINVAL;
1936 
1937 	info = handle_to_ti_sci_info(handle);
1938 
1939 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1940 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1941 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1942 	if (IS_ERR(xfer)) {
1943 		ret = PTR_ERR(xfer);
1944 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1945 		return ret;
1946 	}
1947 	req.processor_id = proc_id;
1948 	req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1949 	req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1950 				TISCI_ADDR_HIGH_SHIFT;
1951 	req.config_flags_set = config_flags_set;
1952 	req.config_flags_clear = config_flags_clear;
1953 
1954 	ret = ti_sci_do_xfer(info, xfer);
1955 	if (ret) {
1956 		dev_err(info->dev, "Mbox send fail %d\n", ret);
1957 		return ret;
1958 	}
1959 
1960 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1961 
1962 	if (!ti_sci_is_response_ack(resp))
1963 		ret = -ENODEV;
1964 
1965 	return ret;
1966 }
1967 
1968 /**
1969  * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1970  *				     control flags
1971  * @handle:			Pointer to TI SCI handle
1972  * @proc_id:			Processor ID this request is for
1973  * @control_flags_set:		Control flags to be set
1974  * @control_flags_clear:	Control flags to be cleared
1975  *
1976  * Return: 0 if all went well, else returns appropriate error value.
1977  */
ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle * handle,u8 proc_id,u32 control_flags_set,u32 control_flags_clear)1978 static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1979 					 u8 proc_id, u32 control_flags_set,
1980 					 u32 control_flags_clear)
1981 {
1982 	struct ti_sci_msg_req_set_proc_boot_ctrl req;
1983 	struct ti_sci_msg_hdr *resp;
1984 	struct ti_sci_info *info;
1985 	struct ti_sci_xfer *xfer;
1986 	int ret = 0;
1987 
1988 	if (IS_ERR(handle))
1989 		return PTR_ERR(handle);
1990 	if (!handle)
1991 		return -EINVAL;
1992 
1993 	info = handle_to_ti_sci_info(handle);
1994 
1995 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1996 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1997 				     (u32 *)&req, sizeof(req), sizeof(*resp));
1998 	if (IS_ERR(xfer)) {
1999 		ret = PTR_ERR(xfer);
2000 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2001 		return ret;
2002 	}
2003 	req.processor_id = proc_id;
2004 	req.control_flags_set = control_flags_set;
2005 	req.control_flags_clear = control_flags_clear;
2006 
2007 	ret = ti_sci_do_xfer(info, xfer);
2008 	if (ret) {
2009 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2010 		return ret;
2011 	}
2012 
2013 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2014 
2015 	if (!ti_sci_is_response_ack(resp))
2016 		ret = -ENODEV;
2017 
2018 	return ret;
2019 }
2020 
2021 /**
2022  * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
2023  *			image and then set the processor configuration flags.
2024  * @handle:	Pointer to TI SCI handle
2025  * @image_addr:	Memory address at which payload image and certificate is
2026  *		located in memory, this is updated if the image data is
2027  *		moved during authentication.
2028  * @image_size: This is updated with the final size of the image after
2029  *		authentication.
2030  *
2031  * Return: 0 if all went well, else returns appropriate error value.
2032  */
ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle * handle,u64 * image_addr,u32 * image_size)2033 static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
2034 					   u64 *image_addr, u32 *image_size)
2035 {
2036 	struct ti_sci_msg_req_proc_auth_boot_image req;
2037 	struct ti_sci_msg_resp_proc_auth_boot_image *resp;
2038 	struct ti_sci_info *info;
2039 	struct ti_sci_xfer *xfer;
2040 	int ret = 0;
2041 
2042 	if (IS_ERR(handle))
2043 		return PTR_ERR(handle);
2044 	if (!handle)
2045 		return -EINVAL;
2046 
2047 	info = handle_to_ti_sci_info(handle);
2048 
2049 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
2050 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2051 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2052 	if (IS_ERR(xfer)) {
2053 		ret = PTR_ERR(xfer);
2054 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2055 		return ret;
2056 	}
2057 	req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
2058 	req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
2059 				TISCI_ADDR_HIGH_SHIFT;
2060 
2061 	ret = ti_sci_do_xfer(info, xfer);
2062 	if (ret) {
2063 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2064 		return ret;
2065 	}
2066 
2067 	resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
2068 
2069 	if (!ti_sci_is_response_ack(resp))
2070 		return -ENODEV;
2071 
2072 	*image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
2073 			(((u64)resp->image_addr_high <<
2074 			  TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2075 	*image_size = resp->image_size;
2076 
2077 	return ret;
2078 }
2079 
2080 /**
2081  * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
2082  * @handle:	Pointer to TI SCI handle
2083  * @proc_id:	Processor ID this request is for
2084  *
2085  * Return: 0 if all went well, else returns appropriate error value.
2086  */
ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle * handle,u8 proc_id,u64 * bv,u32 * cfg_flags,u32 * ctrl_flags,u32 * sts_flags)2087 static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
2088 					   u8 proc_id, u64 *bv, u32 *cfg_flags,
2089 					   u32 *ctrl_flags, u32 *sts_flags)
2090 {
2091 	struct ti_sci_msg_resp_get_proc_boot_status *resp;
2092 	struct ti_sci_msg_req_get_proc_boot_status req;
2093 	struct ti_sci_info *info;
2094 	struct ti_sci_xfer *xfer;
2095 	int ret = 0;
2096 
2097 	if (IS_ERR(handle))
2098 		return PTR_ERR(handle);
2099 	if (!handle)
2100 		return -EINVAL;
2101 
2102 	info = handle_to_ti_sci_info(handle);
2103 
2104 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
2105 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2106 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2107 	if (IS_ERR(xfer)) {
2108 		ret = PTR_ERR(xfer);
2109 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2110 		return ret;
2111 	}
2112 	req.processor_id = proc_id;
2113 
2114 	ret = ti_sci_do_xfer(info, xfer);
2115 	if (ret) {
2116 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2117 		return ret;
2118 	}
2119 
2120 	resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2121 							xfer->tx_message.buf;
2122 
2123 	if (!ti_sci_is_response_ack(resp))
2124 		return -ENODEV;
2125 	*bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2126 			(((u64)resp->bootvector_high  <<
2127 			  TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2128 	*cfg_flags = resp->config_flags;
2129 	*ctrl_flags = resp->control_flags;
2130 	*sts_flags = resp->status_flags;
2131 
2132 	return ret;
2133 }
2134 
2135 /**
2136  * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2137  *				processor boot status without requesting or
2138  *				waiting for a response.
2139  * @proc_id:			Processor ID this request is for
2140  * @num_wait_iterations:	Total number of iterations we will check before
2141  *				we will timeout and give up
2142  * @num_match_iterations:	How many iterations should we have continued
2143  *				status to account for status bits glitching.
2144  *				This is to make sure that match occurs for
2145  *				consecutive checks. This implies that the
2146  *				worst case should consider that the stable
2147  *				time should at the worst be num_wait_iterations
2148  *				num_match_iterations to prevent timeout.
2149  * @delay_per_iteration_us:	Specifies how long to wait (in micro seconds)
2150  *				between each status checks. This is the minimum
2151  *				duration, and overhead of register reads and
2152  *				checks are on top of this and can vary based on
2153  *				varied conditions.
2154  * @delay_before_iterations_us:	Specifies how long to wait (in micro seconds)
2155  *				before the very first check in the first
2156  *				iteration of status check loop. This is the
2157  *				minimum duration, and overhead of register
2158  *				reads and checks are.
2159  * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the
2160  *				status matching this field requested MUST be 1.
2161  * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the
2162  *				bits matching this field requested MUST be 1.
2163  * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the
2164  *				status matching this field requested MUST be 0.
2165  * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the
2166  *				bits matching this field requested MUST be 0.
2167  *
2168  * Return: 0 if all goes well, else appropriate error message
2169  */
2170 static int
ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle * handle,u8 proc_id,u8 num_wait_iterations,u8 num_match_iterations,u8 delay_per_iteration_us,u8 delay_before_iterations_us,u32 status_flags_1_set_all_wait,u32 status_flags_1_set_any_wait,u32 status_flags_1_clr_all_wait,u32 status_flags_1_clr_any_wait)2171 ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2172 				     u8 proc_id,
2173 				     u8 num_wait_iterations,
2174 				     u8 num_match_iterations,
2175 				     u8 delay_per_iteration_us,
2176 				     u8 delay_before_iterations_us,
2177 				     u32 status_flags_1_set_all_wait,
2178 				     u32 status_flags_1_set_any_wait,
2179 				     u32 status_flags_1_clr_all_wait,
2180 				     u32 status_flags_1_clr_any_wait)
2181 {
2182 	struct ti_sci_msg_req_wait_proc_boot_status req;
2183 	struct ti_sci_info *info;
2184 	struct ti_sci_xfer *xfer;
2185 	int ret = 0;
2186 
2187 	if (IS_ERR(handle))
2188 		return PTR_ERR(handle);
2189 	if (!handle)
2190 		return -EINVAL;
2191 
2192 	info = handle_to_ti_sci_info(handle);
2193 
2194 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2195 				     TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2196 				     (u32 *)&req, sizeof(req), 0);
2197 	if (IS_ERR(xfer)) {
2198 		ret = PTR_ERR(xfer);
2199 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2200 		return ret;
2201 	}
2202 	req.processor_id = proc_id;
2203 	req.num_wait_iterations = num_wait_iterations;
2204 	req.num_match_iterations = num_match_iterations;
2205 	req.delay_per_iteration_us = delay_per_iteration_us;
2206 	req.delay_before_iterations_us = delay_before_iterations_us;
2207 	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2208 	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2209 	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2210 	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2211 
2212 	ret = ti_sci_do_xfer(info, xfer);
2213 	if (ret)
2214 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2215 
2216 	return ret;
2217 }
2218 
2219 /**
2220  * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2221  *		requesting or waiting for a response. Note that this API call
2222  *		should be followed by placing the respective processor into
2223  *		either WFE or WFI mode.
2224  * @handle:	Pointer to TI SCI handle
2225  * @proc_id:	Processor ID this request is for
2226  *
2227  * Return: 0 if all went well, else returns appropriate error value.
2228  */
ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle * handle,u8 proc_id)2229 static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2230 					    u8 proc_id)
2231 {
2232 	int ret;
2233 	struct ti_sci_info *info;
2234 
2235 	if (IS_ERR(handle))
2236 		return PTR_ERR(handle);
2237 	if (!handle)
2238 		return -EINVAL;
2239 
2240 	info = handle_to_ti_sci_info(handle);
2241 
2242 	/*
2243 	 * Send the core boot status wait message waiting for either WFE or
2244 	 * WFI without requesting or waiting for a TISCI response with the
2245 	 * maximum wait time to give us the best chance to get to the WFE/WFI
2246 	 * command that should follow the invocation of this API before the
2247 	 * DMSC-internal processing of this command times out. Note that
2248 	 * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2249 	 * core as the related flag bit positions are the same.
2250 	 */
2251 	ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2252 		U8_MAX, 100, U8_MAX, U8_MAX,
2253 		0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2254 		0, 0);
2255 	if (ret) {
2256 		dev_err(info->dev, "Sending core %u wait message fail %d\n",
2257 			proc_id, ret);
2258 		return ret;
2259 	}
2260 
2261 	/*
2262 	 * Release a processor managed by TISCI without requesting or waiting
2263 	 * for a response.
2264 	 */
2265 	ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2266 					      MSG_DEVICE_SW_STATE_AUTO_OFF);
2267 	if (ret)
2268 		dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2269 			proc_id, ret);
2270 
2271 	return ret;
2272 }
2273 
2274 /**
2275  * ti_sci_cmd_ring_config() - configure RA ring
2276  * @handle:	pointer to TI SCI handle
2277  * @valid_params: Bitfield defining validity of ring configuration parameters.
2278  * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2279  * @index: Ring index.
2280  * @addr_lo: The ring base address lo 32 bits
2281  * @addr_hi: The ring base address hi 32 bits
2282  * @count: Number of ring elements.
2283  * @mode: The mode of the ring
2284  * @size: The ring element size.
2285  * @order_id: Specifies the ring's bus order ID.
2286  *
2287  * Return: 0 if all went well, else returns appropriate error value.
2288  *
2289  * See @ti_sci_msg_rm_ring_cfg_req for more info.
2290  */
ti_sci_cmd_ring_config(const struct ti_sci_handle * handle,u32 valid_params,u16 nav_id,u16 index,u32 addr_lo,u32 addr_hi,u32 count,u8 mode,u8 size,u8 order_id)2291 static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2292 				  u32 valid_params, u16 nav_id, u16 index,
2293 				  u32 addr_lo, u32 addr_hi, u32 count,
2294 				  u8 mode, u8 size, u8 order_id)
2295 {
2296 	struct ti_sci_msg_rm_ring_cfg_resp *resp;
2297 	struct ti_sci_msg_rm_ring_cfg_req req;
2298 	struct ti_sci_xfer *xfer;
2299 	struct ti_sci_info *info;
2300 	int ret = 0;
2301 
2302 	if (IS_ERR(handle))
2303 		return PTR_ERR(handle);
2304 	if (!handle)
2305 		return -EINVAL;
2306 
2307 	info = handle_to_ti_sci_info(handle);
2308 
2309 	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2310 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2311 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2312 	if (IS_ERR(xfer)) {
2313 		ret = PTR_ERR(xfer);
2314 		dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2315 		return ret;
2316 	}
2317 	req.valid_params = valid_params;
2318 	req.nav_id = nav_id;
2319 	req.index = index;
2320 	req.addr_lo = addr_lo;
2321 	req.addr_hi = addr_hi;
2322 	req.count = count;
2323 	req.mode = mode;
2324 	req.size = size;
2325 	req.order_id = order_id;
2326 
2327 	ret = ti_sci_do_xfer(info, xfer);
2328 	if (ret) {
2329 		dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2330 		goto fail;
2331 	}
2332 
2333 	resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2334 
2335 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2336 
2337 fail:
2338 	dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2339 	return ret;
2340 }
2341 
/**
 * ti_sci_cmd_rm_psil_pair() - Pair a PSI-L source thread to a destination
 *			       thread
 * @handle:	Pointer to TI SCI handle
 * @nav_id:	Device ID of the Navigator Subsystem containing the threads
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_msg_psil_pair req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.nav_id = nav_id;
	req.src_thread = src_thread;
	req.dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	/* Response arrives in the same buffer used for the transmit message */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	/* Use %d for ret: it is a signed int and may hold negative errnos */
	dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
		nav_id, src_thread, dst_thread, ret);
	return ret;
}
2384 
/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair a PSI-L source thread from a
 *				 destination thread
 * @handle:	Pointer to TI SCI handle
 * @nav_id:	Device ID of the Navigator Subsystem containing the threads
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_msg_psil_unpair req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.nav_id = nav_id;
	req.src_thread = src_thread;
	req.dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	/* Response arrives in the same buffer used for the transmit message */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	/* Use %d for ret: it is a signed int and may hold negative errnos */
	dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
		src_thread, dst_thread, ret);
	return ret;
}
2427 
ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle * handle,const struct ti_sci_msg_rm_udmap_tx_ch_cfg * params)2428 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2429 			const struct ti_sci_handle *handle,
2430 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2431 {
2432 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2433 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2434 	struct ti_sci_xfer *xfer;
2435 	struct ti_sci_info *info;
2436 	int ret = 0;
2437 
2438 	if (IS_ERR(handle))
2439 		return PTR_ERR(handle);
2440 	if (!handle)
2441 		return -EINVAL;
2442 
2443 	info = handle_to_ti_sci_info(handle);
2444 
2445 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2446 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2447 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2448 	if (IS_ERR(xfer)) {
2449 		ret = PTR_ERR(xfer);
2450 		dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2451 		return ret;
2452 	}
2453 	req.valid_params = params->valid_params;
2454 	req.nav_id = params->nav_id;
2455 	req.index = params->index;
2456 	req.tx_pause_on_err = params->tx_pause_on_err;
2457 	req.tx_filt_einfo = params->tx_filt_einfo;
2458 	req.tx_filt_pswords = params->tx_filt_pswords;
2459 	req.tx_atype = params->tx_atype;
2460 	req.tx_chan_type = params->tx_chan_type;
2461 	req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2462 	req.tx_fetch_size = params->tx_fetch_size;
2463 	req.tx_credit_count = params->tx_credit_count;
2464 	req.txcq_qnum = params->txcq_qnum;
2465 	req.tx_priority = params->tx_priority;
2466 	req.tx_qos = params->tx_qos;
2467 	req.tx_orderid = params->tx_orderid;
2468 	req.fdepth = params->fdepth;
2469 	req.tx_sched_priority = params->tx_sched_priority;
2470 	req.tx_burst_size = params->tx_burst_size;
2471 	req.tx_tdtype = params->tx_tdtype;
2472 	req.extended_ch_type = params->extended_ch_type;
2473 
2474 	ret = ti_sci_do_xfer(info, xfer);
2475 	if (ret) {
2476 		dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2477 		goto fail;
2478 	}
2479 
2480 	resp =
2481 	      (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2482 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2483 
2484 fail:
2485 	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2486 	return ret;
2487 }
2488 
ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle * handle,const struct ti_sci_msg_rm_udmap_rx_ch_cfg * params)2489 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2490 			const struct ti_sci_handle *handle,
2491 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2492 {
2493 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2494 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2495 	struct ti_sci_xfer *xfer;
2496 	struct ti_sci_info *info;
2497 	int ret = 0;
2498 
2499 	if (IS_ERR(handle))
2500 		return PTR_ERR(handle);
2501 	if (!handle)
2502 		return -EINVAL;
2503 
2504 	info = handle_to_ti_sci_info(handle);
2505 
2506 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2507 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2508 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2509 	if (IS_ERR(xfer)) {
2510 		ret = PTR_ERR(xfer);
2511 		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2512 		return ret;
2513 	}
2514 
2515 	req.valid_params = params->valid_params;
2516 	req.nav_id = params->nav_id;
2517 	req.index = params->index;
2518 	req.rx_fetch_size = params->rx_fetch_size;
2519 	req.rxcq_qnum = params->rxcq_qnum;
2520 	req.rx_priority = params->rx_priority;
2521 	req.rx_qos = params->rx_qos;
2522 	req.rx_orderid = params->rx_orderid;
2523 	req.rx_sched_priority = params->rx_sched_priority;
2524 	req.flowid_start = params->flowid_start;
2525 	req.flowid_cnt = params->flowid_cnt;
2526 	req.rx_pause_on_err = params->rx_pause_on_err;
2527 	req.rx_atype = params->rx_atype;
2528 	req.rx_chan_type = params->rx_chan_type;
2529 	req.rx_ignore_short = params->rx_ignore_short;
2530 	req.rx_ignore_long = params->rx_ignore_long;
2531 
2532 	ret = ti_sci_do_xfer(info, xfer);
2533 	if (ret) {
2534 		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2535 		goto fail;
2536 	}
2537 
2538 	resp =
2539 	      (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2540 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2541 
2542 fail:
2543 	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2544 	return ret;
2545 }
2546 
ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle * handle,const struct ti_sci_msg_rm_udmap_flow_cfg * params)2547 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2548 			const struct ti_sci_handle *handle,
2549 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2550 {
2551 	struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2552 	struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2553 	struct ti_sci_xfer *xfer;
2554 	struct ti_sci_info *info;
2555 	int ret = 0;
2556 
2557 	if (IS_ERR(handle))
2558 		return PTR_ERR(handle);
2559 	if (!handle)
2560 		return -EINVAL;
2561 
2562 	info = handle_to_ti_sci_info(handle);
2563 
2564 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2565 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2566 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2567 	if (IS_ERR(xfer)) {
2568 		ret = PTR_ERR(xfer);
2569 		dev_err(info->dev, "RX_FL_CFG: Message alloc failed(%d)\n",
2570 			ret);
2571 		return ret;
2572 	}
2573 
2574 	req.valid_params = params->valid_params;
2575 	req.nav_id = params->nav_id;
2576 	req.flow_index = params->flow_index;
2577 	req.rx_einfo_present = params->rx_einfo_present;
2578 	req.rx_psinfo_present = params->rx_psinfo_present;
2579 	req.rx_error_handling = params->rx_error_handling;
2580 	req.rx_desc_type = params->rx_desc_type;
2581 	req.rx_sop_offset = params->rx_sop_offset;
2582 	req.rx_dest_qnum = params->rx_dest_qnum;
2583 	req.rx_src_tag_hi = params->rx_src_tag_hi;
2584 	req.rx_src_tag_lo = params->rx_src_tag_lo;
2585 	req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2586 	req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2587 	req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2588 	req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2589 	req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2590 	req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2591 	req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2592 	req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2593 	req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2594 	req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2595 	req.rx_ps_location = params->rx_ps_location;
2596 
2597 	ret = ti_sci_do_xfer(info, xfer);
2598 	if (ret) {
2599 		dev_err(info->dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2600 		goto fail;
2601 	}
2602 
2603 	resp =
2604 	       (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2605 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2606 
2607 fail:
2608 	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2609 	return ret;
2610 }
2611 
2612 /**
2613  * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2614  * @handle:    pointer to TI SCI handle
2615  * @region:    region configuration parameters
2616  *
2617  * Return: 0 if all went well, else returns appropriate error value.
2618  */
ti_sci_cmd_set_fwl_region(const struct ti_sci_handle * handle,const struct ti_sci_msg_fwl_region * region)2619 static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2620 				     const struct ti_sci_msg_fwl_region *region)
2621 {
2622 	struct ti_sci_msg_fwl_set_firewall_region_req req;
2623 	struct ti_sci_msg_hdr *resp;
2624 	struct ti_sci_info *info;
2625 	struct ti_sci_xfer *xfer;
2626 	int ret = 0;
2627 
2628 	if (IS_ERR(handle))
2629 		return PTR_ERR(handle);
2630 	if (!handle)
2631 		return -EINVAL;
2632 
2633 	info = handle_to_ti_sci_info(handle);
2634 
2635 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2636 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2637 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2638 	if (IS_ERR(xfer)) {
2639 		ret = PTR_ERR(xfer);
2640 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2641 		return ret;
2642 	}
2643 
2644 	req.fwl_id = region->fwl_id;
2645 	req.region = region->region;
2646 	req.n_permission_regs = region->n_permission_regs;
2647 	req.control = region->control;
2648 	req.permissions[0] = region->permissions[0];
2649 	req.permissions[1] = region->permissions[1];
2650 	req.permissions[2] = region->permissions[2];
2651 	req.start_address = region->start_address;
2652 	req.end_address = region->end_address;
2653 
2654 	ret = ti_sci_do_xfer(info, xfer);
2655 	if (ret) {
2656 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2657 		return ret;
2658 	}
2659 
2660 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2661 
2662 	if (!ti_sci_is_response_ack(resp))
2663 		return -ENODEV;
2664 
2665 	return 0;
2666 }
2667 
2668 /**
2669  * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2670  * @handle:    pointer to TI SCI handle
2671  * @region:    region configuration parameters
2672  *
2673  * Return: 0 if all went well, else returns appropriate error value.
2674  */
ti_sci_cmd_get_fwl_region(const struct ti_sci_handle * handle,struct ti_sci_msg_fwl_region * region)2675 static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2676 				     struct ti_sci_msg_fwl_region *region)
2677 {
2678 	struct ti_sci_msg_fwl_get_firewall_region_req req;
2679 	struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2680 	struct ti_sci_info *info;
2681 	struct ti_sci_xfer *xfer;
2682 	int ret = 0;
2683 
2684 	if (IS_ERR(handle))
2685 		return PTR_ERR(handle);
2686 	if (!handle)
2687 		return -EINVAL;
2688 
2689 	info = handle_to_ti_sci_info(handle);
2690 
2691 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2692 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2693 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2694 	if (IS_ERR(xfer)) {
2695 		ret = PTR_ERR(xfer);
2696 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2697 		return ret;
2698 	}
2699 
2700 	req.fwl_id = region->fwl_id;
2701 	req.region = region->region;
2702 	req.n_permission_regs = region->n_permission_regs;
2703 
2704 	ret = ti_sci_do_xfer(info, xfer);
2705 	if (ret) {
2706 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2707 		return ret;
2708 	}
2709 
2710 	resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2711 
2712 	if (!ti_sci_is_response_ack(resp))
2713 		return -ENODEV;
2714 
2715 	region->fwl_id = resp->fwl_id;
2716 	region->region = resp->region;
2717 	region->n_permission_regs = resp->n_permission_regs;
2718 	region->control = resp->control;
2719 	region->permissions[0] = resp->permissions[0];
2720 	region->permissions[1] = resp->permissions[1];
2721 	region->permissions[2] = resp->permissions[2];
2722 	region->start_address = resp->start_address;
2723 	region->end_address = resp->end_address;
2724 
2725 	return 0;
2726 }
2727 
2728 /**
2729  * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2730  * @handle:    pointer to TI SCI handle
2731  * @region:    region configuration parameters
2732  *
2733  * Return: 0 if all went well, else returns appropriate error value.
2734  */
ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle * handle,struct ti_sci_msg_fwl_owner * owner)2735 static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2736 				       struct ti_sci_msg_fwl_owner *owner)
2737 {
2738 	struct ti_sci_msg_fwl_change_owner_info_req req;
2739 	struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2740 	struct ti_sci_info *info;
2741 	struct ti_sci_xfer *xfer;
2742 	int ret = 0;
2743 
2744 	if (IS_ERR(handle))
2745 		return PTR_ERR(handle);
2746 	if (!handle)
2747 		return -EINVAL;
2748 
2749 	info = handle_to_ti_sci_info(handle);
2750 
2751 	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2752 				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2753 				     (u32 *)&req, sizeof(req), sizeof(*resp));
2754 	if (IS_ERR(xfer)) {
2755 		ret = PTR_ERR(xfer);
2756 		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2757 		return ret;
2758 	}
2759 
2760 	req.fwl_id = owner->fwl_id;
2761 	req.region = owner->region;
2762 	req.owner_index = owner->owner_index;
2763 
2764 	ret = ti_sci_do_xfer(info, xfer);
2765 	if (ret) {
2766 		dev_err(info->dev, "Mbox send fail %d\n", ret);
2767 		return ret;
2768 	}
2769 
2770 	resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2771 
2772 	if (!ti_sci_is_response_ack(resp))
2773 		return -ENODEV;
2774 
2775 	owner->fwl_id = resp->fwl_id;
2776 	owner->region = resp->region;
2777 	owner->owner_index = resp->owner_index;
2778 	owner->owner_privid = resp->owner_privid;
2779 	owner->owner_permission_bits = resp->owner_permission_bits;
2780 
2781 	return ret;
2782 }
2783 
2784 /*
2785  * ti_sci_setup_ops() - Setup the operations structures
2786  * @info:	pointer to TISCI pointer
2787  */
ti_sci_setup_ops(struct ti_sci_info * info)2788 static void ti_sci_setup_ops(struct ti_sci_info *info)
2789 {
2790 	struct ti_sci_ops *ops = &info->handle.ops;
2791 	struct ti_sci_board_ops *bops = &ops->board_ops;
2792 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
2793 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
2794 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
2795 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2796 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
2797 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2798 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2799 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2800 	struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
2801 
2802 	bops->board_config = ti_sci_cmd_set_board_config;
2803 	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2804 	bops->board_config_security = ti_sci_cmd_set_board_config_security;
2805 	bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
2806 
2807 	dops->get_device = ti_sci_cmd_get_device;
2808 	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2809 	dops->idle_device = ti_sci_cmd_idle_device;
2810 	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2811 	dops->put_device = ti_sci_cmd_put_device;
2812 	dops->is_valid = ti_sci_cmd_dev_is_valid;
2813 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2814 	dops->is_idle = ti_sci_cmd_dev_is_idle;
2815 	dops->is_stop = ti_sci_cmd_dev_is_stop;
2816 	dops->is_on = ti_sci_cmd_dev_is_on;
2817 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2818 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
2819 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
2820 	dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
2821 
2822 	cops->get_clock = ti_sci_cmd_get_clock;
2823 	cops->idle_clock = ti_sci_cmd_idle_clock;
2824 	cops->put_clock = ti_sci_cmd_put_clock;
2825 	cops->is_auto = ti_sci_cmd_clk_is_auto;
2826 	cops->is_on = ti_sci_cmd_clk_is_on;
2827 	cops->is_off = ti_sci_cmd_clk_is_off;
2828 
2829 	cops->set_parent = ti_sci_cmd_clk_set_parent;
2830 	cops->get_parent = ti_sci_cmd_clk_get_parent;
2831 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2832 
2833 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2834 	cops->set_freq = ti_sci_cmd_clk_set_freq;
2835 	cops->get_freq = ti_sci_cmd_clk_get_freq;
2836 
2837 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
2838 	core_ops->query_msmc = ti_sci_cmd_query_msmc;
2839 
2840 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2841 	rm_core_ops->get_range_from_shost =
2842 		ti_sci_cmd_get_resource_range_from_shost;
2843 
2844 	pops->proc_request = ti_sci_cmd_proc_request;
2845 	pops->proc_release = ti_sci_cmd_proc_release;
2846 	pops->proc_handover = ti_sci_cmd_proc_handover;
2847 	pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2848 	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2849 	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2850 	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
2851 	pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
2852 
2853 	rops->config = ti_sci_cmd_ring_config;
2854 
2855 	psilops->pair = ti_sci_cmd_rm_psil_pair;
2856 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2857 
2858 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2859 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2860 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2861 
2862 	fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2863 	fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2864 	fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
2865 }
2866 
2867 /**
2868  * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2869  * @dev:	Pointer to the SYSFW device
2870  *
2871  * Return: pointer to handle if successful, else EINVAL if invalid conditions
2872  *         are encountered.
2873  */
2874 const
ti_sci_get_handle_from_sysfw(struct udevice * sci_dev)2875 struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2876 {
2877 	if (!sci_dev)
2878 		return ERR_PTR(-EINVAL);
2879 
2880 	struct ti_sci_info *info = dev_get_priv(sci_dev);
2881 
2882 	if (!info)
2883 		return ERR_PTR(-EINVAL);
2884 
2885 	struct ti_sci_handle *handle = &info->handle;
2886 
2887 	if (!handle)
2888 		return ERR_PTR(-EINVAL);
2889 
2890 	return handle;
2891 }
2892 
2893 /**
2894  * ti_sci_get_handle() - Get the TI SCI handle for a device
2895  * @dev:	Pointer to device for which we want SCI handle
2896  *
2897  * Return: pointer to handle if successful, else EINVAL if invalid conditions
2898  *         are encountered.
2899  */
ti_sci_get_handle(struct udevice * dev)2900 const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2901 {
2902 	if (!dev)
2903 		return ERR_PTR(-EINVAL);
2904 
2905 	struct udevice *sci_dev = dev_get_parent(dev);
2906 
2907 	return ti_sci_get_handle_from_sysfw(sci_dev);
2908 }
2909 
2910 /**
2911  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2912  * @dev:	device node
2913  * @propname:	property name containing phandle on TISCI node
2914  *
2915  * Return: pointer to handle if successful, else appropriate error value.
2916  */
ti_sci_get_by_phandle(struct udevice * dev,const char * property)2917 const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2918 						  const char *property)
2919 {
2920 	struct ti_sci_info *entry, *info = NULL;
2921 	u32 phandle, err;
2922 	ofnode node;
2923 
2924 	err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2925 	if (err)
2926 		return ERR_PTR(err);
2927 
2928 	node = ofnode_get_by_phandle(phandle);
2929 	if (!ofnode_valid(node))
2930 		return ERR_PTR(-EINVAL);
2931 
2932 	list_for_each_entry(entry, &ti_sci_list, list)
2933 		if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2934 			info = entry;
2935 			break;
2936 		}
2937 
2938 	if (!info)
2939 		return ERR_PTR(-ENODEV);
2940 
2941 	return &info->handle;
2942 }
2943 
2944 /**
2945  * ti_sci_of_to_info() - generate private data from device tree
2946  * @dev:	corresponding system controller interface device
2947  * @info:	pointer to driver specific private data
2948  *
2949  * Return: 0 if all goes good, else appropriate error message.
2950  */
ti_sci_of_to_info(struct udevice * dev,struct ti_sci_info * info)2951 static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2952 {
2953 	int ret;
2954 
2955 	ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2956 	if (ret) {
2957 		dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2958 			__func__, ret);
2959 		return ret;
2960 	}
2961 
2962 	ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2963 	if (ret) {
2964 		dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2965 			__func__, ret);
2966 		return ret;
2967 	}
2968 
2969 	/* Notify channel is optional. Enable only if populated */
2970 	ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2971 	if (ret) {
2972 		dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2973 			__func__, ret);
2974 	}
2975 
2976 	info->host_id = dev_read_u32_default(dev, "ti,host-id",
2977 					     info->desc->default_host_id);
2978 
2979 	info->is_secure = dev_read_bool(dev, "ti,secure-host");
2980 
2981 	return 0;
2982 }
2983 
2984 /**
2985  * ti_sci_probe() - Basic probe
2986  * @dev:	corresponding system controller interface device
2987  *
2988  * Return: 0 if all goes good, else appropriate error message.
2989  */
ti_sci_probe(struct udevice * dev)2990 static int ti_sci_probe(struct udevice *dev)
2991 {
2992 	struct ti_sci_info *info;
2993 	int ret;
2994 
2995 	debug("%s(dev=%p)\n", __func__, dev);
2996 
2997 	info = dev_get_priv(dev);
2998 	info->desc = (void *)dev_get_driver_data(dev);
2999 
3000 	ret = ti_sci_of_to_info(dev, info);
3001 	if (ret) {
3002 		dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
3003 		return ret;
3004 	}
3005 
3006 	info->dev = dev;
3007 	info->seq = 0xA;
3008 
3009 	list_add_tail(&info->list, &ti_sci_list);
3010 	ti_sci_setup_ops(info);
3011 
3012 	ret = ti_sci_cmd_get_revision(&info->handle);
3013 
3014 	INIT_LIST_HEAD(&info->dev_list);
3015 
3016 	return ret;
3017 }
3018 
3019 /*
3020  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3021  * @res:	Pointer to the TISCI resource
3022  *
3023  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3024  */
ti_sci_get_free_resource(struct ti_sci_resource * res)3025 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3026 {
3027 	u16 set, free_bit;
3028 
3029 	for (set = 0; set < res->sets; set++) {
3030 		free_bit = find_first_zero_bit(res->desc[set].res_map,
3031 					       res->desc[set].num);
3032 		if (free_bit != res->desc[set].num) {
3033 			set_bit(free_bit, res->desc[set].res_map);
3034 			return res->desc[set].start + free_bit;
3035 		}
3036 	}
3037 
3038 	return TI_SCI_RESOURCE_NULL;
3039 }
3040 
3041 /**
3042  * ti_sci_release_resource() - Release a resource from TISCI resource.
3043  * @res:	Pointer to the TISCI resource
3044  */
ti_sci_release_resource(struct ti_sci_resource * res,u16 id)3045 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3046 {
3047 	u16 set;
3048 
3049 	for (set = 0; set < res->sets; set++) {
3050 		if (res->desc[set].start <= id &&
3051 		    (res->desc[set].num + res->desc[set].start) > id)
3052 			clear_bit(id - res->desc[set].start,
3053 				  res->desc[set].res_map);
3054 	}
3055 }
3056 
3057 /**
3058  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3059  * @handle:	TISCI handle
3060  * @dev:	Device pointer to which the resource is assigned
3061  * @of_prop:	property name by which the resource are represented
3062  *
3063  * Note: This function expects of_prop to be in the form of tuples
3064  *	<type, subtype>. Allocates and initializes ti_sci_resource structure
3065  *	for each of_prop. Client driver can directly call
3066  *	ti_sci_(get_free, release)_resource apis for handling the resource.
3067  *
3068  * Return: Pointer to ti_sci_resource if all went well else appropriate
3069  *	   error pointer.
3070  */
3071 struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle * handle,struct udevice * dev,u32 dev_id,char * of_prop)3072 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3073 			    struct udevice *dev, u32 dev_id, char *of_prop)
3074 {
3075 	u32 resource_subtype;
3076 	struct ti_sci_resource *res;
3077 	bool valid_set = false;
3078 	int sets, i, ret;
3079 	u32 *temp;
3080 
3081 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3082 	if (!res)
3083 		return ERR_PTR(-ENOMEM);
3084 
3085 	sets = dev_read_size(dev, of_prop);
3086 	if (sets < 0) {
3087 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3088 		return ERR_PTR(sets);
3089 	}
3090 	temp = malloc(sets);
3091 	sets /= sizeof(u32);
3092 	res->sets = sets;
3093 
3094 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3095 				 GFP_KERNEL);
3096 	if (!res->desc)
3097 		return ERR_PTR(-ENOMEM);
3098 
3099 	ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
3100 	if (ret)
3101 		return ERR_PTR(-EINVAL);
3102 
3103 	for (i = 0; i < res->sets; i++) {
3104 		resource_subtype = temp[i];
3105 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3106 							resource_subtype,
3107 							&res->desc[i].start,
3108 							&res->desc[i].num);
3109 		if (ret) {
3110 			dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
3111 				dev_id, resource_subtype,
3112 				handle_to_ti_sci_info(handle)->host_id);
3113 			res->desc[i].start = 0;
3114 			res->desc[i].num = 0;
3115 			continue;
3116 		}
3117 
3118 		valid_set = true;
3119 		dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
3120 			dev_id, resource_subtype, res->desc[i].start,
3121 			res->desc[i].num);
3122 
3123 		res->desc[i].res_map =
3124 			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3125 				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
3126 		if (!res->desc[i].res_map)
3127 			return ERR_PTR(-ENOMEM);
3128 	}
3129 
3130 	if (valid_set)
3131 		return res;
3132 
3133 	return ERR_PTR(-EINVAL);
3134 }
3135 
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	/* Maximum bytes of payload per message on this transport */
	.max_msg_size = 64,
};
3145 
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/*
	 * Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages!
	 * NOTE(review): the "K2G" mention looks copy-pasted from the K2G
	 * desc above — confirm the AM654 firmware's actual queue limit.
	 */
	.max_msgs = 20,
	/* Maximum bytes of payload per message on this transport */
	.max_msg_size = 60,
};
3155 
/*
 * Device-tree match table; .data carries the SoC-specific ti_sci_desc
 * retrieved via dev_get_driver_data() in ti_sci_probe().
 */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
3167 
/* U-Boot driver declaration for the TI SCI firmware interface */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* Per-device private data (struct ti_sci_info) auto-allocated by DM */
	.priv_auto	= sizeof(struct ti_sci_info),
};
3175