// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};
/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

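/**
 * rpmh_tx_done() - Callback from the controller when a request completes
 *
 * @msg: The completed request
 * @r: Return value of the transfer (0 on success)
 *
 * Records the result, wakes any thread blocked on the request's
 * completion, and frees the request if it was heap-allocated.
 */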
void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

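/* Find the cached request for @addr; caller must hold ctrlr->cache_lock. */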
static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

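/*
 * Look up (or allocate) the cache entry for @cmd->addr and record the new
 * sleep or wake vote. The controller is marked dirty only once a vote has
 * changed and both the sleep and wake values have been set.
 */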
static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
		const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of the commands is
 * maintained and they are sent to the controller as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
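
/*
 * Example usage of rpmh_write_async() (a sketch; the device pointer and
 * the resource address are hypothetical):
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1 };
 *
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *
 * The call returns as soon as the request is queued; the heap-allocated
 * request is freed from rpmh_tx_done() when the transfer completes.
 */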

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
	if (ret)
		return ret;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
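
/*
 * Example usage of rpmh_write() (a sketch; the device pointer and the
 * resource address are hypothetical):
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1 };
 *	int ret;
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "rpmh_write failed: %d\n", ret);
 */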

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a batch of RPMH commands. If the request state is ACTIVE_ONLY,
 * the requests are treated as completion requests and sent to the
 * controller immediately; the function then waits until all the commands
 * are complete. If the request is for SLEEP or WAKE_ONLY, the batch is
 * cached as fire-n-forget to be sent to the controller later by
 * rpmh_flush(), and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		ret = __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		if (ret) {
			kfree(ptr);
			return ret;
		}
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
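
/*
 * Example usage of rpmh_write_batch() (a sketch; the device pointer and
 * the resource addresses are hypothetical). @n is a zero-terminated
 * array of per-batch command counts:
 *
 *	struct tcs_cmd cmds[] = {
 *		{ .addr = 0x30000, .data = 0x1 },
 *		{ .addr = 0x30010, .data = 0x2 },
 *		{ .addr = 0x30020, .data = 0x3 },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 *
 * Here the first batch has two commands and the second has one; the
 * trailing zero terminates the array.
 */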
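/*
 * A cached request is only worth flushing if both its sleep and wake
 * votes have been set and the two values differ.
 */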
static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

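/* Write a single sleep/wake vote for @addr into the controller's TCS. */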
static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor.  If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto exit;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x\n",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);
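
/*
 * Example usage of rpmh_invalidate() (a sketch; "dev", "sleep_cmds" and
 * "sleep_n" are hypothetical): drop stale cached batches before
 * programming a fresh sleep set.
 *
 *	rpmh_invalidate(dev);
 *	ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, sleep_n);
 */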