1 /*
2  * PowerNV OPAL asynchronous completion interfaces
3  *
4  * Copyright 2013-2017 IBM Corp.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #undef DEBUG
13 
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/sched.h>
18 #include <linux/semaphore.h>
19 #include <linux/spinlock.h>
20 #include <linux/wait.h>
21 #include <linux/gfp.h>
22 #include <linux/of.h>
23 #include <asm/machdep.h>
24 #include <asm/opal.h>
25 
/*
 * Lifecycle of an async token:
 *
 *   UNALLOCATED -> ALLOCATED    in __opal_async_get_token()
 *   ALLOCATED   -> DISPATCHED   in opal_async_wait_response_interruptible()
 *   ALLOCATED/
 *   COMPLETED   -> UNALLOCATED  in __opal_async_release_token()
 *   DISPATCHED  -> ABANDONED    in __opal_async_release_token() (caller gave
 *                               up before OPAL responded)
 *   any         -> COMPLETED    in opal_async_comp_event() when the OPAL
 *                               response arrives
 */
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};
33 
/*
 * One slot per OPAL async token. @response holds a copy of the OPAL
 * completion message once @state reaches ASYNC_TOKEN_COMPLETED; it is
 * only valid in that state.
 */
struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};
38 
/* Waiters sleep here until their token reaches ASYNC_TOKEN_COMPLETED */
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
/* Protects the state field of every entry in opal_async_tokens[] */
static DEFINE_SPINLOCK(opal_async_comp_lock);
/* Counts free tokens; initialised to opal_max_async_tokens at init */
static struct semaphore opal_async_sem;
/* Number of tokens, read from the "opal-msg-async-num" DT property */
static unsigned int opal_max_async_tokens;
/* Token array, kcalloc'd in opal_async_comp_init() */
static struct opal_async_token *opal_async_tokens;
44 
__opal_async_get_token(void)45 static int __opal_async_get_token(void)
46 {
47 	unsigned long flags;
48 	int i, token = -EBUSY;
49 
50 	spin_lock_irqsave(&opal_async_comp_lock, flags);
51 
52 	for (i = 0; i < opal_max_async_tokens; i++) {
53 		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
54 			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
55 			token = i;
56 			break;
57 		}
58 	}
59 
60 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
61 	return token;
62 }
63 
64 /*
65  * Note: If the returned token is used in an opal call and opal returns
66  * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
67  * opal_async_wait_response_interruptible() at least once before calling another
68  * opal_async_* function
69  */
opal_async_get_token_interruptible(void)70 int opal_async_get_token_interruptible(void)
71 {
72 	int token;
73 
74 	/* Wait until a token is available */
75 	if (down_interruptible(&opal_async_sem))
76 		return -ERESTARTSYS;
77 
78 	token = __opal_async_get_token();
79 	if (token < 0)
80 		up(&opal_async_sem);
81 
82 	return token;
83 }
84 EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
85 
__opal_async_release_token(int token)86 static int __opal_async_release_token(int token)
87 {
88 	unsigned long flags;
89 	int rc;
90 
91 	if (token < 0 || token >= opal_max_async_tokens) {
92 		pr_err("%s: Passed token is out of range, token %d\n",
93 				__func__, token);
94 		return -EINVAL;
95 	}
96 
97 	spin_lock_irqsave(&opal_async_comp_lock, flags);
98 	switch (opal_async_tokens[token].state) {
99 	case ASYNC_TOKEN_COMPLETED:
100 	case ASYNC_TOKEN_ALLOCATED:
101 		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
102 		rc = 0;
103 		break;
104 	/*
105 	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
106 	 * Mark a DISPATCHED token as ABANDONED so that the response handling
107 	 * code knows no one cares and that it can free it then.
108 	 */
109 	case ASYNC_TOKEN_DISPATCHED:
110 		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
111 		/* Fall through */
112 	default:
113 		rc = 1;
114 	}
115 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
116 
117 	return rc;
118 }
119 
opal_async_release_token(int token)120 int opal_async_release_token(int token)
121 {
122 	int ret;
123 
124 	ret = __opal_async_release_token(token);
125 	if (!ret)
126 		up(&opal_async_sem);
127 
128 	return ret;
129 }
130 EXPORT_SYMBOL_GPL(opal_async_release_token);
131 
/*
 * Block (uninterruptibly) until OPAL completes @token, then copy the
 * completion message into @msg.  Returns 0 on success or -EINVAL for a
 * bad token or NULL @msg.
 */
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * No need to mark the token DISPATCHED here - wait_event() cannot
	 * return early, so the caller can never abandon the token while
	 * OPAL still owes a response.
	 *
	 * Kick the poller before sleeping so completions still arrive on
	 * platforms or simulators where the interrupts aren't functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait,
		   opal_async_tokens[token].state == ASYNC_TOKEN_COMPLETED);
	*msg = opal_async_tokens[token].response;

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);
160 
/*
 * Interruptibly wait for OPAL to complete @token, copying the response
 * into @msg on success.  Returns 0 on success, -EINVAL for a bad token
 * or NULL @msg, or -ERESTARTSYS if interrupted by a signal (in which
 * case @msg is left untouched).
 */
int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns not zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the not
	 * interruptible version before doing anything else with the
	 * token.
	 */
	/*
	 * Double-checked transition: the unlocked read avoids taking the
	 * lock on every wait; the locked re-check makes the
	 * ALLOCATED -> DISPATCHED transition race-free against the
	 * completion handler.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
214 
215 /* Called from interrupt context */
opal_async_comp_event(struct notifier_block * nb,unsigned long msg_type,void * msg)216 static int opal_async_comp_event(struct notifier_block *nb,
217 		unsigned long msg_type, void *msg)
218 {
219 	struct opal_msg *comp_msg = msg;
220 	enum opal_async_token_state state;
221 	unsigned long flags;
222 	uint64_t token;
223 
224 	if (msg_type != OPAL_MSG_ASYNC_COMP)
225 		return 0;
226 
227 	token = be64_to_cpu(comp_msg->params[0]);
228 	spin_lock_irqsave(&opal_async_comp_lock, flags);
229 	state = opal_async_tokens[token].state;
230 	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
231 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
232 
233 	if (state == ASYNC_TOKEN_ABANDONED) {
234 		/* Free the token, no one else will */
235 		opal_async_release_token(token);
236 		return 0;
237 	}
238 	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
239 	wake_up(&opal_async_wait);
240 
241 	return 0;
242 }
243 
244 static struct notifier_block opal_async_comp_nb = {
245 		.notifier_call	= opal_async_comp_event,
246 		.next		= NULL,
247 		.priority	= 0,
248 };
249 
opal_async_comp_init(void)250 int __init opal_async_comp_init(void)
251 {
252 	struct device_node *opal_node;
253 	const __be32 *async;
254 	int err;
255 
256 	opal_node = of_find_node_by_path("/ibm,opal");
257 	if (!opal_node) {
258 		pr_err("%s: Opal node not found\n", __func__);
259 		err = -ENOENT;
260 		goto out;
261 	}
262 
263 	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
264 	if (!async) {
265 		pr_err("%s: %pOF has no opal-msg-async-num\n",
266 				__func__, opal_node);
267 		err = -ENOENT;
268 		goto out_opal_node;
269 	}
270 
271 	opal_max_async_tokens = be32_to_cpup(async);
272 	opal_async_tokens = kcalloc(opal_max_async_tokens,
273 			sizeof(*opal_async_tokens), GFP_KERNEL);
274 	if (!opal_async_tokens) {
275 		err = -ENOMEM;
276 		goto out_opal_node;
277 	}
278 
279 	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
280 			&opal_async_comp_nb);
281 	if (err) {
282 		pr_err("%s: Can't register OPAL event notifier (%d)\n",
283 				__func__, err);
284 		kfree(opal_async_tokens);
285 		goto out_opal_node;
286 	}
287 
288 	sema_init(&opal_async_sem, opal_max_async_tokens);
289 
290 out_opal_node:
291 	of_node_put(opal_node);
292 out:
293 	return err;
294 }
295