1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * linux/net/sunrpc/auth_gss/auth_gss.c
4  *
5  * RPCSEC_GSS client authentication.
6  *
7  *  Copyright (c) 2000 The Regents of the University of Michigan.
8  *  All rights reserved.
9  *
10  *  Dug Song       <dugsong@monkey.org>
11  *  Andy Adamson   <andros@umich.edu>
12  */
13 
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/sunrpc/clnt.h>
21 #include <linux/sunrpc/auth.h>
22 #include <linux/sunrpc/auth_gss.h>
23 #include <linux/sunrpc/gss_krb5.h>
24 #include <linux/sunrpc/svcauth_gss.h>
25 #include <linux/sunrpc/gss_err.h>
26 #include <linux/workqueue.h>
27 #include <linux/sunrpc/rpc_pipe_fs.h>
28 #include <linux/sunrpc/gss_api.h>
29 #include <linux/uaccess.h>
30 #include <linux/hashtable.h>
31 
32 #include "auth_gss_internal.h"
33 #include "../netns.h"
34 
35 #include <trace/events/rpcgss.h>
36 
37 static const struct rpc_authops authgss_ops;
38 
39 static const struct rpc_credops gss_credops;
40 static const struct rpc_credops gss_nullops;
41 
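/*
 * Seconds to keep a cred marked "negative" (failing requests with
 * -EKEYEXPIRED) after an upcall reported an expired key, before a new
 * upcall is attempted; see gss_cred_is_negative_entry() below.
 */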
42 #define GSS_RETRY_EXPIRED 5
43 static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
44 
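/*
 * Default number of seconds before context expiry at which gss_key_timeout()
 * starts returning -EACCES, i.e. treats the key as about to expire.
 */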
45 #define GSS_KEY_EXPIRE_TIMEO 240
46 static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
47 
48 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
49 # define RPCDBG_FACILITY	RPCDBG_AUTH
50 #endif
51 
52 #define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
53 /* length of a krb5 verifier (48), plus data added before arguments when
54  * using integrity (two 4-byte integers): */
55 #define GSS_VERF_SLACK		100
56 
57 static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
58 static DEFINE_SPINLOCK(gss_auth_hash_lock);
59 
60 struct gss_pipe {
61 	struct rpc_pipe_dir_object pdo;
62 	struct rpc_pipe *pipe;
63 	struct rpc_clnt *clnt;
64 	const char *name;
65 	struct kref kref;
66 };
67 
68 struct gss_auth {
69 	struct kref kref;
70 	struct hlist_node hash;
71 	struct rpc_auth rpc_auth;
72 	struct gss_api_mech *mech;
73 	enum rpc_gss_svc service;
74 	struct rpc_clnt *client;
75 	struct net *net;
76 	/*
77 	 * There are two upcall pipes; dentry[1], named "gssd", is used
78 	 * for the new text-based upcall; dentry[0] is named after the
79 	 * mechanism (for example, "krb5") and exists for
80 	 * backwards-compatibility with older gssd's.
81 	 */
82 	struct gss_pipe *gss_pipe[2];
83 	const char *target_name;
84 };
85 
86 /* pipe_version >= 0 if and only if someone has a pipe open. */
87 static DEFINE_SPINLOCK(pipe_version_lock);
88 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
89 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
90 static void gss_put_auth(struct gss_auth *gss_auth);
91 
92 static void gss_free_ctx(struct gss_cl_ctx *);
93 static const struct rpc_pipe_ops gss_upcall_ops_v0;
94 static const struct rpc_pipe_ops gss_upcall_ops_v1;
95 
96 static inline struct gss_cl_ctx *
97 gss_get_ctx(struct gss_cl_ctx *ctx)
98 {
99 	refcount_inc(&ctx->count);
100 	return ctx;
101 }
102 
103 static inline void
104 gss_put_ctx(struct gss_cl_ctx *ctx)
105 {
106 	if (refcount_dec_and_test(&ctx->count))
107 		gss_free_ctx(ctx);
108 }
109 
110 /* gss_cred_set_ctx:
111  * called by gss_upcall_callback and gss_create_upcall in order
112  * to set the gss context. The actual exchange of an old context
113  * and a new one is protected by the pipe->lock.
114  */
115 static void
116 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
117 {
118 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
119 
120 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
121 		return;
122 	gss_get_ctx(ctx);
123 	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
124 	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
125 	smp_mb__before_atomic();
126 	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
127 }
128 
129 static struct gss_cl_ctx *
130 gss_cred_get_ctx(struct rpc_cred *cred)
131 {
132 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
133 	struct gss_cl_ctx *ctx = NULL;
134 
135 	rcu_read_lock();
136 	ctx = rcu_dereference(gss_cred->gc_ctx);
137 	if (ctx)
138 		gss_get_ctx(ctx);
139 	rcu_read_unlock();
140 	return ctx;
141 }
142 
143 static struct gss_cl_ctx *
144 gss_alloc_context(void)
145 {
146 	struct gss_cl_ctx *ctx;
147 
148 	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
149 	if (ctx != NULL) {
150 		ctx->gc_proc = RPC_GSS_PROC_DATA;
151 		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
152 		spin_lock_init(&ctx->gc_seq_lock);
153 		refcount_set(&ctx->count,1);
154 	}
155 	return ctx;
156 }
157 
158 #define GSSD_MIN_TIMEOUT (60 * 60)
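/*
 * Rough layout of the downcall payload parsed below (as inferred from this
 * parser; gssd is the producer of this format):
 *
 *	u32 lifetime (seconds) | u32 seq. window | netobj wire ctx handle |
 *	u32 seclen | seclen bytes of mech-specific ctx | [netobj acceptor name]
 *
 * A window size of zero signals an error, in which case the next word is an
 * errno value rather than context data.
 */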
159 static const void *
160 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
161 {
162 	const void *q;
163 	unsigned int seclen;
164 	unsigned int timeout;
165 	unsigned long now = jiffies;
166 	u32 window_size;
167 	int ret;
168 
169 	/* First unsigned int gives the remaining lifetime in seconds of the
170 	 * credential - e.g. the remaining TGT lifetime for Kerberos or
171 	 * the -t value passed to GSSD.
172 	 */
173 	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
174 	if (IS_ERR(p))
175 		goto err;
176 	if (timeout == 0)
177 		timeout = GSSD_MIN_TIMEOUT;
178 	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
179 	/* Sequence number window. Determines the maximum number of
180 	 * simultaneous requests
181 	 */
182 	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
183 	if (IS_ERR(p))
184 		goto err;
185 	ctx->gc_win = window_size;
186 	/* gssd signals an error by passing ctx->gc_win = 0: */
187 	if (ctx->gc_win == 0) {
188 		/*
189 		 * in which case, p points to an error code. Anything other
190 		 * than -EKEYEXPIRED gets converted to -EACCES.
191 		 */
192 		p = simple_get_bytes(p, end, &ret, sizeof(ret));
193 		if (!IS_ERR(p))
194 			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
195 						    ERR_PTR(-EACCES);
196 		goto err;
197 	}
198 	/* copy the opaque wire context */
199 	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
200 	if (IS_ERR(p))
201 		goto err;
202 	/* import the opaque security context */
203 	p  = simple_get_bytes(p, end, &seclen, sizeof(seclen));
204 	if (IS_ERR(p))
205 		goto err;
206 	q = (const void *)((const char *)p + seclen);
207 	if (unlikely(q > end || q < p)) {
208 		p = ERR_PTR(-EFAULT);
209 		goto err;
210 	}
211 	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
212 	if (ret < 0) {
213 		trace_rpcgss_import_ctx(ret);
214 		p = ERR_PTR(ret);
215 		goto err;
216 	}
217 
218 	/* is there any trailing data? */
219 	if (q == end) {
220 		p = q;
221 		goto done;
222 	}
223 
224 	/* pull in acceptor name (if there is one) */
225 	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
226 	if (IS_ERR(p))
227 		goto err;
228 done:
229 	trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
230 			     ctx->gc_acceptor.len, ctx->gc_acceptor.data);
231 err:
232 	return p;
233 }
234 
235 /* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
236  *	Is user space expecting no more than UPCALL_BUF_LEN bytes?
237  *	Note that there are now _two_ NI_MAXHOST sized data items
238  *	being passed in this string.
239  */
240 #define UPCALL_BUF_LEN	256
241 
242 struct gss_upcall_msg {
243 	refcount_t count;
244 	kuid_t	uid;
245 	const char *service_name;
246 	struct rpc_pipe_msg msg;
247 	struct list_head list;
248 	struct gss_auth *auth;
249 	struct rpc_pipe *pipe;
250 	struct rpc_wait_queue rpc_waitqueue;
251 	wait_queue_head_t waitqueue;
252 	struct gss_cl_ctx *ctx;
253 	char databuf[UPCALL_BUF_LEN];
254 };
255 
256 static int get_pipe_version(struct net *net)
257 {
258 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
259 	int ret;
260 
261 	spin_lock(&pipe_version_lock);
262 	if (sn->pipe_version >= 0) {
263 		atomic_inc(&sn->pipe_users);
264 		ret = sn->pipe_version;
265 	} else
266 		ret = -EAGAIN;
267 	spin_unlock(&pipe_version_lock);
268 	return ret;
269 }
270 
271 static void put_pipe_version(struct net *net)
272 {
273 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
274 
275 	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
276 		sn->pipe_version = -1;
277 		spin_unlock(&pipe_version_lock);
278 	}
279 }
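/*
 * Lifecycle sketch: sn->pipe_version is -1 while no pipe is open; the first
 * open of either upcall pipe sets it to that pipe's version (see
 * gss_pipe_open()), and the final release drops pipe_users to zero and
 * resets it to -1 again.
 */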
280 
281 static void
282 gss_release_msg(struct gss_upcall_msg *gss_msg)
283 {
284 	struct net *net = gss_msg->auth->net;
285 	if (!refcount_dec_and_test(&gss_msg->count))
286 		return;
287 	put_pipe_version(net);
288 	BUG_ON(!list_empty(&gss_msg->list));
289 	if (gss_msg->ctx != NULL)
290 		gss_put_ctx(gss_msg->ctx);
291 	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
292 	gss_put_auth(gss_msg->auth);
293 	kfree_const(gss_msg->service_name);
294 	kfree(gss_msg);
295 }
296 
297 static struct gss_upcall_msg *
298 __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
299 {
300 	struct gss_upcall_msg *pos;
301 	list_for_each_entry(pos, &pipe->in_downcall, list) {
302 		if (!uid_eq(pos->uid, uid))
303 			continue;
304 		if (auth && pos->auth->service != auth->service)
305 			continue;
306 		refcount_inc(&pos->count);
307 		return pos;
308 	}
309 	return NULL;
310 }
311 
312 /* Try to add an upcall to the pipefs queue.
313  * If an upcall owned by our uid already exists, then we return a reference
314  * to that upcall instead of adding the new upcall.
315  */
316 static inline struct gss_upcall_msg *
317 gss_add_msg(struct gss_upcall_msg *gss_msg)
318 {
319 	struct rpc_pipe *pipe = gss_msg->pipe;
320 	struct gss_upcall_msg *old;
321 
322 	spin_lock(&pipe->lock);
323 	old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
324 	if (old == NULL) {
325 		refcount_inc(&gss_msg->count);
326 		list_add(&gss_msg->list, &pipe->in_downcall);
327 	} else
328 		gss_msg = old;
329 	spin_unlock(&pipe->lock);
330 	return gss_msg;
331 }
332 
333 static void
334 __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
335 {
336 	list_del_init(&gss_msg->list);
337 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
338 	wake_up_all(&gss_msg->waitqueue);
339 	refcount_dec(&gss_msg->count);
340 }
341 
342 static void
343 gss_unhash_msg(struct gss_upcall_msg *gss_msg)
344 {
345 	struct rpc_pipe *pipe = gss_msg->pipe;
346 
347 	if (list_empty(&gss_msg->list))
348 		return;
349 	spin_lock(&pipe->lock);
350 	if (!list_empty(&gss_msg->list))
351 		__gss_unhash_msg(gss_msg);
352 	spin_unlock(&pipe->lock);
353 }
354 
355 static void
356 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
357 {
358 	switch (gss_msg->msg.errno) {
359 	case 0:
360 		if (gss_msg->ctx == NULL)
361 			break;
362 		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
363 		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
364 		break;
365 	case -EKEYEXPIRED:
366 		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
367 	}
368 	gss_cred->gc_upcall_timestamp = jiffies;
369 	gss_cred->gc_upcall = NULL;
370 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
371 }
372 
373 static void
374 gss_upcall_callback(struct rpc_task *task)
375 {
376 	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
377 			struct gss_cred, gc_base);
378 	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
379 	struct rpc_pipe *pipe = gss_msg->pipe;
380 
381 	spin_lock(&pipe->lock);
382 	gss_handle_downcall_result(gss_cred, gss_msg);
383 	spin_unlock(&pipe->lock);
384 	task->tk_status = gss_msg->msg.errno;
385 	gss_release_msg(gss_msg);
386 }
387 
388 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
389 			      const struct cred *cred)
390 {
391 	struct user_namespace *userns = cred->user_ns;
392 
393 	uid_t uid = from_kuid_munged(userns, gss_msg->uid);
394 	memcpy(gss_msg->databuf, &uid, sizeof(uid));
395 	gss_msg->msg.data = gss_msg->databuf;
396 	gss_msg->msg.len = sizeof(uid);
397 
398 	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
399 }
400 
401 static ssize_t
402 gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
403 		char __user *buf, size_t buflen)
404 {
405 	struct gss_upcall_msg *gss_msg = container_of(msg,
406 						      struct gss_upcall_msg,
407 						      msg);
408 	if (msg->copied == 0)
409 		gss_encode_v0_msg(gss_msg, file->f_cred);
410 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
411 }
412 
413 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
414 				const char *service_name,
415 				const char *target_name,
416 				const struct cred *cred)
417 {
418 	struct user_namespace *userns = cred->user_ns;
419 	struct gss_api_mech *mech = gss_msg->auth->mech;
420 	char *p = gss_msg->databuf;
421 	size_t buflen = sizeof(gss_msg->databuf);
422 	int len;
423 
424 	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
425 			from_kuid_munged(userns, gss_msg->uid));
426 	buflen -= len;
427 	p += len;
428 	gss_msg->msg.len = len;
429 
430 	/*
431 	 * target= is a full service principal that names the remote
432 	 * identity that we are authenticating to.
433 	 */
434 	if (target_name) {
435 		len = scnprintf(p, buflen, " target=%s", target_name);
436 		buflen -= len;
437 		p += len;
438 		gss_msg->msg.len += len;
439 	}
440 
441 	/*
442 	 * gssd uses service= and srchost= to select a matching key from
443 	 * the system's keytab to use as the source principal.
444 	 *
445 	 * service= is the service name part of the source principal,
446 	 * or "*" (meaning choose any).
447 	 *
448 	 * srchost= is the hostname part of the source principal. When
449 	 * not provided, gssd uses the local hostname.
450 	 */
451 	if (service_name) {
452 		char *c = strchr(service_name, '@');
453 
454 		if (!c)
455 			len = scnprintf(p, buflen, " service=%s",
456 					service_name);
457 		else
458 			len = scnprintf(p, buflen,
459 					" service=%.*s srchost=%s",
460 					(int)(c - service_name),
461 					service_name, c + 1);
462 		buflen -= len;
463 		p += len;
464 		gss_msg->msg.len += len;
465 	}
466 
467 	if (mech->gm_upcall_enctypes) {
468 		len = scnprintf(p, buflen, " enctypes=%s",
469 				mech->gm_upcall_enctypes);
470 		buflen -= len;
471 		p += len;
472 		gss_msg->msg.len += len;
473 	}
474 	trace_rpcgss_upcall_msg(gss_msg->databuf);
475 	len = scnprintf(p, buflen, "\n");
476 	if (len == 0)
477 		goto out_overflow;
478 	gss_msg->msg.len += len;
479 	gss_msg->msg.data = gss_msg->databuf;
480 	return 0;
481 out_overflow:
482 	WARN_ON_ONCE(1);
483 	return -ENOMEM;
484 }
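/*
 * For illustration only: a v1 upcall encoded above might look roughly like
 * the following (the names and values are hypothetical and depend on the
 * mech, the cred and the target):
 *
 *	"mech=krb5 uid=1000 target=nfs@server.example.com enctypes=18,17\n"
 */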
485 
486 static ssize_t
487 gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
488 		char __user *buf, size_t buflen)
489 {
490 	struct gss_upcall_msg *gss_msg = container_of(msg,
491 						      struct gss_upcall_msg,
492 						      msg);
493 	int err;
494 	if (msg->copied == 0) {
495 		err = gss_encode_v1_msg(gss_msg,
496 					gss_msg->service_name,
497 					gss_msg->auth->target_name,
498 					file->f_cred);
499 		if (err)
500 			return err;
501 	}
502 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
503 }
504 
505 static struct gss_upcall_msg *
506 gss_alloc_msg(struct gss_auth *gss_auth,
507 		kuid_t uid, const char *service_name)
508 {
509 	struct gss_upcall_msg *gss_msg;
510 	int vers;
511 	int err = -ENOMEM;
512 
513 	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
514 	if (gss_msg == NULL)
515 		goto err;
516 	vers = get_pipe_version(gss_auth->net);
517 	err = vers;
518 	if (err < 0)
519 		goto err_free_msg;
520 	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
521 	INIT_LIST_HEAD(&gss_msg->list);
522 	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
523 	init_waitqueue_head(&gss_msg->waitqueue);
524 	refcount_set(&gss_msg->count, 1);
525 	gss_msg->uid = uid;
526 	gss_msg->auth = gss_auth;
527 	kref_get(&gss_auth->kref);
528 	if (service_name) {
529 		gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
530 		if (!gss_msg->service_name) {
531 			err = -ENOMEM;
532 			goto err_put_pipe_version;
533 		}
534 	}
535 	return gss_msg;
536 err_put_pipe_version:
537 	put_pipe_version(gss_auth->net);
538 err_free_msg:
539 	kfree(gss_msg);
540 err:
541 	return ERR_PTR(err);
542 }
543 
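/*
 * Allocate and queue an upcall for @cred, or return a reference to an
 * already-queued upcall for the same uid (and GSS service).
 */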
544 static struct gss_upcall_msg *
545 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
546 {
547 	struct gss_cred *gss_cred = container_of(cred,
548 			struct gss_cred, gc_base);
549 	struct gss_upcall_msg *gss_new, *gss_msg;
550 	kuid_t uid = cred->cr_cred->fsuid;
551 
552 	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
553 	if (IS_ERR(gss_new))
554 		return gss_new;
555 	gss_msg = gss_add_msg(gss_new);
556 	if (gss_msg == gss_new) {
557 		int res;
558 		refcount_inc(&gss_msg->count);
559 		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
560 		if (res) {
561 			gss_unhash_msg(gss_new);
562 			refcount_dec(&gss_msg->count);
563 			gss_release_msg(gss_new);
564 			gss_msg = ERR_PTR(res);
565 		}
566 	} else
567 		gss_release_msg(gss_new);
568 	return gss_msg;
569 }
570 
571 static void warn_gssd(void)
572 {
573 	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
574 }
575 
576 static inline int
577 gss_refresh_upcall(struct rpc_task *task)
578 {
579 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
580 	struct gss_auth *gss_auth = container_of(cred->cr_auth,
581 			struct gss_auth, rpc_auth);
582 	struct gss_cred *gss_cred = container_of(cred,
583 			struct gss_cred, gc_base);
584 	struct gss_upcall_msg *gss_msg;
585 	struct rpc_pipe *pipe;
586 	int err = 0;
587 
588 	gss_msg = gss_setup_upcall(gss_auth, cred);
589 	if (PTR_ERR(gss_msg) == -EAGAIN) {
590 		/* XXX: warning on the first, under the assumption we
591 		 * shouldn't normally hit this case on a refresh. */
592 		warn_gssd();
593 		rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
594 				task, NULL, jiffies + (15 * HZ));
595 		err = -EAGAIN;
596 		goto out;
597 	}
598 	if (IS_ERR(gss_msg)) {
599 		err = PTR_ERR(gss_msg);
600 		goto out;
601 	}
602 	pipe = gss_msg->pipe;
603 	spin_lock(&pipe->lock);
604 	if (gss_cred->gc_upcall != NULL)
605 		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
606 	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
607 		gss_cred->gc_upcall = gss_msg;
608 		/* gss_upcall_callback will release the reference to gss_upcall_msg */
609 		refcount_inc(&gss_msg->count);
610 		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
611 	} else {
612 		gss_handle_downcall_result(gss_cred, gss_msg);
613 		err = gss_msg->msg.errno;
614 	}
615 	spin_unlock(&pipe->lock);
616 	gss_release_msg(gss_msg);
617 out:
618 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
619 					     cred->cr_cred->fsuid), err);
620 	return err;
621 }
622 
623 static inline int
624 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
625 {
626 	struct net *net = gss_auth->net;
627 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
628 	struct rpc_pipe *pipe;
629 	struct rpc_cred *cred = &gss_cred->gc_base;
630 	struct gss_upcall_msg *gss_msg;
631 	DEFINE_WAIT(wait);
632 	int err;
633 
634 retry:
635 	err = 0;
636 	/* if gssd is down, just skip upcalling altogether */
637 	if (!gssd_running(net)) {
638 		warn_gssd();
639 		err = -EACCES;
640 		goto out;
641 	}
642 	gss_msg = gss_setup_upcall(gss_auth, cred);
643 	if (PTR_ERR(gss_msg) == -EAGAIN) {
644 		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
645 				sn->pipe_version >= 0, 15 * HZ);
646 		if (sn->pipe_version < 0) {
647 			warn_gssd();
648 			err = -EACCES;
649 		}
650 		if (err < 0)
651 			goto out;
652 		goto retry;
653 	}
654 	if (IS_ERR(gss_msg)) {
655 		err = PTR_ERR(gss_msg);
656 		goto out;
657 	}
658 	pipe = gss_msg->pipe;
659 	for (;;) {
660 		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
661 		spin_lock(&pipe->lock);
662 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
663 			break;
664 		}
665 		spin_unlock(&pipe->lock);
666 		if (fatal_signal_pending(current)) {
667 			err = -ERESTARTSYS;
668 			goto out_intr;
669 		}
670 		schedule();
671 	}
672 	if (gss_msg->ctx) {
673 		trace_rpcgss_ctx_init(gss_cred);
674 		gss_cred_set_ctx(cred, gss_msg->ctx);
675 	} else {
676 		err = gss_msg->msg.errno;
677 	}
678 	spin_unlock(&pipe->lock);
679 out_intr:
680 	finish_wait(&gss_msg->waitqueue, &wait);
681 	gss_release_msg(gss_msg);
682 out:
683 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
684 					     cred->cr_cred->fsuid), err);
685 	return err;
686 }
687 
688 #define MSG_BUF_MAXSIZE 1024
689 
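/* A downcall message starts with the uid (u32) identifying the waiting
 * upcall, followed by the context data consumed by gss_fill_context(). */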
690 static ssize_t
691 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
692 {
693 	const void *p, *end;
694 	void *buf;
695 	struct gss_upcall_msg *gss_msg;
696 	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
697 	struct gss_cl_ctx *ctx;
698 	uid_t id;
699 	kuid_t uid;
700 	ssize_t err = -EFBIG;
701 
702 	if (mlen > MSG_BUF_MAXSIZE)
703 		goto out;
704 	err = -ENOMEM;
705 	buf = kmalloc(mlen, GFP_NOFS);
706 	if (!buf)
707 		goto out;
708 
709 	err = -EFAULT;
710 	if (copy_from_user(buf, src, mlen))
711 		goto err;
712 
713 	end = (const void *)((char *)buf + mlen);
714 	p = simple_get_bytes(buf, end, &id, sizeof(id));
715 	if (IS_ERR(p)) {
716 		err = PTR_ERR(p);
717 		goto err;
718 	}
719 
720 	uid = make_kuid(current_user_ns(), id);
721 	if (!uid_valid(uid)) {
722 		err = -EINVAL;
723 		goto err;
724 	}
725 
726 	err = -ENOMEM;
727 	ctx = gss_alloc_context();
728 	if (ctx == NULL)
729 		goto err;
730 
731 	err = -ENOENT;
732 	/* Find a matching upcall */
733 	spin_lock(&pipe->lock);
734 	gss_msg = __gss_find_upcall(pipe, uid, NULL);
735 	if (gss_msg == NULL) {
736 		spin_unlock(&pipe->lock);
737 		goto err_put_ctx;
738 	}
739 	list_del_init(&gss_msg->list);
740 	spin_unlock(&pipe->lock);
741 
742 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
743 	if (IS_ERR(p)) {
744 		err = PTR_ERR(p);
745 		switch (err) {
746 		case -EACCES:
747 		case -EKEYEXPIRED:
748 			gss_msg->msg.errno = err;
749 			err = mlen;
750 			break;
751 		case -EFAULT:
752 		case -ENOMEM:
753 		case -EINVAL:
754 		case -ENOSYS:
755 			gss_msg->msg.errno = -EAGAIN;
756 			break;
757 		default:
758 			printk(KERN_CRIT "%s: bad return from "
759 				"gss_fill_context: %zd\n", __func__, err);
760 			gss_msg->msg.errno = -EIO;
761 		}
762 		goto err_release_msg;
763 	}
764 	gss_msg->ctx = gss_get_ctx(ctx);
765 	err = mlen;
766 
767 err_release_msg:
768 	spin_lock(&pipe->lock);
769 	__gss_unhash_msg(gss_msg);
770 	spin_unlock(&pipe->lock);
771 	gss_release_msg(gss_msg);
772 err_put_ctx:
773 	gss_put_ctx(ctx);
774 err:
775 	kfree(buf);
776 out:
777 	return err;
778 }
779 
780 static int gss_pipe_open(struct inode *inode, int new_version)
781 {
782 	struct net *net = inode->i_sb->s_fs_info;
783 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
784 	int ret = 0;
785 
786 	spin_lock(&pipe_version_lock);
787 	if (sn->pipe_version < 0) {
788 		/* First open of any gss pipe determines the version: */
789 		sn->pipe_version = new_version;
790 		rpc_wake_up(&pipe_version_rpc_waitqueue);
791 		wake_up(&pipe_version_waitqueue);
792 	} else if (sn->pipe_version != new_version) {
793 		/* Trying to open a pipe of a different version */
794 		ret = -EBUSY;
795 		goto out;
796 	}
797 	atomic_inc(&sn->pipe_users);
798 out:
799 	spin_unlock(&pipe_version_lock);
800 	return ret;
801 
802 }
803 
804 static int gss_pipe_open_v0(struct inode *inode)
805 {
806 	return gss_pipe_open(inode, 0);
807 }
808 
809 static int gss_pipe_open_v1(struct inode *inode)
810 {
811 	return gss_pipe_open(inode, 1);
812 }
813 
814 static void
815 gss_pipe_release(struct inode *inode)
816 {
817 	struct net *net = inode->i_sb->s_fs_info;
818 	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
819 	struct gss_upcall_msg *gss_msg;
820 
821 restart:
822 	spin_lock(&pipe->lock);
823 	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
824 
825 		if (!list_empty(&gss_msg->msg.list))
826 			continue;
827 		gss_msg->msg.errno = -EPIPE;
828 		refcount_inc(&gss_msg->count);
829 		__gss_unhash_msg(gss_msg);
830 		spin_unlock(&pipe->lock);
831 		gss_release_msg(gss_msg);
832 		goto restart;
833 	}
834 	spin_unlock(&pipe->lock);
835 
836 	put_pipe_version(net);
837 }
838 
839 static void
840 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
841 {
842 	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
843 
844 	if (msg->errno < 0) {
845 		refcount_inc(&gss_msg->count);
846 		gss_unhash_msg(gss_msg);
847 		if (msg->errno == -ETIMEDOUT)
848 			warn_gssd();
849 		gss_release_msg(gss_msg);
850 	}
851 	gss_release_msg(gss_msg);
852 }
853 
854 static void gss_pipe_dentry_destroy(struct dentry *dir,
855 		struct rpc_pipe_dir_object *pdo)
856 {
857 	struct gss_pipe *gss_pipe = pdo->pdo_data;
858 	struct rpc_pipe *pipe = gss_pipe->pipe;
859 
860 	if (pipe->dentry != NULL) {
861 		rpc_unlink(pipe->dentry);
862 		pipe->dentry = NULL;
863 	}
864 }
865 
866 static int gss_pipe_dentry_create(struct dentry *dir,
867 		struct rpc_pipe_dir_object *pdo)
868 {
869 	struct gss_pipe *p = pdo->pdo_data;
870 	struct dentry *dentry;
871 
872 	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
873 	if (IS_ERR(dentry))
874 		return PTR_ERR(dentry);
875 	p->pipe->dentry = dentry;
876 	return 0;
877 }
878 
879 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
880 	.create = gss_pipe_dentry_create,
881 	.destroy = gss_pipe_dentry_destroy,
882 };
883 
884 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
885 		const char *name,
886 		const struct rpc_pipe_ops *upcall_ops)
887 {
888 	struct gss_pipe *p;
889 	int err = -ENOMEM;
890 
891 	p = kmalloc(sizeof(*p), GFP_KERNEL);
892 	if (p == NULL)
893 		goto err;
894 	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
895 	if (IS_ERR(p->pipe)) {
896 		err = PTR_ERR(p->pipe);
897 		goto err_free_gss_pipe;
898 	}
899 	p->name = name;
900 	p->clnt = clnt;
901 	kref_init(&p->kref);
902 	rpc_init_pipe_dir_object(&p->pdo,
903 			&gss_pipe_dir_object_ops,
904 			p);
905 	return p;
906 err_free_gss_pipe:
907 	kfree(p);
908 err:
909 	return ERR_PTR(err);
910 }
911 
912 struct gss_alloc_pdo {
913 	struct rpc_clnt *clnt;
914 	const char *name;
915 	const struct rpc_pipe_ops *upcall_ops;
916 };
917 
918 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
919 {
920 	struct gss_pipe *gss_pipe;
921 	struct gss_alloc_pdo *args = data;
922 
923 	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
924 		return 0;
925 	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
926 	if (strcmp(gss_pipe->name, args->name) != 0)
927 		return 0;
928 	if (!kref_get_unless_zero(&gss_pipe->kref))
929 		return 0;
930 	return 1;
931 }
932 
933 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
934 {
935 	struct gss_pipe *gss_pipe;
936 	struct gss_alloc_pdo *args = data;
937 
938 	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
939 	if (!IS_ERR(gss_pipe))
940 		return &gss_pipe->pdo;
941 	return NULL;
942 }
943 
944 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
945 		const char *name,
946 		const struct rpc_pipe_ops *upcall_ops)
947 {
948 	struct net *net = rpc_net_ns(clnt);
949 	struct rpc_pipe_dir_object *pdo;
950 	struct gss_alloc_pdo args = {
951 		.clnt = clnt,
952 		.name = name,
953 		.upcall_ops = upcall_ops,
954 	};
955 
956 	pdo = rpc_find_or_alloc_pipe_dir_object(net,
957 			&clnt->cl_pipedir_objects,
958 			gss_pipe_match_pdo,
959 			gss_pipe_alloc_pdo,
960 			&args);
961 	if (pdo != NULL)
962 		return container_of(pdo, struct gss_pipe, pdo);
963 	return ERR_PTR(-ENOMEM);
964 }
965 
966 static void __gss_pipe_free(struct gss_pipe *p)
967 {
968 	struct rpc_clnt *clnt = p->clnt;
969 	struct net *net = rpc_net_ns(clnt);
970 
971 	rpc_remove_pipe_dir_object(net,
972 			&clnt->cl_pipedir_objects,
973 			&p->pdo);
974 	rpc_destroy_pipe_data(p->pipe);
975 	kfree(p);
976 }
977 
978 static void __gss_pipe_release(struct kref *kref)
979 {
980 	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
981 
982 	__gss_pipe_free(p);
983 }
984 
985 static void gss_pipe_free(struct gss_pipe *p)
986 {
987 	if (p != NULL)
988 		kref_put(&p->kref, __gss_pipe_release);
989 }
990 
991 /*
992  * NOTE: we have the opportunity to use different
993  * parameters based on the input flavor (which must be a pseudoflavor)
994  */
995 static struct gss_auth *
996 gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
997 {
998 	rpc_authflavor_t flavor = args->pseudoflavor;
999 	struct gss_auth *gss_auth;
1000 	struct gss_pipe *gss_pipe;
1001 	struct rpc_auth * auth;
1002 	int err = -ENOMEM; /* XXX? */
1003 
1004 	if (!try_module_get(THIS_MODULE))
1005 		return ERR_PTR(err);
1006 	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
1007 		goto out_dec;
1008 	INIT_HLIST_NODE(&gss_auth->hash);
1009 	gss_auth->target_name = NULL;
1010 	if (args->target_name) {
1011 		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
1012 		if (gss_auth->target_name == NULL)
1013 			goto err_free;
1014 	}
1015 	gss_auth->client = clnt;
1016 	gss_auth->net = get_net(rpc_net_ns(clnt));
1017 	err = -EINVAL;
1018 	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
1019 	if (!gss_auth->mech)
1020 		goto err_put_net;
1021 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
1022 	if (gss_auth->service == 0)
1023 		goto err_put_mech;
1024 	if (!gssd_running(gss_auth->net))
1025 		goto err_put_mech;
1026 	auth = &gss_auth->rpc_auth;
1027 	auth->au_cslack = GSS_CRED_SLACK >> 2;
1028 	auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
1029 	auth->au_verfsize = GSS_VERF_SLACK >> 2;
1030 	auth->au_ralign = GSS_VERF_SLACK >> 2;
1031 	__set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
1032 	auth->au_ops = &authgss_ops;
1033 	auth->au_flavor = flavor;
1034 	if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
1035 		__set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);
1036 	refcount_set(&auth->au_count, 1);
1037 	kref_init(&gss_auth->kref);
1038 
1039 	err = rpcauth_init_credcache(auth);
1040 	if (err)
1041 		goto err_put_mech;
1042 	/*
1043 	 * Note: if we created the old pipe first, then someone who
1044 	 * examined the directory at the right moment might conclude
1045 	 * that we supported only the old pipe.  So we instead create
1046 	 * the new pipe first.
1047 	 */
1048 	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1049 	if (IS_ERR(gss_pipe)) {
1050 		err = PTR_ERR(gss_pipe);
1051 		goto err_destroy_credcache;
1052 	}
1053 	gss_auth->gss_pipe[1] = gss_pipe;
1054 
1055 	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1056 			&gss_upcall_ops_v0);
1057 	if (IS_ERR(gss_pipe)) {
1058 		err = PTR_ERR(gss_pipe);
1059 		goto err_destroy_pipe_1;
1060 	}
1061 	gss_auth->gss_pipe[0] = gss_pipe;
1062 
1063 	return gss_auth;
1064 err_destroy_pipe_1:
1065 	gss_pipe_free(gss_auth->gss_pipe[1]);
1066 err_destroy_credcache:
1067 	rpcauth_destroy_credcache(auth);
1068 err_put_mech:
1069 	gss_mech_put(gss_auth->mech);
1070 err_put_net:
1071 	put_net(gss_auth->net);
1072 err_free:
1073 	kfree(gss_auth->target_name);
1074 	kfree(gss_auth);
1075 out_dec:
1076 	module_put(THIS_MODULE);
1077 	trace_rpcgss_createauth(flavor, err);
1078 	return ERR_PTR(err);
1079 }
1080 
1081 static void
1082 gss_free(struct gss_auth *gss_auth)
1083 {
1084 	gss_pipe_free(gss_auth->gss_pipe[0]);
1085 	gss_pipe_free(gss_auth->gss_pipe[1]);
1086 	gss_mech_put(gss_auth->mech);
1087 	put_net(gss_auth->net);
1088 	kfree(gss_auth->target_name);
1089 
1090 	kfree(gss_auth);
1091 	module_put(THIS_MODULE);
1092 }
1093 
1094 static void
1095 gss_free_callback(struct kref *kref)
1096 {
1097 	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1098 
1099 	gss_free(gss_auth);
1100 }
1101 
1102 static void
1103 gss_put_auth(struct gss_auth *gss_auth)
1104 {
1105 	kref_put(&gss_auth->kref, gss_free_callback);
1106 }
1107 
1108 static void
1109 gss_destroy(struct rpc_auth *auth)
1110 {
1111 	struct gss_auth *gss_auth = container_of(auth,
1112 			struct gss_auth, rpc_auth);
1113 
1114 	if (hash_hashed(&gss_auth->hash)) {
1115 		spin_lock(&gss_auth_hash_lock);
1116 		hash_del(&gss_auth->hash);
1117 		spin_unlock(&gss_auth_hash_lock);
1118 	}
1119 
1120 	gss_pipe_free(gss_auth->gss_pipe[0]);
1121 	gss_auth->gss_pipe[0] = NULL;
1122 	gss_pipe_free(gss_auth->gss_pipe[1]);
1123 	gss_auth->gss_pipe[1] = NULL;
1124 	rpcauth_destroy_credcache(auth);
1125 
1126 	gss_put_auth(gss_auth);
1127 }
1128 
1129 /*
1130  * Auths may be shared between rpc clients that were cloned from a
1131  * common client with the same xprt, if they also share the flavor and
1132  * target_name.
1133  *
1134  * The auth is looked up from the oldest parent sharing the same
1135  * cl_xprt, and the auth itself references only that common parent
1136  * (which is guaranteed to last as long as any of its descendants).
1137  */
1138 static struct gss_auth *
1139 gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
1140 		struct rpc_clnt *clnt,
1141 		struct gss_auth *new)
1142 {
1143 	struct gss_auth *gss_auth;
1144 	unsigned long hashval = (unsigned long)clnt;
1145 
1146 	spin_lock(&gss_auth_hash_lock);
1147 	hash_for_each_possible(gss_auth_hash_table,
1148 			gss_auth,
1149 			hash,
1150 			hashval) {
1151 		if (gss_auth->client != clnt)
1152 			continue;
1153 		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1154 			continue;
1155 		if (gss_auth->target_name != args->target_name) {
1156 			if (gss_auth->target_name == NULL)
1157 				continue;
1158 			if (args->target_name == NULL)
1159 				continue;
1160 			if (strcmp(gss_auth->target_name, args->target_name))
1161 				continue;
1162 		}
1163 		if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
1164 			continue;
1165 		goto out;
1166 	}
1167 	if (new)
1168 		hash_add(gss_auth_hash_table, &new->hash, hashval);
1169 	gss_auth = new;
1170 out:
1171 	spin_unlock(&gss_auth_hash_lock);
1172 	return gss_auth;
1173 }
1174 
1175 static struct gss_auth *
1176 gss_create_hashed(const struct rpc_auth_create_args *args,
1177 		  struct rpc_clnt *clnt)
1178 {
1179 	struct gss_auth *gss_auth;
1180 	struct gss_auth *new;
1181 
1182 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1183 	if (gss_auth != NULL)
1184 		goto out;
1185 	new = gss_create_new(args, clnt);
1186 	if (IS_ERR(new))
1187 		return new;
1188 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1189 	if (gss_auth != new)
1190 		gss_destroy(&new->rpc_auth);
1191 out:
1192 	return gss_auth;
1193 }
1194 
1195 static struct rpc_auth *
1196 gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1197 {
1198 	struct gss_auth *gss_auth;
1199 	struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
1200 
1201 	while (clnt != clnt->cl_parent) {
1202 		struct rpc_clnt *parent = clnt->cl_parent;
1203 		/* Find the original parent for this transport */
1204 		if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
1205 			break;
1206 		clnt = parent;
1207 	}
1208 
1209 	gss_auth = gss_create_hashed(args, clnt);
1210 	if (IS_ERR(gss_auth))
1211 		return ERR_CAST(gss_auth);
1212 	return &gss_auth->rpc_auth;
1213 }
1214 
1215 static struct gss_cred *
1216 gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1217 {
1218 	struct gss_cred *new;
1219 
1220 	/* Make a copy of the cred so that we can reference count it */
1221 	new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
1222 	if (new) {
1223 		struct auth_cred acred = {
1224 			.cred = gss_cred->gc_base.cr_cred,
1225 		};
1226 		struct gss_cl_ctx *ctx =
1227 			rcu_dereference_protected(gss_cred->gc_ctx, 1);
1228 
1229 		rpcauth_init_cred(&new->gc_base, &acred,
1230 				&gss_auth->rpc_auth,
1231 				&gss_nullops);
1232 		new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1233 		new->gc_service = gss_cred->gc_service;
1234 		new->gc_principal = gss_cred->gc_principal;
1235 		kref_get(&gss_auth->kref);
1236 		rcu_assign_pointer(new->gc_ctx, ctx);
1237 		gss_get_ctx(ctx);
1238 	}
1239 	return new;
1240 }
1241 
1242 /*
1243  * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
1244  * to the server with the GSS control procedure field set to
1245  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1246  * all RPCSEC_GSS state associated with that context.
1247  */
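/*
 * Note that gss_dup_cred() hands back a copy that uses gss_nullops, so
 * sending the destroy request does not go through the normal credential
 * refresh/upcall machinery (cf. the dummy gss_refresh_null() further down).
 */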
1248 static void
1249 gss_send_destroy_context(struct rpc_cred *cred)
1250 {
1251 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1252 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1253 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1254 	struct gss_cred *new;
1255 	struct rpc_task *task;
1256 
1257 	new = gss_dup_cred(gss_auth, gss_cred);
1258 	if (new) {
1259 		ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1260 
1261 		trace_rpcgss_ctx_destroy(gss_cred);
1262 		task = rpc_call_null(gss_auth->client, &new->gc_base,
1263 				     RPC_TASK_ASYNC);
1264 		if (!IS_ERR(task))
1265 			rpc_put_task(task);
1266 
1267 		put_rpccred(&new->gc_base);
1268 	}
1269 }
1270 
1271 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1272  * to create a new cred or context, so they check that things have been
1273  * allocated before freeing them. */
1274 static void
1275 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1276 {
1277 	gss_delete_sec_context(&ctx->gc_gss_ctx);
1278 	kfree(ctx->gc_wire_ctx.data);
1279 	kfree(ctx->gc_acceptor.data);
1280 	kfree(ctx);
1281 }
1282 
1283 static void
1284 gss_free_ctx_callback(struct rcu_head *head)
1285 {
1286 	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1287 	gss_do_free_ctx(ctx);
1288 }
1289 
1290 static void
1291 gss_free_ctx(struct gss_cl_ctx *ctx)
1292 {
1293 	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1294 }
1295 
1296 static void
1297 gss_free_cred(struct gss_cred *gss_cred)
1298 {
1299 	kfree(gss_cred);
1300 }
1301 
1302 static void
1303 gss_free_cred_callback(struct rcu_head *head)
1304 {
1305 	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1306 	gss_free_cred(gss_cred);
1307 }
1308 
1309 static void
1310 gss_destroy_nullcred(struct rpc_cred *cred)
1311 {
1312 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1313 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1314 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1315 
1316 	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1317 	put_cred(cred->cr_cred);
1318 	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1319 	if (ctx)
1320 		gss_put_ctx(ctx);
1321 	gss_put_auth(gss_auth);
1322 }
1323 
1324 static void
1325 gss_destroy_cred(struct rpc_cred *cred)
1326 {
1327 	if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1328 		gss_send_destroy_context(cred);
1329 	gss_destroy_nullcred(cred);
1330 }
1331 
1332 static int
1333 gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
1334 {
1335 	return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
1336 }
1337 
1338 /*
1339  * Lookup RPCSEC_GSS cred for the current process
1340  */
1341 static struct rpc_cred *
1342 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1343 {
1344 	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
1345 }
1346 
1347 static struct rpc_cred *
1348 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
1349 {
1350 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1351 	struct gss_cred	*cred = NULL;
1352 	int err = -ENOMEM;
1353 
1354 	if (!(cred = kzalloc(sizeof(*cred), gfp)))
1355 		goto out_err;
1356 
1357 	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1358 	/*
1359 	 * Note: in order to force a call to call_refresh(), we deliberately
1360 	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1361 	 */
1362 	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1363 	cred->gc_service = gss_auth->service;
1364 	cred->gc_principal = acred->principal;
1365 	kref_get(&gss_auth->kref);
1366 	return &cred->gc_base;
1367 
1368 out_err:
1369 	return ERR_PTR(err);
1370 }
1371 
1372 static int
1373 gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1374 {
1375 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1376 	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
1377 	int err;
1378 
1379 	do {
1380 		err = gss_create_upcall(gss_auth, gss_cred);
1381 	} while (err == -EAGAIN);
1382 	return err;
1383 }
1384 
1385 static char *
1386 gss_stringify_acceptor(struct rpc_cred *cred)
1387 {
1388 	char *string = NULL;
1389 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1390 	struct gss_cl_ctx *ctx;
1391 	unsigned int len;
1392 	struct xdr_netobj *acceptor;
1393 
1394 	rcu_read_lock();
1395 	ctx = rcu_dereference(gss_cred->gc_ctx);
1396 	if (!ctx)
1397 		goto out;
1398 
1399 	len = ctx->gc_acceptor.len;
1400 	rcu_read_unlock();
1401 
1402 	/* no point if there's no string */
1403 	if (!len)
1404 		return NULL;
1405 realloc:
1406 	string = kmalloc(len + 1, GFP_KERNEL);
1407 	if (!string)
1408 		return NULL;
1409 
1410 	rcu_read_lock();
1411 	ctx = rcu_dereference(gss_cred->gc_ctx);
1412 
1413 	/* did the ctx disappear or was it replaced by one with no acceptor? */
1414 	if (!ctx || !ctx->gc_acceptor.len) {
1415 		kfree(string);
1416 		string = NULL;
1417 		goto out;
1418 	}
1419 
1420 	acceptor = &ctx->gc_acceptor;
1421 
1422 	/*
1423 	 * Did we find a new acceptor that's longer than the original? Allocate
1424 	 * a longer buffer and try again.
1425 	 */
1426 	if (len < acceptor->len) {
1427 		len = acceptor->len;
1428 		rcu_read_unlock();
1429 		kfree(string);
1430 		goto realloc;
1431 	}
1432 
1433 	memcpy(string, acceptor->data, acceptor->len);
1434 	string[acceptor->len] = '\0';
1435 out:
1436 	rcu_read_unlock();
1437 	return string;
1438 }
1439 
1440 /*
1441  * Returns -EACCES if GSS context is NULL or will expire within the
1442  * timeout (in seconds)
1443  */
1444 static int
1445 gss_key_timeout(struct rpc_cred *rc)
1446 {
1447 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1448 	struct gss_cl_ctx *ctx;
1449 	unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1450 	int ret = 0;
1451 
1452 	rcu_read_lock();
1453 	ctx = rcu_dereference(gss_cred->gc_ctx);
1454 	if (!ctx || time_after(timeout, ctx->gc_expiry))
1455 		ret = -EACCES;
1456 	rcu_read_unlock();
1457 
1458 	return ret;
1459 }
1460 
1461 static int
1462 gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1463 {
1464 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1465 	struct gss_cl_ctx *ctx;
1466 	int ret;
1467 
1468 	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1469 		goto out;
1470 	/* Don't match with creds that have expired. */
1471 	rcu_read_lock();
1472 	ctx = rcu_dereference(gss_cred->gc_ctx);
1473 	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
1474 		rcu_read_unlock();
1475 		return 0;
1476 	}
1477 	rcu_read_unlock();
1478 	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1479 		return 0;
1480 out:
1481 	if (acred->principal != NULL) {
1482 		if (gss_cred->gc_principal == NULL)
1483 			return 0;
1484 		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1485 	} else {
1486 		if (gss_cred->gc_principal != NULL)
1487 			return 0;
1488 		ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
1489 	}
1490 	return ret;
1491 }
1492 
1493 /*
1494  * Marshal credentials.
1495  *
1496  * The expensive part is computing the verifier. We can't cache a
1497  * pre-computed version of the verifier because the seqno, which
1498  * is different every time, is included in the MIC.
1499  */
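/*
 * Sketch of what gss_marshal() emits (derived from the code below, fields
 * in 32-bit XDR words):
 *
 *	cred:  AUTH_GSS flavor | cred length | RPC_GSS_VERSION | gc_proc |
 *	       seqno | gc_service | wire ctx handle (netobj)
 *	verf:  AUTH_GSS flavor | MIC over the request from the xid through
 *	       the end of the credential
 */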
1500 static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
1501 {
1502 	struct rpc_rqst *req = task->tk_rqstp;
1503 	struct rpc_cred *cred = req->rq_cred;
1504 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1505 						 gc_base);
1506 	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
1507 	__be32		*p, *cred_len;
1508 	u32             maj_stat = 0;
1509 	struct xdr_netobj mic;
1510 	struct kvec	iov;
1511 	struct xdr_buf	verf_buf;
1512 	int status;
1513 
1514 	/* Credential */
1515 
1516 	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
1517 			      ctx->gc_wire_ctx.len);
1518 	if (!p)
1519 		goto marshal_failed;
1520 	*p++ = rpc_auth_gss;
1521 	cred_len = p++;
1522 
1523 	spin_lock(&ctx->gc_seq_lock);
1524 	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1525 	spin_unlock(&ctx->gc_seq_lock);
1526 	if (req->rq_seqno == MAXSEQ)
1527 		goto expired;
1528 	trace_rpcgss_seqno(task);
1529 
1530 	*p++ = cpu_to_be32(RPC_GSS_VERSION);
1531 	*p++ = cpu_to_be32(ctx->gc_proc);
1532 	*p++ = cpu_to_be32(req->rq_seqno);
1533 	*p++ = cpu_to_be32(gss_cred->gc_service);
1534 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1535 	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
1536 
1537 	/* Verifier */
1538 
1539 	/* We compute the checksum for the verifier over the xdr-encoded bytes
1540 	 * starting with the xid and ending at the end of the credential: */
1541 	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
1542 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1543 	xdr_buf_from_iov(&iov, &verf_buf);
1544 
1545 	p = xdr_reserve_space(xdr, sizeof(*p));
1546 	if (!p)
1547 		goto marshal_failed;
1548 	*p++ = rpc_auth_gss;
1549 	mic.data = (u8 *)(p + 1);
1550 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1551 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1552 		goto expired;
1553 	else if (maj_stat != 0)
1554 		goto bad_mic;
1555 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1556 		goto marshal_failed;
1557 	status = 0;
1558 out:
1559 	gss_put_ctx(ctx);
1560 	return status;
1561 expired:
1562 	clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1563 	status = -EKEYEXPIRED;
1564 	goto out;
1565 marshal_failed:
1566 	status = -EMSGSIZE;
1567 	goto out;
1568 bad_mic:
1569 	trace_rpcgss_get_mic(task, maj_stat);
1570 	status = -EIO;
1571 	goto out;
1572 }
1573 
1574 static int gss_renew_cred(struct rpc_task *task)
1575 {
1576 	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1577 	struct gss_cred *gss_cred = container_of(oldcred,
1578 						 struct gss_cred,
1579 						 gc_base);
1580 	struct rpc_auth *auth = oldcred->cr_auth;
1581 	struct auth_cred acred = {
1582 		.cred = oldcred->cr_cred,
1583 		.principal = gss_cred->gc_principal,
1584 	};
1585 	struct rpc_cred *new;
1586 
1587 	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1588 	if (IS_ERR(new))
1589 		return PTR_ERR(new);
1590 
1591 	task->tk_rqstp->rq_cred = new;
1592 	put_rpccred(oldcred);
1593 	return 0;
1594 }
1595 
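/*
 * A cred is a "negative entry" for gss_expired_cred_retry_delay seconds
 * after an upcall failed with -EKEYEXPIRED; during that window gss_refresh()
 * fails fast instead of re-driving the upcall.
 */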
1596 static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1597 {
1598 	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1599 		unsigned long now = jiffies;
1600 		unsigned long begin, expire;
1601 		struct gss_cred *gss_cred;
1602 
1603 		gss_cred = container_of(cred, struct gss_cred, gc_base);
1604 		begin = gss_cred->gc_upcall_timestamp;
1605 		expire = begin + gss_expired_cred_retry_delay * HZ;
1606 
1607 		if (time_in_range_open(now, begin, expire))
1608 			return 1;
1609 	}
1610 	return 0;
1611 }
1612 
1613 /*
1614  * Refresh credentials. XXX - finish
1615  */
1616 static int
1617 gss_refresh(struct rpc_task *task)
1618 {
1619 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1620 	int ret = 0;
1621 
1622 	if (gss_cred_is_negative_entry(cred))
1623 		return -EKEYEXPIRED;
1624 
1625 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1626 			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1627 		ret = gss_renew_cred(task);
1628 		if (ret < 0)
1629 			goto out;
1630 		cred = task->tk_rqstp->rq_cred;
1631 	}
1632 
1633 	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1634 		ret = gss_refresh_upcall(task);
1635 out:
1636 	return ret;
1637 }
1638 
1639 /* Dummy refresh routine: used only when destroying the context */
1640 static int
1641 gss_refresh_null(struct rpc_task *task)
1642 {
1643 	return 0;
1644 }
1645 
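/*
 * Check the reply verifier: the server's MIC is computed over the
 * XDR-encoded sequence number of the request, so that is what it is
 * verified against here.
 */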
1646 static int
1647 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
1648 {
1649 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1650 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1651 	__be32		*p, *seq = NULL;
1652 	struct kvec	iov;
1653 	struct xdr_buf	verf_buf;
1654 	struct xdr_netobj mic;
1655 	u32		len, maj_stat;
1656 	int		status;
1657 
1658 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1659 	if (!p)
1660 		goto validate_failed;
1661 	if (*p++ != rpc_auth_gss)
1662 		goto validate_failed;
1663 	len = be32_to_cpup(p);
1664 	if (len > RPC_MAX_AUTH_SIZE)
1665 		goto validate_failed;
1666 	p = xdr_inline_decode(xdr, len);
1667 	if (!p)
1668 		goto validate_failed;
1669 
1670 	seq = kmalloc(4, GFP_NOFS);
1671 	if (!seq)
1672 		goto validate_failed;
1673 	*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
1674 	iov.iov_base = seq;
1675 	iov.iov_len = 4;
1676 	xdr_buf_from_iov(&iov, &verf_buf);
1677 	mic.data = (u8 *)p;
1678 	mic.len = len;
1679 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1680 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1681 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1682 	if (maj_stat)
1683 		goto bad_mic;
1684 
1685 	/* We leave it to unwrap to calculate au_rslack. For now we just
1686 	 * calculate the length of the verifier: */
1687 	if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
1688 		cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1689 	status = 0;
1690 out:
1691 	gss_put_ctx(ctx);
1692 	kfree(seq);
1693 	return status;
1694 
1695 validate_failed:
1696 	status = -EIO;
1697 	goto out;
1698 bad_mic:
1699 	trace_rpcgss_verify_mic(task, maj_stat);
1700 	status = -EACCES;
1701 	goto out;
1702 }
1703 
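/*
 * Rough shape of the request body built below for rpc_gss_svc_integrity
 * (derived from the code, not a standalone wire-format description):
 *
 *	integ_len | seqno | encoded RPC arguments | MIC over (seqno + args)
 */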
1704 static noinline_for_stack int
1705 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1706 		   struct rpc_task *task, struct xdr_stream *xdr)
1707 {
1708 	struct rpc_rqst *rqstp = task->tk_rqstp;
1709 	struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
1710 	struct xdr_netobj mic;
1711 	__be32 *p, *integ_len;
1712 	u32 offset, maj_stat;
1713 
1714 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1715 	if (!p)
1716 		goto wrap_failed;
1717 	integ_len = p++;
1718 	*p = cpu_to_be32(rqstp->rq_seqno);
1719 
1720 	if (rpcauth_wrap_req_encode(task, xdr))
1721 		goto wrap_failed;
1722 
1723 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1724 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
1725 				offset, snd_buf->len - offset))
1726 		goto wrap_failed;
1727 	*integ_len = cpu_to_be32(integ_buf.len);
1728 
1729 	p = xdr_reserve_space(xdr, 0);
1730 	if (!p)
1731 		goto wrap_failed;
1732 	mic.data = (u8 *)(p + 1);
1733 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1734 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1735 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1736 	else if (maj_stat)
1737 		goto bad_mic;
1738 	/* Check that the trailing MIC fit in the buffer, after the fact */
1739 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1740 		goto wrap_failed;
1741 	return 0;
1742 wrap_failed:
1743 	return -EMSGSIZE;
1744 bad_mic:
1745 	trace_rpcgss_get_mic(task, maj_stat);
1746 	return -EIO;
1747 }
1748 
1749 static void
1750 priv_release_snd_buf(struct rpc_rqst *rqstp)
1751 {
1752 	int i;
1753 
1754 	for (i=0; i < rqstp->rq_enc_pages_num; i++)
1755 		__free_page(rqstp->rq_enc_pages[i]);
1756 	kfree(rqstp->rq_enc_pages);
1757 	rqstp->rq_release_snd_buf = NULL;
1758 }
1759 
1760 static int
1761 alloc_enc_pages(struct rpc_rqst *rqstp)
1762 {
1763 	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1764 	int first, last, i;
1765 
1766 	if (rqstp->rq_release_snd_buf)
1767 		rqstp->rq_release_snd_buf(rqstp);
1768 
1769 	if (snd_buf->page_len == 0) {
1770 		rqstp->rq_enc_pages_num = 0;
1771 		return 0;
1772 	}
1773 
1774 	first = snd_buf->page_base >> PAGE_SHIFT;
1775 	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
1776 	rqstp->rq_enc_pages_num = last - first + 1 + 1;
1777 	rqstp->rq_enc_pages
1778 		= kmalloc_array(rqstp->rq_enc_pages_num,
1779 				sizeof(struct page *),
1780 				GFP_NOFS);
1781 	if (!rqstp->rq_enc_pages)
1782 		goto out;
1783 	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
1784 		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1785 		if (rqstp->rq_enc_pages[i] == NULL)
1786 			goto out_free;
1787 	}
1788 	rqstp->rq_release_snd_buf = priv_release_snd_buf;
1789 	return 0;
1790 out_free:
1791 	rqstp->rq_enc_pages_num = i;
1792 	priv_release_snd_buf(rqstp);
1793 out:
1794 	return -EAGAIN;
1795 }
1796 
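/*
 * Rough shape for rpc_gss_svc_privacy (derived from the code below): the
 * seqno and the encoded arguments are handed to gss_wrap(), and the result
 * goes on the wire as a length-prefixed opaque blob (opaque_len | wrapped
 * token), padded out to a 4-byte boundary.
 */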
1797 static noinline_for_stack int
1798 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1799 		  struct rpc_task *task, struct xdr_stream *xdr)
1800 {
1801 	struct rpc_rqst *rqstp = task->tk_rqstp;
1802 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1803 	u32		pad, offset, maj_stat;
1804 	int		status;
1805 	__be32		*p, *opaque_len;
1806 	struct page	**inpages;
1807 	int		first;
1808 	struct kvec	*iov;
1809 
1810 	status = -EIO;
1811 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1812 	if (!p)
1813 		goto wrap_failed;
1814 	opaque_len = p++;
1815 	*p = cpu_to_be32(rqstp->rq_seqno);
1816 
1817 	if (rpcauth_wrap_req_encode(task, xdr))
1818 		goto wrap_failed;
1819 
1820 	status = alloc_enc_pages(rqstp);
1821 	if (unlikely(status))
1822 		goto wrap_failed;
1823 	first = snd_buf->page_base >> PAGE_SHIFT;
1824 	inpages = snd_buf->pages + first;
1825 	snd_buf->pages = rqstp->rq_enc_pages;
1826 	snd_buf->page_base -= first << PAGE_SHIFT;
1827 	/*
1828 	 * Move the tail into its own page, in case gss_wrap needs
1829 	 * more space in the head when wrapping.
1830 	 *
1831 	 * Still... Why can't gss_wrap just slide the tail down?
1832 	 */
1833 	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1834 		char *tmp;
1835 
1836 		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1837 		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1838 		snd_buf->tail[0].iov_base = tmp;
1839 	}
1840 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1841 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1842 	/* slack space should prevent this ever happening: */
1843 	if (unlikely(snd_buf->len > snd_buf->buflen))
1844 		goto wrap_failed;
1845 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1846 	 * done anyway, so it's safe to put the request on the wire: */
1847 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1848 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1849 	else if (maj_stat)
1850 		goto bad_wrap;
1851 
1852 	*opaque_len = cpu_to_be32(snd_buf->len - offset);
1853 	/* guess whether the pad goes into the head or the tail: */
1854 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1855 		iov = snd_buf->tail;
1856 	else
1857 		iov = snd_buf->head;
1858 	p = iov->iov_base + iov->iov_len;
1859 	pad = xdr_pad_size(snd_buf->len - offset);
1860 	memset(p, 0, pad);
1861 	iov->iov_len += pad;
1862 	snd_buf->len += pad;
1863 
1864 	return 0;
1865 wrap_failed:
1866 	return status;
1867 bad_wrap:
1868 	trace_rpcgss_wrap(task, maj_stat);
1869 	return -EIO;
1870 }
1871 
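/*
 * Entry point for protecting the call body: dispatch on the
 * credential's GSS service (none, integrity or privacy). Control
 * messages such as context destruction are sent unwrapped.
 */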
1872 static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
1873 {
1874 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1875 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1876 			gc_base);
1877 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1878 	int status;
1879 
1880 	status = -EIO;
1881 	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1882 		/* The spec seems a little ambiguous here, but I think that not
1883 		 * wrapping context destruction requests makes the most sense.
1884 		 */
1885 		status = rpcauth_wrap_req_encode(task, xdr);
1886 		goto out;
1887 	}
1888 	switch (gss_cred->gc_service) {
1889 	case RPC_GSS_SVC_NONE:
1890 		status = rpcauth_wrap_req_encode(task, xdr);
1891 		break;
1892 	case RPC_GSS_SVC_INTEGRITY:
1893 		status = gss_wrap_req_integ(cred, ctx, task, xdr);
1894 		break;
1895 	case RPC_GSS_SVC_PRIVACY:
1896 		status = gss_wrap_req_priv(cred, ctx, task, xdr);
1897 		break;
1898 	default:
1899 		status = -EIO;
1900 	}
1901 out:
1902 	gss_put_ctx(ctx);
1903 	return status;
1904 }
1905 
1906 /**
1907  * gss_update_rslack - Possibly update RPC receive buffer size estimates
1908  * @task: rpc_task for incoming RPC Reply being unwrapped
1909  * @cred: controlling rpc_cred for @task
1910  * @before: XDR words needed before each RPC Reply message
1911  * @after: XDR words needed following each RPC Reply message
1912  *
1913  */
1914 static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
1915 			      unsigned int before, unsigned int after)
1916 {
1917 	struct rpc_auth *auth = cred->cr_auth;
1918 
1919 	if (test_and_clear_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags)) {
1920 		auth->au_ralign = auth->au_verfsize + before;
1921 		auth->au_rslack = auth->au_verfsize + after;
1922 		trace_rpcgss_update_slack(task, auth);
1923 	}
1924 }
1925 
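/* rpc_gss_svc_none: the reply body is not protected, so only the
 * verifier contributes to the receive slack estimates. */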
1926 static int
1927 gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
1928 {
1929 	gss_update_rslack(task, cred, 0, 0);
1930 	return 0;
1931 }
1932 
1933 /*
1934  * RFC 2203, Section 5.3.2.2
1935  *
1936  *	struct rpc_gss_integ_data {
1937  *		opaque databody_integ<>;
1938  *		opaque checksum<>;
1939  *	};
1940  *
1941  *	struct rpc_gss_data_t {
1942  *		unsigned int seq_num;
1943  *		proc_req_arg_t arg;
1944  *	};
1945  */
1946 static noinline_for_stack int
1947 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
1948 		      struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1949 		      struct xdr_stream *xdr)
1950 {
1951 	struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
1952 	u32 len, offset, seqno, maj_stat;
1953 	struct xdr_netobj mic;
1954 	int ret;
1955 
1956 	ret = -EIO;
1957 	mic.data = NULL;
1958 
1959 	/* opaque databody_integ<>; */
1960 	if (xdr_stream_decode_u32(xdr, &len))
1961 		goto unwrap_failed;
1962 	if (len & 3)
1963 		goto unwrap_failed;
1964 	offset = rcv_buf->len - xdr_stream_remaining(xdr);
1965 	if (xdr_stream_decode_u32(xdr, &seqno))
1966 		goto unwrap_failed;
1967 	if (seqno != rqstp->rq_seqno)
1968 		goto bad_seqno;
1969 	if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
1970 		goto unwrap_failed;
1971 
1972 	/*
1973 	 * The xdr_stream now points to the beginning of the
1974 	 * upper layer payload, to be passed below to
1975 	 * rpcauth_unwrap_resp_decode(). The checksum, which
1976 	 * follows the upper layer payload in @rcv_buf, is
1977 	 * located and parsed without updating the xdr_stream.
1978 	 */
1979 
1980 	/* opaque checksum<>; */
1981 	offset += len;
1982 	if (xdr_decode_word(rcv_buf, offset, &len))
1983 		goto unwrap_failed;
1984 	offset += sizeof(__be32);
1985 	if (offset + len > rcv_buf->len)
1986 		goto unwrap_failed;
1987 	mic.len = len;
1988 	mic.data = kmalloc(len, GFP_NOFS);
1989 	if (!mic.data)
1990 		goto unwrap_failed;
1991 	if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
1992 		goto unwrap_failed;
1993 
1994 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
1995 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1996 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1997 	if (maj_stat != GSS_S_COMPLETE)
1998 		goto bad_mic;
1999 
2000 	gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
2001 	ret = 0;
2002 
2003 out:
2004 	kfree(mic.data);
2005 	return ret;
2006 
2007 unwrap_failed:
2008 	trace_rpcgss_unwrap_failed(task);
2009 	goto out;
2010 bad_seqno:
2011 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
2012 	goto out;
2013 bad_mic:
2014 	trace_rpcgss_verify_mic(task, maj_stat);
2015 	goto out;
2016 }
2017 
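/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>;
 *	};
 */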
2018 static noinline_for_stack int
2019 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
2020 		     struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2021 		     struct xdr_stream *xdr)
2022 {
2023 	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
2024 	struct kvec *head = rqstp->rq_rcv_buf.head;
2025 	u32 offset, opaque_len, maj_stat;
2026 	__be32 *p;
2027 
2028 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
2029 	if (unlikely(!p))
2030 		goto unwrap_failed;
2031 	opaque_len = be32_to_cpup(p++);
2032 	offset = (u8 *)(p) - (u8 *)head->iov_base;
2033 	if (offset + opaque_len > rcv_buf->len)
2034 		goto unwrap_failed;
2035 
2036 	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
2037 			      offset + opaque_len, rcv_buf);
2038 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2039 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2040 	if (maj_stat != GSS_S_COMPLETE)
2041 		goto bad_unwrap;
2042 	/* gss_unwrap decrypted the sequence number */
2043 	if (be32_to_cpup(p++) != rqstp->rq_seqno)
2044 		goto bad_seqno;
2045 
2046 	/* gss_unwrap redacts the opaque blob from the head iovec.
2047 	 * rcv_buf has changed, thus the stream needs to be reset.
2048 	 */
2049 	xdr_init_decode(xdr, rcv_buf, p, rqstp);
2050 
2051 	gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
2052 			  2 + ctx->gc_gss_ctx->slack);
2053 
2054 	return 0;
2055 unwrap_failed:
2056 	trace_rpcgss_unwrap_failed(task);
2057 	return -EIO;
2058 bad_seqno:
2059 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
2060 	return -EIO;
2061 bad_unwrap:
2062 	trace_rpcgss_unwrap(task, maj_stat);
2063 	return -EIO;
2064 }
2065 
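/* Wraparound-safe comparison of 32-bit GSS sequence numbers. */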
2066 static bool
2067 gss_seq_is_newer(u32 new, u32 old)
2068 {
2069 	return (s32)(new - old) > 0;
2070 }
2071 
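/*
 * Decide whether a request queued for (re)transmission must be
 * re-encoded: if its GSS sequence number has fallen outside the
 * acceptor's sequence window, the verifier and MIC have to be
 * regenerated with a fresh seqno before the request goes back
 * on the wire.
 */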
2072 static bool
2073 gss_xmit_need_reencode(struct rpc_task *task)
2074 {
2075 	struct rpc_rqst *req = task->tk_rqstp;
2076 	struct rpc_cred *cred = req->rq_cred;
2077 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2078 	u32 win, seq_xmit = 0;
2079 	bool ret = true;
2080 
2081 	if (!ctx)
2082 		goto out;
2083 
2084 	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2085 		goto out_ctx;
2086 
2087 	seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2088 	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
2089 		u32 tmp = seq_xmit;
2090 
2091 		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2092 		if (seq_xmit == tmp) {
2093 			ret = false;
2094 			goto out_ctx;
2095 		}
2096 	}
2097 
2098 	win = ctx->gc_win;
2099 	if (win > 0)
2100 		ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
2101 
2102 out_ctx:
2103 	gss_put_ctx(ctx);
2104 out:
2105 	trace_rpcgss_need_reencode(task, seq_xmit, ret);
2106 	return ret;
2107 }
2108 
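/*
 * Reverse of gss_wrap_req(): verify or decrypt the reply body
 * before handing it to the upper layer's decoder.
 */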
2109 static int
2110 gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
2111 {
2112 	struct rpc_rqst *rqstp = task->tk_rqstp;
2113 	struct rpc_cred *cred = rqstp->rq_cred;
2114 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
2115 			gc_base);
2116 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2117 	int status = -EIO;
2118 
2119 	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2120 		goto out_decode;
2121 	switch (gss_cred->gc_service) {
2122 	case RPC_GSS_SVC_NONE:
2123 		status = gss_unwrap_resp_auth(task, cred);
2124 		break;
2125 	case RPC_GSS_SVC_INTEGRITY:
2126 		status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2127 		break;
2128 	case RPC_GSS_SVC_PRIVACY:
2129 		status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2130 		break;
2131 	}
2132 	if (status)
2133 		goto out;
2134 
2135 out_decode:
2136 	status = rpcauth_unwrap_resp_decode(task, xdr);
2137 out:
2138 	gss_put_ctx(ctx);
2139 	return status;
2140 }
2141 
2142 static const struct rpc_authops authgss_ops = {
2143 	.owner		= THIS_MODULE,
2144 	.au_flavor	= RPC_AUTH_GSS,
2145 	.au_name	= "RPCSEC_GSS",
2146 	.create		= gss_create,
2147 	.destroy	= gss_destroy,
2148 	.hash_cred	= gss_hash_cred,
2149 	.lookup_cred	= gss_lookup_cred,
2150 	.crcreate	= gss_create_cred,
2151 	.info2flavor	= gss_mech_info2flavor,
2152 	.flavor2info	= gss_mech_flavor2info,
2153 };
2154 
2155 static const struct rpc_credops gss_credops = {
2156 	.cr_name		= "AUTH_GSS",
2157 	.crdestroy		= gss_destroy_cred,
2158 	.cr_init		= gss_cred_init,
2159 	.crmatch		= gss_match,
2160 	.crmarshal		= gss_marshal,
2161 	.crrefresh		= gss_refresh,
2162 	.crvalidate		= gss_validate,
2163 	.crwrap_req		= gss_wrap_req,
2164 	.crunwrap_resp		= gss_unwrap_resp,
2165 	.crkey_timeout		= gss_key_timeout,
2166 	.crstringify_acceptor	= gss_stringify_acceptor,
2167 	.crneed_reencode	= gss_xmit_need_reencode,
2168 };
2169 
2170 static const struct rpc_credops gss_nullops = {
2171 	.cr_name		= "AUTH_GSS",
2172 	.crdestroy		= gss_destroy_nullcred,
2173 	.crmatch		= gss_match,
2174 	.crmarshal		= gss_marshal,
2175 	.crrefresh		= gss_refresh_null,
2176 	.crvalidate		= gss_validate,
2177 	.crwrap_req		= gss_wrap_req,
2178 	.crunwrap_resp		= gss_unwrap_resp,
2179 	.crstringify_acceptor	= gss_stringify_acceptor,
2180 };
2181 
2182 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
2183 	.upcall		= gss_v0_upcall,
2184 	.downcall	= gss_pipe_downcall,
2185 	.destroy_msg	= gss_pipe_destroy_msg,
2186 	.open_pipe	= gss_pipe_open_v0,
2187 	.release_pipe	= gss_pipe_release,
2188 };
2189 
2190 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
2191 	.upcall		= gss_v1_upcall,
2192 	.downcall	= gss_pipe_downcall,
2193 	.destroy_msg	= gss_pipe_destroy_msg,
2194 	.open_pipe	= gss_pipe_open_v1,
2195 	.release_pipe	= gss_pipe_release,
2196 };
2197 
2198 static __net_init int rpcsec_gss_init_net(struct net *net)
2199 {
2200 	return gss_svc_init_net(net);
2201 }
2202 
2203 static __net_exit void rpcsec_gss_exit_net(struct net *net)
2204 {
2205 	gss_svc_shutdown_net(net);
2206 }
2207 
2208 static struct pernet_operations rpcsec_gss_net_ops = {
2209 	.init = rpcsec_gss_init_net,
2210 	.exit = rpcsec_gss_exit_net,
2211 };
2212 
2213 /*
2214  * Initialize RPCSEC_GSS module
2215  */
2216 static int __init init_rpcsec_gss(void)
2217 {
2218 	int err = 0;
2219 
2220 	err = rpcauth_register(&authgss_ops);
2221 	if (err)
2222 		goto out;
2223 	err = gss_svc_init();
2224 	if (err)
2225 		goto out_unregister;
2226 	err = register_pernet_subsys(&rpcsec_gss_net_ops);
2227 	if (err)
2228 		goto out_svc_exit;
2229 	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
2230 	return 0;
2231 out_svc_exit:
2232 	gss_svc_shutdown();
2233 out_unregister:
2234 	rpcauth_unregister(&authgss_ops);
2235 out:
2236 	return err;
2237 }
2238 
2239 static void __exit exit_rpcsec_gss(void)
2240 {
2241 	unregister_pernet_subsys(&rpcsec_gss_net_ops);
2242 	gss_svc_shutdown();
2243 	rpcauth_unregister(&authgss_ops);
2244 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
2245 }
2246 
2247 MODULE_ALIAS("rpc-auth-6");
2248 MODULE_LICENSE("GPL");
2249 module_param_named(expired_cred_retry_delay,
2250 		   gss_expired_cred_retry_delay,
2251 		   uint, 0644);
2252 MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
2253 		"the RPC engine retries an expired credential");
2254 
2255 module_param_named(key_expire_timeo,
2256 		   gss_key_expire_timeo,
2257 		   uint, 0644);
2258 MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
2259 		"credential key's lifetime during which the NFS layer cleans up "
2260 		"prior to key expiration");
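/*
 * Both parameters are also tunable at run time through sysfs, e.g.
 * (assuming this code is built as the auth_rpcgss module):
 *
 *	echo 10 > /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay
 */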
2261 
2262 module_init(init_rpcsec_gss)
2263 module_exit(exit_rpcsec_gss)
2264