/* $NetBSD: gss_aeap.c,v 1.2 2017/01/28 21:31:46 christos Exp $ */

/*
 * AEAD support
 */

#include "mech_locl.h"

/**
 * Encrypts or signs the data.
 *
 * This is a more complicated version of gss_wrap(); it allows the
 * caller to use AEAD data (signed header/trailer) and gives greater
 * control over where the encrypted data is placed.
 *
 * The maximum packet size is gss_context_stream_sizes.max_msg_size.
 *
 * The caller needs to provide the following buffers when using conf_req_flag=1 mode:
 *
 * - HEADER (of size gss_context_stream_sizes.header)
 *   { DATA or SIGN_ONLY } (optional, zero or more)
 *   PADDING (of size gss_context_stream_sizes.blocksize; if the required padding is zero it can be omitted)
 *   TRAILER (of size gss_context_stream_sizes.trailer)
 *
 * - In DCE-RPC mode, the caller can skip PADDING and TRAILER if the
 *   DATA elements are padded to a block boundary and HEADER is at
 *   least of size gss_context_stream_sizes.header + gss_context_stream_sizes.trailer.
 *
 * HEADER, PADDING and TRAILER will be shrunk to the required size if any of them is too large.
 *
 * To generate gss_wrap() compatible packets, use: HEADER | DATA | PADDING | TRAILER
 *
 * When used with conf_req_flag=0, the caller needs to provide:
 *
 * - HEADER (of size gss_context_stream_sizes.header)
 *   { DATA or SIGN_ONLY } (optional, zero or more)
 *   PADDING (of size gss_context_stream_sizes.blocksize; if the required padding is zero it can be omitted)
 *   TRAILER (of size gss_context_stream_sizes.trailer)
 *
 * The input sizes of HEADER, PADDING and TRAILER can be fetched using gss_wrap_iov_length() or
 * gss_context_query_attributes().
 *
 * @ingroup gssapi
 */


GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_iov(OM_uint32 * minor_status,
             gss_ctx_id_t context_handle,
             int conf_req_flag,
             gss_qop_t qop_req,
             int * conf_state,
             gss_iov_buffer_desc *iov,
             int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_wrap_iov == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_wrap_iov)(minor_status, ctx->gc_ctx,
                            conf_req_flag, qop_req, conf_state,
                            iov, iov_count);
}
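
/*
 * Example (illustrative sketch, not part of the original source): producing
 * a gss_wrap() compatible token with gss_wrap_iov().  The sizes of HEADER,
 * PADDING and TRAILER are first queried with gss_wrap_iov_length(), all
 * buffers are then laid out back to back in a single allocation, and
 * gss_wrap_iov() finally encrypts in place.  The context handle `ctx' and
 * the plaintext buffer `plaintext' are assumed to be provided by the
 * caller; error handling is kept minimal.
 *
 *     gss_iov_buffer_desc iov[4];
 *     OM_uint32 major, minor;
 *     unsigned char *p;
 *     size_t i, total;
 *
 *     memset(iov, 0, sizeof(iov));
 *     iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;
 *     iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
 *     iov[1].buffer.length = plaintext->length;
 *     iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING;
 *     iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER;
 *
 *     major = gss_wrap_iov_length(&minor, ctx, 1, GSS_C_QOP_DEFAULT,
 *                                 NULL, iov, 4);
 *     if (GSS_ERROR(major))
 *         return major;
 *
 *     for (i = 0, total = 0; i < 4; i++)
 *         total += iov[i].buffer.length;
 *
 *     p = malloc(total);
 *     if (p == NULL)
 *         return GSS_S_FAILURE;
 *
 *     for (i = 0; i < 4; i++) {
 *         iov[i].buffer.value = p;
 *         p += iov[i].buffer.length;
 *     }
 *     memcpy(iov[1].buffer.value, plaintext->value, plaintext->length);
 *
 *     major = gss_wrap_iov(&minor, ctx, 1, GSS_C_QOP_DEFAULT,
 *                          NULL, iov, 4);
 *
 * This mirrors what gss_wrap_aead() below does for a single payload buffer.
 */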

/**
 * Decrypts the data or verifies its signature.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_unwrap_iov(OM_uint32 *minor_status,
               gss_ctx_id_t context_handle,
               int *conf_state,
               gss_qop_t *qop_state,
               gss_iov_buffer_desc *iov,
               int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (qop_state)
        *qop_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_unwrap_iov == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_unwrap_iov)(minor_status, ctx->gc_ctx,
                              conf_state, qop_state,
                              iov, iov_count);
}
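
/*
 * Example (illustrative sketch, not part of the original source): unwrapping
 * a complete gss_wrap() style token without knowing the mechanism specific
 * header and trailer sizes, by letting the mechanism parse a STREAM buffer
 * and allocate the DATA buffer itself.  The buffer allocated by the
 * mechanism is released with gss_release_iov_buffer().  The context handle
 * `ctx' and the received token `token' are assumed to be provided by the
 * caller.
 *
 *     gss_iov_buffer_desc iov[2];
 *     OM_uint32 major, minor;
 *     int conf;
 *
 *     memset(iov, 0, sizeof(iov));
 *     iov[0].type = GSS_IOV_BUFFER_TYPE_STREAM;
 *     iov[0].buffer = *token;
 *     iov[1].type = GSS_IOV_BUFFER_TYPE_DATA | GSS_IOV_BUFFER_FLAG_ALLOCATE;
 *
 *     major = gss_unwrap_iov(&minor, ctx, &conf, NULL, iov, 2);
 *     if (GSS_ERROR(major))
 *         return major;
 *
 *     ... use iov[1].buffer as the decrypted payload ...
 *
 *     gss_release_iov_buffer(&minor, iov, 2);
 *
 * This is the same pattern gss_unwrap_aead() below uses internally.
 */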

/**
 * Update the length fields in the iov buffers for the types:
 * - GSS_IOV_BUFFER_TYPE_HEADER
 * - GSS_IOV_BUFFER_TYPE_PADDING
 * - GSS_IOV_BUFFER_TYPE_TRAILER
 *
 * Consider using gss_context_query_attributes() to fetch the data instead.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_iov_length(OM_uint32 * minor_status,
                    gss_ctx_id_t context_handle,
                    int conf_req_flag,
                    gss_qop_t qop_req,
                    int *conf_state,
                    gss_iov_buffer_desc *iov,
                    int iov_count)
{
    struct _gss_context *ctx = (struct _gss_context *) context_handle;
    gssapi_mech_interface m;

    if (minor_status)
        *minor_status = 0;
    if (conf_state)
        *conf_state = 0;
    if (ctx == NULL)
        return GSS_S_NO_CONTEXT;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    m = ctx->gc_mech;

    if (m->gm_wrap_iov_length == NULL)
        return GSS_S_UNAVAILABLE;

    return (m->gm_wrap_iov_length)(minor_status, ctx->gc_ctx,
                                   conf_req_flag, qop_req, conf_state,
                                   iov, iov_count);
}

/**
 * Free all buffers allocated by gss_wrap_iov() or gss_unwrap_iov() by
 * looking at the GSS_IOV_BUFFER_FLAG_ALLOCATED flag.
 *
 * @ingroup gssapi
 */

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_release_iov_buffer(OM_uint32 *minor_status,
                       gss_iov_buffer_desc *iov,
                       int iov_count)
{
    OM_uint32 junk;
    int i;

    if (minor_status)
        *minor_status = 0;
    if (iov == NULL && iov_count != 0)
        return GSS_S_CALL_INACCESSIBLE_READ;

    for (i = 0; i < iov_count; i++) {
        if ((iov[i].type & GSS_IOV_BUFFER_FLAG_ALLOCATED) == 0)
            continue;
        gss_release_buffer(&junk, &iov[i].buffer);
        iov[i].type &= ~GSS_IOV_BUFFER_FLAG_ALLOCATED;
    }
    return GSS_S_COMPLETE;
}

/**
 * Query the context for parameters.
 *
 * The SSPI equivalent of this function is QueryContextAttributes.
 *
 * - GSS_C_ATTR_STREAM_SIZES data is a gss_context_stream_sizes.
 *
 * @ingroup gssapi
 */

gss_OID_desc GSSAPI_LIB_FUNCTION __gss_c_attr_stream_sizes_oid_desc =
    {10, rk_UNCONST("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03")};

GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_context_query_attributes(OM_uint32 *minor_status,
                             gss_const_ctx_id_t context_handle,
                             const gss_OID attribute,
                             void *data,
                             size_t len)
{
    if (minor_status)
        *minor_status = 0;

    if (gss_oid_equal(GSS_C_ATTR_STREAM_SIZES, attribute)) {
        memset(data, 0, len);
        return GSS_S_COMPLETE;
    }

    return GSS_S_FAILURE;
}
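
/*
 * Example (illustrative sketch, not part of the original source): asking an
 * established context for its stream sizes, which give the per-buffer
 * lengths documented for gss_wrap_iov() above.  The context handle `ctx'
 * is assumed to be provided by the caller.
 *
 *     gss_context_stream_sizes sizes;
 *     OM_uint32 major, minor;
 *
 *     major = gss_context_query_attributes(&minor, ctx,
 *                                          GSS_C_ATTR_STREAM_SIZES,
 *                                          &sizes, sizeof(sizes));
 *     if (major == GSS_S_COMPLETE)
 *         ... sizes.header, sizes.trailer, sizes.blocksize and
 *             sizes.max_msg_size describe the required buffer layout ...
 */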

/*
 * AEAD wrap API for a single piece of associated data, for compatibility
 * with MIT and as specified by draft-howard-gssapi-aead-00.txt.
 *
 * @ingroup gssapi
 */
GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_wrap_aead(OM_uint32 *minor_status,
              gss_ctx_id_t context_handle,
              int conf_req_flag,
              gss_qop_t qop_req,
              gss_buffer_t input_assoc_buffer,
              gss_buffer_t input_payload_buffer,
              int *conf_state,
              gss_buffer_t output_message_buffer)
{
    OM_uint32 major_status, tmp, flags = 0;
    gss_iov_buffer_desc iov[5];
    size_t i;
    unsigned char *p;

    memset(iov, 0, sizeof(iov));

    iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER;

    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    if (input_assoc_buffer)
        iov[1].buffer = *input_assoc_buffer;

    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA;
    if (input_payload_buffer)
        iov[2].buffer.length = input_payload_buffer->length;

    gss_inquire_context(minor_status, context_handle, NULL, NULL,
                        NULL, NULL, &flags, NULL, NULL);

    /* krb5 mech rejects padding/trailer if DCE-style is set */
    iov[3].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_PADDING;
    iov[4].type = (flags & GSS_C_DCE_STYLE) ? GSS_IOV_BUFFER_TYPE_EMPTY
                                            : GSS_IOV_BUFFER_TYPE_TRAILER;

    major_status = gss_wrap_iov_length(minor_status, context_handle,
                                       conf_req_flag, qop_req, conf_state,
                                       iov, 5);
    if (GSS_ERROR(major_status))
        return major_status;

    for (i = 0, output_message_buffer->length = 0; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;

        output_message_buffer->length += iov[i].buffer.length;
    }

    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    for (i = 0, p = output_message_buffer->value; i < 5; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_SIGN_ONLY)
            continue;
        else if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA)
            memcpy(p, input_payload_buffer->value, input_payload_buffer->length);

        iov[i].buffer.value = p;
        p += iov[i].buffer.length;
    }

    major_status = gss_wrap_iov(minor_status, context_handle, conf_req_flag,
                                qop_req, conf_state, iov, 5);
    if (GSS_ERROR(major_status))
        gss_release_buffer(&tmp, output_message_buffer);

    return major_status;
}

/*
 * AEAD unwrap for a single piece of associated data, for compatibility
 * with MIT and as specified by draft-howard-gssapi-aead-00.txt.
 *
 * @ingroup gssapi
 */
GSSAPI_LIB_FUNCTION OM_uint32 GSSAPI_LIB_CALL
gss_unwrap_aead(OM_uint32 *minor_status,
                gss_ctx_id_t context_handle,
                gss_buffer_t input_message_buffer,
                gss_buffer_t input_assoc_buffer,
                gss_buffer_t output_payload_buffer,
                int *conf_state,
                gss_qop_t *qop_state)
{
    OM_uint32 major_status, tmp;
    gss_iov_buffer_desc iov[3];

    memset(iov, 0, sizeof(iov));

    iov[0].type = GSS_IOV_BUFFER_TYPE_STREAM;
    iov[0].buffer = *input_message_buffer;

    iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
    if (input_assoc_buffer)
        iov[1].buffer = *input_assoc_buffer;

    iov[2].type = GSS_IOV_BUFFER_TYPE_DATA | GSS_IOV_BUFFER_FLAG_ALLOCATE;

    major_status = gss_unwrap_iov(minor_status, context_handle, conf_state,
                                  qop_state, iov, 3);
    if (GSS_ERROR(major_status))
        gss_release_iov_buffer(&tmp, &iov[2], 1);
    else
        *output_payload_buffer = iov[2].buffer;

    return major_status;
}
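
/*
 * Example (illustrative sketch, not part of the original source): a round
 * trip through the single associated data AEAD interface above.  `assoc' is
 * signed but not encrypted; `payload' is signed and, with conf_req_flag=1,
 * also encrypted.  The context handle `ctx' and the two input buffers are
 * assumed to be provided by the caller.
 *
 *     gss_buffer_desc wrapped, unwrapped;
 *     OM_uint32 major, minor;
 *     int conf;
 *
 *     major = gss_wrap_aead(&minor, ctx, 1, GSS_C_QOP_DEFAULT,
 *                           &assoc, &payload, &conf, &wrapped);
 *     if (GSS_ERROR(major))
 *         return major;
 *
 *     major = gss_unwrap_aead(&minor, ctx, &wrapped, &assoc,
 *                             &unwrapped, &conf, NULL);
 *
 *     gss_release_buffer(&minor, &wrapped);
 *     if (major == GSS_S_COMPLETE)
 *         gss_release_buffer(&minor, &unwrapped);
 */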