// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_queue.c
 *	Implements queues that store template measurements and
 *	maintain an aggregate over the stored measurements
 *	in the pre-configured TPM PCR (if available).
 *	The measurement list is append-only. No entry is
 *	ever removed or changed during the boot-cycle.
 */

#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"

#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
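
/*
 * Note: when CONFIG_IMA_KEXEC is disabled, the serialized list size is
 * not tracked; binary_runtime_size stays pinned at the ULONG_MAX
 * sentinel (see ima_add_digest_entry() below).
 */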
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/* mutex protects atomicity of extending the measurement list
 * and extending the TPM PCR aggregate. Since tpm_pcr_extend() can take
 * a long time (and the TPM driver uses a mutex), we can't use a spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);

/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
						       int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
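	/*
	 * The measurement list and hash table are append-only (see the
	 * header comment); entries are never removed, so the RCU read
	 * lock alone is enough to walk a bucket concurrently with writers.
	 */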
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable-length fields (e.g., the template name and data).
 */
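/*
 * A sketch of the serialized record the sums below correspond to
 * (field order as accumulated; widths follow the sizeof() terms):
 *
 *	u32	pcr;
 *	u8	digest[TPM_DIGEST_SIZE];
 *	int	template_name_len;
 *	char	template_name[template_name_len];
 *		template_data_len;	(sizeof(entry->template_data_len))
 *	u8	template_data[template_data_len];
 */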
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += TPM_DIGEST_SIZE;
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}

/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
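	/*
	 * Entries restored across kexec are queued on the measurement
	 * list only; as noted above, they are not re-added to the
	 * lookup hash table.
	 */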
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

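	/*
	 * Maintain the running serialized size only while it is being
	 * tracked (i.e. not already at the ULONG_MAX sentinel),
	 * saturating at ULONG_MAX on overflow.
	 */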
	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}

/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}

static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
	int result = 0;

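	/* TPM bypass: with no TPM chip, report success without extending. */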
	if (!ima_tpm_chip)
		return result;

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}

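/*
 * A minimal caller sketch, assuming an already-initialized template
 * entry (the op string and error handling shown are illustrative):
 *
 *	result = ima_add_template_entry(entry, violation,
 *					"add_template_measure",
 *					inode, filename);
 *	if (result < 0 && result != -EEXIST)
 *		... error handling, e.g. freeing the entry ...
 */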
/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 *digest = entry->digests[ima_hash_algo_idx].digest;
	struct tpm_digest *digests_arg = entry->digests;
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
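	/*
	 * Except for violations, skip duplicates: a digest already
	 * recorded for this PCR is not measured again.
	 */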
	if (!violation) {
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry, 1);
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

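	/*
	 * Violations invalidate the PCR: extend it with the pre-allocated
	 * 0xff-filled digests set up in ima_init_digests(), so the PCR
	 * value can no longer match a valid aggregate.
	 */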
	if (violation)		/* invalidate pcr */
		digests_arg = digests;

	tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}

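/*
 * Queue a measurement entry carried across kexec on the measurement
 * list. The TPM PCR already reflects this measurement from the
 * previous kernel, so it is not re-extended here, and (per
 * ima_add_digest_entry()) the entry is not added to the hash table.
 */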
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}

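/*
 * Pre-allocate the tpm_digest array used to extend a PCR: one entry per
 * allocated TPM bank, each filled with 0xff so that a violation extend
 * invalidates the bank (see ima_add_template_entry() above).
 */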
int __init ima_init_digests(void)
{
	u16 digest_size;
	u16 crypto_id;
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (crypto_id == HASH_ALGO__LAST)
			digest_size = SHA1_DIGEST_SIZE;

		memset(digests[i].digest, 0xff, digest_size);
	}

	return 0;
}