// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"

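/**
 * kvm_s390_pv_destroy_cpu - Destroy the Ultravisor part of a protected vcpu.
 * @vcpu: the vcpu
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * Issues the Destroy Secure CPU UV call if the vcpu has a handle. The
 * donated cpu base storage is freed on success and intentionally leaked
 * on failure; the sida page is freed and the protected virtualization
 * fields of the SIE control block are cleared in either case.
 *
 * Return: 0 on success, -EIO if the UV call failed.
 */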
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc = 0;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
				   UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

		KVM_UV_EVENT(vcpu->kvm, 3,
			     "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
			     vcpu->vcpu_id, *rc, *rrc);
		WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
			  *rc, *rrc);
	}
	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}

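/**
 * kvm_s390_pv_create_cpu - Create the Ultravisor part of a protected vcpu.
 * @vcpu: the vcpu
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * Allocates the cpu base storage that is donated to the Ultravisor,
 * allocates the sida page and issues the Create Secure CPU UV call.
 * On failure everything is torn down again via kvm_s390_pv_destroy_cpu().
 *
 * Return: 0 on success, -EINVAL if the vcpu already has a handle,
 * -ENOMEM on allocation failure, -EIO if the UV call failed.
 */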
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}

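/**
 * kvm_s390_pv_alloc_vm - Allocate the memory that is donated to the
 * Ultravisor for a protected configuration.
 * @kvm: the guest
 *
 * Allocates the base configuration storage and the variable storage,
 * whose size is derived from the current guest memory size.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */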
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}

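/**
 * kvm_s390_pv_init_vm - Create the protected configuration for a guest.
 * @kvm: the guest
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * Allocates the donated memory and issues the Create Secure Configuration
 * UV call. On failure the memory is freed again, either directly or via
 * kvm_s390_pv_deinit_vm() when the Ultravisor indicates that the
 * configuration needs a destroy.
 *
 * Return: 0 on success, a negative error code if the allocation failed,
 * -EIO if the UV call failed.
 */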
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	return 0;
}

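/**
 * kvm_s390_pv_set_sec_parms - Set the security parameters of a protected guest.
 * @kvm: the guest
 * @hdr: kernel address of the image header that holds the parameters
 * @length: length of the header
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * On success the mm context is marked as protected.
 *
 * Return: 0 on success, -EINVAL if the UV call failed.
 */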
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}

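/**
 * unpack_one - Unpack one page of the encrypted guest image.
 * @kvm: the guest
 * @addr: guest address of the page
 * @tweak: tweak used for decryption
 * @offset: offset of the page within the image, used as second tweak part
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * Return: 0 on success, -EAGAIN if the page could not be made secure
 * yet, another negative error code otherwise.
 */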
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

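/**
 * kvm_s390_pv_unpack - Unpack the encrypted guest image into guest memory.
 * @kvm: the guest
 * @addr: guest address where the image starts, must be page aligned
 * @size: size of the image, must be a multiple of the page size
 * @tweak: tweak used for decryption
 * @rc: return code of the UV call header
 * @rrc: return reason code of the UV call header
 *
 * Unpacks the image page by page; pages that report -EAGAIN are retried
 * unless a fatal signal is pending.
 *
 * Return: 0 on success, -EINVAL for unaligned or empty input, otherwise
 * the error reported by unpack_one().
 */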
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}

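/**
 * kvm_s390_pv_set_cpu_state - Request a cpu state change from the Ultravisor.
 * @vcpu: the vcpu
 * @state: the requested cpu state
 *
 * Return: 0 on success, -EINVAL if the UV call failed.
 */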
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}