xref: /linux/arch/s390/kernel/ctlreg.c (revision d642ef71)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright IBM Corp. 1999, 2023
 */

#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cache.h>
#include <asm/abs_lowcore.h>
#include <asm/ctlreg.h>

/*
 * system_ctl_lock guards access to the global control register
 * contents which are kept in the control register save area within
 * the absolute lowcore at physical address zero.
 */
static DEFINE_SPINLOCK(system_ctl_lock);

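/*
 * system_ctlreg_lock() and system_ctlreg_unlock() wrap system_ctl_lock
 * so that callers outside this file can serialize updates to the
 * control register save area; the __acquires/__releases annotations
 * keep sparse's context checking happy.
 */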
void system_ctlreg_lock(void)
	__acquires(&system_ctl_lock)
{
	spin_lock(&system_ctl_lock);
}

void system_ctlreg_unlock(void)
	__releases(&system_ctl_lock)
{
	spin_unlock(&system_ctl_lock);
}

static bool system_ctlreg_area_init __ro_after_init;

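/*
 * Initialize both the boot CPU's and the absolute lowcore control
 * register save areas with the current contents of control registers
 * 0-15, and mark the save area as usable for system_ctlreg_modify().
 */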
void __init system_ctlreg_init_save_area(struct lowcore *lc)
{
	struct lowcore *abs_lc;

	abs_lc = get_abs_lowcore();
	__local_ctl_store(0, 15, lc->cregs_save_area);
	__local_ctl_store(0, 15, abs_lc->cregs_save_area);
	put_abs_lowcore(abs_lc);
	system_ctlreg_area_init = true;
}

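/*
 * Parameters passed to ctlreg_callback(): which control register to
 * touch (cr), what kind of update (request), and either the complete
 * new value (val, for CTLREG_LOAD) or an and/or mask pair
 * (andval/orval, for CTLREG_SET_BIT and CTLREG_CLEAR_BIT).
 */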
struct ctlreg_parms {
	unsigned long andval;
	unsigned long orval;
	unsigned long val;
	int request;
	int cr;
};

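/*
 * Apply the requested update to the calling CPU's control registers:
 * store CR0-CR15, patch the one register described by info, and load
 * all registers back. Called on each CPU via on_each_cpu(), or
 * directly with interrupts disabled during early boot.
 */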
static void ctlreg_callback(void *info)
{
	struct ctlreg_parms *pp = info;
	struct ctlreg regs[16];

	__local_ctl_store(0, 15, regs);
	if (pp->request == CTLREG_LOAD) {
		regs[pp->cr].val = pp->val;
	} else {
		regs[pp->cr].val &= pp->andval;
		regs[pp->cr].val |= pp->orval;
	}
	__local_ctl_load(0, 15, regs);
}

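/*
 * Run ctlreg_callback() on all online CPUs, or only on the current CPU
 * with interrupts disabled while the system is still booting and the
 * SMP infrastructure may not be available yet.
 */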
static void system_ctlreg_update(void *info)
{
	unsigned long flags;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * For very early calls do not call on_each_cpu()
		 * since not everything might be set up yet.
		 */
		local_irq_save(flags);
		ctlreg_callback(info);
		local_irq_restore(flags);
	} else {
		on_each_cpu(ctlreg_callback, info, 1);
	}
}

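/*
 * Modify a control register on all CPUs and in the control register
 * save area: set a single bit (CTLREG_SET_BIT), clear a single bit
 * (CTLREG_CLEAR_BIT), or load a complete new value (CTLREG_LOAD).
 * The save area within the absolute lowcore is only updated once
 * system_ctlreg_init_save_area() has marked it as initialized.
 *
 * Illustrative call (bit numbers count from the least significant
 * bit, matching the "1UL << data" shifts below):
 *
 *	system_ctlreg_modify(0, 17, CTLREG_SET_BIT);
 *
 * sets bit 17 in control register 0 on every CPU and in the save
 * area. Callers normally reach this through the wrapper helpers in
 * <asm/ctlreg.h>, e.g. system_ctl_set_bit().
 */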
void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
{
	struct ctlreg_parms pp = { .cr = cr, .request = request, };
	struct lowcore *abs_lc;

	switch (request) {
	case CTLREG_SET_BIT:
		pp.orval  = 1UL << data;
		pp.andval = -1UL;
		break;
	case CTLREG_CLEAR_BIT:
		pp.orval  = 0;
		pp.andval = ~(1UL << data);
		break;
	case CTLREG_LOAD:
		pp.val = data;
		break;
	}
	if (system_ctlreg_area_init) {
		system_ctlreg_lock();
		abs_lc = get_abs_lowcore();
		if (request == CTLREG_LOAD) {
			abs_lc->cregs_save_area[cr].val = pp.val;
		} else {
			abs_lc->cregs_save_area[cr].val &= pp.andval;
			abs_lc->cregs_save_area[cr].val |= pp.orval;
		}
		put_abs_lowcore(abs_lc);
		system_ctlreg_update(&pp);
		system_ctlreg_unlock();
	} else {
		system_ctlreg_update(&pp);
	}
}
EXPORT_SYMBOL(system_ctlreg_modify);