/*	$NetBSD: hwsq.h,v 1.3 2021/12/18 23:45:38 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
4 #ifndef __NVKM_BUS_HWSQ_H__
5 #define __NVKM_BUS_HWSQ_H__
6 #include <subdev/bus.h>
7 
/* Builder state for one hardware sequencer (hwsq) script. */
struct hwsq {
	struct nvkm_subdev *subdev;	/* non-NULL while a script is open (set in hwsq_init, cleared in hwsq_exec) */
	struct nvkm_hwsq *hwsq;		/* opcode buffer filled via the nvkm_hwsq_*() emitters */
	int sequence;			/* bumped by hwsq_init(); invalidates cached hwsq_reg data */
};
13 
/* Shadow of one register (or a strided run of register instances). */
struct hwsq_reg {
	int sequence;	/* hwsq sequence in which `data` was last written */
	bool force;	/* set by hwsq_nuke(); makes the next hwsq_mask() write unconditionally */
	u32 addr;	/* base register address */
	u32 stride; /* in bytes; distance between consecutive instances */
	u32 mask;	/* instance select: bit N -> addr + N*stride */
	u32 data;	/* cached value; 0xdeadbeef poison until first read/write */
};
22 
23 static inline struct hwsq_reg
hwsq_stride(u32 addr,u32 stride,u32 mask)24 hwsq_stride(u32 addr, u32 stride, u32 mask)
25 {
26 	return (struct hwsq_reg) {
27 		.sequence = 0,
28 		.force = 0,
29 		.addr = addr,
30 		.stride = stride,
31 		.mask = mask,
32 		.data = 0xdeadbeef,
33 	};
34 }
35 
36 static inline struct hwsq_reg
hwsq_reg2(u32 addr1,u32 addr2)37 hwsq_reg2(u32 addr1, u32 addr2)
38 {
39 	return (struct hwsq_reg) {
40 		.sequence = 0,
41 		.force = 0,
42 		.addr = addr1,
43 		.stride = addr2 - addr1,
44 		.mask = 0x3,
45 		.data = 0xdeadbeef,
46 	};
47 }
48 
49 static inline struct hwsq_reg
hwsq_reg(u32 addr)50 hwsq_reg(u32 addr)
51 {
52 	return (struct hwsq_reg) {
53 		.sequence = 0,
54 		.force = 0,
55 		.addr = addr,
56 		.stride = 0,
57 		.mask = 0x1,
58 		.data = 0xdeadbeef,
59 	};
60 }
61 
62 static inline int
hwsq_init(struct hwsq * ram,struct nvkm_subdev * subdev)63 hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
64 {
65 	int ret;
66 
67 	ret = nvkm_hwsq_init(subdev, &ram->hwsq);
68 	if (ret)
69 		return ret;
70 
71 	ram->sequence++;
72 	ram->subdev = subdev;
73 	return 0;
74 }
75 
76 static inline int
hwsq_exec(struct hwsq * ram,bool exec)77 hwsq_exec(struct hwsq *ram, bool exec)
78 {
79 	int ret = 0;
80 	if (ram->subdev) {
81 		ret = nvkm_hwsq_fini(&ram->hwsq, exec);
82 		ram->subdev = NULL;
83 	}
84 	return ret;
85 }
86 
87 static inline u32
hwsq_rd32(struct hwsq * ram,struct hwsq_reg * reg)88 hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
89 {
90 	struct nvkm_device *device = ram->subdev->device;
91 	if (reg->sequence != ram->sequence)
92 		reg->data = nvkm_rd32(device, reg->addr);
93 	return reg->data;
94 }
95 
96 static inline void
hwsq_wr32(struct hwsq * ram,struct hwsq_reg * reg,u32 data)97 hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
98 {
99 	u32 mask, off = 0;
100 
101 	reg->sequence = ram->sequence;
102 	reg->data = data;
103 
104 	for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
105 		if (mask & 1)
106 			nvkm_hwsq_wr32(ram->hwsq, reg->addr+off, reg->data);
107 
108 		off += reg->stride;
109 	}
110 }
111 
112 static inline void
hwsq_nuke(struct hwsq * ram,struct hwsq_reg * reg)113 hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
114 {
115 	reg->force = true;
116 }
117 
118 static inline u32
hwsq_mask(struct hwsq * ram,struct hwsq_reg * reg,u32 mask,u32 data)119 hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
120 {
121 	u32 temp = hwsq_rd32(ram, reg);
122 	if (temp != ((temp & ~mask) | data) || reg->force)
123 		hwsq_wr32(ram, reg, (temp & ~mask) | data);
124 	return temp;
125 }
126 
127 static inline void
hwsq_setf(struct hwsq * ram,u8 flag,int data)128 hwsq_setf(struct hwsq *ram, u8 flag, int data)
129 {
130 	nvkm_hwsq_setf(ram->hwsq, flag, data);
131 }
132 
133 static inline void
hwsq_wait(struct hwsq * ram,u8 flag,u8 data)134 hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
135 {
136 	nvkm_hwsq_wait(ram->hwsq, flag, data);
137 }
138 
139 static inline void
hwsq_wait_vblank(struct hwsq * ram)140 hwsq_wait_vblank(struct hwsq *ram)
141 {
142 	nvkm_hwsq_wait_vblank(ram->hwsq);
143 }
144 
145 static inline void
hwsq_nsec(struct hwsq * ram,u32 nsec)146 hwsq_nsec(struct hwsq *ram, u32 nsec)
147 {
148 	nvkm_hwsq_nsec(ram->hwsq, nsec);
149 }
150 #endif
151