1 #![cfg(feature = "hardware_buffer")]
2 use jni_sys::{jobject, JNIEnv};
3 use num_enum::{IntoPrimitive, TryFromPrimitive};
4 use std::{
5 convert::TryInto, mem::MaybeUninit, ops::Deref, os::raw::c_void, os::unix::io::RawFd,
6 ptr::NonNull,
7 };
8
/// Newtype over the NDK's `AHardwareBuffer_UsageFlags` bit flags, describing
/// how a buffer will be read/written by the CPU, GPU, and other consumers.
/// The raw flag value is exposed as the public `.0` field.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct HardwareBufferUsage(pub ffi::AHardwareBuffer_UsageFlags);
11
impl HardwareBufferUsage {
    // How often the buffer is read from the CPU. `CPU_READ_MASK` covers the
    // whole read-frequency bit field.
    pub const CPU_READ_NEVER: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_READ_NEVER);
    pub const CPU_READ_RARELY: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_READ_RARELY);
    pub const CPU_READ_OFTEN: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
    pub const CPU_READ_MASK: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_READ_MASK);

    // How often the buffer is written from the CPU. `CPU_WRITE_MASK` covers
    // the whole write-frequency bit field.
    pub const CPU_WRITE_NEVER: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_WRITE_NEVER);
    pub const CPU_WRITE_RARELY: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY);
    pub const CPU_WRITE_OFTEN: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN);
    pub const CPU_WRITE_MASK: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_CPU_WRITE_MASK);

    // GPU / system consumer usages, mirroring the NDK constants of the same name.
    pub const GPU_SAMPLED_IMAGE: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE);
    pub const GPU_FRAMEBUFFER: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER);
    pub const COMPOSER_OVERLAY: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY);
    pub const PROTECTED_CONTENT: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT);
    pub const VIDEO_ENCODE: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VIDEO_ENCODE);
    pub const SENSOR_DIRECT_DATA: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_SENSOR_DIRECT_DATA);
    pub const GPU_DATA_BUFFER: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER);
    pub const GPU_CUBE_MAP: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP);
    pub const GPU_MIPMAP_COMPLETE: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE);

    // Vendor-defined usage bits; their meaning is up to the device vendor.
    pub const VENDOR_0: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_0);
    pub const VENDOR_1: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_1);
    pub const VENDOR_2: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_2);
    pub const VENDOR_3: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_3);
    pub const VENDOR_4: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_4);
    pub const VENDOR_5: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_5);
    pub const VENDOR_6: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_6);
    pub const VENDOR_7: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_7);
    pub const VENDOR_8: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_8);
    pub const VENDOR_9: Self = Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_9);
    pub const VENDOR_10: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_10);
    pub const VENDOR_11: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_11);
    pub const VENDOR_12: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_12);
    pub const VENDOR_13: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_13);
    pub const VENDOR_14: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_14);
    pub const VENDOR_15: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_15);
    pub const VENDOR_16: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_16);
    pub const VENDOR_17: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_17);
    pub const VENDOR_18: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_18);
    pub const VENDOR_19: Self =
        Self(ffi::AHardwareBuffer_UsageFlags_AHARDWAREBUFFER_USAGE_VENDOR_19);
}
81
/// Pixel/data formats for a hardware buffer, mirroring the NDK's
/// `AHardwareBuffer_Format` constants.
///
/// `#[repr(u32)]` pins each variant to the native constant's value;
/// `TryFromPrimitive` provides a fallible `u32 -> HardwareBufferFormat`
/// conversion (used when reading a native description) and `IntoPrimitive`
/// the infallible reverse.
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[allow(non_camel_case_types)]
pub enum HardwareBufferFormat {
    R8G8B8A8_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
    R8G8B8X8_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,
    R8G8B8_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM,
    R5G6B5_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,
    R16G16B16A16_FLOAT = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,
    R10G10B10A2_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
    BLOB = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_BLOB,
    D16_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_D16_UNORM,
    D24_UNORM = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_D24_UNORM,
    D24_UNORM_S8_UINT = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,
    D32_FLOAT = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_D32_FLOAT,
    D32_FLOAT_S8_UINT = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,
    S8_UINT = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_S8_UINT,
    Y8Cb8Cr8_420 = ffi::AHardwareBuffer_Format_AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420,
}
101
/// Error wrapping the raw non-zero status code returned by a failed
/// `AHardwareBuffer_*` NDK call.
#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct HardwareBufferError(pub i32);

/// Module-local `Result` alias whose error type defaults to [`HardwareBufferError`].
pub type Result<T, E = HardwareBufferError> = std::result::Result<T, E>;

/// Rectangle used to restrict lock regions; alias for the NDK's `ARect`.
pub type Rect = ffi::ARect;
108
construct<T>(with_ptr: impl FnOnce(*mut T) -> i32) -> Result<T, HardwareBufferError>109 fn construct<T>(with_ptr: impl FnOnce(*mut T) -> i32) -> Result<T, HardwareBufferError> {
110 let mut result = MaybeUninit::uninit();
111 let status = with_ptr(result.as_mut_ptr());
112 if status == 0 {
113 Ok(unsafe { result.assume_init() })
114 } else {
115 Err(HardwareBufferError(status))
116 }
117 }
118
/// Wrapper around a non-null native `AHardwareBuffer` pointer.
///
/// This type itself has no `Drop` impl and does not manage the buffer's
/// reference count; [`HardwareBufferRef`] is the owning variant that releases
/// its reference when dropped.
#[derive(Debug)]
pub struct HardwareBuffer {
    // Raw handle to the native buffer object.
    inner: NonNull<ffi::AHardwareBuffer>,
}
123
124 impl HardwareBuffer {
125 /// Create a `HardwareBuffer` from a native pointer
126 ///
127 /// # Safety
128 /// By calling this function, you assert that it is a valid pointer to
129 /// an NDK `AHardwareBuffer`.
from_ptr(ptr: NonNull<ffi::AHardwareBuffer>) -> Self130 pub unsafe fn from_ptr(ptr: NonNull<ffi::AHardwareBuffer>) -> Self {
131 Self { inner: ptr }
132 }
133
as_ptr(&self) -> *mut ffi::AHardwareBuffer134 fn as_ptr(&self) -> *mut ffi::AHardwareBuffer {
135 self.inner.as_ptr()
136 }
137
allocate(desc: HardwareBufferDesc) -> Result<HardwareBufferRef>138 pub fn allocate(desc: HardwareBufferDesc) -> Result<HardwareBufferRef> {
139 unsafe {
140 let ptr = construct(|res| ffi::AHardwareBuffer_allocate(&desc.into_native(), res))?;
141
142 Ok(HardwareBufferRef {
143 inner: Self::from_ptr(NonNull::new_unchecked(ptr)),
144 })
145 }
146 }
147
148 /// Create a `HardwareBuffer` from JNI pointers
149 ///
150 /// # Safety
151 /// By calling this function, you assert that it these are valid pointers to JNI objects.
from_jni(env: *mut JNIEnv, hardware_buffer: jobject) -> Self152 pub unsafe fn from_jni(env: *mut JNIEnv, hardware_buffer: jobject) -> Self {
153 let ptr =
154 ffi::AHardwareBuffer_fromHardwareBuffer(env as *mut ffi::JNIEnv, hardware_buffer as _);
155
156 Self::from_ptr(NonNull::new_unchecked(ptr))
157 }
158
to_jni(&self, env: *mut JNIEnv) -> jobject159 pub fn to_jni(&self, env: *mut JNIEnv) -> jobject {
160 let ptr = unsafe {
161 ffi::AHardwareBuffer_toHardwareBuffer(env as *mut ffi::JNIEnv, self.as_ptr())
162 };
163
164 ptr as jobject
165 }
166
describe(&self) -> HardwareBufferDesc167 pub fn describe(&self) -> HardwareBufferDesc {
168 let desc = unsafe {
169 let mut result = MaybeUninit::uninit();
170 ffi::AHardwareBuffer_describe(self.as_ptr(), result.as_mut_ptr());
171 result.assume_init()
172 };
173
174 HardwareBufferDesc {
175 width: desc.width,
176 height: desc.height,
177 layers: desc.layers,
178 format: desc.format.try_into().unwrap(),
179 usage: HardwareBufferUsage(desc.usage),
180 stride: desc.stride,
181 }
182 }
183
184 #[cfg(feature = "api-level-29")]
is_supported(desc: HardwareBufferDesc) -> bool185 pub fn is_supported(desc: HardwareBufferDesc) -> bool {
186 let res = unsafe { ffi::AHardwareBuffer_isSupported(&desc.into_native()) };
187 res == 1
188 }
189
lock( &self, usage: HardwareBufferUsage, fence: Option<RawFd>, rect: Option<Rect>, ) -> Result<*mut c_void>190 pub fn lock(
191 &self,
192 usage: HardwareBufferUsage,
193 fence: Option<RawFd>,
194 rect: Option<Rect>,
195 ) -> Result<*mut c_void> {
196 let fence = fence.unwrap_or(-1);
197 let rect = match rect {
198 Some(rect) => &rect,
199 None => std::ptr::null(),
200 };
201 construct(|res| unsafe {
202 ffi::AHardwareBuffer_lock(self.as_ptr(), usage.0, fence, rect, res)
203 })
204 }
205
206 #[cfg(feature = "api-level-29")]
lock_and_get_info( &self, usage: HardwareBufferUsage, fence: Option<RawFd>, rect: Option<Rect>, ) -> Result<LockedPlaneInfo>207 pub fn lock_and_get_info(
208 &self,
209 usage: HardwareBufferUsage,
210 fence: Option<RawFd>,
211 rect: Option<Rect>,
212 ) -> Result<LockedPlaneInfo> {
213 let fence = fence.unwrap_or(-1);
214 let rect = match rect {
215 Some(rect) => &rect,
216 None => std::ptr::null(),
217 };
218 let mut virtual_address = MaybeUninit::uninit();
219 let mut bytes_per_pixel = MaybeUninit::uninit();
220 let mut bytes_per_stride = MaybeUninit::uninit();
221 let status = unsafe {
222 ffi::AHardwareBuffer_lockAndGetInfo(
223 self.as_ptr(),
224 usage.0,
225 fence,
226 rect,
227 virtual_address.as_mut_ptr(),
228 bytes_per_pixel.as_mut_ptr(),
229 bytes_per_stride.as_mut_ptr(),
230 )
231 };
232 if status == 0 {
233 Ok(unsafe {
234 LockedPlaneInfo {
235 virtual_address: virtual_address.assume_init(),
236 bytes_per_pixel: bytes_per_pixel.assume_init() as u32,
237 bytes_per_stride: bytes_per_stride.assume_init() as u32,
238 }
239 })
240 } else {
241 Err(HardwareBufferError(status))
242 }
243 }
244
245 #[cfg(feature = "api-level-29")]
lock_planes( &self, usage: HardwareBufferUsage, fence: Option<RawFd>, rect: Option<Rect>, ) -> Result<HardwareBufferPlanes>246 pub fn lock_planes(
247 &self,
248 usage: HardwareBufferUsage,
249 fence: Option<RawFd>,
250 rect: Option<Rect>,
251 ) -> Result<HardwareBufferPlanes> {
252 let fence = fence.unwrap_or(-1);
253 let rect = match rect {
254 Some(rect) => &rect,
255 None => std::ptr::null(),
256 };
257 let planes = construct(|res| unsafe {
258 ffi::AHardwareBuffer_lockPlanes(self.as_ptr(), usage.0, fence, rect, res)
259 })?;
260
261 Ok(HardwareBufferPlanes {
262 inner: planes,
263 index: 0,
264 })
265 }
266
unlock(&self) -> Result<()>267 pub fn unlock(&self) -> Result<()> {
268 let status = unsafe { ffi::AHardwareBuffer_unlock(self.as_ptr(), std::ptr::null_mut()) };
269 if status == 0 {
270 Ok(())
271 } else {
272 Err(HardwareBufferError(status))
273 }
274 }
275
276 /// Returns a fence file descriptor that will become signalled when unlocking is completed,
277 /// or `None` if unlocking is already finished.
unlock_async(&self) -> Result<Option<RawFd>>278 pub fn unlock_async(&self) -> Result<Option<RawFd>> {
279 let fence = construct(|res| unsafe { ffi::AHardwareBuffer_unlock(self.as_ptr(), res) })?;
280 Ok(match fence {
281 -1 => None,
282 fence => Some(fence),
283 })
284 }
285
recv_handle_from_unix_socket(socket_fd: RawFd) -> Result<Self>286 pub fn recv_handle_from_unix_socket(socket_fd: RawFd) -> Result<Self> {
287 unsafe {
288 let ptr =
289 construct(|res| ffi::AHardwareBuffer_recvHandleFromUnixSocket(socket_fd, res))?;
290
291 Ok(Self::from_ptr(NonNull::new_unchecked(ptr)))
292 }
293 }
294
send_handle_to_unix_socket(&self, socket_fd: RawFd) -> Result<()>295 pub fn send_handle_to_unix_socket(&self, socket_fd: RawFd) -> Result<()> {
296 unsafe {
297 let status = ffi::AHardwareBuffer_sendHandleToUnixSocket(self.as_ptr(), socket_fd);
298 if status == 0 {
299 Ok(())
300 } else {
301 Err(HardwareBufferError(status))
302 }
303 }
304 }
305
acquire(&self) -> HardwareBufferRef306 pub fn acquire(&self) -> HardwareBufferRef {
307 unsafe {
308 ffi::AHardwareBuffer_acquire(self.as_ptr());
309 }
310 HardwareBufferRef {
311 inner: HardwareBuffer { inner: self.inner },
312 }
313 }
314 }
315
/// A `HardwareBuffer` with an owned reference, the reference is released when dropped.
/// It behaves much like a strong `Rc` reference.
#[derive(Debug)]
pub struct HardwareBufferRef {
    // The wrapped buffer; exposed to callers through `Deref`.
    inner: HardwareBuffer,
}
322
// Lets all `HardwareBuffer` methods be called directly on a `HardwareBufferRef`.
impl Deref for HardwareBufferRef {
    type Target = HardwareBuffer;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
330
impl Drop for HardwareBufferRef {
    fn drop(&mut self) {
        // SAFETY: `inner` wraps a valid buffer pointer, and this ref owns one
        // reference on it which is given up here.
        unsafe {
            ffi::AHardwareBuffer_release(self.inner.as_ptr());
        }
    }
}
338
/// Safe counterpart of the NDK's `AHardwareBuffer_Desc`, describing a
/// buffer's dimensions, format, and intended usage.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct HardwareBufferDesc {
    width: u32,
    height: u32,
    // Number of image layers (e.g. for layered/array buffers).
    layers: u32,
    format: HardwareBufferFormat,
    usage: HardwareBufferUsage,
    // Row stride in pixels; filled in by the allocator when describing an
    // existing buffer.
    stride: u32,
}
348
349 impl HardwareBufferDesc {
into_native(self) -> ffi::AHardwareBuffer_Desc350 fn into_native(self) -> ffi::AHardwareBuffer_Desc {
351 ffi::AHardwareBuffer_Desc {
352 width: self.width,
353 height: self.height,
354 layers: self.layers,
355 format: self.format.try_into().unwrap(),
356 usage: self.usage.0,
357 stride: self.stride,
358 rfu0: 0,
359 rfu1: 0,
360 }
361 }
362 }
363
/// CPU-accessible description of one locked buffer plane: its mapped
/// address plus the pixel and row strides reported by the NDK.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct LockedPlaneInfo {
    // Address of the plane's first byte in the caller's address space.
    pub virtual_address: *mut c_void,
    pub bytes_per_pixel: u32,
    pub bytes_per_stride: u32,
}
370
/// Iterator over the planes of a locked buffer, as returned by
/// `HardwareBuffer::lock_planes`.
#[derive(Debug)]
pub struct HardwareBufferPlanes {
    // Native plane table (fixed-size array plus a valid-plane count).
    inner: ffi::AHardwareBuffer_Planes,
    // Index of the next plane to yield.
    index: u32,
}
376
377 impl Iterator for HardwareBufferPlanes {
378 type Item = LockedPlaneInfo;
379
next(&mut self) -> Option<LockedPlaneInfo>380 fn next(&mut self) -> Option<LockedPlaneInfo> {
381 if self.index == self.inner.planeCount {
382 None
383 } else {
384 let plane = self.inner.planes[self.index as usize];
385 self.index += 1;
386 Some(LockedPlaneInfo {
387 virtual_address: plane.data,
388 bytes_per_pixel: plane.pixelStride,
389 bytes_per_stride: plane.rowStride,
390 })
391 }
392 }
393 }
394