1 use {io, poll, Evented, Ready, Poll, PollOpt, Token};
2 use libc;
3 use zircon;
4 use zircon::AsHandleRef;
5 use sys::fuchsia::{DontDrop, poll_opts_to_wait_async, sys};
6 use std::mem;
7 use std::os::unix::io::RawFd;
8 use std::sync::{Arc, Mutex};
9
/// Properties of an `EventedFd`'s current registration
#[derive(Debug)]
pub struct EventedFdRegistration {
    /// Token under which the fd was registered with the `Poll` instance.
    token: Token,
    /// Kernel handle backing the fd. Not owned by this struct (ownership stays
    /// with fdio), so it is wrapped in `DontDrop` to prevent closing it.
    handle: DontDrop<zircon::Handle>,
    /// Signals/options used to re-arm the wait for emulated level-triggered
    /// registrations; `None` when no re-registration is needed.
    rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
}
17
18 impl EventedFdRegistration {
new(token: Token, raw_handle: sys::zx_handle_t, rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>, ) -> Self19 unsafe fn new(token: Token,
20 raw_handle: sys::zx_handle_t,
21 rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>,
22 ) -> Self
23 {
24 EventedFdRegistration {
25 token: token,
26 handle: DontDrop::new(zircon::Handle::from_raw(raw_handle)),
27 rereg_signals: rereg_signals
28 }
29 }
30
rereg_signals(&self) -> Option<(zircon::Signals, zircon::WaitAsyncOpts)>31 pub fn rereg_signals(&self) -> Option<(zircon::Signals, zircon::WaitAsyncOpts)> {
32 self.rereg_signals
33 }
34 }
35
/// An event-ed file descriptor. The file descriptor is owned by this structure.
#[derive(Debug)]
pub struct EventedFdInner {
    /// Properties of the current registration; `None` while unregistered.
    registration: Mutex<Option<EventedFdRegistration>>,

    /// Owned file descriptor.
    ///
    /// `fd` is closed on `Drop`, so modifying `fd` is a memory-unsafe operation.
    fd: RawFd,

    /// Owned `fdio_t` pointer; released via `__fdio_release` on `Drop`.
    fdio: *const sys::fdio_t,
}
50
51 impl EventedFdInner {
rereg_for_level(&self, port: &zircon::Port)52 pub fn rereg_for_level(&self, port: &zircon::Port) {
53 let registration_opt = self.registration.lock().unwrap();
54 if let Some(ref registration) = *registration_opt {
55 if let Some((rereg_signals, rereg_opts)) = registration.rereg_signals {
56 let _res =
57 registration
58 .handle.inner_ref()
59 .wait_async_handle(
60 port,
61 registration.token.0 as u64,
62 rereg_signals,
63 rereg_opts);
64 }
65 }
66 }
67
registration(&self) -> &Mutex<Option<EventedFdRegistration>>68 pub fn registration(&self) -> &Mutex<Option<EventedFdRegistration>> {
69 &self.registration
70 }
71
fdio(&self) -> &sys::fdio_t72 pub fn fdio(&self) -> &sys::fdio_t {
73 unsafe { &*self.fdio }
74 }
75 }
76
impl Drop for EventedFdInner {
    fn drop(&mut self) {
        // SAFETY: this struct owns both `fdio` and `fd` (see field docs), and
        // `drop` runs at most once, so releasing and closing them here cannot
        // double-free or race with other uses of this value.
        unsafe {
            sys::__fdio_release(self.fdio);
            // Close errors are unreportable from `drop`; ignore them.
            let _ = libc::close(self.fd);
        }
    }
}
85
// `EventedFdInner` must be manually declared `Send + Sync` because it contains a `RawFd` and a
// `*const sys::fdio_t`. These are only used to make thread-safe system calls, so accessing
// them is entirely thread-safe.
//
// Note: one minor exception to this are the calls to `libc::close` and `__fdio_release`, which
// happen on `Drop`. These accesses are safe because `drop` can only be called at most once from
// a single thread, and after it is called no other functions can be called on the `EventedFdInner`.
unsafe impl Sync for EventedFdInner {}
unsafe impl Send for EventedFdInner {}
95
/// A cloneable, shared handle to an owned, event-ed file descriptor.
/// Clones share the same `EventedFdInner` (and thus the same fd).
#[derive(Clone, Debug)]
pub struct EventedFd {
    pub inner: Arc<EventedFdInner>
}
100
101 impl EventedFd {
new(fd: RawFd) -> Self102 pub unsafe fn new(fd: RawFd) -> Self {
103 let fdio = sys::__fdio_fd_to_io(fd);
104 assert!(fdio != ::std::ptr::null(), "FileDescriptor given to EventedFd must be valid.");
105
106 EventedFd {
107 inner: Arc::new(EventedFdInner {
108 registration: Mutex::new(None),
109 fd: fd,
110 fdio: fdio,
111 })
112 }
113 }
114
handle_and_signals_for_events(&self, interest: Ready, opts: PollOpt) -> (sys::zx_handle_t, zircon::Signals)115 fn handle_and_signals_for_events(&self, interest: Ready, opts: PollOpt)
116 -> (sys::zx_handle_t, zircon::Signals)
117 {
118 let epoll_events = ioevent_to_epoll(interest, opts);
119
120 unsafe {
121 let mut raw_handle: sys::zx_handle_t = mem::uninitialized();
122 let mut signals: sys::zx_signals_t = mem::uninitialized();
123 sys::__fdio_wait_begin(self.inner.fdio, epoll_events, &mut raw_handle, &mut signals);
124
125 (raw_handle, signals)
126 }
127 }
128
register_with_lock( &self, registration: &mut Option<EventedFdRegistration>, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>129 fn register_with_lock(
130 &self,
131 registration: &mut Option<EventedFdRegistration>,
132 poll: &Poll,
133 token: Token,
134 interest: Ready,
135 opts: PollOpt) -> io::Result<()>
136 {
137 if registration.is_some() {
138 return Err(io::Error::new(
139 io::ErrorKind::AlreadyExists,
140 "Called register on an already registered file descriptor."));
141 }
142
143 let (raw_handle, signals) = self.handle_and_signals_for_events(interest, opts);
144
145 let needs_rereg = opts.is_level() && !opts.is_oneshot();
146
147 // If we need to reregister, then each registration should be `oneshot`
148 let opts = opts | if needs_rereg { PollOpt::oneshot() } else { PollOpt::empty() };
149
150 let rereg_signals = if needs_rereg {
151 Some((signals, poll_opts_to_wait_async(opts)))
152 } else {
153 None
154 };
155
156 *registration = Some(
157 unsafe { EventedFdRegistration::new(token, raw_handle, rereg_signals) }
158 );
159
160 // We don't have ownership of the handle, so we can't drop it
161 let handle = DontDrop::new(unsafe { zircon::Handle::from_raw(raw_handle) });
162
163 let registered = poll::selector(poll)
164 .register_fd(handle.inner_ref(), self, token, signals, opts);
165
166 if registered.is_err() {
167 *registration = None;
168 }
169
170 registered
171 }
172
deregister_with_lock( &self, registration: &mut Option<EventedFdRegistration>, poll: &Poll) -> io::Result<()>173 fn deregister_with_lock(
174 &self,
175 registration: &mut Option<EventedFdRegistration>,
176 poll: &Poll) -> io::Result<()>
177 {
178 let old_registration = if let Some(old_reg) = registration.take() {
179 old_reg
180 } else {
181 return Err(io::Error::new(
182 io::ErrorKind::NotFound,
183 "Called rereregister on an unregistered file descriptor."))
184 };
185
186 poll::selector(poll)
187 .deregister_fd(old_registration.handle.inner_ref(), old_registration.token)
188 }
189 }
190
191 impl Evented for EventedFd {
register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>192 fn register(&self,
193 poll: &Poll,
194 token: Token,
195 interest: Ready,
196 opts: PollOpt) -> io::Result<()>
197 {
198 self.register_with_lock(
199 &mut *self.inner.registration.lock().unwrap(),
200 poll,
201 token,
202 interest,
203 opts)
204 }
205
reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>206 fn reregister(&self,
207 poll: &Poll,
208 token: Token,
209 interest: Ready,
210 opts: PollOpt) -> io::Result<()>
211 {
212 // Take out the registration lock
213 let mut registration_lock = self.inner.registration.lock().unwrap();
214
215 // Deregister
216 self.deregister_with_lock(&mut *registration_lock, poll)?;
217
218 self.register_with_lock(
219 &mut *registration_lock,
220 poll,
221 token,
222 interest,
223 opts)
224 }
225
deregister(&self, poll: &Poll) -> io::Result<()>226 fn deregister(&self, poll: &Poll) -> io::Result<()> {
227 let mut registration_lock = self.inner.registration.lock().unwrap();
228 self.deregister_with_lock(&mut *registration_lock, poll)
229 }
230 }
231
ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32232 fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 {
233 use event_imp::ready_from_usize;
234 const HUP: usize = 0b01000;
235
236 let mut kind = 0;
237
238 if interest.is_readable() {
239 kind |= libc::EPOLLIN;
240 }
241
242 if interest.is_writable() {
243 kind |= libc::EPOLLOUT;
244 }
245
246 if interest.contains(ready_from_usize(HUP)) {
247 kind |= libc::EPOLLRDHUP;
248 }
249
250 if opts.is_edge() {
251 kind |= libc::EPOLLET;
252 }
253
254 if opts.is_oneshot() {
255 kind |= libc::EPOLLONESHOT;
256 }
257
258 if opts.is_level() {
259 kind &= !libc::EPOLLET;
260 }
261
262 kind as u32
263 }
264