Lines Matching refs:mgmt

12 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_digest_step() local
13 struct mog_fd *fmfd = mgmt->forward; in mgmt_digest_step()
20 if (mgmt->prio == MOG_PRIO_FSCK) { in mgmt_digest_step()
33 assert(mgmt->wbuf == NULL && "wbuf should be NULL here"); in mgmt_digest_step()
46 mog_file_close(mgmt->forward); in mgmt_digest_step()
47 mgmt->prio = MOG_PRIO_NONE; in mgmt_digest_step()
48 mgmt->forward = NULL; in mgmt_digest_step()
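
The matches above come from mgmt_digest_step() in what appears to be cmogstored's management-socket code: once an MD5 digest response is out, the forwarded file is closed and the connection's priority and forward pointer are reset. A minimal standalone sketch of that teardown follows; the simplified struct layout and field names are assumptions, not cmogstored's real types.

    #include <assert.h>
    #include <stddef.h>
    #include <unistd.h>

    enum prio { PRIO_NONE, PRIO_FSCK };

    /* simplified stand-in for struct mog_mgmt; the real one carries more */
    struct conn {
        enum prio prio;
        int forward_fd;   /* file being digested, -1 when idle */
        void *wbuf;       /* buffered partial response, NULL when drained */
    };

    /* teardown at the end of a digest, as on lines 46-48 above */
    void digest_done(struct conn *c)
    {
        assert(c->wbuf == NULL && "wbuf should be NULL here");
        close(c->forward_fd);
        c->prio = PRIO_NONE;
        c->forward_fd = -1;
    }
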
53 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_digest_in_progress() local
56 assert(mgmt->forward && mgmt->forward != MOG_IOSTAT && "bad forward"); in mgmt_digest_in_progress()
57 file = &mgmt->forward->as.file; in mgmt_digest_in_progress()
64 if (mgmt->wbuf == MOG_WR_ERROR) return MOG_NEXT_CLOSE; in mgmt_digest_in_progress()
65 if (mgmt->wbuf) return MOG_NEXT_WAIT_WR; in mgmt_digest_in_progress()
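
mgmt_digest_in_progress() shows the wbuf tri-state that recurs throughout this file: NULL means nothing is queued, the MOG_WR_ERROR sentinel marks a failed write, and any other pointer is a buffered partial response. A hedged sketch of that dispatch, with WR_ERROR and the enum names invented for illustration:

    enum next { NEXT_CLOSE, NEXT_WAIT_WR, NEXT_ACTIVE };

    /* sentinel pointer, analogous to cmogstored's MOG_WR_ERROR */
    #define WR_ERROR ((void *)-1)

    enum next wbuf_next(void *wbuf)
    {
        if (wbuf == WR_ERROR)
            return NEXT_CLOSE;    /* write failed: drop the client */
        if (wbuf)
            return NEXT_WAIT_WR;  /* partial write: wait for POLLOUT */
        return NEXT_ACTIVE;       /* drained: keep going */
    }
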
76 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_close() local
78 mog_rbuf_reattach_and_null(&mgmt->rbuf); in mgmt_close()
79 assert((mgmt->wbuf == NULL || mgmt->wbuf == MOG_WR_ERROR) && in mgmt_close()
88 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mog_mgmt_drop() local
90 if (mgmt->forward && mgmt->forward != MOG_IOSTAT) in mog_mgmt_drop()
91 mog_file_close(mgmt->forward); in mog_mgmt_drop()
95 void mog_mgmt_writev(struct mog_mgmt *mgmt, struct iovec *iov, int iovcnt) in mog_mgmt_writev() argument
97 struct mog_fd *mfd = mog_fd_of(mgmt); in mog_mgmt_writev()
99 assert(mgmt->wbuf == NULL && "tried to write while busy"); in mog_mgmt_writev()
100 mgmt->wbuf = mog_trywritev(mfd->fd, iov, iovcnt); in mog_mgmt_writev()
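
mog_mgmt_writev() asserts the connection is not already mid-write, then lets mog_trywritev() do a best-effort writev() and stores whatever could not be sent. A self-contained approximation of that try-write pattern is below; the wpending layout and try_writev() name are mine, and the real helper surely differs in detail.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    /* buffered tail of an interrupted write (hypothetical layout) */
    struct wpending {
        size_t len, off;
        char data[];
    };

    #define WR_ERROR ((struct wpending *)-1)

    /*
     * Best-effort writev(): NULL if everything went out, WR_ERROR on a
     * hard error, otherwise a heap copy of the unwritten tail.
     */
    struct wpending *try_writev(int fd, struct iovec *iov, int iovcnt)
    {
        ssize_t w = writev(fd, iov, iovcnt);
        size_t rest = 0;
        struct wpending *p;
        char *dst;
        int i;

        if (w < 0) {
            if (errno != EAGAIN && errno != EINTR)
                return WR_ERROR;
            w = 0;   /* treat EAGAIN/EINTR as zero progress */
        }
        for (i = 0; i < iovcnt; i++) {   /* consume what was written */
            size_t skip = (size_t)w < iov[i].iov_len ? (size_t)w : iov[i].iov_len;

            iov[i].iov_base = (char *)iov[i].iov_base + skip;
            iov[i].iov_len -= skip;
            w -= skip;
            rest += iov[i].iov_len;
        }
        if (!rest)
            return NULL;                 /* all flushed */
        p = malloc(sizeof(*p) + rest);
        if (!p)
            return WR_ERROR;
        p->len = rest;
        p->off = 0;
        for (dst = p->data, i = 0; i < iovcnt; i++) {
            memcpy(dst, iov[i].iov_base, iov[i].iov_len);
            dst += iov[i].iov_len;
        }
        return p;
    }
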
103 static enum mog_next mgmt_iostat_forever(struct mog_mgmt *mgmt) in mgmt_iostat_forever() argument
105 mog_rbuf_reattach_and_null(&mgmt->rbuf); /* no coming back from this */ in mgmt_iostat_forever()
107 mog_svc_devstats_subscribe(mgmt); in mgmt_iostat_forever()
113 static enum mog_next mgmt_wbuf_in_progress(struct mog_mgmt *mgmt) in mgmt_wbuf_in_progress() argument
115 assert(mgmt->wbuf != MOG_WR_ERROR && "still active after write error"); in mgmt_wbuf_in_progress()
116 switch (mog_tryflush(mog_fd_of(mgmt)->fd, &mgmt->wbuf)) { in mgmt_wbuf_in_progress()
119 if (mgmt->forward == MOG_IOSTAT) in mgmt_wbuf_in_progress()
120 return mgmt_iostat_forever(mgmt); in mgmt_wbuf_in_progress()
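
mgmt_wbuf_in_progress() flushes that saved tail once the socket is writable again, then either resumes normal parsing, keeps waiting, or (for a pending iostat watch, line 119) hands the connection to the stats stream. A sketch of the flush half, reusing the hypothetical wpending layout from above:

    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct wpending { size_t len, off; char data[]; };

    enum next { NEXT_CLOSE, NEXT_WAIT_WR, NEXT_ACTIVE };

    /* push the buffered tail out; free and clear it once fully written */
    enum next flush_pending(int fd, struct wpending **wbuf)
    {
        struct wpending *p = *wbuf;

        while (p->off < p->len) {
            ssize_t w = write(fd, p->data + p->off, p->len - p->off);

            if (w < 0) {
                if (errno == EAGAIN)
                    return NEXT_WAIT_WR;  /* still blocked */
                if (errno == EINTR)
                    continue;
                return NEXT_CLOSE;        /* hard error */
            }
            p->off += (size_t)w;
        }
        free(p);
        *wbuf = NULL;
        return NEXT_ACTIVE;               /* drained: resume parsing */
    }
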
132 mgmt_defer_rbuf(struct mog_mgmt *mgmt, struct mog_rbuf *rbuf, size_t buf_len) in mgmt_defer_rbuf() argument
134 struct mog_rbuf *old = mgmt->rbuf; in mgmt_defer_rbuf()
135 size_t defer_bytes = buf_len - mgmt->buf_off; in mgmt_defer_rbuf()
136 char *src = rbuf->rptr + mgmt->buf_off; in mgmt_defer_rbuf()
138 assert(mgmt->buf_off >= 0 && "mgmt->buf_off negative"); in mgmt_defer_rbuf()
142 mog_rbuf_reattach_and_null(&mgmt->rbuf); in mgmt_defer_rbuf()
148 mgmt->rbuf = mog_rbuf_new(defer_bytes); in mgmt_defer_rbuf()
149 memcpy(mgmt->rbuf->rptr, src, defer_bytes); in mgmt_defer_rbuf()
150 mgmt->rbuf->rsize = defer_bytes; in mgmt_defer_rbuf()
152 mgmt->buf_off = 0; in mgmt_defer_rbuf()
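
mgmt_defer_rbuf() exists because read buffers are normally shared per-thread: before a connection yields with unparsed bytes still in the shared rbuf, the tail from buf_off onward is copied into a private, right-sized buffer. A standalone sketch, with the rbuf layout simplified from what the field names above suggest:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* simplified read buffer, after the rbuf fields used above */
    struct rbuf {
        size_t rsize;   /* bytes currently buffered */
        size_t rcapa;   /* capacity of data[] */
        char data[];
    };

    struct rbuf *rbuf_new(size_t capa)
    {
        struct rbuf *rb = malloc(sizeof(*rb) + capa);

        if (!rb)
            abort();    /* sketch only: no graceful OOM path */
        rb->rsize = 0;
        rb->rcapa = capa;
        return rb;
    }

    /*
     * Copy the unparsed tail [*buf_off, buf_len) of a shared, per-thread
     * buffer into a private right-sized one so the shared buffer can be
     * reused while this client waits.  (The real mgmt_defer_rbuf() also
     * recycles any previous private buffer first, per line 134.)
     */
    struct rbuf *defer_rbuf(const struct rbuf *shared, size_t buf_len,
                            size_t *buf_off)
    {
        struct rbuf *priv;
        size_t defer_bytes;

        assert(buf_len >= *buf_off && "offset ran past fill level");
        defer_bytes = buf_len - *buf_off;
        if (!defer_bytes)
            return NULL;            /* nothing left over */

        priv = rbuf_new(defer_bytes);
        memcpy(priv->data, shared->data + *buf_off, defer_bytes);
        priv->rsize = defer_bytes;
        *buf_off = 0;               /* next parse starts at the copy's head */
        return priv;
    }
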
159 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_process_client() local
164 if (mgmt->mgmt_method == MOG_MGMT_METHOD_NONE) in mgmt_process_client()
167 dev = mog_dev_for(mgmt->svc, mgmt->mog_devid, false); in mgmt_process_client()
170 ioq = mgmt->prio == MOG_PRIO_NONE ? &dev->ioq : &dev->fsckq; in mgmt_process_client()
172 if (!mgmt->rbuf) in mgmt_process_client()
173 mgmt->rbuf = mog_rbuf_detach(rbuf); in mgmt_process_client()
174 mgmt->rbuf->rsize = buf_len; in mgmt_process_client()
179 switch (mgmt->mgmt_method) { in mgmt_process_client()
183 mog_mgmt_fn_size(mgmt, buf); in mgmt_process_client()
188 if (dev && mgmt->forward) in mgmt_process_client()
189 assert(mgmt->forward->as.file.ioq in mgmt_process_client()
193 mgmt->mgmt_method = MOG_MGMT_METHOD_NONE; in mgmt_process_client()
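
mgmt_process_client() runs once the parser has a complete request: it resolves the device (queueing to the per-device ioq or, under MOG_PRIO_FSCK, the fsckq), dispatches on mgmt_method, and resets the method so the same connection can parse its next request. A much-reduced sketch of that dispatch-and-reset shape, with invented method and field names:

    #include <stdio.h>

    /* parsed sidechannel commands, loosely after MOG_MGMT_METHOD_* */
    enum method { METHOD_NONE, METHOD_SIZE, METHOD_MD5 };

    struct request {
        enum method method;
        char path[4096];
    };

    /* handle one parsed request, then reset for the next on this socket */
    void process_client(struct request *req, FILE *out)
    {
        switch (req->method) {
        case METHOD_SIZE:
            fprintf(out, "size of %s requested\n", req->path);
            break;
        case METHOD_MD5:
            fprintf(out, "md5 of %s requested\n", req->path);
            break;
        case METHOD_NONE:
            break;  /* parser has no complete request yet */
        }
        req->method = METHOD_NONE;  /* mirrors the reset on line 193 */
    }
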
200 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_run() local
204 if (mgmt->wbuf == MOG_WR_ERROR) in mgmt_run()
206 if (mgmt->forward == MOG_IOSTAT) in mgmt_run()
207 return mgmt_iostat_forever(mgmt); in mgmt_run()
210 mgmt_defer_rbuf(mgmt, rbuf, buf_len); in mgmt_run()
211 mog_mgmt_reset_parser(mgmt); in mgmt_run()
212 assert(mgmt->wbuf != MOG_WR_ERROR); in mgmt_run()
213 return mgmt->wbuf ? MOG_NEXT_WAIT_WR : MOG_NEXT_ACTIVE; in mgmt_run()
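
mgmt_run() is the epilogue after one request: a write error closes the connection, an iostat watch hands it to the stats broadcaster for good, and otherwise leftover input is deferred, the parser reset, and the return value tells the event loop whether to wait for writability or keep going. A compact sketch of that decision (NEXT_IOSTAT is invented; the real code returns mgmt_iostat_forever()'s result directly):

    #define WR_ERROR ((void *)-1)   /* stand-in for MOG_WR_ERROR */

    enum next { NEXT_CLOSE, NEXT_WAIT_WR, NEXT_ACTIVE, NEXT_IOSTAT };

    /* pick the event loop's next move for this fd after one request */
    enum next run_epilogue(void *wbuf, int became_iostat)
    {
        if (wbuf == WR_ERROR)
            return NEXT_CLOSE;      /* response failed mid-write */
        if (became_iostat)
            return NEXT_IOSTAT;     /* connection is now a stats stream */
        /* the real mgmt_run() defers unparsed input and resets the
         * parser here (lines 210-211) before returning */
        return wbuf ? NEXT_WAIT_WR : NEXT_ACTIVE;
    }
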
219 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_rbuf_grow() local
223 mgmt->rbuf = *rbuf = mog_rbuf_grow(*rbuf); in mgmt_rbuf_grow()
231 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mgmt_parse_continue() local
235 assert(mgmt->wbuf == NULL && in mgmt_parse_continue()
237 if (mgmt->buf_off >= (*rbuf)->rcapa) { in mgmt_parse_continue()
243 *off = mgmt->buf_off; in mgmt_parse_continue()
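
mgmt_parse_continue() resumes a request that did not fit in one read: if the saved offset has reached the buffer's capacity, the buffer is grown (mgmt_rbuf_grow(), line 223), and the next read() appends at that offset. A sketch of the resume step, using a growable variant of the rbuf above:

    #include <stdlib.h>

    struct rbuf { size_t rsize, rcapa; char *ptr; };

    /* double capacity when a request outgrows the buffer */
    int rbuf_grow(struct rbuf *rb)
    {
        size_t capa = rb->rcapa * 2;
        char *p = realloc(rb->ptr, capa);

        if (!p)
            return -1;
        rb->ptr = p;
        rb->rcapa = capa;
        return 0;
    }

    /* resume a partial parse: ensure room, then read() appends at *off */
    int parse_continue(struct rbuf *rb, size_t buf_off, size_t *off)
    {
        if (buf_off >= rb->rcapa && rbuf_grow(rb) < 0)
            return -1;
        *off = buf_off;
        return 0;
    }
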
253 struct mog_mgmt *mgmt = &mfd->as.mgmt; in __mgmt_queue_step() local
263 if (mgmt->wbuf) return mgmt_wbuf_in_progress(mgmt); in __mgmt_queue_step()
264 if (mgmt->forward) return mgmt_digest_in_progress(mfd); in __mgmt_queue_step()
267 rbuf = mgmt->rbuf ? mgmt->rbuf : mog_rbuf_get(MOG_RBUF_BASE_SIZE); in __mgmt_queue_step()
269 off = mgmt->buf_off; in __mgmt_queue_step()
273 if (mgmt->rbuf) { in __mgmt_queue_step()
274 buf_len = mgmt->rbuf->rsize; in __mgmt_queue_step()
287 state = mog_mgmt_parse(mgmt, buf, buf_len); in __mgmt_queue_step()
288 if (mgmt->wbuf == MOG_WR_ERROR) return MOG_NEXT_CLOSE; in __mgmt_queue_step()
309 if (mgmt->rbuf == NULL) in __mgmt_queue_step()
310 mgmt->rbuf = mog_rbuf_detach(rbuf); in __mgmt_queue_step()
311 mgmt->rbuf->rsize = buf_len; in __mgmt_queue_step()
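
__mgmt_queue_step() ties the pieces together, checking in a fixed order on every readiness event: finish a pending write first (line 263), then resume an in-flight digest (line 264), and only then read and parse fresh input, borrowing a shared rbuf when the connection does not already own a private one (line 267). The ordering is the point; a sketch with stub bodies standing in for the paths sketched earlier:

    enum next { NEXT_CLOSE, NEXT_WAIT_WR, NEXT_ACTIVE };

    struct conn {
        void *wbuf;       /* buffered partial response, NULL when drained */
        void *forward;    /* file being digested/streamed, NULL when idle */
    };

    /* stubs for the flush/digest/parse paths shown above */
    static enum next flush_pending_wbuf(struct conn *c)  { (void)c; return NEXT_WAIT_WR; }
    static enum next digest_in_progress(struct conn *c)  { (void)c; return NEXT_ACTIVE; }
    static enum next read_and_parse(struct conn *c)      { (void)c; return NEXT_ACTIVE; }

    /* one step per readiness event, in __mgmt_queue_step()'s order */
    enum next queue_step(struct conn *c)
    {
        if (c->wbuf)
            return flush_pending_wbuf(c);
        if (c->forward)
            return digest_in_progress(c);
        return read_and_parse(c);
    }
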
361 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mog_mgmt_quit_step() local
362 struct mog_queue *q = mgmt->svc->queue; in mog_mgmt_quit_step()
367 if (mgmt->forward || mgmt->rbuf) { in mog_mgmt_quit_step()
374 assert(mgmt->prio == MOG_PRIO_NONE && "bad prio"); in mog_mgmt_quit_step()
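
mog_mgmt_quit_step() implements graceful shutdown: a connection with a digest in flight or buffered input (line 367) is allowed to finish; everything idle is closed. Roughly:

    enum next { NEXT_CLOSE, NEXT_ACTIVE };

    struct conn {
        void *forward;  /* in-flight digest/stream, NULL when idle */
        void *rbuf;     /* privately buffered input, NULL when empty */
    };

    /* during shutdown: let busy connections finish, drop idle ones */
    enum next quit_step(const struct conn *c)
    {
        if (c->forward || c->rbuf)
            return NEXT_ACTIVE;     /* request in flight: not yet */
        return NEXT_CLOSE;
    }
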
405 struct mog_mgmt *mgmt = &mfd->as.mgmt; in mog_mgmt_post_accept() local
410 mog_mgmt_init(mgmt, ac->svc); in mog_mgmt_post_accept()
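
mog_mgmt_post_accept() is the other end of the lifecycle: a freshly accepted socket gets zeroed per-connection state bound to its service (mog_mgmt_init(), line 410). A trivial sketch of that initialization, with the struct reduced to the fields seen above:

    #include <string.h>

    struct svc;   /* opaque per-service state */

    struct conn {
        struct svc *svc;
        void *forward, *wbuf, *rbuf;
        size_t buf_off;
    };

    /* fresh state right after accept(), as in mog_mgmt_init() */
    void conn_init(struct conn *c, struct svc *svc)
    {
        memset(c, 0, sizeof(*c));   /* prio, offsets, pointers all zeroed */
        c->svc = svc;
    }
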