Lines Matching refs:fs
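Cross-reference hits for the flow_set pointer fs in the dummynet traffic shaper; each hit shows its source line number and enclosing function, and the trailing argument/local tags mark the lines where fs is declared.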

482 	struct dn_pipe *p = q->fs->pipe;  in ready_event()
566 struct dn_flow_set *fs = q->fs; in ready_event_wfq() local
578 fs->backlogged--; in ready_event_wfq()
586 q->F += (len << MY_M) / (uint64_t)fs->weight; in ready_event_wfq()
669 pipe->sum -= q->fs->weight; in dn_expire_pipe_cb()
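The F and S fields seen above are the WF2Q+ finish and start tags: a packet advances its flow's finish time by its length (in MY_M fixed point) divided by the flow_set weight, while pipe->sum tracks the total weight of backlogged flows (compare lines 669 and 1150). A minimal sketch of the tagging, assuming the standard WF2Q+ rule that a newly backlogged flow restarts at max(old finish tag, virtual clock); the helper name is hypothetical:

    /* Hypothetical helper, not in the source: tag one packet with
     * WF2Q+ start/finish times using the same arithmetic as the
     * matched lines (len << MY_M is the fixed-point packet length). */
    static inline void
    wfq_tag_packet(struct dn_flow_queue *q, uint64_t V, int len, int weight)
    {
            q->S = (q->F > V) ? q->F : V;   /* start = max(finish, virtual clock) */
            q->F = q->S + (((uint64_t)len << MY_M) / (uint64_t)weight);
    }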
724 expire_queues(struct dn_flow_set *fs) in expire_queues() argument
726 int i, initial_elements = fs->rq_elements; in expire_queues()
728 if (fs->last_expired == time_uptime) in expire_queues()
731 fs->last_expired = time_uptime; in expire_queues()
733 for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */ in expire_queues()
736 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) { in expire_queues()
746 KASSERT(fs->rq_elements > 0, in expire_queues()
747 ("invalid rq_elements %d", fs->rq_elements)); in expire_queues()
748 fs->rq_elements--; in expire_queues()
751 return initial_elements - fs->rq_elements; in expire_queues()
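expire_queues() sweeps every hash bucket, including the overflow bucket at rq[rq_size], and returns how many idle queues it freed; the last_expired/time_uptime test rate-limits the sweep to once per second. The guard from lines 728-731, annotated:

    if (fs->last_expired == time_uptime)
            return 0;                       /* already swept this second */
    fs->last_expired = time_uptime;         /* time_uptime ticks once per second */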
759 create_queue(struct dn_flow_set *fs, int i) in create_queue() argument
763 if (fs->rq_elements > fs->rq_size * dn_max_ratio && in create_queue()
764 expire_queues(fs) == 0) { in create_queue()
768 i = fs->rq_size; in create_queue()
769 if (!LIST_EMPTY(&fs->rq[i])) in create_queue()
770 return LIST_FIRST(&fs->rq[i]); in create_queue()
777 q->fs = fs; in create_queue()
782 LIST_INSERT_HEAD(&fs->rq[i], q, q_link); in create_queue()
783 fs->rq_elements++; in create_queue()
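create_queue() applies backpressure before allocating: if the table already holds more than rq_size * dn_max_ratio queues and expire_queues() frees nothing, it reuses a queue from the overflow bucket rq[rq_size] instead of growing further. Condensed from the lines above:

    if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
        expire_queues(fs) == 0) {
            /* Over budget and nothing expired: fall back to the
             * overflow bucket rather than allocating a new queue. */
            i = fs->rq_size;
            if (!LIST_EMPTY(&fs->rq[i]))
                    return LIST_FIRST(&fs->rq[i]);
    }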
794 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id) in find_queue() argument
799 if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) { in find_queue()
800 q = LIST_FIRST(&fs->rq[0]); in find_queue()
805 id->fid_dst_ip &= fs->flow_mask.fid_dst_ip; in find_queue()
806 id->fid_src_ip &= fs->flow_mask.fid_src_ip; in find_queue()
807 id->fid_dst_port &= fs->flow_mask.fid_dst_port; in find_queue()
808 id->fid_src_port &= fs->flow_mask.fid_src_port; in find_queue()
809 id->fid_proto &= fs->flow_mask.fid_proto; in find_queue()
819 i = i % fs->rq_size; in find_queue()
826 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) { in find_queue()
843 KASSERT(fs->rq_elements > 0, in find_queue()
844 ("invalid rq_elements %d", fs->rq_elements)); in find_queue()
845 fs->rq_elements--; in find_queue()
848 if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */ in find_queue()
850 LIST_INSERT_HEAD(&fs->rq[i], q, q_link); in find_queue()
854 q = create_queue(fs, i); in find_queue()
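find_queue() first masks the 5-tuple with the flow mask, folds it into a bucket index modulo rq_size (so the hash can never land in the overflow bucket), expires dead queues it walks past, and moves a found queue to the front of its bucket so hot flows stay cheap to find (lines 848-850). A sketch of the mask-and-index step; the fold itself is elided because it is not among the matched lines:

    /* Mask the flow id down to the fields the flow mask keeps. */
    id->fid_dst_ip   &= fs->flow_mask.fid_dst_ip;
    id->fid_src_ip   &= fs->flow_mask.fid_src_ip;
    id->fid_dst_port &= fs->flow_mask.fid_dst_port;
    id->fid_src_port &= fs->flow_mask.fid_src_port;
    id->fid_proto    &= fs->flow_mask.fid_proto;
    /* ... fold the masked fields into i (hash not shown here) ... */
    i = i % fs->rq_size;            /* rq[rq_size] stays reserved for overflow */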
862 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len) in red_drops() argument
882 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len; in red_drops()
892 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q); in red_drops()
904 u_int t = (curr_time - q->q_time) / fs->lookup_step; in red_drops()
906 q->avg = (t < fs->lookup_depth) ? in red_drops()
907 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; in red_drops()
914 if (q->avg < fs->min_th) { in red_drops()
920 if (q->avg >= fs->max_th) { /* Average queue >= Max threshold */ in red_drops()
921 if (fs->flags_fs & DN_IS_GENTLE_RED) { in red_drops()
928 p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4; in red_drops()
934 } else if (q->avg > fs->min_th) { in red_drops()
940 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2; in red_drops()
942 if (fs->flags_fs & DN_QSIZE_IS_BYTES) in red_drops()
943 p_b = (p_b * len) / fs->max_pkt_size; in red_drops()
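red_drops() keeps the classic RED average in SCALE fixed point: while the queue is busy, avg += w_q * (q_size - avg) (line 892), and after an idle period of t lookup steps it decays avg through the precomputed w_q_lookup[] table (lines 904-907). The drop probability is then piecewise linear in avg; the constants below follow the conventional dummynet parameterization (c_1 = max_p/(max_th - min_th), c_2 = c_1 * min_th, with c_3/c_4 extending the gentle region toward 2 * max_th), which is an assumption since their definitions are not among the matched lines:

    p_b = 0                                avg <  min_th   (no drop)
    p_b = c_1 * avg - c_2                  min_th <= avg < max_th
    p_b = c_3 * avg - c_4                  avg >= max_th, DN_IS_GENTLE_RED set
    p_b = p_b * len / max_pkt_size         when DN_QSIZE_IS_BYTES is set

Without gentle RED, avg >= max_th drops the packet outright (line 920).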
985 struct dn_flow_set *fs, *fs_next; in dn_iterate_flowset() local
987 LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next) in dn_iterate_flowset()
988 func(fs, arg); in dn_iterate_flowset()
1010 struct dn_flow_set *fs; in dn_find_flowset() local
1013 LIST_FOREACH(fs, fs_hdr, fs_link) { in dn_find_flowset()
1014 if (fs->fs_nr == fs_nr) in dn_find_flowset()
1017 return fs; in dn_find_flowset()
1023 struct dn_flow_set *fs = NULL; in dn_locate_flowset() local
1026 fs = dn_find_flowset(pipe_nr); in dn_locate_flowset()
1032 fs = &p->fs; in dn_locate_flowset()
1034 return fs; in dn_locate_flowset()
1056 struct dn_flow_set *fs; in dummynet_io() local
1071 fs = dn_locate_flowset(pipe_nr, is_pipe); in dummynet_io()
1072 if (fs == NULL) in dummynet_io()
1075 pipe = fs->pipe; in dummynet_io()
1077 pipe = dn_find_pipe(fs->parent_nr); in dummynet_io()
1079 fs->pipe = pipe; in dummynet_io()
1082 fs->parent_nr, fs->fs_nr); in dummynet_io()
1087 q = find_queue(fs, &pkt->id); in dummynet_io()
1097 if (fs->plr && krandom() < fs->plr) in dummynet_io()
1100 if (fs->flags_fs & DN_QSIZE_IS_BYTES) { in dummynet_io()
1101 if (q->len_bytes > fs->qsize) in dummynet_io()
1104 if (q->len >= fs->qsize) in dummynet_io()
1108 if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len)) in dummynet_io()
1150 pipe->sum += fs->weight; /* Add weight of new queue */ in dummynet_io()
1155 q->F = q->S + (len << MY_M) / (uint64_t)fs->weight; in dummynet_io()
1161 fs->backlogged++; in dummynet_io()
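dummynet_io() is the enqueue path: locate the flow_set (directly for a queue, via the parent pipe otherwise), find or create the per-flow queue, then run the drop ladder before linking the packet in and, for WF2Q+, adding the flow's weight to pipe->sum. The ladder, condensed from lines 1097-1108:

    if (fs->plr && krandom() < fs->plr)
            goto dropit;                    /* random early loss (plr) */
    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
            if (q->len_bytes > fs->qsize)
                    goto dropit;            /* queue limit in bytes */
    } else if (q->len >= fs->qsize) {
            goto dropit;                    /* queue limit in packets */
    }
    if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
            goto dropit;                    /* RED/GRED decision */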
1208 purge_flow_set(struct dn_flow_set *fs, int all) in purge_flow_set() argument
1215 for (i = 0; i <= fs->rq_size; i++) { in purge_flow_set()
1218 while ((q = LIST_FIRST(&fs->rq[i])) != NULL) { in purge_flow_set()
1234 KASSERT(rq_elements == fs->rq_elements, in purge_flow_set()
1236 rq_elements, fs->rq_elements)); in purge_flow_set()
1237 fs->rq_elements = 0; in purge_flow_set()
1241 if (fs->w_q_lookup) in purge_flow_set()
1242 kfree(fs->w_q_lookup, M_DUMMYNET); in purge_flow_set()
1244 if (fs->rq) in purge_flow_set()
1245 kfree(fs->rq, M_DUMMYNET); in purge_flow_set()
1255 if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs)) in purge_flow_set()
1256 kfree(fs, M_DUMMYNET); in purge_flow_set()
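purge_flow_set() drains every bucket, cross-checks the count against rq_elements, and frees the RED lookup table and the bucket array; the final test at line 1255 only kfree()s the flow_set itself when it is not the one embedded in its pipe. Note that the second fs->pipe test there is redundant under short-circuit evaluation, so the condition reduces to:

    /* Free the flow_set only if it is standalone, i.e. not the
     * pipe-embedded &pipe->fs, which is freed with its pipe. */
    if (fs->pipe == NULL || fs != &fs->pipe->fs)
            kfree(fs, M_DUMMYNET);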
1270 purge_flow_set(&pipe->fs, 1); in purge_pipe()
1291 struct dn_flow_set *fs; in dummynet_flush() local
1311 while ((fs = LIST_FIRST(fs_hdr)) != NULL) { in dummynet_flush()
1312 LIST_REMOVE(fs, fs_link); in dummynet_flush()
1313 LIST_INSERT_HEAD(&fs_list, fs, fs_link); in dummynet_flush()
1326 while ((fs = LIST_FIRST(&fs_list)) != NULL) { in dummynet_flush()
1327 LIST_REMOVE(fs, fs_link); in dummynet_flush()
1328 purge_flow_set(fs, 1); in dummynet_flush()
1462 struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs; in config_pipe()
1497 x->fs.pipe = x; in config_pipe()
1511 for (i = 0; i <= x->fs.rq_size; i++) { in config_pipe()
1514 LIST_FOREACH(q, &x->fs.rq[i], q_link) in config_pipe()
1523 set_fs_parms(&x->fs, ioc_fs); in config_pipe()
1525 if (x->fs.rq == NULL) { /* A new pipe */ in config_pipe()
1528 alloc_hash(&x->fs, ioc_fs); in config_pipe()
1534 struct dn_flow_set *x, *fs; in config_pipe() local
1537 fs = dn_find_flowset(ioc_fs->fs_nr); in config_pipe()
1539 if (fs == NULL) { /* New flow_set */ in config_pipe()
1554 if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr) in config_pipe()
1556 x = fs; in config_pipe()
1581 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs) in fs_remove_from_heap() argument
1586 if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) { in fs_remove_from_heap()
1619 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0) in dn_unref_pipe_cb() argument
1623 if (fs->pipe == pipe) { in dn_unref_pipe_cb()
1625 pipe->pipe_nr, fs->fs_nr); in dn_unref_pipe_cb()
1626 fs->pipe = NULL; in dn_unref_pipe_cb()
1627 purge_flow_set(fs, 0); in dn_unref_pipe_cb()
1640 if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0) in delete_pipe()
1642 if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0) in delete_pipe()
1661 fs_remove_from_heap(&ready_heap, &p->fs); in delete_pipe()
1670 struct dn_flow_set *fs; in delete_pipe() local
1673 fs = dn_find_flowset(ioc_pipe->fs.fs_nr); in delete_pipe()
1674 if (fs == NULL) in delete_pipe()
1677 LIST_REMOVE(fs, fs_link); in delete_pipe()
1679 if ((p = fs->pipe) != NULL) { in delete_pipe()
1681 p->sum -= fs->weight * fs->backlogged; in delete_pipe()
1682 fs_remove_from_heap(&p->not_eligible_heap, fs); in delete_pipe()
1683 fs_remove_from_heap(&p->scheduler_heap, fs); in delete_pipe()
1685 fs_remove_from_heap(&p->idle_heap, fs); in delete_pipe()
1688 purge_flow_set(fs, 1); in delete_pipe()
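Deleting a standalone flow_set must also unhook it from its pipe's WF2Q+ state: its contribution of weight * backlogged is subtracted from pipe->sum (line 1681), and fs_remove_from_heap() scans each heap dropping entries whose queue points back at the dying flow_set. A sketch of that scan, consistent with the match test at line 1586 but otherwise assumed (heapify() is the heap-repair step used elsewhere in the dummynet heap code):

    for (i = 0; i < h->elements; ) {
            if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
                    h->p[i] = h->p[--h->elements];  /* swap in the last entry */
                    found++;
            } else {
                    i++;
            }
    }
    if (found)
            heapify(h);             /* restore heap order after removals */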
1712 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp) in dn_copy_flowqueues() argument
1717 for (i = 0; i <= fs->rq_size; i++) { in dn_copy_flowqueues()
1720 LIST_FOREACH(q, &fs->rq[i], q_link) { in dn_copy_flowqueues()
1726 if (q->fs != fs) { /* XXX ASSERT */ in dn_copy_flowqueues()
1728 i, q->fs, fs); in dn_copy_flowqueues()
1747 if (copied != fs->rq_elements) { /* XXX ASSERT */ in dn_copy_flowqueues()
1749 copied, fs->rq_elements); in dn_copy_flowqueues()
1755 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs, in dn_copy_flowset() argument
1760 ioc_fs->fs_nr = fs->fs_nr; in dn_copy_flowset()
1761 ioc_fs->flags_fs = fs->flags_fs; in dn_copy_flowset()
1762 ioc_fs->parent_nr = fs->parent_nr; in dn_copy_flowset()
1764 ioc_fs->weight = fs->weight; in dn_copy_flowset()
1765 ioc_fs->qsize = fs->qsize; in dn_copy_flowset()
1766 ioc_fs->plr = fs->plr; in dn_copy_flowset()
1768 ioc_fs->rq_size = fs->rq_size; in dn_copy_flowset()
1769 ioc_fs->rq_elements = fs->rq_elements; in dn_copy_flowset()
1771 ioc_fs->w_q = fs->w_q; in dn_copy_flowset()
1772 ioc_fs->max_th = fs->max_th; in dn_copy_flowset()
1773 ioc_fs->min_th = fs->min_th; in dn_copy_flowset()
1774 ioc_fs->max_p = fs->max_p; in dn_copy_flowset()
1776 dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask); in dn_copy_flowset()
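The copyout helpers size and serialize kernel state for the ioctl interface: the dn_calc_*_size_cb() callbacks reserve one dn_ioc_flowqueue per hashed queue, dn_copy_flowset() mirrors the flow_set fields (including the RED parameters w_q/min_th/max_th/max_p) into a dn_ioc_flowset, and dn_copy_flowqueues() walks the buckets emitting queues while cross-checking the copied count against rq_elements. A sketch of the per-flow_set sizing, where the fixed dn_ioc_flowset header term is assumed since it is not among the matched lines:

    /* Size of one exported flow_set: fixed header plus one
     * ioc flowqueue record per hashed queue. */
    *size += sizeof(struct dn_ioc_flowset) +
        fs->rq_elements * sizeof(struct dn_ioc_flowqueue);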
1785 pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue); in dn_calc_pipe_size_cb()
1789 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz) in dn_calc_fs_size_cb() argument
1794 fs->rq_elements * sizeof(struct dn_ioc_flowqueue); in dn_calc_fs_size_cb()
1806 dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE); in dn_copyout_pipe_cb()
1821 *bp = dn_copy_flowqueues(&pipe->fs, *bp); in dn_copyout_pipe_cb()
1825 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0) in dn_copyout_fs_cb() argument
1833 dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE); in dn_copyout_fs_cb()
1839 *bp = dn_copy_flowqueues(fs, *bp); in dn_copyout_fs_cb()