// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if bfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == bfqg->bfqd->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing to find the parent of a bfq_group or the bfq_group
 * associated to a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	refcount_inc(&bfqg->ref);
}

static void bfqg_put(struct bfq_group *bfqg)
{
	if (refcount_dec_and_test(&bfqg->ref))
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->disk->queue->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	refcount_set(&bfqg->ref, 1);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->disk->queue->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->num_queues_with_pending_reqs = 0;
	bfqg->online = true;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);
	bool has_pending_reqs = false;

	/*
	 * No point to move bfqq to the same group, which can happen when
	 * root group is offlined
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group
	 * until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get extra reference to prevent bfqq from being freed in
	 * next possible expire or deactivate.
	 */
	bfqq->ref++;

	if (entity->in_groups_with_pending_reqs) {
		has_pending_reqs = true;
		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
	}

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	if (entity->parent &&
	    entity->parent->last_bfqq_created == bfqq)
		entity->parent->last_bfqq_created = NULL;
	else if (bfqd->last_bfqq_created == bfqq)
		bfqd->last_bfqq_created = NULL;

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (has_pending_reqs)
		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{
	struct bfq_queue *bfqq;

	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
		/* We are the only user of this bfqq, just move it */
		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		return;
	}

	/*
	 * The queue was merged to a different queue. Check
	 * that the merge chain still belongs to the same
	 * cgroup.
	 */
	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
		if (bfqq->entity.sched_data != &bfqg->sched_data)
			break;
	if (bfqq) {
		/*
		 * Some queue changed cgroup so the merge is not valid
		 * anymore. We cannot easily just cancel the merge (by
		 * clearing new_bfqq) as there may be other processes
		 * using this queue and holding refs to all queues
		 * below sync_bfqq->new_bfqq. Similarly if the merge
		 * already happened, we need to detach from bfqq now
		 * so that we cannot merge bio to a request from the
		 * old cgroup.
		 */
		bfq_put_cooperator(sync_bfqq);
		bfq_release_process_ref(bfqd, sync_bfqq);
		bic_set_bfqq(bic, NULL, true, act_idx);
	}
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	unsigned int act_idx;

	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		if (async_bfqq &&
		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}

		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
	}
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent for as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);
	bfqg->online = false;

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->disk->queue->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes = bfq_blkg_files,
	.legacy_cftypes = bfq_blkcg_legacy_files,

	.cpd_alloc_fn = bfq_cpd_alloc,
	.cpd_init_fn = bfq_cpd_init,
	.cpd_bind_fn = bfq_cpd_init,
	.cpd_free_fn = bfq_cpd_free,

	.pd_alloc_fn = bfq_pd_alloc,
	.pd_init_fn = bfq_pd_init,
	.pd_offline_fn = bfq_pd_offline,
	.pd_free_fn = bfq_pd_free,
	.pd_reset_stats_fn = bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */