X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=block%2Fcfq-iosched.c;h=08db2fc70c2900bfad9eb64ac143eb2714cf3b0f;hb=8bd435b30ecacb69bbb8b2d3e251f770b807c5b2;hp=a1f37dfd1b8b64dc9505493059c9282b50131215;hpb=155fead9b6347ead90e0b0396cb108a6ba6126c6;p=deliverable%2Flinux.git

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a1f37dfd1b8b..08db2fc70c29 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,7 +17,7 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkio_policy_type blkio_policy_cfq;
+static struct blkio_policy_type blkio_policy_cfq __maybe_unused;
 
 /*
  * tunables
@@ -224,7 +224,7 @@ struct cfq_group {
 	u64 vdisktime;
 	unsigned int weight;
 	unsigned int new_weight;
-	bool needs_update;
+	unsigned int dev_weight;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -541,14 +541,13 @@ static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 
 #else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
-static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
-					struct cfq_group *curr_cfqg) { }
-static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
-static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
-static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
-static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
-static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
-static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
@@ -838,7 +837,7 @@ static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
 {
 	u64 d = delta << CFQ_SERVICE_SHIFT;
 
-	d = d * BLKIO_WEIGHT_DEFAULT;
+	d = d * CFQ_WEIGHT_DEFAULT;
 	do_div(d, cfqg->weight);
 	return d;
 }
@@ -1165,9 +1164,9 @@ static void cfq_update_group_weight(struct cfq_group *cfqg)
 {
 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-	if (cfqg->needs_update) {
+	if (cfqg->new_weight) {
 		cfqg->weight = cfqg->new_weight;
-		cfqg->needs_update = false;
+		cfqg->new_weight = 0;
 	}
 }
 
@@ -1325,21 +1324,12 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void cfq_update_blkio_group_weight(struct blkio_group *blkg,
-					  unsigned int weight)
-{
-	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-
-	cfqg->new_weight = weight;
-	cfqg->needs_update = true;
-}
-
 static void cfq_init_blkio_group(struct blkio_group *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfq_init_cfqg_base(cfqg);
-	cfqg->weight = blkg->blkcg->weight;
+	cfqg->weight = blkg->blkcg->cfq_weight;
 }
 
 /*
@@ -1377,36 +1367,37 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 	cfqg_get(cfqg);
 }
 
-static u64 blkg_prfill_weight_device(struct seq_file *sf,
-				     struct blkg_policy_data *pd, int off)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf, void *pdata, int off)
 {
-	if (!pd->conf.weight)
+	struct cfq_group *cfqg = pdata;
+
+	if (!cfqg->dev_weight)
 		return 0;
-	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
+	return __blkg_prfill_u64(sf, pdata, cfqg->dev_weight);
 }
 
-static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				    struct seq_file *sf)
 {
 	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
-			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
+			  cfqg_prfill_weight_device, &blkio_policy_cfq, 0,
 			  false);
 	return 0;
 }
 
-static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
-			      struct seq_file *sf)
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+			    struct seq_file *sf)
 {
-	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
+	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->cfq_weight);
 	return 0;
 }
 
-static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				   const char *buf)
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				  const char *buf)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkg_policy_data *pd;
 	struct blkg_conf_ctx ctx;
+	struct cfq_group *cfqg;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, buf, &ctx);
@@ -1414,11 +1405,11 @@ static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 		return ret;
 
 	ret = -EINVAL;
-	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
-	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
-			      ctx.v <= BLKIO_WEIGHT_MAX))) {
-		pd->conf.weight = ctx.v;
-		cfq_update_blkio_group_weight(ctx.blkg, ctx.v ?: blkcg->weight);
+	cfqg = blkg_to_cfqg(ctx.blkg);
+	if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
+				ctx.v <= CFQ_WEIGHT_MAX))) {
+		cfqg->dev_weight = ctx.v;
+		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 		ret = 0;
 	}
 
@@ -1426,34 +1417,53 @@ static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 	return ret;
 }
 
-static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 	struct blkio_group *blkg;
 	struct hlist_node *n;
 
-	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 		return -EINVAL;
 
 	spin_lock_irq(&blkcg->lock);
-	blkcg->weight = (unsigned int)val;
+	blkcg->cfq_weight = (unsigned int)val;
 
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
+		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
-		if (pd && !pd->conf.weight)
-			cfq_update_blkio_group_weight(blkg, blkcg->weight);
+		if (cfqg && !cfqg->dev_weight)
+			cfqg->new_weight = blkcg->cfq_weight;
 	}
 
 	spin_unlock_irq(&blkcg->lock);
 	return 0;
 }
 
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+			   struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkio_policy_cfq,
+			  cft->private, false);
+	return 0;
+}
+
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+			     struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkio_policy_cfq,
+			  cft->private, true);
+	return 0;
+}
+
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
-				      struct blkg_policy_data *pd, int off)
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
 {
-	struct cfq_group *cfqg = (void *)pd->pdata;
+	struct cfq_group *cfqg = pdata;
 	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
 	u64 v = 0;
 
@@ -1461,7 +1471,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
 		do_div(v, samples);
 	}
-	__blkg_prfill_u64(sf, pd, v);
+	__blkg_prfill_u64(sf, pdata, v);
 	return 0;
 }
 
@@ -1472,7 +1482,7 @@ static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
-			  BLKIO_POLICY_PROP, 0, false);
+			  &blkio_policy_cfq, 0, false);
 	return 0;
 }
 #endif	/* CONFIG_DEBUG_BLK_CGROUP */
@@ -1480,62 +1490,54 @@ static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 static struct cftype cfq_blkcg_files[] = {
 	{
 		.name = "weight_device",
-		.read_seq_string = blkcg_print_weight_device,
-		.write_string = blkcg_set_weight_device,
+		.read_seq_string = cfqg_print_weight_device,
+		.write_string = cfqg_set_weight_device,
 		.max_write_len = 256,
 	},
 	{
 		.name = "weight",
-		.read_seq_string = blkcg_print_weight,
-		.write_u64 = blkcg_set_weight,
+		.read_seq_string = cfq_print_weight,
+		.write_u64 = cfq_set_weight,
 	},
 	{
 		.name = "time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.time)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.time),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "sectors",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.sectors)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.sectors),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "io_service_bytes",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.service_bytes)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.service_bytes),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 	{
 		.name = "io_serviced",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.serviced)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.serviced),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 	{
 		.name = "io_service_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.service_time)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.service_time),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 	{
 		.name = "io_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.wait_time)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.wait_time),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 	{
 		.name = "io_merged",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.merged)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.merged),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 	{
 		.name = "io_queued",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.queued)),
-		.read_seq_string = blkcg_print_rwstat,
+		.private = offsetof(struct cfq_group, stats.queued),
+		.read_seq_string = cfqg_print_rwstat,
 	},
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	{
@@ -1544,33 +1546,28 @@ static struct cftype cfq_blkcg_files[] = {
 	},
 	{
 		.name = "group_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.group_wait_time)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.group_wait_time),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "idle_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.idle_time)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.idle_time),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "empty_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.empty_time)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.empty_time),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "dequeue",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.dequeue)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.dequeue),
+		.read_seq_string = cfqg_print_stat,
 	},
 	{
 		.name = "unaccounted_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct cfq_group, stats.unaccounted_time)),
-		.read_seq_string = blkcg_print_stat,
+		.private = offsetof(struct cfq_group, stats.unaccounted_time),
+		.read_seq_string = cfqg_print_stat,
 	},
 #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 	{ }	/* terminate */
@@ -3941,7 +3938,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 #ifndef CONFIG_CFQ_GROUP_IOSCHED
 	kfree(cfqd->root_group);
 #endif
-	update_root_blkg_pd(q, BLKIO_POLICY_PROP);
+	update_root_blkg_pd(q, &blkio_policy_cfq);
 	kfree(cfqd);
 }
 
@@ -3983,7 +3980,7 @@ static int cfq_init_queue(struct request_queue *q)
 		return -ENOMEM;
 	}
 
-	cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
+	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
 
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
@@ -4160,7 +4157,6 @@ static struct blkio_policy_type blkio_policy_cfq = {
 		.blkio_init_group_fn =		cfq_init_blkio_group,
 		.blkio_reset_group_stats_fn =	cfqg_stats_reset,
 	},
-	.plid = BLKIO_POLICY_PROP,
 	.pdata_size = sizeof(struct cfq_group),
 	.cftypes = cfq_blkcg_files,
 };
@@ -4184,27 +4180,31 @@ static int __init cfq_init(void)
 #else
 	cfq_group_idle = 0;
 #endif
+
+	ret = blkio_policy_register(&blkio_policy_cfq);
+	if (ret)
+		return ret;
+
 	cfq_pool = KMEM_CACHE(cfq_queue, 0);
 	if (!cfq_pool)
-		return -ENOMEM;
+		goto err_pol_unreg;
 
 	ret = elv_register(&iosched_cfq);
-	if (ret) {
-		kmem_cache_destroy(cfq_pool);
-		return ret;
-	}
+	if (ret)
+		goto err_free_pool;
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	blkio_policy_register(&blkio_policy_cfq);
-#endif
 	return 0;
+
+err_free_pool:
+	kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+	blkio_policy_unregister(&blkio_policy_cfq);
+	return ret;
 }
 
 static void __exit cfq_exit(void)
 {
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
 	blkio_policy_unregister(&blkio_policy_cfq);
-#endif
 	elv_unregister(&iosched_cfq);
 	kmem_cache_destroy(cfq_pool);
 }