Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 17:23:36 +00:00)
Merge branch 'for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo:
 "A lot of activity on the cgroup front. Most changes aren't visible to
  userland at all at this point and are laying the foundation for the
  planned unified hierarchy.

   - The biggest change is decoupling the lifetime management of css
     (cgroup_subsys_state) from that of cgroup's. Because controllers
     (cpu, memory, block and so on) will need to be dynamically enabled
     and disabled, css, which is the association point between a cgroup
     and a controller, may come and go dynamically across the lifetime
     of a cgroup. Till now, css's were created when the associated
     cgroup was created and stayed till the cgroup got destroyed.
     Assumptions around this tight coupling permeated through cgroup
     core and controllers. These assumptions are gradually removed,
     which makes up the bulk of the patches, and the css destruction
     path is completely decoupled from the cgroup destruction path.
     Note that decoupling of the creation path is relatively easy on
     top of these changes and the patchset is pending for the next
     window.

   - cgroup has its own event mechanism cgroup.event_control, which is
     only used by memcg. It is overly complex, trying to achieve high
     flexibility whose benefits seem dubious at best. Going forward,
     new events will simply generate file-modified events and the
     existing mechanism is being made specific to memcg. This pull
     request contains preparatory patches for that change.

   - Various fixes and cleanups"

Fixed up conflict in kernel/cgroup.c as per Tejun.

* 'for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (69 commits)
  cgroup: fix cgroup_css() invocation in css_from_id()
  cgroup: make cgroup_write_event_control() use css_from_dir() instead of __d_cgrp()
  cgroup: make cgroup_event hold onto cgroup_subsys_state instead of cgroup
  cgroup: implement CFTYPE_NO_PREFIX
  cgroup: make cgroup_css() take cgroup_subsys * instead and allow NULL subsys
  cgroup: rename cgroup_css_from_dir() to css_from_dir() and update its syntax
  cgroup: fix cgroup_write_event_control()
  cgroup: fix subsystem file accesses on the root cgroup
  cgroup: change cgroup_from_id() to css_from_id()
  cgroup: use css_get() in cgroup_create() to check CSS_ROOT
  cpuset: remove an unncessary forward declaration
  cgroup: RCU protect each cgroup_subsys_state release
  cgroup: move subsys file removal to kill_css()
  cgroup: factor out kill_css()
  cgroup: decouple cgroup_subsys_state destruction from cgroup destruction
  cgroup: replace cgroup->css_kill_cnt with ->nr_css
  cgroup: bounce cgroup_subsys_state ref kill confirmation to a work item
  cgroup: move cgroup->subsys[] assignment to online_css()
  cgroup: reorganize css init / exit paths
  cgroup: add __rcu modifier to cgroup->subsys[]
  ...
This commit is contained in: commit 32dad03d16
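To make the interface change described above concrete, here is a minimal sketch of what the new css-based callbacks look like from a controller's point of view. It is not taken from this series; the controller name, state structure, and file handler are hypothetical, and only the signatures mirror the 3.12 interface:

#include <linux/cgroup.h>

struct example_state {				/* hypothetical controller state */
	struct cgroup_subsys_state css;		/* embedded so container_of() works */
	u64 limit;
};

static inline struct example_state *css_to_example(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct example_state, css) : NULL;
}

/* 3.12-style read_u64 handler: the css is passed in directly.  The old
 * interface passed a struct cgroup * and every controller had to look up
 * its own css from it first.
 */
static u64 example_read_limit(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return css_to_example(css)->limit;
}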
block/blk-cgroup.c

@@ -437,10 +437,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 	return &blkg->rl;
 }
 
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
-			     u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+			     struct cftype *cftype, u64 val)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
 	int i;
 
@@ -614,15 +614,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	u64 sum;
+	struct cgroup_subsys_state *pos_css;
+	u64 sum = 0;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_stat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_stat *stat = (void *)pos_pd + off;
 
@@ -649,16 +647,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	struct blkg_rwstat sum;
+	struct cgroup_subsys_state *pos_css;
+	struct blkg_rwstat sum = { };
 	int i;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_rwstat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
 		struct blkg_rwstat tmp;
@@ -765,18 +761,18 @@ struct cftype blkcg_files[] = {
 
 /**
  * blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
  * removed while holding both q and blkcg locks. As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	spin_lock_irq(&blkcg->lock);
 
@@ -798,21 +794,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
 	spin_unlock_irq(&blkcg->lock);
 }
 
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
-	struct cgroup *parent = cgroup->parent;
 
-	if (!parent) {
+	if (!parent_css) {
 		blkcg = &blkcg_root;
 		goto done;
 	}
@@ -883,14 +879,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
 */
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+			    struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -1127,7 +1124,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
 
 	/* kill the intf files first */
 	if (pol->cftypes)
-		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+		cgroup_rm_cftypes(pol->cftypes);
 
 	/* unregister and update blkgs */
 	blkcg_policy[pol->plid] = NULL;
block/blk-cgroup.h

@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkcg, css);
+	return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkcg, css);
+	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
 }
 
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
 	if (bio && bio->bi_css)
-		return container_of(bio->bi_css, struct blkcg, css);
+		return css_to_blkcg(bio->bi_css);
 	return task_blkcg(current);
 }
 
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
 */
 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
 {
-	struct cgroup *pcg = blkcg->css.cgroup->parent;
-
-	return pcg ? cgroup_to_blkcg(pcg) : NULL;
+	return css_to_blkcg(css_parent(&blkcg->css));
 }
 
 /**
@@ -288,32 +284,33 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 /**
  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
 *
 * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
 */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)		\
-	cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same.
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
 */
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg)	\
-	cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
@@ -576,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
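The converted macros above are thin wrappers around the new css descendant iterators. A minimal usage sketch follows; it is not from this series, and the controller state and helper names are made up. The walk must run under rcu_read_lock(), and unlike the old cgroup walk it now visits the root css first:

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

struct example_walk_state {			/* hypothetical controller state */
	struct cgroup_subsys_state css;
	bool throttled;
};

static inline struct example_walk_state *css_to_walk_state(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct example_walk_state, css) : NULL;
}

/* Propagate a flag from @root_css to every descendant css.  With the 3.12
 * iterators, @root_css itself is the first node visited.
 */
static void example_propagate_throttled(struct cgroup_subsys_state *root_css, bool on)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root_css) {
		/* @pos may not have finished ->css_online() yet; a real
		 * controller must verify liveness before trusting it.
		 */
		css_to_walk_state(pos)->throttled = on;
	}
	rcu_read_unlock();
}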
@ -1293,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
|
||||
return __blkg_prfill_rwstat(sf, pd, &rwstat);
|
||||
}
|
||||
|
||||
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
|
||||
cft->private, true);
|
||||
@ -1325,31 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
return __blkg_prfill_u64(sf, pd, v);
|
||||
}
|
||||
|
||||
static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int tg_print_conf_u64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
|
||||
blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
|
||||
&blkcg_policy_throtl, cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int tg_print_conf_uint(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
|
||||
blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
|
||||
&blkcg_policy_throtl, cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
|
||||
bool is_u64)
|
||||
static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
const char *buf, bool is_u64)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
struct blkg_conf_ctx ctx;
|
||||
struct throtl_grp *tg;
|
||||
struct throtl_service_queue *sq;
|
||||
struct blkcg_gq *blkg;
|
||||
struct cgroup *pos_cgrp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
int ret;
|
||||
|
||||
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
|
||||
@ -1379,8 +1379,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
|
||||
* restrictions in the whole hierarchy and allows them to bypass
|
||||
* blk-throttle.
|
||||
*/
|
||||
tg_update_has_rules(tg);
|
||||
blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
|
||||
blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
|
||||
tg_update_has_rules(blkg_to_tg(blkg));
|
||||
|
||||
/*
|
||||
@ -1403,16 +1402,16 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
|
||||
static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
const char *buf)
|
||||
{
|
||||
return tg_set_conf(cgrp, cft, buf, true);
|
||||
return tg_set_conf(css, cft, buf, true);
|
||||
}
|
||||
|
||||
static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
|
||||
static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
const char *buf)
|
||||
{
|
||||
return tg_set_conf(cgrp, cft, buf, false);
|
||||
return tg_set_conf(css, cft, buf, false);
|
||||
}
|
||||
|
||||
static struct cftype throtl_files[] = {
|
||||
@ -1623,7 +1622,7 @@ void blk_throtl_drain(struct request_queue *q)
|
||||
{
|
||||
struct throtl_data *td = q->td;
|
||||
struct blkcg_gq *blkg;
|
||||
struct cgroup *pos_cgrp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
struct bio *bio;
|
||||
int rw;
|
||||
|
||||
@ -1636,11 +1635,9 @@ void blk_throtl_drain(struct request_queue *q)
|
||||
* better to walk service_queue tree directly but blkg walk is
|
||||
* easier.
|
||||
*/
|
||||
blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
|
||||
blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
|
||||
tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
|
||||
|
||||
tg_drain_bios(&td_root_tg(td)->service_queue);
|
||||
|
||||
/* finally, transfer bios from top-level tg's into the td */
|
||||
tg_drain_bios(&td->service_queue);
|
||||
|
||||
|
@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
|
||||
return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
|
||||
}
|
||||
|
||||
static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
|
||||
cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
|
||||
false);
|
||||
blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
|
||||
&blkcg_policy_cfq, 0, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
|
||||
return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
|
||||
}
|
||||
|
||||
static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
|
||||
static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
|
||||
cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
|
||||
false);
|
||||
blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
|
||||
&blkcg_policy_cfq, 0, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
|
||||
static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
|
||||
seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
seq_printf(sf, "%u\n",
|
||||
cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
|
||||
seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf, bool is_leaf_weight)
|
||||
static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, const char *buf,
|
||||
bool is_leaf_weight)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
struct blkg_conf_ctx ctx;
|
||||
struct cfq_group *cfqg;
|
||||
int ret;
|
||||
@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf)
|
||||
static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, const char *buf)
|
||||
{
|
||||
return __cfqg_set_weight_device(cgrp, cft, buf, false);
|
||||
return __cfqg_set_weight_device(css, cft, buf, false);
|
||||
}
|
||||
|
||||
static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf)
|
||||
static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, const char *buf)
|
||||
{
|
||||
return __cfqg_set_weight_device(cgrp, cft, buf, true);
|
||||
return __cfqg_set_weight_device(css, cft, buf, true);
|
||||
}
|
||||
|
||||
static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
|
||||
bool is_leaf_weight)
|
||||
static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 val, bool is_leaf_weight)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
struct blkcg_gq *blkg;
|
||||
|
||||
if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
|
||||
@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
|
||||
static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 val)
|
||||
{
|
||||
return __cfq_set_weight(cgrp, cft, val, false);
|
||||
return __cfq_set_weight(css, cft, val, false);
|
||||
}
|
||||
|
||||
static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
|
||||
static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, u64 val)
|
||||
{
|
||||
return __cfq_set_weight(cgrp, cft, val, true);
|
||||
return __cfq_set_weight(css, cft, val, true);
|
||||
}
|
||||
|
||||
static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
|
||||
static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
|
||||
cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
|
||||
cft->private, true);
|
||||
@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
|
||||
return __blkg_prfill_rwstat(sf, pd, &sum);
|
||||
}
|
||||
|
||||
static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
|
||||
&blkcg_policy_cfq, cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
|
||||
&blkcg_policy_cfq, cft->private, true);
|
||||
@ -1810,10 +1810,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
|
||||
}
|
||||
|
||||
/* print avg_queue_size */
|
||||
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *sf)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkcg *blkcg = css_to_blkcg(css);
|
||||
|
||||
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
|
||||
&blkcg_policy_cfq, 0, false);
|
||||
|
fs/bio.c (2 lines changed)

@@ -1956,7 +1956,7 @@ int bio_associate_current(struct bio *bio)
 
 	/* associate blkcg if exists */
 	rcu_read_lock();
-	css = task_subsys_state(current, blkio_subsys_id);
+	css = task_css(current, blkio_subsys_id);
 	if (css && css_tryget(css))
 		bio->bi_css = css;
 	rcu_read_unlock();
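The hunk above also shows the reference-counting discipline that goes with css lookups: a css found under RCU is only stored if css_tryget() succeeds, and the owner drops it later with css_put(). A small sketch of the same pattern outside of bio, with hypothetical object names and the subsystem ID passed in (blkio_subsys_id is what the bio code uses):

#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

struct example_obj {				/* hypothetical owner of a css reference */
	struct cgroup_subsys_state *css;
};

/* Associate @obj with current's css for @subsys_id.  The reference is
 * taken only if the css is still alive; css_tryget() fails once
 * destruction has begun.
 */
static void example_associate_current(struct example_obj *obj, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = task_css(current, subsys_id);
	if (css && css_tryget(css))
		obj->css = css;
	rcu_read_unlock();
}

static void example_disassociate(struct example_obj *obj)
{
	if (obj->css) {
		css_put(obj->css);		/* drop the reference taken above */
		obj->css = NULL;
	}
}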
include/linux/cgroup.h

@@ -66,22 +66,25 @@ enum cgroup_subsys_id {
 
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
-	/*
-	 * The cgroup that this subsystem is attached to. Useful
-	 * for subsystems that want to know about the cgroup
-	 * hierarchy structure
-	 */
+	/* the cgroup that this css is attached to */
 	struct cgroup *cgroup;
 
+	/* the cgroup subsystem that this css is attached to */
+	struct cgroup_subsys *ss;
+
 	/* reference count - access via css_[try]get() and css_put() */
 	struct percpu_ref refcnt;
 
+	/* the parent css */
+	struct cgroup_subsys_state *parent;
+
 	unsigned long flags;
 	/* ID for this css, if possible */
 	struct css_id __rcu *id;
 
-	/* Used to put @cgroup->dentry on the last css_put() */
-	struct work_struct dput_work;
+	/* percpu_ref killing and RCU release */
+	struct rcu_head rcu_head;
+	struct work_struct destroy_work;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
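Because the parent pointer now lives in the css itself, hierarchical walks no longer need to detour through css->cgroup->parent. A hedged sketch of the resulting pattern, using a hypothetical controller structure:

#include <linux/cgroup.h>

struct example_counter {			/* hypothetical controller state */
	struct cgroup_subsys_state css;
	u64 usage;
};

/* Sum a per-controller counter from @css up to the root.  Before this
 * series the same walk had to go through css->cgroup->parent plus a
 * cgroup-to-state helper; now css_parent() is enough.
 */
static u64 example_sum_up_hierarchy(struct cgroup_subsys_state *css)
{
	u64 sum = 0;

	for (; css; css = css_parent(css))
		sum += container_of(css, struct example_counter, css)->usage;
	return sum;
}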
@ -161,7 +164,16 @@ struct cgroup_name {
|
||||
struct cgroup {
|
||||
unsigned long flags; /* "unsigned long" so bitops work */
|
||||
|
||||
int id; /* ida allocated in-hierarchy ID */
|
||||
/*
|
||||
* idr allocated in-hierarchy ID.
|
||||
*
|
||||
* The ID of the root cgroup is always 0, and a new cgroup
|
||||
* will be assigned with a smallest available ID.
|
||||
*/
|
||||
int id;
|
||||
|
||||
/* the number of attached css's */
|
||||
int nr_css;
|
||||
|
||||
/*
|
||||
* We link our 'sibling' struct into our parent's 'children'.
|
||||
@ -196,7 +208,7 @@ struct cgroup {
|
||||
struct cgroup_name __rcu *name;
|
||||
|
||||
/* Private pointers for each registered subsystem */
|
||||
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
|
||||
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
struct cgroupfs_root *root;
|
||||
|
||||
@ -220,10 +232,12 @@ struct cgroup {
|
||||
struct list_head pidlists;
|
||||
struct mutex pidlist_mutex;
|
||||
|
||||
/* dummy css with NULL ->ss, points back to this cgroup */
|
||||
struct cgroup_subsys_state dummy_css;
|
||||
|
||||
/* For css percpu_ref killing and RCU-protected deletion */
|
||||
struct rcu_head rcu_head;
|
||||
struct work_struct destroy_work;
|
||||
atomic_t css_kill_cnt;
|
||||
|
||||
/* List of events which userspace want to receive */
|
||||
struct list_head event_list;
|
||||
@ -322,7 +336,7 @@ struct cgroupfs_root {
|
||||
unsigned long flags;
|
||||
|
||||
/* IDs for cgroups in this hierarchy */
|
||||
struct ida cgroup_ida;
|
||||
struct idr cgroup_idr;
|
||||
|
||||
/* The path to use for release notifications. */
|
||||
char release_agent_path[PATH_MAX];
|
||||
@ -394,9 +408,10 @@ struct cgroup_map_cb {
|
||||
|
||||
/* cftype->flags */
|
||||
enum {
|
||||
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cg */
|
||||
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cg */
|
||||
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
|
||||
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
|
||||
CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */
|
||||
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
|
||||
};
|
||||
|
||||
#define MAX_CFTYPE_NAME 64
|
||||
@ -424,35 +439,41 @@ struct cftype {
|
||||
/* CFTYPE_* flags */
|
||||
unsigned int flags;
|
||||
|
||||
/*
|
||||
* The subsys this file belongs to. Initialized automatically
|
||||
* during registration. NULL for cgroup core files.
|
||||
*/
|
||||
struct cgroup_subsys *ss;
|
||||
|
||||
int (*open)(struct inode *inode, struct file *file);
|
||||
ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
|
||||
ssize_t (*read)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct file *file,
|
||||
char __user *buf, size_t nbytes, loff_t *ppos);
|
||||
/*
|
||||
* read_u64() is a shortcut for the common case of returning a
|
||||
* single integer. Use it in place of read()
|
||||
*/
|
||||
u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
|
||||
u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
|
||||
/*
|
||||
* read_s64() is a signed version of read_u64()
|
||||
*/
|
||||
s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
|
||||
s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
|
||||
/*
|
||||
* read_map() is used for defining a map of key/value
|
||||
* pairs. It should call cb->fill(cb, key, value) for each
|
||||
* entry. The key/value pairs (and their ordering) should not
|
||||
* change between reboots.
|
||||
*/
|
||||
int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
|
||||
int (*read_map)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct cgroup_map_cb *cb);
|
||||
/*
|
||||
* read_seq_string() is used for outputting a simple sequence
|
||||
* using seqfile.
|
||||
*/
|
||||
int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *m);
|
||||
int (*read_seq_string)(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *m);
|
||||
|
||||
ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
|
||||
ssize_t (*write)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct file *file,
|
||||
const char __user *buf, size_t nbytes, loff_t *ppos);
|
||||
|
||||
@ -461,18 +482,20 @@ struct cftype {
|
||||
* a single integer (as parsed by simple_strtoull) from
|
||||
* userspace. Use in place of write(); return 0 or error.
|
||||
*/
|
||||
int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
|
||||
int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 val);
|
||||
/*
|
||||
* write_s64() is a signed version of write_u64()
|
||||
*/
|
||||
int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
|
||||
int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
s64 val);
|
||||
|
||||
/*
|
||||
* write_string() is passed a nul-terminated kernelspace
|
||||
* buffer of maximum length determined by max_write_len.
|
||||
* Returns 0 or -ve error code.
|
||||
*/
|
||||
int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
|
||||
int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
const char *buffer);
|
||||
/*
|
||||
* trigger() callback can be used to get some kick from the
|
||||
@ -480,7 +503,7 @@ struct cftype {
|
||||
* at all. The private field can be used to determine the
|
||||
* kick type for multiplexing.
|
||||
*/
|
||||
int (*trigger)(struct cgroup *cgrp, unsigned int event);
|
||||
int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
|
||||
|
||||
int (*release)(struct inode *inode, struct file *file);
|
||||
|
||||
@ -490,16 +513,18 @@ struct cftype {
|
||||
* you want to provide this functionality. Use eventfd_signal()
|
||||
* on eventfd to send notification to userspace.
|
||||
*/
|
||||
int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct eventfd_ctx *eventfd, const char *args);
|
||||
int (*register_event)(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct eventfd_ctx *eventfd,
|
||||
const char *args);
|
||||
/*
|
||||
* unregister_event() callback will be called when userspace
|
||||
* closes the eventfd or on cgroup removing.
|
||||
* This callback must be implemented, if you want provide
|
||||
* notification functionality.
|
||||
*/
|
||||
void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct eventfd_ctx *eventfd);
|
||||
void (*unregister_event)(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft,
|
||||
struct eventfd_ctx *eventfd);
|
||||
};
|
||||
|
||||
/*
|
||||
@ -512,15 +537,6 @@ struct cftype_set {
|
||||
struct cftype *cfts;
|
||||
};
|
||||
|
||||
struct cgroup_scanner {
|
||||
struct cgroup *cg;
|
||||
int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
|
||||
void (*process_task)(struct task_struct *p,
|
||||
struct cgroup_scanner *scan);
|
||||
struct ptr_heap *heap;
|
||||
void *data;
|
||||
};
|
||||
|
||||
/*
|
||||
* See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
|
||||
* function can be called as long as @cgrp is accessible.
|
||||
@ -537,7 +553,7 @@ static inline const char *cgroup_name(const struct cgroup *cgrp)
|
||||
}
|
||||
|
||||
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
|
||||
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
|
||||
int cgroup_rm_cftypes(struct cftype *cfts);
|
||||
|
||||
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
|
||||
|
||||
@ -553,20 +569,22 @@ int cgroup_task_count(const struct cgroup *cgrp);
|
||||
struct cgroup_taskset;
|
||||
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
|
||||
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
|
||||
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
|
||||
struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
|
||||
int subsys_id);
|
||||
int cgroup_taskset_size(struct cgroup_taskset *tset);
|
||||
|
||||
/**
|
||||
* cgroup_taskset_for_each - iterate cgroup_taskset
|
||||
* @task: the loop cursor
|
||||
* @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
|
||||
* @skip_css: skip if task's css matches this, %NULL to iterate through all
|
||||
* @tset: taskset to iterate
|
||||
*/
|
||||
#define cgroup_taskset_for_each(task, skip_cgrp, tset) \
|
||||
#define cgroup_taskset_for_each(task, skip_css, tset) \
|
||||
for ((task) = cgroup_taskset_first((tset)); (task); \
|
||||
(task) = cgroup_taskset_next((tset))) \
|
||||
if (!(skip_cgrp) || \
|
||||
cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
|
||||
if (!(skip_css) || \
|
||||
cgroup_taskset_cur_css((tset), \
|
||||
(skip_css)->ss->subsys_id) != (skip_css))
|
||||
|
||||
/*
|
||||
* Control Group subsystem type.
|
||||
@ -574,18 +592,22 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
|
||||
*/
|
||||
|
||||
struct cgroup_subsys {
|
||||
struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
|
||||
int (*css_online)(struct cgroup *cgrp);
|
||||
void (*css_offline)(struct cgroup *cgrp);
|
||||
void (*css_free)(struct cgroup *cgrp);
|
||||
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
|
||||
int (*css_online)(struct cgroup_subsys_state *css);
|
||||
void (*css_offline)(struct cgroup_subsys_state *css);
|
||||
void (*css_free)(struct cgroup_subsys_state *css);
|
||||
|
||||
int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
|
||||
void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
|
||||
void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
|
||||
int (*can_attach)(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset);
|
||||
void (*cancel_attach)(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset);
|
||||
void (*attach)(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset);
|
||||
void (*fork)(struct task_struct *task);
|
||||
void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
|
||||
void (*exit)(struct cgroup_subsys_state *css,
|
||||
struct cgroup_subsys_state *old_css,
|
||||
struct task_struct *task);
|
||||
void (*bind)(struct cgroup *root);
|
||||
void (*bind)(struct cgroup_subsys_state *root_css);
|
||||
|
||||
int subsys_id;
|
||||
int disabled;
|
||||
@ -641,10 +663,17 @@ struct cgroup_subsys {
|
||||
#undef IS_SUBSYS_ENABLED
|
||||
#undef SUBSYS
|
||||
|
||||
static inline struct cgroup_subsys_state *cgroup_subsys_state(
|
||||
struct cgroup *cgrp, int subsys_id)
|
||||
/**
|
||||
* css_parent - find the parent css
|
||||
* @css: the target cgroup_subsys_state
|
||||
*
|
||||
* Return the parent css of @css. This function is guaranteed to return
|
||||
* non-NULL parent as long as @css isn't the root.
|
||||
*/
|
||||
static inline
|
||||
struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return cgrp->subsys[subsys_id];
|
||||
return css->parent;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -672,7 +701,7 @@ extern struct mutex cgroup_mutex;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
|
||||
* task_css_check - obtain css for (task, subsys) w/ extra access conds
|
||||
* @task: the target task
|
||||
* @subsys_id: the target subsystem ID
|
||||
* @__c: extra condition expression to be passed to rcu_dereference_check()
|
||||
@ -680,7 +709,7 @@ extern struct mutex cgroup_mutex;
|
||||
* Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
|
||||
* synchronization rules are the same as task_css_set_check().
|
||||
*/
|
||||
#define task_subsys_state_check(task, subsys_id, __c) \
|
||||
#define task_css_check(task, subsys_id, __c) \
|
||||
task_css_set_check((task), (__c))->subsys[(subsys_id)]
|
||||
|
||||
/**
|
||||
@ -695,87 +724,92 @@ static inline struct css_set *task_css_set(struct task_struct *task)
|
||||
}
|
||||
|
||||
/**
|
||||
* task_subsys_state - obtain css for (task, subsys)
|
||||
* task_css - obtain css for (task, subsys)
|
||||
* @task: the target task
|
||||
* @subsys_id: the target subsystem ID
|
||||
*
|
||||
* See task_subsys_state_check().
|
||||
* See task_css_check().
|
||||
*/
|
||||
static inline struct cgroup_subsys_state *
|
||||
task_subsys_state(struct task_struct *task, int subsys_id)
|
||||
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
|
||||
int subsys_id)
|
||||
{
|
||||
return task_subsys_state_check(task, subsys_id, false);
|
||||
return task_css_check(task, subsys_id, false);
|
||||
}
|
||||
|
||||
static inline struct cgroup* task_cgroup(struct task_struct *task,
|
||||
int subsys_id)
|
||||
static inline struct cgroup *task_cgroup(struct task_struct *task,
|
||||
int subsys_id)
|
||||
{
|
||||
return task_subsys_state(task, subsys_id)->cgroup;
|
||||
return task_css(task, subsys_id)->cgroup;
|
||||
}
|
||||
|
||||
struct cgroup *cgroup_next_sibling(struct cgroup *pos);
|
||||
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
|
||||
struct cgroup_subsys_state *parent);
|
||||
|
||||
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
|
||||
|
||||
/**
|
||||
* cgroup_for_each_child - iterate through children of a cgroup
|
||||
* @pos: the cgroup * to use as the loop cursor
|
||||
* @cgrp: cgroup whose children to walk
|
||||
* css_for_each_child - iterate through children of a css
|
||||
* @pos: the css * to use as the loop cursor
|
||||
* @parent: css whose children to walk
|
||||
*
|
||||
* Walk @cgrp's children. Must be called under rcu_read_lock(). A child
|
||||
* cgroup which hasn't finished ->css_online() or already has finished
|
||||
* Walk @parent's children. Must be called under rcu_read_lock(). A child
|
||||
* css which hasn't finished ->css_online() or already has finished
|
||||
* ->css_offline() may show up during traversal and it's each subsystem's
|
||||
* responsibility to verify that each @pos is alive.
|
||||
*
|
||||
* If a subsystem synchronizes against the parent in its ->css_online() and
|
||||
* before starting iterating, a cgroup which finished ->css_online() is
|
||||
* before starting iterating, a css which finished ->css_online() is
|
||||
* guaranteed to be visible in the future iterations.
|
||||
*
|
||||
* It is allowed to temporarily drop RCU read lock during iteration. The
|
||||
* caller is responsible for ensuring that @pos remains accessible until
|
||||
* the start of the next iteration by, for example, bumping the css refcnt.
|
||||
*/
|
||||
#define cgroup_for_each_child(pos, cgrp) \
|
||||
for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \
|
||||
struct cgroup, sibling); \
|
||||
(pos); (pos) = cgroup_next_sibling((pos)))
|
||||
#define css_for_each_child(pos, parent) \
|
||||
for ((pos) = css_next_child(NULL, (parent)); (pos); \
|
||||
(pos) = css_next_child((pos), (parent)))
|
||||
|
||||
struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
|
||||
struct cgroup *cgroup);
|
||||
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
|
||||
struct cgroup_subsys_state *
|
||||
css_next_descendant_pre(struct cgroup_subsys_state *pos,
|
||||
struct cgroup_subsys_state *css);
|
||||
|
||||
struct cgroup_subsys_state *
|
||||
css_rightmost_descendant(struct cgroup_subsys_state *pos);
|
||||
|
||||
/**
|
||||
* cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
|
||||
* @pos: the cgroup * to use as the loop cursor
|
||||
* @cgroup: cgroup whose descendants to walk
|
||||
* css_for_each_descendant_pre - pre-order walk of a css's descendants
|
||||
* @pos: the css * to use as the loop cursor
|
||||
* @root: css whose descendants to walk
|
||||
*
|
||||
* Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
|
||||
* descendant cgroup which hasn't finished ->css_online() or already has
|
||||
* Walk @root's descendants. @root is included in the iteration and the
|
||||
* first node to be visited. Must be called under rcu_read_lock(). A
|
||||
* descendant css which hasn't finished ->css_online() or already has
|
||||
* finished ->css_offline() may show up during traversal and it's each
|
||||
* subsystem's responsibility to verify that each @pos is alive.
|
||||
*
|
||||
* If a subsystem synchronizes against the parent in its ->css_online() and
|
||||
* before starting iterating, and synchronizes against @pos on each
|
||||
* iteration, any descendant cgroup which finished ->css_online() is
|
||||
* iteration, any descendant css which finished ->css_online() is
|
||||
* guaranteed to be visible in the future iterations.
|
||||
*
|
||||
* In other words, the following guarantees that a descendant can't escape
|
||||
* state updates of its ancestors.
|
||||
*
|
||||
* my_online(@cgrp)
|
||||
* my_online(@css)
|
||||
* {
|
||||
* Lock @cgrp->parent and @cgrp;
|
||||
* Inherit state from @cgrp->parent;
|
||||
* Lock @css's parent and @css;
|
||||
* Inherit state from the parent;
|
||||
* Unlock both.
|
||||
* }
|
||||
*
|
||||
* my_update_state(@cgrp)
|
||||
* my_update_state(@css)
|
||||
* {
|
||||
* Lock @cgrp;
|
||||
* Update @cgrp's state;
|
||||
* Unlock @cgrp;
|
||||
*
|
||||
* cgroup_for_each_descendant_pre(@pos, @cgrp) {
|
||||
* css_for_each_descendant_pre(@pos, @css) {
|
||||
* Lock @pos;
|
||||
* Verify @pos is alive and inherit state from @pos->parent;
|
||||
* if (@pos == @css)
|
||||
* Update @css's state;
|
||||
* else
|
||||
* Verify @pos is alive and inherit state from its parent;
|
||||
* Unlock @pos;
|
||||
* }
|
||||
* }
|
||||
@ -786,8 +820,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
|
||||
* visible by walking order and, as long as inheriting operations to the
|
||||
* same @pos are atomic to each other, multiple updates racing each other
|
||||
* still result in the correct state. It's guaranateed that at least one
|
||||
* inheritance happens for any cgroup after the latest update to its
|
||||
* parent.
|
||||
* inheritance happens for any css after the latest update to its parent.
|
||||
*
|
||||
* If checking parent's state requires locking the parent, each inheriting
|
||||
* iteration should lock and unlock both @pos->parent and @pos.
|
||||
@ -800,52 +833,45 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
|
||||
* caller is responsible for ensuring that @pos remains accessible until
|
||||
* the start of the next iteration by, for example, bumping the css refcnt.
|
||||
*/
|
||||
#define cgroup_for_each_descendant_pre(pos, cgroup) \
|
||||
for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \
|
||||
pos = cgroup_next_descendant_pre((pos), (cgroup)))
|
||||
#define css_for_each_descendant_pre(pos, css) \
|
||||
for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
|
||||
(pos) = css_next_descendant_pre((pos), (css)))
|
||||
|
||||
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
|
||||
struct cgroup *cgroup);
|
||||
struct cgroup_subsys_state *
|
||||
css_next_descendant_post(struct cgroup_subsys_state *pos,
|
||||
struct cgroup_subsys_state *css);
|
||||
|
||||
/**
|
||||
* cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
|
||||
* @pos: the cgroup * to use as the loop cursor
|
||||
* @cgroup: cgroup whose descendants to walk
|
||||
* css_for_each_descendant_post - post-order walk of a css's descendants
|
||||
* @pos: the css * to use as the loop cursor
|
||||
* @css: css whose descendants to walk
|
||||
*
|
||||
* Similar to cgroup_for_each_descendant_pre() but performs post-order
|
||||
* traversal instead. Note that the walk visibility guarantee described in
|
||||
* pre-order walk doesn't apply the same to post-order walks.
|
||||
* Similar to css_for_each_descendant_pre() but performs post-order
|
||||
* traversal instead. @root is included in the iteration and the last
|
||||
* node to be visited. Note that the walk visibility guarantee described
|
||||
* in pre-order walk doesn't apply the same to post-order walks.
|
||||
*/
|
||||
#define cgroup_for_each_descendant_post(pos, cgroup) \
|
||||
for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos); \
|
||||
pos = cgroup_next_descendant_post((pos), (cgroup)))
|
||||
#define css_for_each_descendant_post(pos, css) \
|
||||
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
|
||||
(pos) = css_next_descendant_post((pos), (css)))
|
||||
|
||||
/* A cgroup_iter should be treated as an opaque object */
|
||||
struct cgroup_iter {
|
||||
struct list_head *cset_link;
|
||||
struct list_head *task;
|
||||
/* A css_task_iter should be treated as an opaque object */
|
||||
struct css_task_iter {
|
||||
struct cgroup_subsys_state *origin_css;
|
||||
struct list_head *cset_link;
|
||||
struct list_head *task;
|
||||
};
|
||||
|
||||
/*
|
||||
* To iterate across the tasks in a cgroup:
|
||||
*
|
||||
* 1) call cgroup_iter_start to initialize an iterator
|
||||
*
|
||||
* 2) call cgroup_iter_next() to retrieve member tasks until it
|
||||
* returns NULL or until you want to end the iteration
|
||||
*
|
||||
* 3) call cgroup_iter_end() to destroy the iterator.
|
||||
*
|
||||
* Or, call cgroup_scan_tasks() to iterate through every task in a
|
||||
* cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
|
||||
* the test_task() callback, but not while calling the process_task()
|
||||
* callback.
|
||||
*/
|
||||
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
|
||||
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
|
||||
struct cgroup_iter *it);
|
||||
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
|
||||
int cgroup_scan_tasks(struct cgroup_scanner *scan);
|
||||
void css_task_iter_start(struct cgroup_subsys_state *css,
|
||||
struct css_task_iter *it);
|
||||
struct task_struct *css_task_iter_next(struct css_task_iter *it);
|
||||
void css_task_iter_end(struct css_task_iter *it);
|
||||
|
||||
int css_scan_tasks(struct cgroup_subsys_state *css,
|
||||
bool (*test)(struct task_struct *, void *),
|
||||
void (*process)(struct task_struct *, void *),
|
||||
void *data, struct ptr_heap *heap);
|
||||
|
||||
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
|
||||
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
|
||||
|
||||
@ -878,7 +904,8 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
|
||||
|
||||
/* Get id and depth of css */
|
||||
unsigned short css_id(struct cgroup_subsys_state *css);
|
||||
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
|
||||
struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
|
||||
struct cgroup_subsys *ss);
|
||||
|
||||
#else /* !CONFIG_CGROUPS */
|
||||
|
||||
|
include/linux/memcontrol.h

@@ -85,7 +85,7 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
 
 static inline
 bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
include/linux/vmpressure.h

@@ -34,10 +34,12 @@ extern void vmpressure_cleanup(struct vmpressure *vmpr);
 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
 extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
 extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
-extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+extern int vmpressure_register_event(struct cgroup_subsys_state *css,
+				     struct cftype *cft,
 				     struct eventfd_ctx *eventfd,
 				     const char *args);
-extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+extern void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+					struct cftype *cft,
 					struct eventfd_ctx *eventfd);
 #else
 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
include/net/cls_cgroup.h

@@ -35,7 +35,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;
 
 	rcu_read_lock();
-	classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+	classid = container_of(task_css(p, net_cls_subsys_id),
 			       struct cgroup_cls_state, css)->classid;
 	rcu_read_unlock();
 
@@ -51,7 +51,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_cls_subsys_id);
+	css = task_css(p, net_cls_subsys_id);
 	if (css)
 		classid = container_of(css,
 				       struct cgroup_cls_state, css)->classid;
include/net/netprio_cgroup.h

@@ -25,10 +25,6 @@ struct netprio_map {
 	u32 priomap[];
 };
 
-struct cgroup_netprio_state {
-	struct cgroup_subsys_state css;
-};
-
 extern void sock_update_netprioidx(struct sock *sk);
 
 #if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
@@ -39,7 +35,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
 	u32 idx;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_subsys_id);
 	idx = css->cgroup->id;
 	rcu_read_unlock();
 	return idx;
@@ -53,7 +49,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
 	u32 idx = 0;
 
 	rcu_read_lock();
-	css = task_subsys_state(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_subsys_id);
 	if (css)
 		idx = css->cgroup->id;
 	rcu_read_unlock();
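The pattern in the two networking headers above is the general replacement recipe: task_subsys_state() becomes task_css(), still called under rcu_read_lock(). A hedged sketch with the subsystem ID passed in rather than hard-coded (net_prio_subsys_id and net_cls_subsys_id are the real IDs used above):

#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Return the cgroup ID of @p's cgroup for @subsys_id, or 0 if no css is
 * attached.  Mirrors task_netprioidx() above.
 */
static inline u32 example_task_cgroup_id(struct task_struct *p, int subsys_id)
{
	struct cgroup_subsys_state *css;
	u32 id = 0;

	rcu_read_lock();
	css = task_css(p, subsys_id);
	if (css)
		id = css->cgroup->id;
	rcu_read_unlock();
	return id;
}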
kernel/cgroup.c (1649 lines changed): file diff suppressed because it is too large.
@ -45,25 +45,19 @@ struct freezer {
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
|
||||
static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return container_of(cgroup_subsys_state(cgroup, freezer_subsys_id),
|
||||
struct freezer, css);
|
||||
return css ? container_of(css, struct freezer, css) : NULL;
|
||||
}
|
||||
|
||||
static inline struct freezer *task_freezer(struct task_struct *task)
|
||||
{
|
||||
return container_of(task_subsys_state(task, freezer_subsys_id),
|
||||
struct freezer, css);
|
||||
return css_freezer(task_css(task, freezer_subsys_id));
|
||||
}
|
||||
|
||||
static struct freezer *parent_freezer(struct freezer *freezer)
|
||||
{
|
||||
struct cgroup *pcg = freezer->css.cgroup->parent;
|
||||
|
||||
if (pcg)
|
||||
return cgroup_freezer(pcg);
|
||||
return NULL;
|
||||
return css_freezer(css_parent(&freezer->css));
|
||||
}
|
||||
|
||||
bool cgroup_freezing(struct task_struct *task)
|
||||
@ -92,7 +86,8 @@ static const char *freezer_state_strs(unsigned int state)
|
||||
|
||||
struct cgroup_subsys freezer_subsys;
|
||||
|
||||
static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
|
||||
static struct cgroup_subsys_state *
|
||||
freezer_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
{
|
||||
struct freezer *freezer;
|
||||
|
||||
@ -105,22 +100,22 @@ static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
|
||||
}
|
||||
|
||||
/**
|
||||
* freezer_css_online - commit creation of a freezer cgroup
|
||||
* @cgroup: cgroup being created
|
||||
* freezer_css_online - commit creation of a freezer css
|
||||
* @css: css being created
|
||||
*
|
||||
* We're committing to creation of @cgroup. Mark it online and inherit
* We're committing to creation of @css. Mark it online and inherit
* parent's freezing state while holding both parent's and our
* freezer->lock.
*/
static int freezer_css_online(struct cgroup *cgroup)
static int freezer_css_online(struct cgroup_subsys_state *css)
{
struct freezer *freezer = cgroup_freezer(cgroup);
struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);

/*
* The following double locking and freezing state inheritance
* guarantee that @cgroup can never escape ancestors' freezing
* states. See cgroup_for_each_descendant_pre() for details.
* states. See css_for_each_descendant_pre() for details.
*/
if (parent)
spin_lock_irq(&parent->lock);
@ -141,15 +136,15 @@ static int freezer_css_online(struct cgroup *cgroup)
}

/**
* freezer_css_offline - initiate destruction of @cgroup
* @cgroup: cgroup being destroyed
* freezer_css_offline - initiate destruction of a freezer css
* @css: css being destroyed
*
* @cgroup is going away. Mark it dead and decrement system_freezing_count
* if it was holding one.
* @css is going away. Mark it dead and decrement system_freezing_count if
* it was holding one.
*/
static void freezer_css_offline(struct cgroup *cgroup)
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
struct freezer *freezer = cgroup_freezer(cgroup);
struct freezer *freezer = css_freezer(css);

spin_lock_irq(&freezer->lock);

@ -161,9 +156,9 @@ static void freezer_css_offline(struct cgroup *cgroup)
spin_unlock_irq(&freezer->lock);
}

static void freezer_css_free(struct cgroup *cgroup)
static void freezer_css_free(struct cgroup_subsys_state *css)
{
kfree(cgroup_freezer(cgroup));
kfree(css_freezer(css));
}

/*
@ -175,25 +170,26 @@ static void freezer_css_free(struct cgroup *cgroup)
* @freezer->lock. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
static void freezer_attach(struct cgroup_subsys_state *new_css,
struct cgroup_taskset *tset)
{
struct freezer *freezer = cgroup_freezer(new_cgrp);
struct freezer *freezer = css_freezer(new_css);
struct task_struct *task;
bool clear_frozen = false;

spin_lock_irq(&freezer->lock);

/*
* Make the new tasks conform to the current state of @new_cgrp.
* Make the new tasks conform to the current state of @new_css.
* For simplicity, when migrating any task to a FROZEN cgroup, we
* revert it to FREEZING and let update_if_frozen() determine the
* correct state later.
*
* Tasks in @tset are on @new_cgrp but may not conform to its
* Tasks in @tset are on @new_css but may not conform to its
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
cgroup_taskset_for_each(task, new_cgrp, tset) {
cgroup_taskset_for_each(task, new_css, tset) {
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
@ -231,7 +227,7 @@ static void freezer_fork(struct task_struct *task)
* The root cgroup is non-freezable, so we can skip the
* following check.
*/
if (!freezer->css.cgroup->parent)
if (!parent_freezer(freezer))
goto out;

spin_lock_irq(&freezer->lock);
@ -244,7 +240,7 @@ static void freezer_fork(struct task_struct *task)

/**
* update_if_frozen - update whether a cgroup finished freezing
* @cgroup: cgroup of interest
* @css: css of interest
*
* Once FREEZING is initiated, transition to FROZEN is lazily updated by
* calling this function. If the current state is FREEZING but not FROZEN,
@ -255,14 +251,14 @@ static void freezer_fork(struct task_struct *task)
* update_if_frozen() on all descendants prior to invoking this function.
*
* Task states and freezer state might disagree while tasks are being
* migrated into or out of @cgroup, so we can't verify task states against
* migrated into or out of @css, so we can't verify task states against
* @freezer state here. See freezer_attach() for details.
*/
static void update_if_frozen(struct cgroup *cgroup)
static void update_if_frozen(struct cgroup_subsys_state *css)
{
struct freezer *freezer = cgroup_freezer(cgroup);
struct cgroup *pos;
struct cgroup_iter it;
struct freezer *freezer = css_freezer(css);
struct cgroup_subsys_state *pos;
struct css_task_iter it;
struct task_struct *task;

WARN_ON_ONCE(!rcu_read_lock_held());
@ -274,8 +270,8 @@ static void update_if_frozen(struct cgroup *cgroup)
goto out_unlock;

/* are all (live) children frozen? */
cgroup_for_each_child(pos, cgroup) {
struct freezer *child = cgroup_freezer(pos);
css_for_each_child(pos, css) {
struct freezer *child = css_freezer(pos);

if ((child->state & CGROUP_FREEZER_ONLINE) &&
!(child->state & CGROUP_FROZEN))
@ -283,9 +279,9 @@ static void update_if_frozen(struct cgroup *cgroup)
}

/* are all tasks frozen? */
cgroup_iter_start(cgroup, &it);
css_task_iter_start(css, &it);

while ((task = cgroup_iter_next(cgroup, &it))) {
while ((task = css_task_iter_next(&it))) {
if (freezing(task)) {
/*
* freezer_should_skip() indicates that the task
@ -300,52 +296,49 @@ static void update_if_frozen(struct cgroup *cgroup)

freezer->state |= CGROUP_FROZEN;
out_iter_end:
cgroup_iter_end(cgroup, &it);
css_task_iter_end(&it);
out_unlock:
spin_unlock_irq(&freezer->lock);
}

static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *m)
{
struct cgroup *pos;
struct cgroup_subsys_state *pos;

rcu_read_lock();

/* update states bottom-up */
cgroup_for_each_descendant_post(pos, cgroup)
css_for_each_descendant_post(pos, css)
update_if_frozen(pos);
update_if_frozen(cgroup);

rcu_read_unlock();

seq_puts(m, freezer_state_strs(cgroup_freezer(cgroup)->state));
seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
return 0;
}

static void freeze_cgroup(struct freezer *freezer)
{
struct cgroup *cgroup = freezer->css.cgroup;
struct cgroup_iter it;
struct css_task_iter it;
struct task_struct *task;

cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it)))
css_task_iter_start(&freezer->css, &it);
while ((task = css_task_iter_next(&it)))
freeze_task(task);
cgroup_iter_end(cgroup, &it);
css_task_iter_end(&it);
}

static void unfreeze_cgroup(struct freezer *freezer)
{
struct cgroup *cgroup = freezer->css.cgroup;
struct cgroup_iter it;
struct css_task_iter it;
struct task_struct *task;

cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it)))
css_task_iter_start(&freezer->css, &it);
while ((task = css_task_iter_next(&it)))
__thaw_task(task);
cgroup_iter_end(cgroup, &it);
css_task_iter_end(&it);
}

/**
@ -395,12 +388,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
*/
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
struct cgroup *pos;

/* update @freezer */
spin_lock_irq(&freezer->lock);
freezer_apply_state(freezer, freeze, CGROUP_FREEZING_SELF);
spin_unlock_irq(&freezer->lock);
struct cgroup_subsys_state *pos;

/*
* Update all its descendants in pre-order traversal. Each
@ -408,24 +396,33 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
* CGROUP_FREEZING_PARENT.
*/
rcu_read_lock();
cgroup_for_each_descendant_pre(pos, freezer->css.cgroup) {
struct freezer *pos_f = cgroup_freezer(pos);
css_for_each_descendant_pre(pos, &freezer->css) {
struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);

/*
* Our update to @parent->state is already visible which is
* all we need. No need to lock @parent. For more info on
* synchronization, see freezer_post_create().
*/
spin_lock_irq(&pos_f->lock);
freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING,
CGROUP_FREEZING_PARENT);

if (pos_f == freezer) {
freezer_apply_state(pos_f, freeze,
CGROUP_FREEZING_SELF);
} else {
/*
* Our update to @parent->state is already visible
* which is all we need. No need to lock @parent.
* For more info on synchronization, see
* freezer_post_create().
*/
freezer_apply_state(pos_f,
parent->state & CGROUP_FREEZING,
CGROUP_FREEZING_PARENT);
}

spin_unlock_irq(&pos_f->lock);
}
rcu_read_unlock();
}

static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
bool freeze;
@ -437,20 +434,22 @@ static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
else
return -EINVAL;

freezer_change_state(cgroup_freezer(cgroup), freeze);
freezer_change_state(css_freezer(css), freeze);
return 0;
}

static u64 freezer_self_freezing_read(struct cgroup *cgroup, struct cftype *cft)
static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = cgroup_freezer(cgroup);
struct freezer *freezer = css_freezer(css);

return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}

static u64 freezer_parent_freezing_read(struct cgroup *cgroup, struct cftype *cft)
static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = cgroup_freezer(cgroup);
struct freezer *freezer = css_freezer(css);

return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}
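
The freezer changes above follow the conversion pattern this series applies to every controller: cftype and subsys callbacks now receive the cgroup_subsys_state directly, and the per-controller accessor becomes a NULL-tolerant container_of() so that css_parent() can be used even at the root. A minimal sketch of that pattern with a hypothetical "foo" controller (all names here are illustrative, not taken from the patch):

	/* Hypothetical controller state; "foo" is illustrative only. */
	struct foo {
		struct cgroup_subsys_state css;
		u64 weight;
	};

	/*
	 * New-style accessor: takes the css itself and tolerates NULL, so
	 * css_foo(css_parent(&f->css)) also works for the root controller.
	 */
	static inline struct foo *css_foo(struct cgroup_subsys_state *css)
	{
		return css ? container_of(css, struct foo, css) : NULL;
	}

	static inline struct foo *parent_foo(struct foo *f)
	{
		return css_foo(css_parent(&f->css));
	}

	/*
	 * cftype read handler: the css is passed in directly instead of
	 * being looked up from a struct cgroup.
	 */
	static u64 foo_weight_read(struct cgroup_subsys_state *css,
				   struct cftype *cft)
	{
		return css_foo(css)->weight;
	}

The NULL check in css_foo() is what lets parent_foo() be called unconditionally, mirroring the parent_freezer() and parent_cs() helpers in the hunks above and below.
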
kernel/cpuset.c
@ -68,10 +68,6 @@
|
||||
*/
|
||||
int number_of_cpusets __read_mostly;
|
||||
|
||||
/* Forward declare cgroup structures */
|
||||
struct cgroup_subsys cpuset_subsys;
|
||||
struct cpuset;
|
||||
|
||||
/* See "Frequency meter" comments, below. */
|
||||
|
||||
struct fmeter {
|
||||
@ -115,27 +111,20 @@ struct cpuset {
|
||||
int relax_domain_level;
|
||||
};
|
||||
|
||||
/* Retrieve the cpuset for a cgroup */
|
||||
static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
|
||||
static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
|
||||
struct cpuset, css);
|
||||
return css ? container_of(css, struct cpuset, css) : NULL;
|
||||
}
|
||||
|
||||
/* Retrieve the cpuset for a task */
|
||||
static inline struct cpuset *task_cs(struct task_struct *task)
|
||||
{
|
||||
return container_of(task_subsys_state(task, cpuset_subsys_id),
|
||||
struct cpuset, css);
|
||||
return css_cs(task_css(task, cpuset_subsys_id));
|
||||
}
|
||||
|
||||
static inline struct cpuset *parent_cs(const struct cpuset *cs)
|
||||
static inline struct cpuset *parent_cs(struct cpuset *cs)
|
||||
{
|
||||
struct cgroup *pcgrp = cs->css.cgroup->parent;
|
||||
|
||||
if (pcgrp)
|
||||
return cgroup_cs(pcgrp);
|
||||
return NULL;
|
||||
return css_cs(css_parent(&cs->css));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
@ -212,29 +201,30 @@ static struct cpuset top_cpuset = {
|
||||
/**
|
||||
* cpuset_for_each_child - traverse online children of a cpuset
|
||||
* @child_cs: loop cursor pointing to the current child
|
||||
* @pos_cgrp: used for iteration
|
||||
* @pos_css: used for iteration
|
||||
* @parent_cs: target cpuset to walk children of
|
||||
*
|
||||
* Walk @child_cs through the online children of @parent_cs. Must be used
|
||||
* with RCU read locked.
|
||||
*/
|
||||
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs) \
|
||||
cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup) \
|
||||
if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
|
||||
#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
|
||||
css_for_each_child((pos_css), &(parent_cs)->css) \
|
||||
if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
|
||||
|
||||
/**
|
||||
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
|
||||
* @des_cs: loop cursor pointing to the current descendant
|
||||
* @pos_cgrp: used for iteration
|
||||
* @pos_css: used for iteration
|
||||
* @root_cs: target cpuset to walk ancestor of
|
||||
*
|
||||
* Walk @des_cs through the online descendants of @root_cs. Must be used
|
||||
* with RCU read locked. The caller may modify @pos_cgrp by calling
|
||||
* cgroup_rightmost_descendant() to skip subtree.
|
||||
* with RCU read locked. The caller may modify @pos_css by calling
|
||||
* css_rightmost_descendant() to skip subtree. @root_cs is included in the
|
||||
* iteration and the first node to be visited.
|
||||
*/
|
||||
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs) \
|
||||
cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
|
||||
if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
|
||||
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
|
||||
css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
|
||||
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
|
||||
|
||||
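
The comment above describes a pre-order walk that visits @root_cs first and lets the caller prune a whole subtree by advancing the cursor with css_rightmost_descendant(). A rough usage sketch, modelled on update_domain_attr_tree() and update_tasks_cpumask_hier() later in this file (not a verbatim copy of the kernel code):

	static void walk_online_descendants(struct cpuset *root_cs)
	{
		struct cpuset *cp;
		struct cgroup_subsys_state *pos_css;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
			if (cp == root_cs)
				continue;	/* or handle the root explicitly */

			/* prune @cp's whole subtree if it has no CPUs of its own */
			if (cpumask_empty(cp->cpus_allowed)) {
				pos_css = css_rightmost_descendant(pos_css);
				continue;
			}

			/* ... per-cpuset work goes here ... */
		}
		rcu_read_unlock();
	}
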
/*
|
||||
* There are two global mutexes guarding cpuset structures - cpuset_mutex
|
||||
@ -320,8 +310,7 @@ static struct file_system_type cpuset_fs_type = {
|
||||
*
|
||||
* Call with callback_mutex held.
|
||||
*/
|
||||
static void guarantee_online_cpus(const struct cpuset *cs,
|
||||
struct cpumask *pmask)
|
||||
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
|
||||
{
|
||||
while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
|
||||
cs = parent_cs(cs);
|
||||
@ -339,7 +328,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
|
||||
*
|
||||
* Call with callback_mutex held.
|
||||
*/
|
||||
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
|
||||
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
|
||||
{
|
||||
while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
|
||||
cs = parent_cs(cs);
|
||||
@ -384,7 +373,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
|
||||
* alloc_trial_cpuset - allocate a trial cpuset
|
||||
* @cs: the cpuset that the trial cpuset duplicates
|
||||
*/
|
||||
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
|
||||
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
|
||||
{
|
||||
struct cpuset *trial;
|
||||
|
||||
@ -431,9 +420,9 @@ static void free_trial_cpuset(struct cpuset *trial)
|
||||
* Return 0 if valid, -errno if not.
|
||||
*/
|
||||
|
||||
static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
|
||||
static int validate_change(struct cpuset *cur, struct cpuset *trial)
|
||||
{
|
||||
struct cgroup *cgrp;
|
||||
struct cgroup_subsys_state *css;
|
||||
struct cpuset *c, *par;
|
||||
int ret;
|
||||
|
||||
@ -441,7 +430,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
|
||||
|
||||
/* Each of our child cpusets must be a subset of us */
|
||||
ret = -EBUSY;
|
||||
cpuset_for_each_child(c, cgrp, cur)
|
||||
cpuset_for_each_child(c, css, cur)
|
||||
if (!is_cpuset_subset(c, trial))
|
||||
goto out;
|
||||
|
||||
@ -462,7 +451,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
|
||||
* overlap
|
||||
*/
|
||||
ret = -EINVAL;
|
||||
cpuset_for_each_child(c, cgrp, par) {
|
||||
cpuset_for_each_child(c, css, par) {
|
||||
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
|
||||
c != cur &&
|
||||
cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
|
||||
@ -515,13 +504,16 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
|
||||
struct cpuset *root_cs)
|
||||
{
|
||||
struct cpuset *cp;
|
||||
struct cgroup *pos_cgrp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
|
||||
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
|
||||
if (cp == root_cs)
|
||||
continue;
|
||||
|
||||
/* skip the whole subtree if @cp doesn't have any CPU */
|
||||
if (cpumask_empty(cp->cpus_allowed)) {
|
||||
pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
|
||||
pos_css = css_rightmost_descendant(pos_css);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -596,7 +588,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
|
||||
struct sched_domain_attr *dattr; /* attributes for custom domains */
|
||||
int ndoms = 0; /* number of sched domains in result */
|
||||
int nslot; /* next empty doms[] struct cpumask slot */
|
||||
struct cgroup *pos_cgrp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
doms = NULL;
|
||||
dattr = NULL;
|
||||
@ -625,7 +617,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
|
||||
csn = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
|
||||
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
|
||||
if (cp == &top_cpuset)
|
||||
continue;
|
||||
/*
|
||||
* Continue traversing beyond @cp iff @cp has some CPUs and
|
||||
* isn't load balancing. The former is obvious. The
|
||||
@ -642,7 +636,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
|
||||
csa[csn++] = cp;
|
||||
|
||||
/* skip @cp's subtree */
|
||||
pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
|
||||
pos_css = css_rightmost_descendant(pos_css);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
@ -837,52 +831,45 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
|
||||
/**
|
||||
* cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
|
||||
* @tsk: task to test
|
||||
* @scan: struct cgroup_scanner containing the cgroup of the task
|
||||
* @data: cpuset to @tsk belongs to
|
||||
*
|
||||
* Called by cgroup_scan_tasks() for each task in a cgroup whose
|
||||
* cpus_allowed mask needs to be changed.
|
||||
* Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
|
||||
* mask needs to be changed.
|
||||
*
|
||||
* We don't need to re-check for the cgroup/cpuset membership, since we're
|
||||
* holding cpuset_mutex at this point.
|
||||
*/
|
||||
static void cpuset_change_cpumask(struct task_struct *tsk,
|
||||
struct cgroup_scanner *scan)
|
||||
static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
|
||||
{
|
||||
struct cpuset *cpus_cs;
|
||||
struct cpuset *cs = data;
|
||||
struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
|
||||
|
||||
cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
|
||||
set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
|
||||
}
|
||||
|
||||
/**
|
||||
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
|
||||
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
|
||||
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
|
||||
* @heap: if NULL, defer allocating heap memory to css_scan_tasks()
|
||||
*
|
||||
* Called with cpuset_mutex held
|
||||
*
|
||||
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
|
||||
* The css_scan_tasks() function will scan all the tasks in a cgroup,
|
||||
* calling callback functions for each.
|
||||
*
|
||||
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
|
||||
* No return value. It's guaranteed that css_scan_tasks() always returns 0
|
||||
* if @heap != NULL.
|
||||
*/
|
||||
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
|
||||
{
|
||||
struct cgroup_scanner scan;
|
||||
|
||||
scan.cg = cs->css.cgroup;
|
||||
scan.test_task = NULL;
|
||||
scan.process_task = cpuset_change_cpumask;
|
||||
scan.heap = heap;
|
||||
cgroup_scan_tasks(&scan);
|
||||
css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
|
||||
}
|
||||
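
css_scan_tasks() replaces the cgroup_scanner machinery used above: the former scanner fields become plain arguments and the per-task callback takes an opaque data pointer instead of a struct cgroup_scanner. A condensed sketch of the call shape, with hypothetical "foo_" names mirroring update_tasks_cpumask() (simplified, not the actual kernel code):

	/* per-task callback: @data carries whatever the caller passed in */
	static void foo_update_task(struct task_struct *task, void *data)
	{
		struct cpuset *cs = data;

		set_cpus_allowed_ptr(task, cs->cpus_allowed);
	}

	static void foo_update_tasks(struct cpuset *cs, struct ptr_heap *heap)
	{
		/*
		 * css, test callback (NULL = all tasks), process callback,
		 * callback data, optional preallocated heap.
		 */
		css_scan_tasks(&cs->css, NULL, foo_update_task, cs, heap);
	}
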
|
||||
/*
|
||||
* update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
|
||||
* @root_cs: the root cpuset of the hierarchy
|
||||
* @update_root: update root cpuset or not?
|
||||
* @heap: the heap used by cgroup_scan_tasks()
|
||||
* @heap: the heap used by css_scan_tasks()
|
||||
*
|
||||
* This will update cpumasks of tasks in @root_cs and all other empty cpusets
|
||||
* which take on cpumask of @root_cs.
|
||||
@ -893,17 +880,19 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
|
||||
bool update_root, struct ptr_heap *heap)
|
||||
{
|
||||
struct cpuset *cp;
|
||||
struct cgroup *pos_cgrp;
|
||||
|
||||
if (update_root)
|
||||
update_tasks_cpumask(root_cs, heap);
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
|
||||
/* skip the whole subtree if @cp have some CPU */
|
||||
if (!cpumask_empty(cp->cpus_allowed)) {
|
||||
pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
|
||||
continue;
|
||||
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
|
||||
if (cp == root_cs) {
|
||||
if (!update_root)
|
||||
continue;
|
||||
} else {
|
||||
/* skip the whole subtree if @cp have some CPU */
|
||||
if (!cpumask_empty(cp->cpus_allowed)) {
|
||||
pos_css = css_rightmost_descendant(pos_css);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (!css_tryget(&cp->css))
|
||||
continue;
|
||||
@ -1059,20 +1048,24 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
|
||||
task_unlock(tsk);
|
||||
}
|
||||
|
||||
struct cpuset_change_nodemask_arg {
|
||||
struct cpuset *cs;
|
||||
nodemask_t *newmems;
|
||||
};
|
||||
|
||||
/*
|
||||
* Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
|
||||
* of it to cpuset's new mems_allowed, and migrate pages to new nodes if
|
||||
* memory_migrate flag is set. Called with cpuset_mutex held.
|
||||
*/
|
||||
static void cpuset_change_nodemask(struct task_struct *p,
|
||||
struct cgroup_scanner *scan)
|
||||
static void cpuset_change_nodemask(struct task_struct *p, void *data)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(scan->cg);
|
||||
struct cpuset_change_nodemask_arg *arg = data;
|
||||
struct cpuset *cs = arg->cs;
|
||||
struct mm_struct *mm;
|
||||
int migrate;
|
||||
nodemask_t *newmems = scan->data;
|
||||
|
||||
cpuset_change_task_nodemask(p, newmems);
|
||||
cpuset_change_task_nodemask(p, arg->newmems);
|
||||
|
||||
mm = get_task_mm(p);
|
||||
if (!mm)
|
||||
@ -1082,7 +1075,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
|
||||
|
||||
mpol_rebind_mm(mm, &cs->mems_allowed);
|
||||
if (migrate)
|
||||
cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
|
||||
cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
|
||||
mmput(mm);
|
||||
}
|
||||
|
||||
@ -1091,28 +1084,22 @@ static void *cpuset_being_rebound;
|
||||
/**
|
||||
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
|
||||
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
|
||||
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
|
||||
* @heap: if NULL, defer allocating heap memory to css_scan_tasks()
|
||||
*
|
||||
* Called with cpuset_mutex held
|
||||
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
|
||||
* if @heap != NULL.
|
||||
* Called with cpuset_mutex held. No return value. It's guaranteed that
|
||||
* css_scan_tasks() always returns 0 if @heap != NULL.
|
||||
*/
|
||||
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
|
||||
{
|
||||
static nodemask_t newmems; /* protected by cpuset_mutex */
|
||||
struct cgroup_scanner scan;
|
||||
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
|
||||
struct cpuset_change_nodemask_arg arg = { .cs = cs,
|
||||
.newmems = &newmems };
|
||||
|
||||
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
|
||||
|
||||
guarantee_online_mems(mems_cs, &newmems);
|
||||
|
||||
scan.cg = cs->css.cgroup;
|
||||
scan.test_task = NULL;
|
||||
scan.process_task = cpuset_change_nodemask;
|
||||
scan.heap = heap;
|
||||
scan.data = &newmems;
|
||||
|
||||
/*
|
||||
* The mpol_rebind_mm() call takes mmap_sem, which we couldn't
|
||||
* take while holding tasklist_lock. Forks can happen - the
|
||||
@ -1123,7 +1110,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
|
||||
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
|
||||
* is idempotent. Also migrate pages in each mm to new nodes.
|
||||
*/
|
||||
cgroup_scan_tasks(&scan);
|
||||
css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
|
||||
|
||||
/*
|
||||
* All the tasks' nodemasks have been updated, update
|
||||
@ -1139,7 +1126,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
|
||||
* update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
|
||||
* @cs: the root cpuset of the hierarchy
|
||||
* @update_root: update the root cpuset or not?
|
||||
* @heap: the heap used by cgroup_scan_tasks()
|
||||
* @heap: the heap used by css_scan_tasks()
|
||||
*
|
||||
* This will update nodemasks of tasks in @root_cs and all other empty cpusets
|
||||
* which take on nodemask of @root_cs.
|
||||
@ -1150,17 +1137,19 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
|
||||
bool update_root, struct ptr_heap *heap)
|
||||
{
|
||||
struct cpuset *cp;
|
||||
struct cgroup *pos_cgrp;
|
||||
|
||||
if (update_root)
|
||||
update_tasks_nodemask(root_cs, heap);
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
|
||||
/* skip the whole subtree if @cp have some CPU */
|
||||
if (!nodes_empty(cp->mems_allowed)) {
|
||||
pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
|
||||
continue;
|
||||
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
|
||||
if (cp == root_cs) {
|
||||
if (!update_root)
|
||||
continue;
|
||||
} else {
|
||||
/* skip the whole subtree if @cp have some CPU */
|
||||
if (!nodes_empty(cp->mems_allowed)) {
|
||||
pos_css = css_rightmost_descendant(pos_css);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (!css_tryget(&cp->css))
|
||||
continue;
|
||||
@ -1267,44 +1256,39 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* cpuset_change_flag - make a task's spread flags the same as its cpuset's
|
||||
* @tsk: task to be updated
|
||||
* @scan: struct cgroup_scanner containing the cgroup of the task
|
||||
* @data: cpuset to @tsk belongs to
|
||||
*
|
||||
* Called by cgroup_scan_tasks() for each task in a cgroup.
|
||||
* Called by css_scan_tasks() for each task in a cgroup.
|
||||
*
|
||||
* We don't need to re-check for the cgroup/cpuset membership, since we're
|
||||
* holding cpuset_mutex at this point.
|
||||
*/
|
||||
static void cpuset_change_flag(struct task_struct *tsk,
|
||||
struct cgroup_scanner *scan)
|
||||
static void cpuset_change_flag(struct task_struct *tsk, void *data)
|
||||
{
|
||||
cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
|
||||
struct cpuset *cs = data;
|
||||
|
||||
cpuset_update_task_spread_flag(cs, tsk);
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* update_tasks_flags - update the spread flags of tasks in the cpuset.
|
||||
* @cs: the cpuset in which each task's spread flags needs to be changed
|
||||
* @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
|
||||
* @heap: if NULL, defer allocating heap memory to css_scan_tasks()
|
||||
*
|
||||
* Called with cpuset_mutex held
|
||||
*
|
||||
* The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
|
||||
* The css_scan_tasks() function will scan all the tasks in a cgroup,
|
||||
* calling callback functions for each.
|
||||
*
|
||||
* No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
|
||||
* No return value. It's guaranteed that css_scan_tasks() always returns 0
|
||||
* if @heap != NULL.
|
||||
*/
|
||||
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
|
||||
{
|
||||
struct cgroup_scanner scan;
|
||||
|
||||
scan.cg = cs->css.cgroup;
|
||||
scan.test_task = NULL;
|
||||
scan.process_task = cpuset_change_flag;
|
||||
scan.heap = heap;
|
||||
cgroup_scan_tasks(&scan);
|
||||
css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1462,9 +1446,10 @@ static int fmeter_getrate(struct fmeter *fmp)
|
||||
}
|
||||
|
||||
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
|
||||
static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
static int cpuset_can_attach(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
struct task_struct *task;
|
||||
int ret;
|
||||
|
||||
@ -1475,11 +1460,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
* flag is set.
|
||||
*/
|
||||
ret = -ENOSPC;
|
||||
if (!cgroup_sane_behavior(cgrp) &&
|
||||
if (!cgroup_sane_behavior(css->cgroup) &&
|
||||
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
|
||||
goto out_unlock;
|
||||
|
||||
cgroup_taskset_for_each(task, cgrp, tset) {
|
||||
cgroup_taskset_for_each(task, css, tset) {
|
||||
/*
|
||||
* Kthreads which disallow setaffinity shouldn't be moved
|
||||
* to a new cpuset; we don't want to change their cpu
|
||||
@ -1508,11 +1493,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cpuset_cancel_attach(struct cgroup *cgrp,
|
||||
static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset)
|
||||
{
|
||||
mutex_lock(&cpuset_mutex);
|
||||
cgroup_cs(cgrp)->attach_in_progress--;
|
||||
css_cs(css)->attach_in_progress--;
|
||||
mutex_unlock(&cpuset_mutex);
|
||||
}
|
||||
|
||||
@ -1523,16 +1508,18 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
|
||||
*/
|
||||
static cpumask_var_t cpus_attach;
|
||||
|
||||
static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
static void cpuset_attach(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset)
|
||||
{
|
||||
/* static buf protected by cpuset_mutex */
|
||||
static nodemask_t cpuset_attach_nodemask_to;
|
||||
struct mm_struct *mm;
|
||||
struct task_struct *task;
|
||||
struct task_struct *leader = cgroup_taskset_first(tset);
|
||||
struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *oldcs = cgroup_cs(oldcgrp);
|
||||
struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
|
||||
cpuset_subsys_id);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
struct cpuset *oldcs = css_cs(oldcss);
|
||||
struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
|
||||
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
|
||||
|
||||
@ -1546,7 +1533,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
|
||||
|
||||
guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
|
||||
|
||||
cgroup_taskset_for_each(task, cgrp, tset) {
|
||||
cgroup_taskset_for_each(task, css, tset) {
|
||||
/*
|
||||
* can_attach beforehand should guarantee that this doesn't
|
||||
* fail. TODO: have a better way to handle failure here
|
||||
@ -1608,9 +1595,10 @@ typedef enum {
|
||||
FILE_SPREAD_SLAB,
|
||||
} cpuset_filetype_t;
|
||||
|
||||
static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
|
||||
static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 val)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
cpuset_filetype_t type = cft->private;
|
||||
int retval = 0;
|
||||
|
||||
@ -1657,9 +1645,10 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
|
||||
static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
s64 val)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
cpuset_filetype_t type = cft->private;
|
||||
int retval = -ENODEV;
|
||||
|
||||
@ -1683,10 +1672,10 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
|
||||
/*
|
||||
* Common handling for a write to a "cpus" or "mems" file.
|
||||
*/
|
||||
static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf)
|
||||
static int cpuset_write_resmask(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, const char *buf)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
struct cpuset *trialcs;
|
||||
int retval = -ENODEV;
|
||||
|
||||
@ -1765,13 +1754,12 @@ static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
|
||||
struct cftype *cft,
|
||||
struct file *file,
|
||||
char __user *buf,
|
||||
size_t nbytes, loff_t *ppos)
|
||||
static ssize_t cpuset_common_file_read(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct file *file,
|
||||
char __user *buf, size_t nbytes,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
cpuset_filetype_t type = cft->private;
|
||||
char *page;
|
||||
ssize_t retval = 0;
|
||||
@ -1801,9 +1789,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
|
||||
return retval;
|
||||
}
|
||||
|
||||
static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
|
||||
static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
cpuset_filetype_t type = cft->private;
|
||||
switch (type) {
|
||||
case FILE_CPU_EXCLUSIVE:
|
||||
@ -1832,9 +1820,9 @@ static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
|
||||
static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
cpuset_filetype_t type = cft->private;
|
||||
switch (type) {
|
||||
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
|
||||
@ -1949,11 +1937,12 @@ static struct cftype files[] = {
|
||||
* cgrp: control group that the new cpuset will be part of
|
||||
*/
|
||||
|
||||
static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
|
||||
static struct cgroup_subsys_state *
|
||||
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
{
|
||||
struct cpuset *cs;
|
||||
|
||||
if (!cgrp->parent)
|
||||
if (!parent_css)
|
||||
return &top_cpuset.css;
|
||||
|
||||
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
|
||||
@ -1973,12 +1962,12 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
|
||||
return &cs->css;
|
||||
}
|
||||
|
||||
static int cpuset_css_online(struct cgroup *cgrp)
|
||||
static int cpuset_css_online(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
struct cpuset *parent = parent_cs(cs);
|
||||
struct cpuset *tmp_cs;
|
||||
struct cgroup *pos_cg;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
if (!parent)
|
||||
return 0;
|
||||
@ -1993,7 +1982,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
|
||||
|
||||
number_of_cpusets++;
|
||||
|
||||
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
|
||||
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
@ -2010,7 +1999,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
|
||||
* (and likewise for mems) to the new cgroup.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_child(tmp_cs, pos_cg, parent) {
|
||||
cpuset_for_each_child(tmp_cs, pos_css, parent) {
|
||||
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
|
||||
rcu_read_unlock();
|
||||
goto out_unlock;
|
||||
@ -2027,9 +2016,15 @@ static int cpuset_css_online(struct cgroup *cgrp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cpuset_css_offline(struct cgroup *cgrp)
|
||||
/*
|
||||
* If the cpuset being removed has its flag 'sched_load_balance'
|
||||
* enabled, then simulate turning sched_load_balance off, which
|
||||
* will call rebuild_sched_domains_locked().
|
||||
*/
|
||||
|
||||
static void cpuset_css_offline(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
|
||||
mutex_lock(&cpuset_mutex);
|
||||
|
||||
@ -2042,15 +2037,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
|
||||
mutex_unlock(&cpuset_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the cpuset being removed has its flag 'sched_load_balance'
|
||||
* enabled, then simulate turning sched_load_balance off, which
|
||||
* will call rebuild_sched_domains_locked().
|
||||
*/
|
||||
|
||||
static void cpuset_css_free(struct cgroup *cgrp)
|
||||
static void cpuset_css_free(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct cpuset *cs = cgroup_cs(cgrp);
|
||||
struct cpuset *cs = css_cs(css);
|
||||
|
||||
free_cpumask_var(cs->cpus_allowed);
|
||||
kfree(cs);
|
||||
@ -2257,11 +2246,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
|
||||
/* if cpus or mems changed, we need to propagate to descendants */
|
||||
if (cpus_updated || mems_updated) {
|
||||
struct cpuset *cs;
|
||||
struct cgroup *pos_cgrp;
|
||||
struct cgroup_subsys_state *pos_css;
|
||||
|
||||
rcu_read_lock();
|
||||
cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
|
||||
if (!css_tryget(&cs->css))
|
||||
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
|
||||
if (cs == &top_cpuset || !css_tryget(&cs->css))
|
||||
continue;
|
||||
rcu_read_unlock();
|
||||
|
||||
@ -2350,7 +2339,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
|
||||
|
||||
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
|
||||
{
|
||||
const struct cpuset *cpus_cs;
|
||||
struct cpuset *cpus_cs;
|
||||
|
||||
rcu_read_lock();
|
||||
cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
|
||||
@ -2423,7 +2412,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
|
||||
* callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
|
||||
* (an unusual configuration), then returns the root cpuset.
|
||||
*/
|
||||
static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
|
||||
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
|
||||
{
|
||||
while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
|
||||
cs = parent_cs(cs);
|
||||
@ -2493,7 +2482,7 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
|
||||
*/
|
||||
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
|
||||
{
|
||||
const struct cpuset *cs; /* current cpuset ancestors */
|
||||
struct cpuset *cs; /* current cpuset ancestors */
|
||||
int allowed; /* is allocation in zone z allowed? */
|
||||
|
||||
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
|
||||
@ -2731,7 +2720,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
|
||||
goto out_free;
|
||||
|
||||
rcu_read_lock();
|
||||
css = task_subsys_state(tsk, cpuset_subsys_id);
|
||||
css = task_css(tsk, cpuset_subsys_id);
|
||||
retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
|
||||
rcu_read_unlock();
|
||||
if (retval < 0)
|
||||
|
@ -340,8 +340,8 @@ struct perf_cgroup {
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_subsys_state(task, perf_subsys_id),
struct perf_cgroup, css);
return container_of(task_css(task, perf_subsys_id),
struct perf_cgroup, css);
}

static inline bool
@ -591,7 +591,9 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
if (!f.file)
return -EBADF;

css = cgroup_css_from_dir(f.file, perf_subsys_id);
rcu_read_lock();

css = css_from_dir(f.file->f_dentry, &perf_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
@ -617,6 +619,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
ret = -EINVAL;
}
out:
rcu_read_unlock();
fdput(f);
return ret;
}
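
Note how the css_from_dir() lookup in perf_cgroup_connect() is now bracketed by rcu_read_lock(): the new helper returns an RCU-protected css rather than a pinned pointer. A rough sketch of that lookup shape with the perf-specific parts trimmed (the helper name is illustrative, not from the patch):

	static int resolve_cgroup_fd(int fd, struct cgroup_subsys *ss)
	{
		struct cgroup_subsys_state *css;
		struct fd f = fdget(fd);
		int ret = 0;

		if (!f.file)
			return -EBADF;

		rcu_read_lock();
		/* resolve the css for @ss from an open cgroup directory */
		css = css_from_dir(f.file->f_dentry, ss);
		if (IS_ERR(css))
			ret = PTR_ERR(css);
		/* ... otherwise take a reference or copy what is needed ... */
		rcu_read_unlock();

		fdput(f);
		return ret;
	}
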
@ -7798,7 +7801,8 @@ static int __init perf_event_sysfs_init(void)
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct perf_cgroup *jc;

@ -7815,11 +7819,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup *cont)
static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct perf_cgroup *jc;
jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
struct perf_cgroup, css);
struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

free_percpu(jc->info);
kfree(jc);
}
@ -7831,15 +7834,17 @@ static int __perf_cgroup_move(void *info)
return 0;
}

static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
static void perf_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;

cgroup_taskset_for_each(task, cgrp, tset)
cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
static void perf_cgroup_exit(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task)
{
/*
|
@ -6815,7 +6815,7 @@ void sched_move_task(struct task_struct *tsk)
|
||||
if (unlikely(running))
|
||||
tsk->sched_class->put_prev_task(rq, tsk);
|
||||
|
||||
tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
|
||||
tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
|
||||
lockdep_is_held(&tsk->sighand->siglock)),
|
||||
struct task_group, css);
|
||||
tg = autogroup_task_group(tsk, tg);
|
||||
@ -7137,23 +7137,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHED
|
||||
|
||||
/* return corresponding task_group object of a cgroup */
|
||||
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
|
||||
static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
|
||||
struct task_group, css);
|
||||
return css ? container_of(css, struct task_group, css) : NULL;
|
||||
}
|
||||
|
||||
static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
|
||||
static struct cgroup_subsys_state *
|
||||
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
{
|
||||
struct task_group *tg, *parent;
|
||||
struct task_group *parent = css_tg(parent_css);
|
||||
struct task_group *tg;
|
||||
|
||||
if (!cgrp->parent) {
|
||||
if (!parent) {
|
||||
/* This is early initialization for the top cgroup */
|
||||
return &root_task_group.css;
|
||||
}
|
||||
|
||||
parent = cgroup_tg(cgrp->parent);
|
||||
tg = sched_create_group(parent);
|
||||
if (IS_ERR(tg))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -7161,41 +7160,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
|
||||
return &tg->css;
|
||||
}
|
||||
|
||||
static int cpu_cgroup_css_online(struct cgroup *cgrp)
|
||||
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct task_group *tg = cgroup_tg(cgrp);
|
||||
struct task_group *parent;
|
||||
struct task_group *tg = css_tg(css);
|
||||
struct task_group *parent = css_tg(css_parent(css));
|
||||
|
||||
if (!cgrp->parent)
|
||||
return 0;
|
||||
|
||||
parent = cgroup_tg(cgrp->parent);
|
||||
sched_online_group(tg, parent);
|
||||
if (parent)
|
||||
sched_online_group(tg, parent);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cpu_cgroup_css_free(struct cgroup *cgrp)
|
||||
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct task_group *tg = cgroup_tg(cgrp);
|
||||
struct task_group *tg = css_tg(css);
|
||||
|
||||
sched_destroy_group(tg);
|
||||
}
|
||||
|
||||
static void cpu_cgroup_css_offline(struct cgroup *cgrp)
|
||||
static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct task_group *tg = cgroup_tg(cgrp);
|
||||
struct task_group *tg = css_tg(css);
|
||||
|
||||
sched_offline_group(tg);
|
||||
}
|
||||
|
||||
static int cpu_cgroup_can_attach(struct cgroup *cgrp,
|
||||
static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset)
|
||||
{
|
||||
struct task_struct *task;
|
||||
|
||||
cgroup_taskset_for_each(task, cgrp, tset) {
|
||||
cgroup_taskset_for_each(task, css, tset) {
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
|
||||
if (!sched_rt_can_attach(css_tg(css), task))
|
||||
return -EINVAL;
|
||||
#else
|
||||
/* We don't support RT-tasks being in separate groups */
|
||||
@ -7206,18 +7202,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cpu_cgroup_attach(struct cgroup *cgrp,
|
||||
static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset)
|
||||
{
|
||||
struct task_struct *task;
|
||||
|
||||
cgroup_taskset_for_each(task, cgrp, tset)
|
||||
cgroup_taskset_for_each(task, css, tset)
|
||||
sched_move_task(task);
|
||||
}
|
||||
|
||||
static void
|
||||
cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
|
||||
struct task_struct *task)
|
||||
static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
|
||||
struct cgroup_subsys_state *old_css,
|
||||
struct task_struct *task)
|
||||
{
|
||||
/*
|
||||
* cgroup_exit() is called in the copy_process() failure path.
|
||||
@ -7231,15 +7227,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
|
||||
u64 shareval)
|
||||
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cftype, u64 shareval)
|
||||
{
|
||||
return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
|
||||
return sched_group_set_shares(css_tg(css), scale_load(shareval));
|
||||
}
|
||||
|
||||
static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
|
||||
static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft)
|
||||
{
|
||||
struct task_group *tg = cgroup_tg(cgrp);
|
||||
struct task_group *tg = css_tg(css);
|
||||
|
||||
return (u64) scale_load_down(tg->shares);
|
||||
}
|
||||
@ -7361,26 +7358,28 @@ long tg_get_cfs_period(struct task_group *tg)
|
||||
return cfs_period_us;
|
||||
}
|
||||
|
||||
static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
|
||||
static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft)
|
||||
{
|
||||
return tg_get_cfs_quota(cgroup_tg(cgrp));
|
||||
return tg_get_cfs_quota(css_tg(css));
|
||||
}
|
||||
|
||||
static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
|
||||
s64 cfs_quota_us)
|
||||
static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cftype, s64 cfs_quota_us)
|
||||
{
|
||||
return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
|
||||
return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
|
||||
}
|
||||
|
||||
static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
|
||||
static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft)
|
||||
{
|
||||
return tg_get_cfs_period(cgroup_tg(cgrp));
|
||||
return tg_get_cfs_period(css_tg(css));
|
||||
}
|
||||
|
||||
static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
|
||||
u64 cfs_period_us)
|
||||
static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
|
||||
struct cftype *cftype, u64 cfs_period_us)
|
||||
{
|
||||
return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
|
||||
return tg_set_cfs_period(css_tg(css), cfs_period_us);
|
||||
}
|
||||
|
||||
struct cfs_schedulable_data {
|
||||
@ -7461,10 +7460,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
|
||||
static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
struct cgroup_map_cb *cb)
|
||||
{
|
||||
struct task_group *tg = cgroup_tg(cgrp);
|
||||
struct task_group *tg = css_tg(css);
|
||||
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
|
||||
|
||||
cb->fill(cb, "nr_periods", cfs_b->nr_periods);
|
||||
@ -7477,26 +7476,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
|
||||
s64 val)
|
||||
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, s64 val)
|
||||
{
|
||||
return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
|
||||
return sched_group_set_rt_runtime(css_tg(css), val);
|
||||
}
|
||||
|
||||
static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
|
||||
static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft)
|
||||
{
|
||||
return sched_group_rt_runtime(cgroup_tg(cgrp));
|
||||
return sched_group_rt_runtime(css_tg(css));
|
||||
}
|
||||
|
||||
static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
|
||||
u64 rt_period_us)
|
||||
static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
|
||||
struct cftype *cftype, u64 rt_period_us)
|
||||
{
|
||||
return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
|
||||
return sched_group_set_rt_period(css_tg(css), rt_period_us);
|
||||
}
|
||||
|
||||
static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
|
||||
static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft)
|
||||
{
|
||||
return sched_group_rt_period(cgroup_tg(cgrp));
|
||||
return sched_group_rt_period(css_tg(css));
|
||||
}
|
||||
#endif /* CONFIG_RT_GROUP_SCHED */
|
||||
|
||||
|
@ -33,30 +33,20 @@ struct cpuacct {
|
||||
struct kernel_cpustat __percpu *cpustat;
|
||||
};
|
||||
|
||||
/* return cpu accounting group corresponding to this container */
|
||||
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
|
||||
static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
|
||||
{
|
||||
return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
|
||||
struct cpuacct, css);
|
||||
return css ? container_of(css, struct cpuacct, css) : NULL;
|
||||
}
|
||||
|
||||
/* return cpu accounting group to which this task belongs */
|
||||
static inline struct cpuacct *task_ca(struct task_struct *tsk)
|
||||
{
|
||||
return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
|
||||
struct cpuacct, css);
|
||||
}
|
||||
|
||||
static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
|
||||
{
|
||||
return cgroup_ca(ca->css.cgroup->parent);
|
||||
return css_ca(task_css(tsk, cpuacct_subsys_id));
|
||||
}
|
||||
|
||||
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
|
||||
{
|
||||
if (!ca->css.cgroup->parent)
|
||||
return NULL;
|
||||
return cgroup_ca(ca->css.cgroup->parent);
|
||||
return css_ca(css_parent(&ca->css));
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
|
||||
@ -66,11 +56,12 @@ static struct cpuacct root_cpuacct = {
|
||||
};
|
||||
|
||||
/* create a new cpu accounting group */
|
||||
static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
|
||||
static struct cgroup_subsys_state *
|
||||
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
{
|
||||
struct cpuacct *ca;
|
||||
|
||||
if (!cgrp->parent)
|
||||
if (!parent_css)
|
||||
return &root_cpuacct.css;
|
||||
|
||||
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
|
||||
@ -96,9 +87,9 @@ static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
|
||||
}
|
||||
|
||||
/* destroy an existing cpu accounting group */
|
||||
static void cpuacct_css_free(struct cgroup *cgrp)
|
||||
static void cpuacct_css_free(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct cpuacct *ca = cgroup_ca(cgrp);
|
||||
struct cpuacct *ca = css_ca(css);
|
||||
|
||||
free_percpu(ca->cpustat);
|
||||
free_percpu(ca->cpuusage);
|
||||
@ -141,9 +132,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
|
||||
}
|
||||
|
||||
/* return total cpu usage (in nanoseconds) of a group */
|
||||
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
|
||||
static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
{
|
||||
struct cpuacct *ca = cgroup_ca(cgrp);
|
||||
struct cpuacct *ca = css_ca(css);
|
||||
u64 totalcpuusage = 0;
|
||||
int i;
|
||||
|
||||
@ -153,10 +144,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
|
||||
return totalcpuusage;
|
||||
}
|
||||
|
||||
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
|
||||
u64 reset)
|
||||
static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 reset)
|
||||
{
|
||||
struct cpuacct *ca = cgroup_ca(cgrp);
|
||||
struct cpuacct *ca = css_ca(css);
|
||||
int err = 0;
|
||||
int i;
|
||||
|
||||
@ -172,10 +163,10 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
|
||||
struct seq_file *m)
|
||||
static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct seq_file *m)
|
||||
{
|
||||
struct cpuacct *ca = cgroup_ca(cgroup);
|
||||
struct cpuacct *ca = css_ca(css);
|
||||
u64 percpu;
|
||||
int i;
|
||||
|
||||
@ -192,10 +183,10 @@ static const char * const cpuacct_stat_desc[] = {
|
||||
[CPUACCT_STAT_SYSTEM] = "system",
|
||||
};
|
||||
|
||||
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct cgroup_map_cb *cb)
|
||||
static int cpuacct_stats_show(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct cgroup_map_cb *cb)
|
||||
{
|
||||
struct cpuacct *ca = cgroup_ca(cgrp);
|
||||
struct cpuacct *ca = css_ca(css);
|
||||
int cpu;
|
||||
s64 val = 0;
|
||||
|
||||
@ -281,7 +272,7 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
|
||||
while (ca != &root_cpuacct) {
|
||||
kcpustat = this_cpu_ptr(ca->cpustat);
|
||||
kcpustat->cpustat[index] += val;
|
||||
ca = __parent_ca(ca);
|
||||
ca = parent_ca(ca);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -665,9 +665,9 @@ extern int group_balance_cpu(struct sched_group *sg);
/*
* Return the group to which this tasks belongs.
*
* We cannot use task_subsys_state() and friends because the cgroup
* subsystem changes that value before the cgroup_subsys::attach() method
* is called, therefore we cannot pin it and might observe the wrong value.
* We cannot use task_css() and friends because the cgroup subsystem
* changes that value before the cgroup_subsys::attach() method is called,
* therefore we cannot pin it and might observe the wrong value.
*
* The same is true for autogroup's p->signal->autogroup->tg, the autogroup
* core changes this before calling sched_move_task().
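
This is why callers that do hold a pinning lock annotate it explicitly instead of using task_css(): sched_move_task(), in the hunk earlier in this pull, passes the relevant lock to task_css_check() as a lockdep expression. Condensed from that hunk (a sketch, not the complete function):

	struct task_group *tg;

	/*
	 * task_css_check() lets the caller state which lock makes the
	 * lookup safe here; lockdep verifies ->siglock is actually held.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
			  lockdep_is_held(&tsk->sighand->siglock)),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
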
|
@ -36,21 +36,13 @@ static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
|
||||
static inline
|
||||
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
|
||||
{
|
||||
return container_of(s, struct hugetlb_cgroup, css);
|
||||
}
|
||||
|
||||
static inline
|
||||
struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
|
||||
{
|
||||
return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
|
||||
hugetlb_subsys_id));
|
||||
return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
|
||||
{
|
||||
return hugetlb_cgroup_from_css(task_subsys_state(task,
|
||||
hugetlb_subsys_id));
|
||||
return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
|
||||
}
|
||||
|
||||
static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
|
||||
@ -58,17 +50,15 @@ static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
|
||||
return (h_cg == root_h_cgroup);
|
||||
}
|
||||
|
||||
static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
|
||||
static inline struct hugetlb_cgroup *
|
||||
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
|
||||
{
|
||||
if (!cg->parent)
|
||||
return NULL;
|
||||
return hugetlb_cgroup_from_cgroup(cg->parent);
|
||||
return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
|
||||
}
|
||||
|
||||
static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
|
||||
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
|
||||
{
|
||||
int idx;
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
|
||||
|
||||
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
|
||||
if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
|
||||
@ -77,19 +67,18 @@ static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
|
||||
static struct cgroup_subsys_state *
|
||||
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
{
|
||||
struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
|
||||
struct hugetlb_cgroup *h_cgroup;
|
||||
int idx;
|
||||
struct cgroup *parent_cgroup;
|
||||
struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
|
||||
|
||||
h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
|
||||
if (!h_cgroup)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
parent_cgroup = cgroup->parent;
|
||||
if (parent_cgroup) {
|
||||
parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
|
||||
if (parent_h_cgroup) {
|
||||
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
|
||||
res_counter_init(&h_cgroup->hugepage[idx],
|
||||
&parent_h_cgroup->hugepage[idx]);
|
||||
@ -101,11 +90,11 @@ static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgrou
|
||||
return &h_cgroup->css;
|
||||
}
|
||||
|
||||
static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
|
||||
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct hugetlb_cgroup *h_cgroup;
|
||||
|
||||
h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
|
||||
h_cgroup = hugetlb_cgroup_from_css(css);
|
||||
kfree(h_cgroup);
|
||||
}
|
||||
|
||||
@ -117,15 +106,14 @@ static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
|
||||
* page reference and test for page active here. This function
|
||||
* cannot fail.
|
||||
*/
|
||||
static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
|
||||
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
|
||||
struct page *page)
|
||||
{
|
||||
int csize;
|
||||
struct res_counter *counter;
|
||||
struct res_counter *fail_res;
|
||||
struct hugetlb_cgroup *page_hcg;
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
|
||||
struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
|
||||
struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
|
||||
|
||||
page_hcg = hugetlb_cgroup_from_page(page);
|
||||
/*
|
||||
@ -155,8 +143,9 @@ static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
|
||||
* Force the hugetlb cgroup to empty the hugetlb resources by moving them to
|
||||
* the parent cgroup.
|
||||
*/
|
||||
static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
|
||||
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
|
||||
{
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
|
||||
struct hstate *h;
|
||||
struct page *page;
|
||||
int idx = 0;
|
||||
@ -165,13 +154,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
|
||||
for_each_hstate(h) {
|
||||
spin_lock(&hugetlb_lock);
|
||||
list_for_each_entry(page, &h->hugepage_activelist, lru)
|
||||
hugetlb_cgroup_move_parent(idx, cgroup, page);
|
||||
hugetlb_cgroup_move_parent(idx, h_cg, page);
|
||||
|
||||
spin_unlock(&hugetlb_lock);
|
||||
idx++;
|
||||
}
|
||||
cond_resched();
|
||||
} while (hugetlb_cgroup_have_usage(cgroup));
|
||||
} while (hugetlb_cgroup_have_usage(h_cg));
|
||||
}
|
||||
|
||||
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
|
||||
@ -253,14 +242,15 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
|
||||
return;
|
||||
}
|
||||
|
||||
static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
|
||||
struct file *file, char __user *buf,
|
||||
size_t nbytes, loff_t *ppos)
|
||||
static ssize_t hugetlb_cgroup_read(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, struct file *file,
|
||||
char __user *buf, size_t nbytes,
|
||||
loff_t *ppos)
|
||||
{
|
||||
u64 val;
|
||||
char str[64];
|
||||
int idx, name, len;
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
|
||||
|
||||
idx = MEMFILE_IDX(cft->private);
|
||||
name = MEMFILE_ATTR(cft->private);
|
||||
@ -270,12 +260,12 @@ static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
|
||||
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
|
||||
}
|
||||
|
||||
static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
|
||||
const char *buffer)
|
||||
static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
|
||||
struct cftype *cft, const char *buffer)
|
||||
{
|
||||
int idx, name, ret;
|
||||
unsigned long long val;
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
|
||||
|
||||
idx = MEMFILE_IDX(cft->private);
|
||||
name = MEMFILE_ATTR(cft->private);
|
||||
@ -300,10 +290,11 @@ static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
|
||||
static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
|
||||
unsigned int event)
|
||||
{
|
||||
int idx, name, ret = 0;
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
|
||||
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
|
||||
|
||||
idx = MEMFILE_IDX(event);
|
||||
name = MEMFILE_ATTR(event);
|
||||
|
mm/memcontrol.c (223 lines changed)
@@ -483,10 +483,9 @@ enum res_type {
*/
static DEFINE_MUTEX(memcg_create_mutex);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
return container_of(s, struct mem_cgroup, css);
return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */

@@ -1035,12 +1034,6 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
return mem_cgroup_from_css(
cgroup_subsys_state(cont, mem_cgroup_subsys_id));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -1051,7 +1044,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
if (unlikely(!p))
return NULL;

return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -1084,20 +1077,11 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
struct mem_cgroup *last_visited)
{
struct cgroup *prev_cgroup, *next_cgroup;
struct cgroup_subsys_state *prev_css, *next_css;

/*
* Root is not visited by cgroup iterators so it needs an
* explicit visit.
*/
if (!last_visited)
return root;

prev_cgroup = (last_visited == root) ? NULL
: last_visited->css.cgroup;
prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
next_cgroup = cgroup_next_descendant_pre(
prev_cgroup, root->css.cgroup);
next_css = css_next_descendant_pre(prev_css, &root->css);

/*
* Even if we found a group we have to make sure it is
@@ -1106,13 +1090,13 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
* last_visited css is safe to use because it is
* protected by css_get and the tree walk is rcu safe.
*/
if (next_cgroup) {
struct mem_cgroup *mem = mem_cgroup_from_cont(
next_cgroup);
if (next_css) {
struct mem_cgroup *mem = mem_cgroup_from_css(next_css);

if (css_tryget(&mem->css))
return mem;
else {
prev_cgroup = next_cgroup;
prev_css = next_css;
goto skip_node;
}
}
@@ -1525,10 +1509,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
struct cgroup *cgrp = memcg->css.cgroup;

/* root ? */
if (cgrp->parent == NULL)
if (!css_parent(&memcg->css))
return vm_swappiness;

return memcg->swappiness;
@@ -1805,12 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
for_each_mem_cgroup_tree(iter, memcg) {
struct cgroup *cgroup = iter->css.cgroup;
struct cgroup_iter it;
struct css_task_iter it;
struct task_struct *task;

cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
css_task_iter_start(&iter->css, &it);
while ((task = css_task_iter_next(&it))) {
switch (oom_scan_process_thread(task, totalpages, NULL,
false)) {
case OOM_SCAN_SELECT:
@@ -1823,7 +1804,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
case OOM_SCAN_CONTINUE:
continue;
case OOM_SCAN_ABORT:
cgroup_iter_end(cgroup, &it);
css_task_iter_end(&it);
mem_cgroup_iter_break(memcg, iter);
if (chosen)
put_task_struct(chosen);
@@ -1840,7 +1821,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
get_task_struct(chosen);
}
}
cgroup_iter_end(cgroup, &it);
css_task_iter_end(&it);
}

if (!chosen)
@@ -2954,10 +2935,10 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
}

#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
struct seq_file *m)
static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css,
struct cftype *cft, struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct memcg_cache_params *params;

if (!memcg_can_account_kmem(memcg))
@@ -4943,10 +4924,10 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
*/
static inline bool __memcg_has_children(struct mem_cgroup *memcg)
{
struct cgroup *pos;
struct cgroup_subsys_state *pos;

/* bounce at first found */
cgroup_for_each_child(pos, memcg->css.cgroup)
css_for_each_child(pos, &memcg->css)
return true;
return false;
}
@@ -5002,9 +4983,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
return 0;
}

static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
unsigned int event)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
int ret;

if (mem_cgroup_is_root(memcg))
@@ -5017,21 +4999,18 @@ static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
}


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return mem_cgroup_from_cont(cont)->use_hierarchy;
return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
u64 val)
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
int retval = 0;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct cgroup *parent = cont->parent;
struct mem_cgroup *parent_memcg = NULL;

if (parent)
parent_memcg = mem_cgroup_from_cont(parent);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));

mutex_lock(&memcg_create_mutex);

@@ -5101,11 +5080,11 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
return val << PAGE_SHIFT;
}

static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css,
struct cftype *cft, struct file *file,
char __user *buf, size_t nbytes, loff_t *ppos)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
char str[64];
u64 val;
int name, len;
@@ -5138,11 +5117,11 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}

static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val)
{
int ret = -EINVAL;
#ifdef CONFIG_MEMCG_KMEM
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
/*
* For simplicity, we won't allow this to be disabled. It also can't
* be changed if the cgroup has children already, or if tasks had
@@ -5158,7 +5137,7 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
mutex_lock(&memcg_create_mutex);
mutex_lock(&set_limit_mutex);
if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
ret = -EBUSY;
goto out;
}
@@ -5228,10 +5207,10 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
* The user of this function is...
* RES_LIMIT.
*/
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
enum res_type type;
int name;
unsigned long long val;
@@ -5255,7 +5234,7 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
else if (type == _MEMSWAP)
ret = mem_cgroup_resize_memsw_limit(memcg, val);
else if (type == _KMEM)
ret = memcg_update_kmem_limit(cont, val);
ret = memcg_update_kmem_limit(css, val);
else
return -EINVAL;
break;
@@ -5283,18 +5262,15 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
struct cgroup *cgroup;
unsigned long long min_limit, min_memsw_limit, tmp;

min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
cgroup = memcg->css.cgroup;
if (!memcg->use_hierarchy)
goto out;

while (cgroup->parent) {
cgroup = cgroup->parent;
memcg = mem_cgroup_from_cont(cgroup);
while (css_parent(&memcg->css)) {
memcg = mem_cgroup_from_css(css_parent(&memcg->css));
if (!memcg->use_hierarchy)
break;
tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@ -5307,9 +5283,9 @@ static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
*memsw_limit = min_memsw_limit;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
int name;
enum res_type type;

@@ -5342,17 +5318,17 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}

static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

if (val >= (1 << NR_MOVE_TYPE))
return -EINVAL;
@@ -5367,7 +5343,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
return -ENOSYS;
@@ -5375,13 +5351,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
struct seq_file *m)
static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
struct cftype *cft, struct seq_file *m)
{
int nid;
unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
unsigned long node_nr;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
seq_printf(m, "total=%lu", total_nr);
@@ -5426,10 +5402,10 @@ static inline void mem_cgroup_lru_names_not_uptodate(void)
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}

static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *mi;
unsigned int i;

@@ -5513,27 +5489,23 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
u64 val)
static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *parent;
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

if (val > 100)
if (val > 100 || !parent)
return -EINVAL;

if (cgrp->parent == NULL)
return -EINVAL;

parent = mem_cgroup_from_cont(cgrp->parent);

mutex_lock(&memcg_create_mutex);

/* If under hierarchy, only empty-root can set this value */
@@ -5636,10 +5608,10 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5719,10 +5691,10 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
return ret;
}

static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5798,10 +5770,10 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_eventfd_list *event;
enum res_type type = MEMFILE_TYPE(cft->private);

@@ -5823,10 +5795,10 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_eventfd_list *ev, *tmp;
enum res_type type = MEMFILE_TYPE(cft->private);

@@ -5844,10 +5816,10 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css,
struct cftype *cft, struct cgroup_map_cb *cb)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);

@@ -5858,18 +5830,16 @@ static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *parent;
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

/* cannot set to root cgroup and only 0 and 1 are allowed */
if (!cgrp->parent || !((val == 0) || (val == 1)))
if (!parent || !((val == 0) || (val == 1)))
return -EINVAL;

parent = mem_cgroup_from_cont(cgrp->parent);

mutex_lock(&memcg_create_mutex);
/* oom-kill-disable is a flag for subhierarchy. */
if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
@@ -6228,7 +6198,7 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup *cont)
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct mem_cgroup *memcg;
long error = -ENOMEM;
@@ -6243,7 +6213,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
goto free_out;

/* root ? */
if (cont->parent == NULL) {
if (parent_css == NULL) {
root_mem_cgroup = memcg;
res_counter_init(&memcg->res, NULL);
res_counter_init(&memcg->memsw, NULL);
@@ -6265,17 +6235,16 @@ mem_cgroup_css_alloc(struct cgroup *cont)
}

static int
mem_cgroup_css_online(struct cgroup *cont)
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg, *parent;
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
int error = 0;

if (!cont->parent)
if (!parent)
return 0;

mutex_lock(&memcg_create_mutex);
memcg = mem_cgroup_from_cont(cont);
parent = mem_cgroup_from_cont(cont->parent);

memcg->use_hierarchy = parent->use_hierarchy;
memcg->oom_kill_disable = parent->oom_kill_disable;
@@ -6326,9 +6295,9 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
mem_cgroup_iter_invalidate(root_mem_cgroup);
}

static void mem_cgroup_css_offline(struct cgroup *cont)
static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

kmem_cgroup_css_offline(memcg);

@@ -6338,9 +6307,9 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
vmpressure_cleanup(&memcg->vmpressure);
}

static void mem_cgroup_css_free(struct cgroup *cont)
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

memcg_destroy_kmem(memcg);
__mem_cgroup_free(memcg);
@@ -6710,12 +6679,12 @@ static void mem_cgroup_clear_mc(void)
mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup *cgroup,
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long move_charge_at_immigrate;

/*
@@ -6757,7 +6726,7 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
@@ -6905,7 +6874,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup *cont,
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
@@ -6920,16 +6889,16 @@ static void mem_cgroup_move_task(struct cgroup *cont,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup *cont,
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
@@ -6939,15 +6908,15 @@ static void mem_cgroup_move_task(struct cgroup *cont,
* Cgroup retains root cgroups across [un]mount cycles making it necessary
* to verify sane_behavior flag on each mount attempt.
*/
static void mem_cgroup_bind(struct cgroup *root)
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
/*
* use_hierarchy is forced with sane_behavior. cgroup core
* guarantees that @root doesn't have any children, so turning it
* on for the root memcg is enough.
*/
if (cgroup_sane_behavior(root))
mem_cgroup_from_cont(root)->use_hierarchy = true;
if (cgroup_sane_behavior(root_css->cgroup))
mem_cgroup_from_css(root_css)->use_hierarchy = true;
}

struct cgroup_subsys mem_cgroup_subsys = {
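The mem_cgroup_out_of_memory() hunk above replaces the cgroup_iter_* task walk with the css-based css_task_iter_* API. A condensed sketch of the new loop shape follows; the surrounding memcg and OOM-scoring logic is omitted, so this is illustrative only and not a drop-in excerpt from the patch:

	struct css_task_iter it;
	struct task_struct *task;

	/* walk every task attached to this css instead of its cgroup */
	css_task_iter_start(&iter->css, &it);
	while ((task = css_task_iter_next(&it))) {
		/* examine each candidate task here */
	}
	css_task_iter_end(&it);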
@@ -74,15 +74,10 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work)
return container_of(work, struct vmpressure, work);
}

static struct vmpressure *cg_to_vmpressure(struct cgroup *cg)
{
return css_to_vmpressure(cgroup_subsys_state(cg, mem_cgroup_subsys_id));
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
struct cgroup *cg = vmpressure_to_css(vmpr)->cgroup;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cg);
struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

memcg = parent_mem_cgroup(memcg);
if (!memcg)
@@ -283,7 +278,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)

/**
* vmpressure_register_event() - Bind vmpressure notifications to an eventfd
* @cg: cgroup that is interested in vmpressure notifications
* @css: css that is interested in vmpressure notifications
* @cft: cgroup control files handle
* @eventfd: eventfd context to link notifications with
* @args: event arguments (used to set up a pressure level threshold)
@@ -298,10 +293,11 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
* cftype).register_event, and then cgroup core will handle everything by
* itself.
*/
int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
struct eventfd_ctx *eventfd, const char *args)
int vmpressure_register_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd,
const char *args)
{
struct vmpressure *vmpr = cg_to_vmpressure(cg);
struct vmpressure *vmpr = css_to_vmpressure(css);
struct vmpressure_event *ev;
int level;

@@ -329,7 +325,7 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,

/**
* vmpressure_unregister_event() - Unbind eventfd from vmpressure
* @cg: cgroup handle
* @css: css handle
* @cft: cgroup control files handle
* @eventfd: eventfd context that was used to link vmpressure with the @cg
*
@@ -341,10 +337,11 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
* cftype).unregister_event, and then cgroup core will handle everything
* by itself.
*/
void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
void vmpressure_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft,
struct eventfd_ctx *eventfd)
{
struct vmpressure *vmpr = cg_to_vmpressure(cg);
struct vmpressure *vmpr = css_to_vmpressure(css);
struct vmpressure_event *ev;

mutex_lock(&vmpr->events_lock);
@@ -29,12 +29,6 @@

#define PRIOMAP_MIN_SZ 128

static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
struct cgroup_netprio_state, css);
}

/*
* Extend @dev->priomap so that it's large enough to accomodate
* @target_idx. @dev->priomap.priomap_len > @target_idx after successful
@@ -87,67 +81,70 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)

/**
* netprio_prio - return the effective netprio of a cgroup-net_device pair
* @cgrp: cgroup part of the target pair
* @css: css part of the target pair
* @dev: net_device part of the target pair
*
* Should be called under RCU read or rtnl lock.
*/
static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
{
struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
int id = css->cgroup->id;

if (map && cgrp->id < map->priomap_len)
return map->priomap[cgrp->id];
if (map && id < map->priomap_len)
return map->priomap[id];
return 0;
}

/**
* netprio_set_prio - set netprio on a cgroup-net_device pair
* @cgrp: cgroup part of the target pair
* @css: css part of the target pair
* @dev: net_device part of the target pair
* @prio: prio to set
*
* Set netprio to @prio on @cgrp-@dev pair. Should be called under rtnl
* Set netprio to @prio on @css-@dev pair. Should be called under rtnl
* lock and may fail under memory pressure for non-zero @prio.
*/
static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
u32 prio)
static int netprio_set_prio(struct cgroup_subsys_state *css,
struct net_device *dev, u32 prio)
{
struct netprio_map *map;
int id = css->cgroup->id;
int ret;

/* avoid extending priomap for zero writes */
map = rtnl_dereference(dev->priomap);
if (!prio && (!map || map->priomap_len <= cgrp->id))
if (!prio && (!map || map->priomap_len <= id))
return 0;

ret = extend_netdev_table(dev, cgrp->id);
ret = extend_netdev_table(dev, id);
if (ret)
return ret;

map = rtnl_dereference(dev->priomap);
map->priomap[cgrp->id] = prio;
map->priomap[id] = prio;
return 0;
}

static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_netprio_state *cs;
struct cgroup_subsys_state *css;

cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
css = kzalloc(sizeof(*css), GFP_KERNEL);
if (!css)
return ERR_PTR(-ENOMEM);

return &cs->css;
return css;
}

static int cgrp_css_online(struct cgroup *cgrp)
static int cgrp_css_online(struct cgroup_subsys_state *css)
{
struct cgroup *parent = cgrp->parent;
struct cgroup_subsys_state *parent_css = css_parent(css);
struct net_device *dev;
int ret = 0;

if (!parent)
if (!parent_css)
return 0;

rtnl_lock();
@@ -156,9 +153,9 @@ static int cgrp_css_online(struct cgroup *cgrp)
* onlining, there is no need to clear them on offline.
*/
for_each_netdev(&init_net, dev) {
u32 prio = netprio_prio(parent, dev);
u32 prio = netprio_prio(parent_css, dev);

ret = netprio_set_prio(cgrp, dev, prio);
ret = netprio_set_prio(css, dev, prio);
if (ret)
break;
}
@@ -166,29 +163,29 @@ static int cgrp_css_online(struct cgroup *cgrp)
return ret;
}

static void cgrp_css_free(struct cgroup *cgrp)
static void cgrp_css_free(struct cgroup_subsys_state *css)
{
kfree(cgrp_netprio_state(cgrp));
kfree(css);
}

static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
{
return cgrp->id;
return css->cgroup->id;
}

static int read_priomap(struct cgroup *cont, struct cftype *cft,
static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct net_device *dev;

rcu_read_lock();
for_each_netdev_rcu(&init_net, dev)
cb->fill(cb, dev->name, netprio_prio(cont, dev));
cb->fill(cb, dev->name, netprio_prio(css, dev));
rcu_read_unlock();
return 0;
}

static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
char devname[IFNAMSIZ + 1];
@@ -205,7 +202,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,

rtnl_lock();

ret = netprio_set_prio(cgrp, dev, prio);
ret = netprio_set_prio(css, dev, prio);

rtnl_unlock();
dev_put(dev);
@@ -221,12 +218,13 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
return 0;
}

static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
static void net_prio_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p;
void *v;

cgroup_taskset_for_each(p, cgrp, tset) {
cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
v = (void *)(unsigned long)task_netprioidx(p);
iterate_fd(p->files, 0, update_netprio, v);
@@ -132,10 +132,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
return 0;
}

static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long long val;
int ret = 0;

@@ -180,9 +180,9 @@ static u64 tcp_read_usage(struct mem_cgroup *memcg)
return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
}

static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
u64 val;

switch (cft->private) {
@@ -202,13 +202,13 @@ static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
return val;
}

static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
struct mem_cgroup *memcg;
struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;

memcg = mem_cgroup_from_cont(cont);
memcg = mem_cgroup_from_css(css);
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return 0;
@@ -23,19 +23,18 @@
#include <net/sock.h>
#include <net/cls_cgroup.h>

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
struct cgroup_cls_state, css);
return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
return container_of(task_subsys_state(p, net_cls_subsys_id),
struct cgroup_cls_state, css);
return css_cls_state(task_css(p, net_cls_subsys_id));
}

static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_cls_state *cs;

@@ -45,17 +44,19 @@ static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
return &cs->css;
}

static int cgrp_css_online(struct cgroup *cgrp)
static int cgrp_css_online(struct cgroup_subsys_state *css)
{
if (cgrp->parent)
cgrp_cls_state(cgrp)->classid =
cgrp_cls_state(cgrp->parent)->classid;
struct cgroup_cls_state *cs = css_cls_state(css);
struct cgroup_cls_state *parent = css_cls_state(css_parent(css));

if (parent)
cs->classid = parent->classid;
return 0;
}

static void cgrp_css_free(struct cgroup *cgrp)
static void cgrp_css_free(struct cgroup_subsys_state *css)
{
kfree(cgrp_cls_state(cgrp));
kfree(css_cls_state(css));
}

static int update_classid(const void *v, struct file *file, unsigned n)
@@ -67,12 +68,13 @@ static int update_classid(const void *v, struct file *file, unsigned n)
return 0;
}

static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
static void cgrp_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p;
void *v;

cgroup_taskset_for_each(p, cgrp, tset) {
cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
v = (void *)(unsigned long)task_cls_classid(p);
iterate_fd(p->files, 0, update_classid, v);
@@ -80,14 +82,15 @@ static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
}
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
{
return cgrp_cls_state(cgrp)->classid;
return css_cls_state(css)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value)
{
cgrp_cls_state(cgrp)->classid = (u32) value;
css_cls_state(css)->classid = (u32) value;
return 0;
}
@@ -53,22 +53,17 @@ struct dev_cgroup {

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
return css_to_devcgroup(task_css(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
struct cgroup_taskset *set)
{
struct task_struct *task = cgroup_taskset_first(set);
@@ -193,18 +188,16 @@ static inline bool is_devcg_online(const struct dev_cgroup *devcg)
/**
* devcgroup_online - initializes devcgroup's behavior and exceptions based on
* parent's
* @cgroup: cgroup getting online
* @css: css getting online
* returns 0 in case of success, error code otherwise
*/
static int devcgroup_online(struct cgroup *cgroup)
static int devcgroup_online(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
int ret = 0;

mutex_lock(&devcgroup_mutex);
dev_cgroup = cgroup_to_devcgroup(cgroup);
if (cgroup->parent)
parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);

if (parent_dev_cgroup == NULL)
dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
@@ -219,9 +212,9 @@ static int devcgroup_online(struct cgroup *cgroup)
return ret;
}

static void devcgroup_offline(struct cgroup *cgroup)
static void devcgroup_offline(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

mutex_lock(&devcgroup_mutex);
dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
@@ -231,7 +224,8 @@ static void devcgroup_offline(struct cgroup *cgroup)
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct dev_cgroup *dev_cgroup;

@@ -244,11 +238,10 @@ static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup;
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

dev_cgroup = cgroup_to_devcgroup(cgroup);
__dev_exception_clean(dev_cgroup);
kfree(dev_cgroup);
}
@@ -291,10 +284,10 @@ static void set_majmin(char *str, unsigned m)
sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
struct seq_file *m)
static int devcgroup_seq_read(struct cgroup_subsys_state *css,
struct cftype *cft, struct seq_file *m)
{
struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
struct dev_cgroup *devcgroup = css_to_devcgroup(css);
struct dev_exception_item *ex;
char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

@@ -394,12 +387,10 @@ static bool may_access(struct dev_cgroup *dev_cgroup,
static int parent_has_perm(struct dev_cgroup *childcg,
struct dev_exception_item *ex)
{
struct cgroup *pcg = childcg->css.cgroup->parent;
struct dev_cgroup *parent;
struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));

if (!pcg)
if (!parent)
return 1;
parent = cgroup_to_devcgroup(pcg);
return may_access(parent, ex, childcg->behavior);
}

@@ -451,13 +442,13 @@ static void revalidate_active_exceptions(struct dev_cgroup *devcg)
static int propagate_exception(struct dev_cgroup *devcg_root,
struct dev_exception_item *ex)
{
struct cgroup *root = devcg_root->css.cgroup, *pos;
struct cgroup_subsys_state *pos;
int rc = 0;

rcu_read_lock();

cgroup_for_each_descendant_pre(pos, root) {
struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);
css_for_each_descendant_pre(pos, &devcg_root->css) {
struct dev_cgroup *devcg = css_to_devcgroup(pos);

/*
* Because devcgroup_mutex is held, no devcg will become
@@ -465,7 +456,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
* methods), and online ones are safe to access outside RCU
* read lock without bumping refcnt.
*/
if (!is_devcg_online(devcg))
if (pos == &devcg_root->css || !is_devcg_online(devcg))
continue;

rcu_read_unlock();
@@ -524,15 +515,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
char temp[12]; /* 11 + 1 characters needed for a u32 */
int count, rc = 0;
struct dev_exception_item ex;
struct cgroup *p = devcgroup->css.cgroup;
struct dev_cgroup *parent = NULL;
struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

if (p->parent)
parent = cgroup_to_devcgroup(p->parent);

memset(&ex, 0, sizeof(ex));
b = buffer;

@@ -677,13 +664,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
return rc;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
const char *buffer)
static int devcgroup_access_write(struct cgroup_subsys_state *css,
struct cftype *cft, const char *buffer)
{
int retval;

mutex_lock(&devcgroup_mutex);
retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
retval = devcgroup_update_access(css_to_devcgroup(css),
cft->private, buffer);
mutex_unlock(&devcgroup_mutex);
return retval;
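Taken together, the hunks above apply one conversion pattern across the controllers: cftype handlers and cgroup_subsys callbacks now receive a struct cgroup_subsys_state * directly, each controller recovers its private state through a NULL-tolerant container_of() helper, and parents are reached via css_parent() rather than cgroup->parent. A minimal sketch of that pattern for a hypothetical "foo" controller (the foo_* names are illustrative only and not part of this patch):

	struct foo_cgrp {
		struct cgroup_subsys_state css;
		u64 setting;
	};

	/* NULL-tolerant accessor, mirroring css_cls_state() and css_to_devcgroup() above */
	static inline struct foo_cgrp *css_foo(struct cgroup_subsys_state *css)
	{
		return css ? container_of(css, struct foo_cgrp, css) : NULL;
	}

	/* callbacks take the css directly; the parent is reached via css_parent() */
	static int foo_css_online(struct cgroup_subsys_state *css)
	{
		struct foo_cgrp *foo = css_foo(css);
		struct foo_cgrp *parent = css_foo(css_parent(css));

		if (parent)
			foo->setting = parent->setting;
		return 0;
	}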