From: David Rientjes
Date: Thu, 2 Apr 2009 23:57:54 +0000 (-0700)
Subject: cpusets: replace zone allowed functions with node allowed
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=a1bc5a4eee990a1f290735c8694d0aebdad095fa;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

cpusets: replace zone allowed functions with node allowed

The cpuset_zone_allowed() variants are actually only a function of the
zone's node.

Cc: Paul Menage
Acked-by: Christoph Lameter
Cc: Randy Dunlap
Signed-off-by: David Rientjes
Cc: Ingo Molnar
Cc: Peter Zijlstra
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2e0d79678deb..05ea1dd7d681 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/cgroup.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_CPUSETS
 
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
 
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_softwall(z, gfp_mask);
+		__cpuset_node_allowed_softwall(node, gfp_mask);
 }
 
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_hardwall(z, gfp_mask);
+		__cpuset_node_allowed_hardwall(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
 }
 
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 	return 1;
 }
 
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 {
 	return 1;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0619f109d38d..3ff910eb30d3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2181,26 +2181,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 }
 
 /**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.  If
- * __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
  * Otherwise, no.
  *
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
  *
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2239,20 +2237,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
  * GFP_USER - only nodes in current tasks mems allowed ok.
  *
  * Rule:
- *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
  *    the code that might scan up ancestor cpusets and sleep.
  */
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
@@ -2281,15 +2276,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 }
 
 /*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes.  Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2297,20 +2292,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  * any node on the zonelist except the first.  By the time any such
  * calls get to this routine, we should just shut up and say 'yes'.
  *
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
  * mems_allowed or that we're in interrupt.  It does not scan up the
  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
  * It never sleeps.
  */
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
-
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*
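
Illustration (editor's note, not part of the commit): after this conversion, a
caller that walks a zonelist can resolve each zone to its node once with
zone_to_nid() and test the node directly. The sketch below is hypothetical;
sketch_try_nodes() and its locals are invented for the example, while
for_each_zone_zonelist(), gfp_zone(), zone_to_nid(), and the cpuset helpers
are the real interfaces of this kernel era.

	#include <linux/cpuset.h>
	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/*
	 * Hypothetical caller sketch: skip any node the current task's
	 * cpuset does not allow, mirroring the pattern the old
	 * zone-based wrappers were hiding.
	 */
	static int sketch_try_nodes(gfp_t gfp_mask, struct zonelist *zonelist)
	{
		enum zone_type high_zoneidx = gfp_zone(gfp_mask);
		struct zoneref *z;
		struct zone *zone;

		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
			int nid = zone_to_nid(zone);

			/*
			 * The softwall check may sleep unless
			 * __GFP_HARDWALL is set, so it must not be
			 * used from atomic context.
			 */
			if (!cpuset_node_allowed_softwall(nid, gfp_mask))
				continue;

			/* ... attempt the allocation on nid here ... */
			return nid;
		}
		return -1;	/* no allowed node found */
	}

Callers that already know the target node can call the cpuset_node_allowed_*()
helpers directly and skip the struct zone round-trip entirely, which is the
point of the conversion.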