Commit 6f848600 authored by Andrew Morton, committed by james toy

ERROR: code indent should use tabs where possible
#99: FILE: mm/oom_kill.c:209:
+ ^I * to kill current.We have to random task kill in this case.$

ERROR: code indent should use tabs where possible
#100: FILE: mm/oom_kill.c:210:
+ ^I * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now.$

ERROR: code indent should use tabs where possible
#101: FILE: mm/oom_kill.c:211:
+ ^I */$

ERROR: code indent should use tabs where possible
#107: FILE: mm/oom_kill.c:216:
+ ^I * The nodemask here is a nodemask passed to alloc_pages(). Now,$

ERROR: code indent should use tabs where possible
#108: FILE: mm/oom_kill.c:217:
+ ^I * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy$

ERROR: code indent should use tabs where possible
#109: FILE: mm/oom_kill.c:218:
+ ^I * feature. mempolicy is an only user of nodemask here.$

ERROR: code indent should use tabs where possible
#111: FILE: mm/oom_kill.c:220:
+ ^I */$

ERROR: code indent should use tabs where possible
#169: FILE: mm/page_alloc.c:1672:
+^I ^I* GFP_THISNODE contains __GFP_NORETRY and we never hit this.$

ERROR: code indent should use tabs where possible
#170: FILE: mm/page_alloc.c:1673:
+^I ^I* Sanity check for bare calls of __GFP_THISNODE, not real OOM.$

ERROR: code indent should use tabs where possible
#171: FILE: mm/page_alloc.c:1674:
+^I ^I* The caller should handle page allocation failure by itself if$

ERROR: code indent should use tabs where possible
#172: FILE: mm/page_alloc.c:1675:
+^I ^I* it specifies __GFP_THISNODE.$

ERROR: code indent should use tabs where possible
#173: FILE: mm/page_alloc.c:1676:
+^I ^I* Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.$

ERROR: code indent should use tabs where possible
#174: FILE: mm/page_alloc.c:1677:
+^I ^I*/$

total: 13 errors, 0 warnings, 125 lines checked

./patches/oom-kill-fix-numa-consraint-check-with-nodemask-v42.patch has style problems, please review.  If any of these errors
are false positives report them to the maintainer, see
CHECKPATCH in MAINTAINERS.

Please run checkpatch prior to sending patches
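
For reference, the same report can be reproduced from the top of a kernel tree before posting. A minimal invocation against the patch named above would be (the ./patches/ location is taken from the report itself; adjust it to wherever the patch actually lives):

    ./scripts/checkpatch.pl patches/oom-kill-fix-numa-consraint-check-with-nodemask-v42.patch

checkpatch.pl reads the patch file directly, so nothing in the tree needs to be applied first, and its output has the same ERROR/total format quoted above.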

Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hioryu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7b927491
@@ -206,18 +206,18 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
	/*
	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
	 * to kill current.We have to random task kill in this case.
	 * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;
	/*
	 * The nodemask here is a nodemask passed to alloc_pages(). Now,
	 * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
	 * feature. mempolicy is an only user of nodemask here.
	 * check mempolicy's nodemask contains all N_HIGH_MEMORY
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;
...
@@ -1669,12 +1669,12 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		goto out;
	/*
	 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
	 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
	 * The caller should handle page allocation failure by itself if
	 * it specifies __GFP_THISNODE.
	 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
	 */
	if (gfp_mask & __GFP_THISNODE)
		goto out;
	}
@@ -3143,7 +3143,7 @@ static int __cpuinit process_zones(int cpu)
	if (percpu_pagelist_fraction)
		setup_pagelist_highmark(zone_pcp(zone, cpu),
			(zone->present_pages / percpu_pagelist_fraction));
	}
	return 0;
...
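
The hunks above change only leading whitespace, which this rendering cannot show. As a rough sketch (not taken verbatim from the commit), the flagged comment lines in mm/oom_kill.c were indented with a space followed by a tab, and the fix drops the stray space so the comment is indented by a tab only, matching CodingStyle; using the ^I tab notation from the checkpatch report:

 ^I * to kill current.We have to random task kill in this case.    (space + tab: flagged by checkpatch)
^I * to kill current.We have to random task kill in this case.     (tab only: after this fix)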