Commit cafe8816 authored by Tejun Heo

percpu: use negative for auto for pcpu_setup_first_chunk() arguments

Impact: argument semantic cleanup

In pcpu_setup_first_chunk(), zero @unit_size and @dyn_size meant
auto-sizing.  It's okay for @unit_size as 0 doesn't make sense but 0
dynamic reserve size is valid.  Also, if arch @dyn_size is calculated
from other parameters, it might end up passing in 0 @dyn_size and
malfunction when the size is automatically adjusted.

This patch makes both @unit_size and @dyn_size ssize_t and use -1 for
auto sizing.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 61ace7fa
...@@ -344,7 +344,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size) ...@@ -344,7 +344,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
pcpu4k_nr_static_pages, static_size); pcpu4k_nr_static_pages, static_size);
ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, -1, -1, NULL,
pcpu4k_populate_pte); pcpu4k_populate_pte);
goto out_free_ar; goto out_free_ar;
......
...@@ -117,8 +117,9 @@ typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); ...@@ -117,8 +117,9 @@ typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
size_t static_size, size_t unit_size, size_t static_size,
size_t dyn_size, void *base_addr, ssize_t unit_size, ssize_t dyn_size,
void *base_addr,
pcpu_populate_pte_fn_t populate_pte_fn); pcpu_populate_pte_fn_t populate_pte_fn);
/* /*
......
...@@ -824,8 +824,8 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -824,8 +824,8 @@ EXPORT_SYMBOL_GPL(free_percpu);
* pcpu_setup_first_chunk - initialize the first percpu chunk * pcpu_setup_first_chunk - initialize the first percpu chunk
* @get_page_fn: callback to fetch page pointer * @get_page_fn: callback to fetch page pointer
* @static_size: the size of static percpu area in bytes * @static_size: the size of static percpu area in bytes
* @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
* @dyn_size: free size for dynamic allocation in bytes, 0 for auto * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
* @base_addr: mapped address, NULL for auto * @base_addr: mapped address, NULL for auto
* @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
* *
...@@ -842,13 +842,14 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -842,13 +842,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
* indicates end of pages for the cpu. Note that @get_page_fn() must * indicates end of pages for the cpu. Note that @get_page_fn() must
* return the same number of pages for all cpus. * return the same number of pages for all cpus.
* *
* @unit_size, if non-zero, determines unit size and must be aligned * @unit_size, if non-negative, specifies unit size and must be
* to PAGE_SIZE and equal to or larger than @static_size + @dyn_size. * aligned to PAGE_SIZE and equal to or larger than @static_size +
* @dyn_size.
* *
* @dyn_size determines the number of free bytes after the static * @dyn_size, if non-negative, limits the number of bytes available
* area in the first chunk. If zero, whatever left is available. * for dynamic allocation in the first chunk. Specifying non-negative
* Specifying non-zero value make percpu leave the area after * value make percpu leave alone the area beyond @static_size +
* @static_size + @dyn_size alone. * @dyn_size.
* *
* Non-null @base_addr means that the caller already allocated virtual * Non-null @base_addr means that the caller already allocated virtual
* region for the first chunk and mapped it. percpu must not mess * region for the first chunk and mapped it. percpu must not mess
...@@ -863,8 +864,9 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -863,8 +864,9 @@ EXPORT_SYMBOL_GPL(free_percpu);
* percpu access. * percpu access.
*/ */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
size_t static_size, size_t unit_size, size_t static_size,
size_t dyn_size, void *base_addr, ssize_t unit_size, ssize_t dyn_size,
void *base_addr,
pcpu_populate_pte_fn_t populate_pte_fn) pcpu_populate_pte_fn_t populate_pte_fn)
{ {
static struct vm_struct first_vm; static struct vm_struct first_vm;
...@@ -877,13 +879,17 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, ...@@ -877,13 +879,17 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
/* santiy checks */ /* santiy checks */
BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC); BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC);
BUG_ON(!static_size); BUG_ON(!static_size);
BUG_ON(!unit_size && dyn_size); if (unit_size >= 0) {
BUG_ON(unit_size && unit_size < static_size + dyn_size); BUG_ON(unit_size < static_size +
BUG_ON(unit_size & ~PAGE_MASK); (dyn_size >= 0 ? dyn_size : 0));
BUG_ON(base_addr && !unit_size); BUG_ON(unit_size & ~PAGE_MASK);
} else {
BUG_ON(dyn_size >= 0);
BUG_ON(base_addr);
}
BUG_ON(base_addr && populate_pte_fn); BUG_ON(base_addr && populate_pte_fn);
if (unit_size) if (unit_size >= 0)
pcpu_unit_pages = unit_size >> PAGE_SHIFT; pcpu_unit_pages = unit_size >> PAGE_SHIFT;
else else
pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT, pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
...@@ -894,6 +900,9 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, ...@@ -894,6 +900,9 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *); + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
if (dyn_size < 0)
dyn_size = pcpu_unit_size - static_size;
/* /*
* Allocate chunk slots. The additional last slot is for * Allocate chunk slots. The additional last slot is for
* empty chunks. * empty chunks.
...@@ -909,12 +918,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, ...@@ -909,12 +918,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
schunk->vm = &first_vm; schunk->vm = &first_vm;
schunk->map = smap; schunk->map = smap;
schunk->map_alloc = ARRAY_SIZE(smap); schunk->map_alloc = ARRAY_SIZE(smap);
schunk->free_size = dyn_size;
if (dyn_size)
schunk->free_size = dyn_size;
else
schunk->free_size = pcpu_unit_size - static_size;
schunk->contig_hint = schunk->free_size; schunk->contig_hint = schunk->free_size;
schunk->map[schunk->map_used++] = -static_size; schunk->map[schunk->map_used++] = -static_size;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment