static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;
-/*
- * Free path accesses and alters only the index data structures and can be
- * safely called from atomic context. When memory needs to be returned to
- * the system, free path schedules reclaim_work.
- */
static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
* @size: size of area to allocate in bytes
* @align: alignment of area (max PAGE_SIZE)
* @reserved: allocate from the reserved chunk if available
+ * @gfp: allocation flags
*
- * Allocate percpu area of @size bytes aligned at @align.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
+ * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
+ * contain %GFP_KERNEL, the allocation is atomic.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ gfp_t gfp)
{
static int warn_limit = 10;
struct pcpu_chunk *chunk;
const char *err;
+ bool is_atomic = !(gfp & GFP_KERNEL);
int slot, off, new_alloc, cpu, ret;
unsigned long flags;
void __percpu *ptr;
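
For orientation (editor's sketch, not part of the patch): %GFP_KERNEL is itself a mask of several bits, so the !(gfp & GFP_KERNEL) test above treats a request as atomic only when it carries none of them. Illustrative outcomes, under the gfp flag definitions contemporary with this patch:

/*
 * Illustrative only, not part of the patch:
 *
 *	pcpu_alloc(size, align, false, GFP_KERNEL);	=> is_atomic == false
 *	pcpu_alloc(size, align, false, GFP_ATOMIC);	=> is_atomic == true
 *	pcpu_alloc(size, align, false, GFP_NOWAIT);	=> is_atomic == true
 *
 * A mask carrying only a subset of GFP_KERNEL's bits (e.g. GFP_NOIO)
 * still tests non-zero and is treated as a blocking request here.
 */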
-	while ((new_alloc = pcpu_need_to_extend(chunk))) {
+	while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
spin_unlock_irqrestore(&pcpu_lock, flags);
- if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+ if (is_atomic ||
+ pcpu_extend_area_map(chunk, new_alloc) < 0) {
err = "failed to extend area map of reserved chunk";
goto fail;
}
spin_lock_irqsave(&pcpu_lock, flags);
}
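
The is_atomic || short-circuit above exists because growing the area map itself requires a sleeping allocation. A simplified, assumed shape of the helper (the real mm/percpu.c version also copies the old map over and publishes the new one under pcpu_lock):

static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *new;

	new = pcpu_mem_zalloc(new_alloc * sizeof(new[0]));	/* may sleep */
	if (!new)
		return -ENOMEM;

	/* ... copy the old map and install @new under pcpu_lock ... */
	return 0;
}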
- off = pcpu_alloc_area(chunk, size, align, false);
+ off = pcpu_alloc_area(chunk, size, align, is_atomic);
if (off >= 0)
goto area_found;
-			new_alloc = pcpu_need_to_extend(chunk);
+			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
if (new_alloc) {
+ if (is_atomic)
+ continue;
spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk,
						 new_alloc) < 0) {
				err = "failed to extend area map";
				goto fail;
			}
			/* pcpu_lock was dropped; restart the slot walk */
			spin_lock_irqsave(&pcpu_lock, flags);
			goto restart;
		}
- off = pcpu_alloc_area(chunk, size, align, false);
+ off = pcpu_alloc_area(chunk, size, align, is_atomic);
if (off >= 0)
goto area_found;
}
* tasks to create chunks simultaneously. Serialize and create iff
* there's still no empty chunk after grabbing the mutex.
*/
+ if (is_atomic)
+ goto fail;
+
mutex_lock(&pcpu_alloc_mutex);
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
spin_unlock_irqrestore(&pcpu_lock, flags);
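
The new goto fail keeps atomic requests out of the chunk-creation path, which both sleeps on pcpu_alloc_mutex and allocates chunk metadata with %GFP_KERNEL. A simplified, assumed shape of the creation helper (details vary between percpu-vm.c and percpu-km.c):

static struct pcpu_chunk *pcpu_create_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_alloc_chunk();	/* GFP_KERNEL metadata allocation */
	if (!chunk)
		return NULL;

	/* ... reserve the chunk's vmalloc area here ... */
	return chunk;
}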
/* populate if not all pages are already there */
- if (true) {
+ if (!is_atomic) {
int page_start, page_end, rs, re;
mutex_lock(&pcpu_alloc_mutex);
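
For reference, the population walk this block leads into has roughly the following shape (assumed and simplified; the mutex release on the error path is elided):

	page_start = PFN_DOWN(off);
	page_end = PFN_UP(off + size);

	/* populate each not-yet-populated page range of the area */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		WARN_ON(chunk->immutable);
		ret = pcpu_populate_chunk(chunk, rs, re);
		spin_lock_irqsave(&pcpu_lock, flags);
		if (ret) {
			pcpu_free_area(chunk, off);
			err = "failed to populate";
			goto fail_unlock;
		}
		bitmap_set(chunk->populated, rs, re - rs);
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}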
fail_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
- if (warn_limit) {
- pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
- "%s\n", size, align, err);
+ if (!is_atomic && warn_limit) {
+ pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
+ size, align, is_atomic, err);
dump_stack();
if (!--warn_limit)
pr_info("PERCPU: limit reached, disable warning\n");
}
/**
- * __alloc_percpu - allocate dynamic percpu area
+ * __alloc_percpu_gfp - allocate dynamic percpu area
* @size: size of area to allocate in bytes
* @align: alignment of area (max PAGE_SIZE)
+ * @gfp: allocation flags
*
- * Allocate zero-filled percpu area of @size bytes aligned at @align.
- * Might sleep. Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
+ * Allocate zero-filled percpu area of @size bytes aligned at @align. If
+ * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
+ * be called from any context but is a lot more likely to fail.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
+void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
+{
+ return pcpu_alloc(size, align, false, gfp);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
+
+/**
+ * __alloc_percpu - allocate dynamic percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
+ */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
- return pcpu_alloc(size, align, false);
+ return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
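
A hypothetical caller sketch (the struct and function names are illustrative, not from this patch): allocating percpu memory from a context that cannot sleep, via the new gfp-aware entry point:

#include <linux/percpu.h>
#include <linux/gfp.h>

struct conn_stats {
	u64 rx_packets;
};

/* runs under a spinlock, so GFP_KERNEL is not an option */
static struct conn_stats __percpu *conn_stats_alloc_atomic(void)
{
	/* freed later with free_percpu(); atomic requests fail much more
	 * often, so callers need a fallback or retry path */
	return __alloc_percpu_gfp(sizeof(struct conn_stats),
				  __alignof__(struct conn_stats), GFP_NOWAIT);
}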
*/
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
- return pcpu_alloc(size, align, true);
+ return pcpu_alloc(size, align, true, GFP_KERNEL);
}
/**