/*
 * include/linux/memory_hotplug.h
 *
 * Memory hotplug interfaces.
 * (Originally: "memory hotplug: register section/node id to free".)
 */
1#ifndef __LINUX_MEMORY_HOTPLUG_H
2#define __LINUX_MEMORY_HOTPLUG_H
3
4#include <linux/mmzone.h>
5#include <linux/spinlock.h>
3947be19 6#include <linux/notifier.h>
208d54e5 7
78679302
KH
8struct page;
9struct zone;
10struct pglist_data;
ea01ea93 11struct mem_section;
78679302 12
208d54e5 13#ifdef CONFIG_MEMORY_HOTPLUG
04753278
YG
14
15/*
16 * Magic number for free bootmem.
17 * The normal smallest mapcount is -1. Here is smaller value than it.
18 */
19#define SECTION_INFO 0xfffffffe
20#define MIX_INFO 0xfffffffd
21#define NODE_INFO 0xfffffffc
22
208d54e5
DH
23/*
24 * pgdat resizing functions
25 */
26static inline
27void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
28{
29 spin_lock_irqsave(&pgdat->node_size_lock, *flags);
30}
31static inline
32void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
33{
bdc8cb98 34 spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
208d54e5
DH
35}
36static inline
37void pgdat_resize_init(struct pglist_data *pgdat)
38{
39 spin_lock_init(&pgdat->node_size_lock);
40}
bdc8cb98
DH
41/*
42 * Zone resizing functions
43 */
44static inline unsigned zone_span_seqbegin(struct zone *zone)
45{
46 return read_seqbegin(&zone->span_seqlock);
47}
48static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
49{
50 return read_seqretry(&zone->span_seqlock, iv);
51}
52static inline void zone_span_writelock(struct zone *zone)
53{
54 write_seqlock(&zone->span_seqlock);
55}
56static inline void zone_span_writeunlock(struct zone *zone)
57{
58 write_sequnlock(&zone->span_seqlock);
59}
60static inline void zone_seqlock_init(struct zone *zone)
61{
62 seqlock_init(&zone->span_seqlock);
63}
3947be19
DH
/* Grow a zone's free lists / wait queues when pages are hot-added. */
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
extern int offline_pages(unsigned long, unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);

/*
 * Walk through all memory which is registered as resource.
 * arg is (start_pfn, nr_pages, private_arg_pointer)
 */
extern int walk_memory_resource(unsigned long start_pfn,
			unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *));

87
bc02af93
YG
88#ifdef CONFIG_NUMA
89extern int memory_add_physaddr_to_nid(u64 start);
90#else
91static inline int memory_add_physaddr_to_nid(u64 start)
92{
93 return 0;
94}
95#endif
96
306d6cbe
YG
97#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
98/*
99 * For supporting node-hotadd, we have to allocate a new pgdat.
100 *
101 * If an arch has generic style NODE_DATA(),
102 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
103 *
104 * In general, generic_alloc_nodedata() is used.
105 * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
106 *
107 */
dd0932d9
YG
108extern pg_data_t *arch_alloc_nodedata(int nid);
109extern void arch_free_nodedata(pg_data_t *pgdat);
7049027c 110extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
306d6cbe
YG
111
112#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
113
114#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
115#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
116
117#ifdef CONFIG_NUMA
118/*
119 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
120 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
121 * Because, pgdat for the new node is not allocated/initialized yet itself.
122 * To use new node's memory, more consideration will be necessary.
123 */
124#define generic_alloc_nodedata(nid) \
125({ \
126 kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
127})
128/*
129 * This definition is just for error path in node hotadd.
130 * For node hotremove, we have to replace this.
131 */
132#define generic_free_nodedata(pgdat) kfree(pgdat)
133
10ad400b
YG
134extern pg_data_t *node_data[];
135static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
136{
137 node_data[nid] = pgdat;
138}
139
306d6cbe
YG
140#else /* !CONFIG_NUMA */
141
142/* never called */
143static inline pg_data_t *generic_alloc_nodedata(int nid)
144{
145 BUG();
146 return NULL;
147}
148static inline void generic_free_nodedata(pg_data_t *pgdat)
149{
150}
10ad400b
YG
151static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
152{
153}
306d6cbe
YG
154#endif /* CONFIG_NUMA */
155#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
156
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* With vmemmap these are no-ops: there is no bootmem info to track. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

208d54e5
DH
169#else /* ! CONFIG_MEMORY_HOTPLUG */
170/*
171 * Stub functions for when hotplug is off
172 */
173static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
174static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
175static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
bdc8cb98
DH
176
177static inline unsigned zone_span_seqbegin(struct zone *zone)
178{
179 return 0;
180}
181static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
182{
183 return 0;
184}
185static inline void zone_span_writelock(struct zone *zone) {}
186static inline void zone_span_writeunlock(struct zone *zone) {}
187static inline void zone_seqlock_init(struct zone *zone) {}
3947be19
DH
188
189static inline int mhp_notimplemented(const char *func)
190{
191 printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
192 dump_stack();
193 return -ENOSYS;
194}
195
04753278
YG
196static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
197{
198}
199
bdc8cb98 200#endif /* ! CONFIG_MEMORY_HOTPLUG */
9d99aaa3 201
bc02af93
YG
202extern int add_memory(int nid, u64 start, u64 size);
203extern int arch_add_memory(int nid, u64 start, u64 size);
9d99aaa3 204extern int remove_memory(u64 start, u64 size);
f28c5edc
KM
205extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
206 int nr_pages);
ea01ea93 207extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
04753278
YG
208extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
209 unsigned long pnum);
9d99aaa3 210
208d54e5 211#endif /* __LINUX_MEMORY_HOTPLUG_H */