include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / powerpc / platforms / cell / spufs / lscsa_alloc.c
1 /*
2 * SPU local store allocation routines
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21 #undef DEBUG
22
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27
28 #include <asm/spu.h>
29 #include <asm/spu_csa.h>
30 #include <asm/mmu.h>
31
32 #include "spufs.h"
33
34 static int spu_alloc_lscsa_std(struct spu_state *csa)
35 {
36 struct spu_lscsa *lscsa;
37 unsigned char *p;
38
39 lscsa = vmalloc(sizeof(struct spu_lscsa));
40 if (!lscsa)
41 return -ENOMEM;
42 memset(lscsa, 0, sizeof(struct spu_lscsa));
43 csa->lscsa = lscsa;
44
45 /* Set LS pages reserved to allow for user-space mapping. */
46 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
47 SetPageReserved(vmalloc_to_page(p));
48
49 return 0;
50 }
51
52 static void spu_free_lscsa_std(struct spu_state *csa)
53 {
54 /* Clear reserved bit before vfree. */
55 unsigned char *p;
56
57 if (csa->lscsa == NULL)
58 return;
59
60 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
61 ClearPageReserved(vmalloc_to_page(p));
62
63 vfree(csa->lscsa);
64 }
65
66 #ifdef CONFIG_SPU_FS_64K_LS
67
68 #define SPU_64K_PAGE_SHIFT 16
69 #define SPU_64K_PAGE_ORDER (SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
70 #define SPU_64K_PAGE_COUNT (1ul << SPU_64K_PAGE_ORDER)
71
72 int spu_alloc_lscsa(struct spu_state *csa)
73 {
74 struct page **pgarray;
75 unsigned char *p;
76 int i, j, n_4k;
77
78 /* Check availability of 64K pages */
79 if (!spu_64k_pages_available())
80 goto fail;
81
82 csa->use_big_pages = 1;
83
84 pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
85 csa);
86
87 /* First try to allocate our 64K pages. We need 5 of them
88 * with the current implementation. In the future, we should try
89 * to separate the lscsa with the actual local store image, thus
90 * allowing us to require only 4 64K pages per context
91 */
92 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
93 /* XXX This is likely to fail, we should use a special pool
94 * similiar to what hugetlbfs does.
95 */
96 csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
97 SPU_64K_PAGE_ORDER);
98 if (csa->lscsa_pages[i] == NULL)
99 goto fail;
100 }
101
102 pr_debug(" success ! creating vmap...\n");
103
104 /* Now we need to create a vmalloc mapping of these for the kernel
105 * and SPU context switch code to use. Currently, we stick to a
106 * normal kernel vmalloc mapping, which in our case will be 4K
107 */
108 n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
109 pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
110 if (pgarray == NULL)
111 goto fail;
112 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
113 for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
114 /* We assume all the struct page's are contiguous
115 * which should be hopefully the case for an order 4
116 * allocation..
117 */
118 pgarray[i * SPU_64K_PAGE_COUNT + j] =
119 csa->lscsa_pages[i] + j;
120 csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
121 kfree(pgarray);
122 if (csa->lscsa == NULL)
123 goto fail;
124
125 memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
126
127 /* Set LS pages reserved to allow for user-space mapping.
128 *
129 * XXX isn't that a bit obsolete ? I think we should just
130 * make sure the page count is high enough. Anyway, won't harm
131 * for now
132 */
133 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
134 SetPageReserved(vmalloc_to_page(p));
135
136 pr_debug(" all good !\n");
137
138 return 0;
139 fail:
140 pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
141 spu_free_lscsa(csa);
142 return spu_alloc_lscsa_std(csa);
143 }
144
145 void spu_free_lscsa(struct spu_state *csa)
146 {
147 unsigned char *p;
148 int i;
149
150 if (!csa->use_big_pages) {
151 spu_free_lscsa_std(csa);
152 return;
153 }
154 csa->use_big_pages = 0;
155
156 if (csa->lscsa == NULL)
157 goto free_pages;
158
159 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
160 ClearPageReserved(vmalloc_to_page(p));
161
162 vunmap(csa->lscsa);
163 csa->lscsa = NULL;
164
165 free_pages:
166
167 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
168 if (csa->lscsa_pages[i])
169 __free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
170 }
171
172 #else /* CONFIG_SPU_FS_64K_LS */
173
/* Without CONFIG_SPU_FS_64K_LS only the 4K-page allocator exists. */
int spu_alloc_lscsa(struct spu_state *csa)
{
	return spu_alloc_lscsa_std(csa);
}
178
/* Counterpart of the 4K-only spu_alloc_lscsa() above. */
void spu_free_lscsa(struct spu_state *csa)
{
	spu_free_lscsa_std(csa);
}
183
184 #endif /* !defined(CONFIG_SPU_FS_64K_LS) */