fs/9p/cache.c
/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN	11

struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 * @v9ses: session to which the cache tag will be assigned
 *
 * The value of jiffies is used for a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

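/*
 * v9fs_cache_session_get_key - Copy the session cache tag into the buffer
 * used by fscache as the index key for this session.  Returns the key
 * length, or 0 if the tag does not fit in the buffer.
 */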
static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}

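/* Cookie definition for the per-session fscache index. */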
const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= v9fs_cache_session_get_key,
};

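/*
 * v9fs_cache_session_get_cookie - Acquire a session-level fscache cookie,
 * generating a random cache tag first if none was supplied.
 */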
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag)
		v9fs_random_cachetag(v9ses);

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

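/*
 * v9fs_cache_session_put_cookie - Relinquish the session cookie and clear
 * the session's fscache pointer.
 */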
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}

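/*
 * v9fs_cache_inode_get_key - Use the 9P qid path as the fscache index key
 * for an inode.  Returns the key length in bytes.
 */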
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}

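/*
 * v9fs_cache_inode_get_attr - Report the current inode size so fscache
 * can size the backing cache object.
 */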
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	*size = i_size_read(&v9inode->vfs_inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}

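/*
 * v9fs_cache_inode_get_aux - Store the qid version as auxiliary data so
 * that cached objects can later be checked for staleness.
 */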
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}

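/*
 * v9fs_cache_inode_check_aux - Compare the stored qid version against the
 * current one; any mismatch marks the cached object obsolete.
 */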
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

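/* Cookie definition for per-inode data cache objects. */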
const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= v9fs_cache_inode_get_key,
	.get_attr	= v9fs_cache_inode_get_attr,
	.get_aux	= v9fs_cache_inode_get_aux,
	.check_aux	= v9fs_cache_inode_check_aux,
};

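/*
 * v9fs_cache_inode_get_cookie - Acquire an fscache cookie for a regular
 * file inode.  Non-regular files and inodes that already hold a cookie
 * are skipped.
 */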
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

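/*
 * v9fs_cache_inode_put_cookie - Relinquish an inode's cookie without
 * retiring it, so any cached data may be reused later.
 */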
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 0);
	v9inode->fscache = NULL;
}

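/*
 * v9fs_cache_inode_flush_cookie - Relinquish and retire an inode's cookie,
 * discarding the cached data.
 */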
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 1);
	v9inode->fscache = NULL;
}

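/*
 * v9fs_cache_inode_set_cookie - Adjust caching when a file is opened:
 * opening for write flushes the cookie, while a read-only open
 * (re)acquires it.
 */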
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

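/*
 * v9fs_cache_inode_reset_cookie - Retire the current cookie and acquire a
 * fresh one for the same inode, revalidating the cached contents.
 */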
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, 1);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

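/*
 * __v9fs_fscache_release_page - Ask fscache whether a page pinned for
 * caching may be released; returns non-zero if the page can be freed.
 */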
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

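/*
 * __v9fs_fscache_invalidate_page - Wait for any pending write to the cache
 * to finish, then drop the page from the cache before it is invalidated.
 */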
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

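/*
 * v9fs_vfs_readpage_complete - Completion callback for fscache reads:
 * mark the page up to date on success and unlock it either way.
 */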
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 * @inode: inode that owns the page
 * @page: page to be filled from the cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 * @inode: inode that owns the pages
 * @mapping: address space the pages belong to
 * @pages: list of pages to read
 * @nr_pages: in/out count of pages to read
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 * @inode: inode that owns the page
 * @page: page to be written to the cache
 *
 * If the write to the cache fails, the page is uncached again.
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}