/*
 * (web-viewer residue, kept for provenance — not part of the original file)
 * mm: remove unused memclear_highpage_flush()
 * [GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / stat.c
 */
1 /*
2 * linux/fs/stat.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/export.h>
8 #include <linux/mm.h>
9 #include <linux/errno.h>
10 #include <linux/file.h>
11 #include <linux/highuid.h>
12 #include <linux/fs.h>
13 #include <linux/namei.h>
14 #include <linux/security.h>
15 #include <linux/syscalls.h>
16 #include <linux/pagemap.h>
17
18 #include <asm/uaccess.h>
19 #include <asm/unistd.h>
20
/**
 * generic_fillattr - fill in a kstat straight from the inode
 * @inode: inode to read attributes from
 * @stat:  destination kstat structure
 *
 * Copies the generically available attributes (device, inode number, mode,
 * link count, ownership, size, timestamps, block size/usage) out of @inode.
 * Used as the fallback by vfs_getattr() when a filesystem supplies no
 * ->getattr() operation of its own.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = inode->i_uid;
	stat->gid = inode->i_gid;
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = (1 << inode->i_blkbits);	/* i_blkbits is log2 of the block size */
	stat->blocks = inode->i_blocks;
}

EXPORT_SYMBOL(generic_fillattr);
39
/**
 * vfs_getattr - get attributes of a dentry on a given mount
 * @mnt:    vfsmount the dentry is reached through
 * @dentry: dentry to stat
 * @stat:   kstat to fill in
 *
 * Asks the security module for permission first, then dispatches to the
 * filesystem's ->getattr() if it has one, falling back to
 * generic_fillattr() otherwise.  Returns 0 or a negative errno.
 */
int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	int retval;

	/* LSM hook runs before any attributes are exposed */
	retval = security_inode_getattr(mnt, dentry);
	if (retval)
		return retval;

	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt, dentry, stat);

	generic_fillattr(inode, stat);
	return 0;
}

EXPORT_SYMBOL(vfs_getattr);
57
58 int vfs_fstat(unsigned int fd, struct kstat *stat)
59 {
60 struct fd f = fdget_raw(fd);
61 int error = -EBADF;
62
63 if (f.file) {
64 error = vfs_getattr(f.file->f_path.mnt, f.file->f_path.dentry,
65 stat);
66 fdput(f);
67 }
68 return error;
69 }
70 EXPORT_SYMBOL(vfs_fstat);
71
/**
 * vfs_fstatat - stat a path relative to a directory descriptor
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace path to stat
 * @stat:     kstat to fill in
 * @flag:     AT_SYMLINK_NOFOLLOW, AT_NO_AUTOMOUNT and/or AT_EMPTY_PATH
 *
 * Returns 0 or a negative errno.  On -ESTALE the lookup is retried once
 * with LOOKUP_REVAL to force revalidation of cached dentries.
 */
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
		int flag)
{
	struct path path;
	int error = -EINVAL;
	unsigned int lookup_flags = 0;

	/* reject any flag outside the supported set */
	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
		      AT_EMPTY_PATH)) != 0)
		goto out;

	/* default is to follow symlinks; NOFOLLOW suppresses that */
	if (!(flag & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = vfs_getattr(path.mnt, path.dentry, stat);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}
EXPORT_SYMBOL(vfs_fstatat);
102
/* stat(): path lookup relative to CWD, following symlinks. */
int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);
108
/* lstat(): like vfs_stat() but stats the symlink itself, not its target. */
int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);
114
115
116 #ifdef __ARCH_WANT_OLD_STAT
117
118 /*
119 * For backward compatibility? Maybe this should be moved
120 * into arch/i386 instead?
121 */
/*
 * Copy a kstat into the legacy __old_kernel_stat userspace layout.
 * Returns -EOVERFLOW when a value does not fit the old narrow fields,
 * -EFAULT on copy failure, 0 on success.  Warns (rate-limited to the
 * first five callers) because no modern binary should hit this path.
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	/* zero first so struct padding never leaks kernel stack to userspace */
	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* detect truncation when the old st_ino field is narrower */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
158
159 SYSCALL_DEFINE2(stat, const char __user *, filename,
160 struct __old_kernel_stat __user *, statbuf)
161 {
162 struct kstat stat;
163 int error;
164
165 error = vfs_stat(filename, &stat);
166 if (error)
167 return error;
168
169 return cp_old_stat(&stat, statbuf);
170 }
171
172 SYSCALL_DEFINE2(lstat, const char __user *, filename,
173 struct __old_kernel_stat __user *, statbuf)
174 {
175 struct kstat stat;
176 int error;
177
178 error = vfs_lstat(filename, &stat);
179 if (error)
180 return error;
181
182 return cp_old_stat(&stat, statbuf);
183 }
184
185 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
186 {
187 struct kstat stat;
188 int error = vfs_fstat(fd, &stat);
189
190 if (!error)
191 error = cp_old_stat(&stat, statbuf);
192
193 return error;
194 }
195
196 #endif /* __ARCH_WANT_OLD_STAT */
197
/* choose_32_64(a,b): expands to @a on 32-bit kernels, @b on 64-bit ones. */
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

/* Word-size-appropriate dev_t range check and encoding for struct stat. */
#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

/* Arch may override; default clears the whole struct (incl. padding). */
#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
210
/*
 * Copy a kstat into the "new" (non-64) struct stat userspace layout.
 * Returns -EOVERFLOW when device numbers, inode number, link count or
 * (on 32-bit) file size do not fit, -EFAULT on copy failure, 0 on success.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* non-LFS interface: sizes beyond 2^31-1 need stat64 */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	/* clear padding so no kernel stack leaks to userspace */
	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* detect truncation when userspace st_ino is narrower than kstat's */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
247
248 SYSCALL_DEFINE2(newstat, const char __user *, filename,
249 struct stat __user *, statbuf)
250 {
251 struct kstat stat;
252 int error = vfs_stat(filename, &stat);
253
254 if (error)
255 return error;
256 return cp_new_stat(&stat, statbuf);
257 }
258
259 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
260 struct stat __user *, statbuf)
261 {
262 struct kstat stat;
263 int error;
264
265 error = vfs_lstat(filename, &stat);
266 if (error)
267 return error;
268
269 return cp_new_stat(&stat, statbuf);
270 }
271
272 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
273 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
274 struct stat __user *, statbuf, int, flag)
275 {
276 struct kstat stat;
277 int error;
278
279 error = vfs_fstatat(dfd, filename, &stat, flag);
280 if (error)
281 return error;
282 return cp_new_stat(&stat, statbuf);
283 }
284 #endif
285
286 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
287 {
288 struct kstat stat;
289 int error = vfs_fstat(fd, &stat);
290
291 if (!error)
292 error = cp_new_stat(&stat, statbuf);
293
294 return error;
295 }
296
/*
 * readlinkat(2): read the target of a symlink into a user buffer.
 * Returns the number of bytes placed in @buf (not NUL-terminated),
 * -EINVAL for a non-symlink or bad @bufsiz, -ENOENT for an empty path,
 * or another negative errno.  Retries once with LOOKUP_REVAL on -ESTALE.
 */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	/* LOOKUP_EMPTY lets the lookup succeed so we can report -ENOENT below */
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = path.dentry->d_inode;

		/* default errors if the inode has no ->readlink */
		error = empty ? -ENOENT : -EINVAL;
		if (inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				/* reading a link counts as an access */
				touch_atime(&path);
				error = inode->i_op->readlink(path.dentry,
							      buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
330
/* readlink(2): classic entry point — readlinkat() relative to the CWD. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
336
337
338 /* ---------- LFS-64 ----------- */
339 #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
340
/* Arch may override; default clears the whole struct (incl. padding). */
#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/*
 * Copy a kstat into the LFS struct stat64 userspace layout.
 * Returns -EOVERFLOW only when a value genuinely cannot be represented
 * (MIPS dev_t, narrow st_ino), -EFAULT on copy failure, 0 on success.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	/* clear padding so no kernel stack leaks to userspace */
	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
381
382 SYSCALL_DEFINE2(stat64, const char __user *, filename,
383 struct stat64 __user *, statbuf)
384 {
385 struct kstat stat;
386 int error = vfs_stat(filename, &stat);
387
388 if (!error)
389 error = cp_new_stat64(&stat, statbuf);
390
391 return error;
392 }
393
394 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
395 struct stat64 __user *, statbuf)
396 {
397 struct kstat stat;
398 int error = vfs_lstat(filename, &stat);
399
400 if (!error)
401 error = cp_new_stat64(&stat, statbuf);
402
403 return error;
404 }
405
406 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
407 {
408 struct kstat stat;
409 int error = vfs_fstat(fd, &stat);
410
411 if (!error)
412 error = cp_new_stat64(&stat, statbuf);
413
414 return error;
415 }
416
417 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
418 struct stat64 __user *, statbuf, int, flag)
419 {
420 struct kstat stat;
421 int error;
422
423 error = vfs_fstatat(dfd, filename, &stat, flag);
424 if (error)
425 return error;
426 return cp_new_stat64(&stat, statbuf);
427 }
428 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
429
430 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
431 void __inode_add_bytes(struct inode *inode, loff_t bytes)
432 {
433 inode->i_blocks += bytes >> 9;
434 bytes &= 511;
435 inode->i_bytes += bytes;
436 if (inode->i_bytes >= 512) {
437 inode->i_blocks++;
438 inode->i_bytes -= 512;
439 }
440 }
441
/* Locked wrapper around __inode_add_bytes(); takes inode->i_lock itself. */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
450
/*
 * Subtract @bytes from the inode's 512-byte block/byte accounting,
 * taking inode->i_lock itself.  Mirrors __inode_add_bytes() in reverse.
 */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	/* borrow a block before subtracting so i_bytes never wraps below 0 */
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);
465
466 loff_t inode_get_bytes(struct inode *inode)
467 {
468 loff_t ret;
469
470 spin_lock(&inode->i_lock);
471 ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
472 spin_unlock(&inode->i_lock);
473 return ret;
474 }
475
476 EXPORT_SYMBOL(inode_get_bytes);
477
/* Set the inode's byte count, splitting into 512-byte blocks + residue. */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);