FreeBSD/Linux Kernel Cross Reference
sys/fs/file_table.c
1 /*
2 * linux/fs/file_table.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8 #include <linux/string.h>
9 #include <linux/slab.h>
10 #include <linux/file.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/smp_lock.h>
14 #include <linux/iobuf.h>
15
/* sysctl tunables... (nr_files / nr_free_files / max_files —
 * NOTE(review): the {0, 0, NR_FILE} initializer is assumed to match
 * that field order; confirm against struct files_stat_struct) */
struct files_stat_struct files_stat = {0, 0, NR_FILE};

/* Here the new files go: live files not yet moved to a superblock list */
static LIST_HEAD(anon_list);
/* And here the free ones sit, waiting for get_empty_filp() to recycle */
static LIST_HEAD(free_list);
/* public *and* exported. Not pretty!  Protects the lists above and the
 * files_stat counters (taken via file_list_lock()/file_list_unlock()). */
spinlock_t files_lock = SPIN_LOCK_UNLOCKED;
25
26 /* Find an unused file structure and return a pointer to it.
27 * Returns NULL, if there are no more free file structures or
28 * we run out of memory.
29 *
30 * SMP-safe.
31 */
struct file * get_empty_filp(void)
{
	/* Highest max_files value we have already complained about, so the
	 * "file-max limit reached" message prints once per limit value. */
	static int old_max = 0;
	struct file * f;

	file_list_lock();
	if (files_stat.nr_free_files > NR_RESERVED_FILES) {
	used_one:
		/* Recycle the head of the free list (refilled by fput()
		 * and put_filp()). */
		f = list_entry(free_list.next, struct file, f_list);
		list_del(&f->f_list);
		files_stat.nr_free_files--;
	new_one:
		/* Common initialization for recycled and freshly allocated
		 * files alike; runs with files_lock held. */
		memset(f, 0, sizeof(*f));
		atomic_set(&f->f_count,1);
		f->f_version = ++event;
		f->f_uid = current->fsuid;
		f->f_gid = current->fsgid;
		/* New files start on the anonymous list until file_move()
		 * transfers them to a superblock's s_files list. */
		list_add(&f->f_list, &anon_list);
		file_list_unlock();
		return f;
	}
	/*
	 * Use a reserved one if we're the superuser
	 */
	if (files_stat.nr_free_files && !current->euid)
		goto used_one;
	/*
	 * Allocate a new one if we're below the limit.
	 */
	if (files_stat.nr_files < files_stat.max_files) {
		/* Drop the spinlock across the allocation: SLAB_KERNEL
		 * allocations may sleep.  NOTE(review): the limit is not
		 * re-checked after relocking, so concurrent callers can
		 * push nr_files slightly past max_files — presumably a
		 * tolerated race. */
		file_list_unlock();
		f = kmem_cache_alloc(filp_cachep, SLAB_KERNEL);
		file_list_lock();
		if (f) {
			files_stat.nr_files++;
			goto new_one;
		}
		/* Big problems... */
		printk(KERN_WARNING "VFS: filp allocation failed\n");

	} else if (files_stat.max_files > old_max) {
		/* Warn once per (possibly sysctl-raised) limit value. */
		printk(KERN_INFO "VFS: file-max limit %d reached\n", files_stat.max_files);
		old_max = files_stat.max_files;
	}
	file_list_unlock();
	return NULL;
}
79
80 /*
81 * Clear and initialize a (private) struct file for the given dentry,
82 * and call the open function (if any). The caller must verify that
83 * inode->i_fop is not NULL.
84 */
85 int init_private_file(struct file *filp, struct dentry *dentry, int mode)
86 {
87 memset(filp, 0, sizeof(*filp));
88 filp->f_mode = mode;
89 atomic_set(&filp->f_count, 1);
90 filp->f_dentry = dentry;
91 filp->f_uid = current->fsuid;
92 filp->f_gid = current->fsgid;
93 filp->f_op = dentry->d_inode->i_fop;
94 if (filp->f_op->open)
95 return filp->f_op->open(dentry->d_inode, filp);
96 else
97 return 0;
98 }
99
/*
 * Drop a reference to an open file.  When the last reference goes away
 * the file is torn down (flock locks removed, ->release() called, write
 * access returned) and its struct file is parked on the free list for
 * get_empty_filp() to recycle.
 */
void fput(struct file * file)
{
	/* Snapshot these before the count drops; they are only used on
	 * the final-reference path below. */
	struct dentry * dentry = file->f_dentry;
	struct vfsmount * mnt = file->f_vfsmnt;
	struct inode * inode = dentry->d_inode;

	if (atomic_dec_and_test(&file->f_count)) {
		/* Remove any flock-style locks still held on this file. */
		locks_remove_flock(file);

		/* Free the kiobuf attached for raw I/O, if one exists. */
		if (file->f_iobuf)
			free_kiovec(1, &file->f_iobuf);

		if (file->f_op && file->f_op->release)
			file->f_op->release(inode, file);
		fops_put(file->f_op);
		if (file->f_mode & FMODE_WRITE)
			put_write_access(inode);
		/* Detach and move to the free list under files_lock. */
		file_list_lock();
		file->f_dentry = NULL;
		file->f_vfsmnt = NULL;
		list_del(&file->f_list);
		list_add(&file->f_list, &free_list);
		files_stat.nr_free_files++;
		file_list_unlock();
		/* dput/mntput happen after the spinlock is released —
		 * they can do real work and may block. */
		dput(dentry);
		mntput(mnt);
	}
}
128
129 struct file * fget(unsigned int fd)
130 {
131 struct file * file;
132 struct files_struct *files = current->files;
133
134 read_lock(&files->file_lock);
135 file = fcheck(fd);
136 if (file)
137 get_file(file);
138 read_unlock(&files->file_lock);
139 return file;
140 }
141
142 /* Here. put_filp() is SMP-safe now. */
143
144 void put_filp(struct file *file)
145 {
146 if(atomic_dec_and_test(&file->f_count)) {
147 file_list_lock();
148 list_del(&file->f_list);
149 list_add(&file->f_list, &free_list);
150 files_stat.nr_free_files++;
151 file_list_unlock();
152 }
153 }
154
155 void file_move(struct file *file, struct list_head *list)
156 {
157 if (!list)
158 return;
159 file_list_lock();
160 list_del(&file->f_list);
161 list_add(&file->f_list, list);
162 file_list_unlock();
163 }
164
165 int fs_may_remount_ro(struct super_block *sb)
166 {
167 struct list_head *p;
168
169 /* Check that no files are currently opened for writing. */
170 file_list_lock();
171 for (p = sb->s_files.next; p != &sb->s_files; p = p->next) {
172 struct file *file = list_entry(p, struct file, f_list);
173 struct inode *inode = file->f_dentry->d_inode;
174
175 /* File with pending delete? */
176 if (inode->i_nlink == 0)
177 goto too_bad;
178
179 /* Writable file? */
180 if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
181 goto too_bad;
182 }
183 file_list_unlock();
184 return 1; /* Tis' cool bro. */
185 too_bad:
186 file_list_unlock();
187 return 0;
188 }
189
190 void __init files_init(unsigned long mempages)
191 {
192 int n;
193 /* One file with associated inode and dcache is very roughly 1K.
194 * Per default don't use more than 10% of our memory for files.
195 */
196
197 n = (mempages * (PAGE_SIZE / 1024)) / 10;
198 files_stat.max_files = n;
199 if (files_stat.max_files < NR_FILE)
200 files_stat.max_files = NR_FILE;
201 }
202
Cache object: 8534df3aebb3d0adfe13b7213dec22b8
|