/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = setattr_prepare(dentry, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

	return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 *  the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret)
		goto error;

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

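	/* i_blocks is counted in 512-byte units */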
	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		inode->i_link = f->target;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
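		/* fall through to init_special_inode() below */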

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	iget_failed(inode);
	return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!sb_rdonly(sb)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & SB_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= SB_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but round down to a multiple of 64. And keep to
	 * sensible bounds.
	 */
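	/* For example, a 64MiB medium gives 64 * 2 = 128, which is already a multiple of 64. */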

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* Do not support MLC NAND */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!sb_rdonly(sb))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	kvfree(c->blocks);
	jffs2_clear_xattr_subsystem(c);
	jffs2_sum_exit(c);
 out_inohash:
	kfree(c->inocache_list);
 out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
			     (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	put_page(pg);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}