/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->fp_file = file;
	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
		file->private_data = NULL;
	}
}

static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		status = dquot_initialize(inode);
		if (status)
			goto leave;
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

	file->f_mode |= FMODE_NOWAIT;

leave:
	return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}

static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}

int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec64 now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different from touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = current_time(inode);
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}
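
/*
 * Illustrative example (editor's sketch, not part of the original code):
 * assuming the atime_quantum mount option is at its usual default of 60
 * seconds, an inode whose atime is 45 seconds old on a plain mount is
 * not updated (45 <= 60 -> return 0), while one 61 seconds old is
 * (61 > 60 -> return 1).  On a relatime mount the quantum is ignored
 * and only the atime/mtime/ctime ordering above decides.
 */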

int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

int ocfs2_set_inode_size(handle_t *handle,
				struct inode *inode,
				struct buffer_head *fe_bh,
				u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is cluster aligned, there is nothing for
	 * ocfs2_zero_range_for_truncate to fill, so there is no need to
	 * CoW either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
	return status;
}
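
/*
 * Worked example (illustrative, assuming a 4K cluster size): for
 * offset = 10240, offset & (4096 - 1) = 2048, so the offset is not
 * cluster aligned and the cluster at cpos = 10240 >> 12 = 2 is looked
 * up; it is only CoWed when its extent carries OCFS2_EXT_REFCOUNTED.
 */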

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster that contains the offset if it is
	 * reflinked, since we will call ocfs2_zero_range_for_truncate later
	 * which will write "0" from offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

int ocfs2_truncate_file(struct inode *inode,
			       struct buffer_head *di_bh,
			       u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		unmap_mapping_range(inode->i_mapping,
				    new_i_size + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(inode->i_mapping, new_i_size);
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}

/*
 * Extend file allocation only here.
 * We'll update all the disk stuff, and oip->alloc_size.
 *
 * Expects everything to be locked, a transaction to be started, and
 * enough data / metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason_ret will always be set, even on error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}

static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	/*
	 * Unwritten extents only exist on file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* reserve a write to the file entry early on - so that if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
			status = 0;
		} else {
			BUG_ON(why != RESTART_TRANS);

			status = ocfs2_allocate_extend_trans(handle, 1);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     OCFS2_I(inode)->ip_clusters,
	     (unsigned long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
						struct buffer_head *di_bh)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_file_inode(handle, inode);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		mlog_errno(ret);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit_trans;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_SIZE - 1);
	zero_to = abs_to & (PAGE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_SIZE;
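	/*
	 * Example (editor's illustration, assuming 4K pages): abs_from =
	 * 5120 and abs_to = 8192 give index = 1, zero_from = 1024, and
	 * zero_to = 8192 & 4095 = 0, which the check above turns into
	 * PAGE_SIZE, i.e. zero through the end of the page.
	 */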

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + i_blocksize(inode);

		/*
		 * block_start is block-aligned.  Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}


		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	/*
	 * fs-writeback will release, without the page lock, dirty pages
	 * whose offset is beyond the inode size; the release happens in
	 * block_write_full_page().
	 */
	i_size_write(inode, abs_to);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = current_time(inode);
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	di->i_mtime_nsec = di->i_ctime_nsec;
	if (handle) {
		ocfs2_journal_dirty(handle, di_bh);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}

out_unlock:
	unlock_page(page);
	put_page(page);
out_commit_trans:
	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

/*
 * Find the next range to zero.  We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache.  We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed.  range_start and range_end return the next zeroing
 * range.  A subsequent call should pass the previous range_end as its
 * zero_start.  If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}
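
/*
 * Usage sketch (editor's illustration, mirroring ocfs2_zero_extend()
 * below): the caller loops, feeding the previous range_end back in as
 * zero_start, until range_end comes back as 0:
 *
 *	u64 start = i_size_read(inode), rs, re;
 *	while (start < zero_to) {
 *		if (ocfs2_zero_extend_get_range(inode, di_bh, start,
 *						zero_to, &rs, &re) || !re)
 *			break;
 *		... zero the byte range [rs, re) ...
 *		start = re;
 *	}
 */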

/*
 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end, struct buffer_head *di_bh)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end, di_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

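	/*
	 * Example (editor's illustration, assuming 4K clusters): extending
	 * to new_i_size = 20480 needs ocfs2_clusters_for_bytes() = 5
	 * clusters; if the inode already holds ip_clusters = 3, only
	 * clusters_to_add = 2 are requested, and 0 if it holds 5 or more.
	 */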
	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
					      clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here.  We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inode's
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	int inode_locked = 0;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;
	int had_lock;
	struct ocfs2_lock_holder oh;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    from_kuid(&init_user_ns, attr->ia_uid),
			    from_kgid(&init_user_ns, attr->ia_gid));

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = setattr_prepare(dentry, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr)) {
		status = dquot_initialize(inode);
		if (status)
			return status;
	}
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		/*
		 * Here we should wait for dio to finish before taking the
		 * inode lock, to avoid a deadlock between ocfs2_setattr()
		 * and ocfs2_dio_end_io_write()
		 */
		inode_dio_wait(inode);

		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
	if (had_lock < 0) {
		status = had_lock;
		goto bail_unlock_rw;
	} else if (had_lock) {
		/*
		 * As far as we know, ocfs2_setattr() could only be the first
		 * VFS entry point in the call chain of recursive cluster
		 * locking issue.
		 *
		 * For instance:
		 * chmod_common()
		 *  notify_change()
		 *   ocfs2_setattr()
		 *    posix_acl_chmod()
		 *     ocfs2_iop_get_acl()
		 *
		 * But, we're not 100% sure if it's always true, because the
		 * ordering of the VFS entry points in the call chain is out
		 * of our control. So, we'd better dump the stack here to
		 * catch the other cases of recursive locking.
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}
	inode_locked = 1;

	if (size_change) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		if (i_size_read(inode) >= attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
			if (IS_ERR(transfer_to[USRQUOTA])) {
				status = PTR_ERR(transfer_to[USRQUOTA]);
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
			if (IS_ERR(transfer_to[GRPQUOTA])) {
				status = PTR_ERR(transfer_to[GRPQUOTA]);
				goto bail_unlock;
			}
		}
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
	if (status && inode_locked) {
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		inode_locked = 0;
	}
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode, bh);
		if (status < 0)
			mlog_errno(status);
	}
	if (inode_locked)
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

	brelse(bh);
	return status;
}

int ocfs2_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct super_block *sb = path->dentry->d_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(path->dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);
	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		stat->blocks += (stat->size + 511)>>9;

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
	int ret, had_lock;
	struct ocfs2_lock_holder oh;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0) {
		ret = had_lock;
		goto out;
	} else if (had_lock) {
		/* See comments in ocfs2_setattr() for details.
		 * The call chain of this case could be:
		 * do_sys_open()
		 *  may_open()
		 *   inode_permission()
		 *    ocfs2_permission()
		 *     ocfs2_iop_get_acl()
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}

	ret = generic_permission(inode, mask);

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
	return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:

	brelse(di_bh);
	return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}
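
/*
 * Worked example (editor's illustration, assuming 64K clusters):
 * byte_start = 70000 and byte_len = 200000 give start = 131072
 * (byte_start rounded up to a cluster boundary) and end = 262144
 * (byte_start + byte_len = 270000 rounded down), so only the two fully
 * covered clusters are dropped from the page cache; the partial
 * clusters at either end are left for the zeroing code.
 */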

/*
 * Zero out partial blocks of one cluster.
 *
 * start: file offset where the zeroing starts; it will be rounded up to
 *        a block boundary.
 * len: it will be trimmed to the end of the current cluster if
 *      "start + len" goes past it.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
					u64 start, u64 len)
{
	int ret;
	u64 start_block, end_block, nr_blocks;
	u64 p_block, offset;
	u32 cluster, p_cluster, nr_clusters;
	struct super_block *sb = inode->i_sb;
	u64 end = ocfs2_align_bytes_to_clusters(sb, start);

	if (start + len < end)
		end = start + len;

	start_block = ocfs2_blocks_for_bytes(sb, start);
	end_block = ocfs2_blocks_for_bytes(sb, end);
	nr_blocks = end_block - start_block;
	if (!nr_blocks)
		return 0;

	cluster = ocfs2_bytes_to_clusters(sb, start);
	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
				&nr_clusters, NULL);
	if (ret)
		return ret;
	if (!p_cluster)
		return 0;

	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}
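
/*
 * Worked example (editor's illustration, assuming 4K clusters and
 * 512-byte blocks): start = 6144 and len = 8192 give end = 8192 (start
 * aligned up to its cluster boundary, and start + len reaches past it),
 * so logical blocks 12..15 are zeroed: just the tail of the partial
 * cluster, nothing beyond it.
 */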

static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend = 0;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;
	loff_t isize = i_size_read(inode);

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	/* No page cache for EOF blocks, issue zero out to disk. */
	if (end > isize) {
		/*
		 * Zero out the eof blocks in the last cluster starting
		 * from "isize", even when "start" > "isize", because
		 * zeroing exactly at "start" is complicated: "start" may
		 * not be block aligned, which would require a buffer
		 * write, and buffer writes past eof are not supported.
		 */
		ret = ocfs2_zeroout_partial_cluster(inode, isize,
					end - isize);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (start >= isize)
			goto out;
		end = isize;
	}
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip this
	 * first call to ocfs2_zero_range_for_truncate() and move on to the
	 * next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));
		if (tmpend > end)
			tmpend = end;

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);
		if (ret)
			mlog_errno(ret);
	}

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}

/*
 * Helper to calculate the punching pos and length in one run; we handle
 * the following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * skip hole if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * Either case means hole punching is complete.
		 */
		ret = 1;
	}

	*done = ret;
}
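
/*
 * Worked example (editor's illustration): punching with trunc_start = 15
 * against a record with e_cpos = 10 covering 20 clusters gives
 * range = 30, so the partial case fires: trunc_cpos = 15, trunc_len =
 * trunc_end - 15 (with trunc_end first clamped to 30), coff = 5, and
 * blkno points 5 clusters into the record.  trunc_end then becomes 15,
 * which stops the loop in ocfs2_remove_inode_range() once it reaches
 * trunc_start.
 */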
1768 
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW two clusters which might be
	 * partially zeroed later, if the hole's start and end offsets
	 * fall within one cluster (i.e. they are not exactly aligned to
	 * the cluster size).
	 */

	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to the previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block;
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for the previous extent
			 * block is always one cluster less than the
			 * actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

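/*
 * Informal summary of the loop above: the leaf containing trunc_end is
 * looked up, ocfs2_find_rec() picks the rightmost record starting left
 * of trunc_end, and ocfs2_calc_trunc_pos() turns that record into a
 * (trunc_cpos, trunc_len) range for ocfs2_remove_btree_range().  Each
 * pass moves trunc_end leftward, so the punch proceeds right-to-left
 * until it meets trunc_start or runs out of records.
 */
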
/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}

	orig_isize = i_size_read(inode);
	/* zeroout eof blocks in the cluster. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
					size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	inode_unlock(inode);
	return ret;
}

int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret;

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
	mnt_drop_write_file(file);
	return ret;
}

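/*
 * Illustrative userspace sketch (not part of this file): punching a
 * hole through the OCFS2-specific ioctl.  This assumes the
 * OCFS2_IOC_UNRESVSP64 definition and struct ocfs2_space_resv have
 * been made available to the program (e.g. copied from ocfs2_fs.h),
 * and the path is hypothetical:
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,		// SEEK_SET
 *		.l_start  = 4096,	// byte offset of the hole
 *		.l_len    = 65536,	// length in bytes, must be > 0
 *	};
 *	int fd = open("/mnt/ocfs2/file", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, OCFS2_IOC_UNRESVSP64, &sr) < 0)
 *		perror("OCFS2_IOC_UNRESVSP64");
 */
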
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;
	int ret = 0;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE) {
		change_size = 0;
	} else {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			return ret;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}

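/*
 * The same paths are reachable through the generic fallocate(2)
 * interface; a minimal userspace sketch (hypothetical path):
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("/mnt/ocfs2/file", O_RDWR);
 *	// Preallocate 1 MiB, extending i_size (RESVSP-style, change_size=1).
 *	fallocate(fd, 0, 0, 1 << 20);
 *	// Punch a hole without changing i_size (UNRESVSP-style).
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 */
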
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !ocfs2_is_refcount_inode(inode) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
	int blockmask = inode->i_sb->s_blocksize - 1;
	loff_t final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}

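/*
 * Worked example (illustrative): with a 4 KiB block size, blockmask is
 * 0xfff.  A write of count = 4096 at pos = 8192 leaves both pos and
 * final_size (12288) with zero low bits, so it is aligned; the same
 * count at pos = 8200 is unaligned and, for AIO + O_DIRECT, gets
 * downgraded to synchronous I/O by the write path below.
 */
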
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
					    struct buffer_head **di_bh,
					    int meta_level,
					    int write_sem,
					    int wait)
{
	int ret = 0;

	if (wait)
		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
	else
		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
	if (ret < 0)
		goto out;

	if (wait) {
		if (write_sem)
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
		else
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
	} else {
		if (write_sem)
			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
		else
			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);

		if (!ret) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	return ret;

out_unlock:
	brelse(*di_bh);
	*di_bh = NULL;
	ocfs2_inode_unlock(inode, meta_level);
out:
	return ret;
}

static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
					       struct buffer_head **di_bh,
					       int meta_level,
					       int write_sem)
{
	if (write_sem)
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	else
		up_read(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(*di_bh);
	*di_bh = NULL;

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);
}

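/*
 * The two helpers above are always used as a pair: the lock side takes
 * the cluster inode lock at meta_level and then ip_alloc_sem (read or
 * write depending on write_sem), backing out with -EAGAIN instead of
 * blocking when wait is zero (the IOCB_NOWAIT case); the unlock side
 * releases them in reverse order and drops the di_bh reference.
 */
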
static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t pos, size_t count, int wait)
{
	int ret = 0, meta_level = 0, overwrite_io = 0;
	int write_sem = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct buffer_head *di_bh = NULL;
	loff_t end;
	u32 cpos;
	u32 clusters;

	/*
	 * We start with a read level meta lock and only jump to an
	 * exclusive one if we need to make modifications here.
	 */
	for (;;) {
		ret = ocfs2_inode_lock_for_extent_tree(inode,
						       &di_bh,
						       meta_level,
						       write_sem,
						       wait);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		/*
		 * Check if the IO will overwrite allocated blocks in
		 * case the IOCB_NOWAIT flag is set.
		 */
		if (!wait && !overwrite_io) {
			overwrite_io = 1;

			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write). */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock_for_extent_tree(inode,
								   &di_bh,
								   meta_level,
								   write_sem);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		end = pos + count;

		ret = ocfs2_check_range_for_refcount(inode, pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock_for_extent_tree(inode,
							   &di_bh,
							   meta_level,
							   write_sem);
			meta_level = 1;
			write_sem = 1;
			ret = ocfs2_inode_lock_for_extent_tree(inode,
							       &di_bh,
							       meta_level,
							       write_sem,
							       wait);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out;
			}

			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
			clusters =
				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
		}

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out_unlock;
		}

		break;
	}

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    pos, count, wait);

	ocfs2_inode_unlock_for_extent_tree(inode,
					   &di_bh,
					   meta_level,
					   write_sem);

out:
	return ret;
}

static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
				    struct iov_iter *from)
{
	int rw_level;
	ssize_t written = 0;
	ssize_t ret;
	size_t count = iov_iter_count(from);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	void *saved_ki_complete = NULL;
	int append_write = ((iocb->ki_pos + count) >=
			i_size_read(inode) ? 1 : 0);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)from->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	if (count == 0)
		return 0;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else
		inode_lock(inode);

	/*
	 * Concurrent O_DIRECT writes are allowed with the mount option
	 * "coherency=buffered".
	 * For an append write, we must take rw EX.
	 */
	rw_level = (!direct_io || full_coherency || append_write);

	if (nowait)
		ret = ocfs2_try_rw_lock(inode, rw_level);
	else
		ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out_mutex;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		if (nowait)
			ret = ocfs2_try_inode_lock(inode, NULL, 1);
		else
			ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0) {
		if (ret)
			mlog_errno(ret);
		goto out;
	}
	count = ret;

	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out;
	}

	if (direct_io && !is_sync_kiocb(iocb) &&
	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
		/*
		 * Make it synchronous I/O if it's an unaligned AIO.
		 */
		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
	}

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	written = __generic_file_write_iter(iocb, from);
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(written == -EIOCBQUEUED && !direct_io);

	/*
	 * Deep in __generic_file_write_iter()->ocfs2_direct_IO we pass
	 * in an ocfs2_dio_end_io function pointer which is called when
	 * O_DIRECT I/O completes so that it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and
	 * others that don't, so we don't have to unlock the rw_lock if
	 * either an async dio is going to do it in the future or an
	 * end_io after an error has already done it.
	 */
	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
	}

	if (unlikely(written <= 0))
		goto out;

	if (((file->f_flags & O_DSYNC) && !direct_io) ||
	    IS_SYNC(inode)) {
		ret = filemap_fdatawrite_range(file->f_mapping,
					       iocb->ki_pos - written,
					       iocb->ki_pos - 1);
		if (ret < 0)
			written = ret;

		if (!ret) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping,
						      iocb->ki_pos - written,
						      iocb->ki_pos - 1);
	}

out:
	if (saved_ki_complete)
		xchg(&iocb->ki_complete, saved_ki_complete);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_mutex:
	inode_unlock(inode);

	if (written)
		ret = written;
	return ret;
}

static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
				   struct iov_iter *to)
{
	int ret = 0, rw_level = -1, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name,
			to->nr_segs);	/* GRRRRR */

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	/*
	 * Buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (direct_io) {
		if (nowait)
			ret = ocfs2_try_rw_lock(inode, 0);
		else
			ret = ocfs2_rw_lock(inode, 0);

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This gives the checks down in
	 * generic_file_read_iter() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
				     !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_read_iter(iocb, to);
	trace_generic_file_read_iter_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !direct_io);

	/* see ocfs2_file_write_iter */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
	}

bail:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}

/* See generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	inode_lock(inode);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		/* SEEK_END requires the OCFS2 inode lock for the file
		 * because it references the file's size.
		 */
		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		offset += i_size_read(inode);
		ocfs2_inode_unlock(inode, 0);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		if (ret)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	if (ret)
		return ret;
	return offset;
}

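/*
 * SEEK_HOLE/SEEK_DATA are usable from userspace in the usual way; an
 * illustrative sketch (hypothetical file name):
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/ocfs2/sparse", O_RDONLY);
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);	 // first hole at/after 0
 *	off_t data = lseek(fd, hole, SEEK_DATA); // next data after it
 *
 * Per lseek(2), both fail with errno == ENXIO when the starting offset
 * is at or beyond the end of the file.
 */
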
static int ocfs2_file_clone_range(struct file *file_in,
				  loff_t pos_in,
				  struct file *file_out,
				  loff_t pos_out,
				  u64 len)
{
	return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
					 len, false);
}

static int ocfs2_file_dedupe_range(struct file *file_in,
				   loff_t pos_in,
				   struct file *file_out,
				   loff_t pos_out,
				   u64 len)
{
	return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
					  len, true);
}

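/*
 * These two hooks back the generic clone/dedupe ioctls.  A userspace
 * sketch using the VFS-level FICLONE ioctl (from <linux/fs.h>),
 * assuming both hypothetical files live on the same OCFS2 mount:
 *
 *	int src = open("/mnt/ocfs2/a", O_RDONLY);
 *	int dst = open("/mnt/ocfs2/b", O_RDWR | O_CREAT, 0644);
 *	if (ioctl(dst, FICLONE, src) < 0)
 *		perror("FICLONE");
 */
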
const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.listxattr	= ocfs2_listxattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.clone_file_range = ocfs2_file_clone_range,
	.dedupe_file_range = ocfs2_file_dedupe_range,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.clone_file_range = ocfs2_file_clone_range,
	.dedupe_file_range = ocfs2_file_dedupe_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};