1 /*
2  *   Copyright (C) International Business Machines Corp., 2000-2005
3  *   Portions Copyright (C) Christoph Hellwig, 2001-2002
4  *
5  *   This program is free software;  you can redistribute it and/or modify
6  *   it under the terms of the GNU General Public License as published by
7  *   the Free Software Foundation; either version 2 of the License, or
8  *   (at your option) any later version.
9  *
10  *   This program is distributed in the hope that it will be useful,
11  *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
13  *   the GNU General Public License for more details.
14  *
15  *   You should have received a copy of the GNU General Public License
16  *   along with this program;  if not, write to the Free Software
17  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18  */
19 
20 /*
21  *	jfs_txnmgr.c: transaction manager
22  *
23  * notes:
24  * transaction starts with txBegin() and ends with txCommit()
25  * or txAbort().
26  *
27  * tlock is acquired at the time of update;
28  * (obviate scan at commit time for xtree and dtree)
29  * tlock and mp point to each other;
30  * (no hashlist for mp -> tlock).
31  *
32  * special cases:
33  * tlock on in-memory inode:
34  * in-place tlock in the in-memory inode itself;
35  * converted to page lock by iWrite() at commit time.
36  *
37  * tlock during write()/mmap() under anonymous transaction (tid = 0):
38  * transferred (?) to transaction at commit time.
39  *
40  * use the page itself to update allocation maps
41  * (obviate intermediate replication of allocation/deallocation data)
42  * hold on to mp+lock thru update of maps
43  */
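
/*
 * Illustrative caller sequence (a sketch only; real callers add locking,
 * error handling, and often commit several inodes at once):
 *
 *	tid = txBegin(sb, 0);
 *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
 *	... modify the metapage under the tlock ...
 *	rc = txCommit(tid, 1, &ip, 0);	(or txAbort(tid, 1) on error)
 *	txEnd(tid);
 */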
44 
45 #include <linux/fs.h>
46 #include <linux/vmalloc.h>
47 #include <linux/completion.h>
48 #include <linux/freezer.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/kthread.h>
52 #include <linux/seq_file.h>
53 #include "jfs_incore.h"
54 #include "jfs_inode.h"
55 #include "jfs_filsys.h"
56 #include "jfs_metapage.h"
57 #include "jfs_dinode.h"
58 #include "jfs_imap.h"
59 #include "jfs_dmap.h"
60 #include "jfs_superblock.h"
61 #include "jfs_debug.h"
62 
63 /*
64  *	transaction management structures
65  */
66 static struct {
67 	int freetid;		/* index of a free tid structure */
68 	int freelock;		/* index first free lock word */
69 	wait_queue_head_t freewait;	/* eventlist of free tblock */
70 	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
71 	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
72 	int tlocksInUse;	/* Number of tlocks in use */
73 	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
74 /*	struct tblock *sync_queue; * Transactions waiting for data sync */
75 	struct list_head unlock_queue;	/* Txns waiting to be released */
76 	struct list_head anon_list;	/* inodes having anonymous txns */
77 	struct list_head anon_list2;	/* inodes having anonymous txns
78 					   that couldn't be sync'ed */
79 } TxAnchor;
80 
81 int jfs_tlocks_low;		/* Indicates low number of available tlocks */
82 
83 #ifdef CONFIG_JFS_STATISTICS
84 static struct {
85 	uint txBegin;
86 	uint txBegin_barrier;
87 	uint txBegin_lockslow;
88 	uint txBegin_freetid;
89 	uint txBeginAnon;
90 	uint txBeginAnon_barrier;
91 	uint txBeginAnon_lockslow;
92 	uint txLockAlloc;
93 	uint txLockAlloc_freelock;
94 } TxStat;
95 #endif
96 
97 static int nTxBlock = -1;	/* number of transaction blocks */
98 module_param(nTxBlock, int, 0);
99 MODULE_PARM_DESC(nTxBlock,
100 		 "Number of transaction blocks (max:65536)");
101 
102 static int nTxLock = -1;	/* number of transaction locks */
103 module_param(nTxLock, int, 0);
104 MODULE_PARM_DESC(nTxLock,
105 		 "Number of transaction locks (max:65536)");
106 
107 struct tblock *TxBlock;	/* transaction block table */
108 static int TxLockLWM;	/* Low water mark for number of txLocks used */
109 static int TxLockHWM;	/* High water mark for number of txLocks used */
110 static int TxLockVHWM;	/* Very High water mark */
111 struct tlock *TxLock;	/* transaction lock table */
112 
113 /*
114  *	transaction management lock
115  */
116 static DEFINE_SPINLOCK(jfsTxnLock);
117 
118 #define TXN_LOCK()		spin_lock(&jfsTxnLock)
119 #define TXN_UNLOCK()		spin_unlock(&jfsTxnLock)
120 
121 #define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
122 #define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
123 #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
124 
125 static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
126 static int jfs_commit_thread_waking;
127 
128 /*
129  * Retry logic exists outside these macros to protect against spurious wakeups.
130  */
131 static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
132 {
133 	DECLARE_WAITQUEUE(wait, current);
134 
135 	add_wait_queue(event, &wait);
136 	set_current_state(TASK_UNINTERRUPTIBLE);
137 	TXN_UNLOCK();
138 	io_schedule();
139 	remove_wait_queue(event, &wait);
140 }
141 
142 #define TXN_SLEEP(event)\
143 {\
144 	TXN_SLEEP_DROP_LOCK(event);\
145 	TXN_LOCK();\
146 }
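
/*
 * Canonical retry pattern (see txLockAlloc() below): the caller re-tests
 * its condition after every wakeup to guard against spurious wakeups, e.g.
 *
 *	while (!(lid = TxAnchor.freelock))
 *		TXN_SLEEP(&TxAnchor.freelockwait);
 */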
147 
148 #define TXN_WAKEUP(event) wake_up_all(event)
149 
150 /*
151  *	statistics
152  */
153 static struct {
154 	tid_t maxtid;		/* 4: biggest tid ever used */
155 	lid_t maxlid;		/* 4: biggest lid ever used */
156 	int ntid;		/* 4: # of transactions performed */
157 	int nlid;		/* 4: # of tlocks acquired */
158 	int waitlock;		/* 4: # of tlock wait */
159 } stattx;
160 
161 /*
162  * forward references
163  */
164 static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
165 		struct tlock * tlck, struct commit * cd);
166 static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
167 		struct tlock * tlck);
168 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
169 		struct tlock * tlck);
170 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
171 		struct tlock * tlck);
172 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
173 		struct tblock * tblk);
174 static void txForce(struct tblock * tblk);
175 static int txLog(struct jfs_log * log, struct tblock * tblk,
176 		struct commit * cd);
177 static void txUpdateMap(struct tblock * tblk);
178 static void txRelease(struct tblock * tblk);
179 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
180 	   struct tlock * tlck);
181 static void LogSyncRelease(struct metapage * mp);
182 
183 /*
184  *		transaction block/lock management
185  *		---------------------------------
186  */
187 
188 /*
189  * Get a transaction lock from the free list.  If the number in use is
190  * greater than the high water mark, wake up the sync daemon.  This should
191  * free some anonymous transaction locks.  (TXN_LOCK must be held.)
192  */
193 static lid_t txLockAlloc(void)
194 {
195 	lid_t lid;
196 
197 	INCREMENT(TxStat.txLockAlloc);
198 	if (!TxAnchor.freelock) {
199 		INCREMENT(TxStat.txLockAlloc_freelock);
200 	}
201 
202 	while (!(lid = TxAnchor.freelock))
203 		TXN_SLEEP(&TxAnchor.freelockwait);
204 	TxAnchor.freelock = TxLock[lid].next;
205 	HIGHWATERMARK(stattx.maxlid, lid);
206 	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
207 		jfs_info("txLockAlloc tlocks low");
208 		jfs_tlocks_low = 1;
209 		wake_up_process(jfsSyncThread);
210 	}
211 
212 	return lid;
213 }
214 
215 static void txLockFree(lid_t lid)
216 {
217 	TxLock[lid].tid = 0;
218 	TxLock[lid].next = TxAnchor.freelock;
219 	TxAnchor.freelock = lid;
220 	TxAnchor.tlocksInUse--;
221 	if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
222 		jfs_info("txLockFree jfs_tlocks_low no more");
223 		jfs_tlocks_low = 0;
224 		TXN_WAKEUP(&TxAnchor.lowlockwait);
225 	}
226 	TXN_WAKEUP(&TxAnchor.freelockwait);
227 }
228 
229 /*
230  * NAME:	txInit()
231  *
232  * FUNCTION:	initialize transaction management structures
233  *
234  * RETURN:
235  *
236  * serialization: single thread at jfs_init()
237  */
238 int txInit(void)
239 {
240 	int k, size;
241 	struct sysinfo si;
242 
243 	/* Set defaults for nTxLock and nTxBlock if unset */
244 
245 	if (nTxLock == -1) {
246 		if (nTxBlock == -1) {
247 			/* Base default on memory size */
248 			si_meminfo(&si);
249 			if (si.totalram > (256 * 1024)) /* 1 GB */
250 				nTxLock = 64 * 1024;
251 			else
252 				nTxLock = si.totalram >> 2;
253 		} else if (nTxBlock > (8 * 1024))
254 			nTxLock = 64 * 1024;
255 		else
256 			nTxLock = nTxBlock << 3;
257 	}
258 	if (nTxBlock == -1)
259 		nTxBlock = nTxLock >> 3;
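
	/*
	 * Worked example of the defaults (assuming 4 KiB pages): with
	 * 512 MiB of RAM, si.totalram is 131072 pages, so nTxLock
	 * defaults to 131072 >> 2 = 32768 and nTxBlock to
	 * 32768 >> 3 = 4096.
	 */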
260 
261 	/* Verify tunable parameters */
262 	if (nTxBlock < 16)
263 		nTxBlock = 16;	/* No one should set it this low */
264 	if (nTxBlock > 65536)
265 		nTxBlock = 65536;
266 	if (nTxLock < 256)
267 		nTxLock = 256;	/* No one should set it this low */
268 	if (nTxLock > 65536)
269 		nTxLock = 65536;
270 
271 	printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
272 	       nTxBlock, nTxLock);
273 	/*
274 	 * initialize transaction block (tblock) table
275 	 *
276 	 * transaction id (tid) = tblock index
277 	 * tid = 0 is reserved.
278 	 */
279 	TxLockLWM = (nTxLock * 4) / 10;
280 	TxLockHWM = (nTxLock * 7) / 10;
281 	TxLockVHWM = (nTxLock * 8) / 10;
282 
283 	size = sizeof(struct tblock) * nTxBlock;
284 	TxBlock = vmalloc(size);
285 	if (TxBlock == NULL)
286 		return -ENOMEM;
287 
288 	for (k = 1; k < nTxBlock - 1; k++) {
289 		TxBlock[k].next = k + 1;
290 		init_waitqueue_head(&TxBlock[k].gcwait);
291 		init_waitqueue_head(&TxBlock[k].waitor);
292 	}
293 	TxBlock[k].next = 0;
294 	init_waitqueue_head(&TxBlock[k].gcwait);
295 	init_waitqueue_head(&TxBlock[k].waitor);
296 
297 	TxAnchor.freetid = 1;
298 	init_waitqueue_head(&TxAnchor.freewait);
299 
300 	stattx.maxtid = 1;	/* statistics */
301 
302 	/*
303 	 * initialize transaction lock (tlock) table
304 	 *
305 	 * transaction lock id = tlock index
306 	 * tlock id = 0 is reserved.
307 	 */
308 	size = sizeof(struct tlock) * nTxLock;
309 	TxLock = vmalloc(size);
310 	if (TxLock == NULL) {
311 		vfree(TxBlock);
312 		return -ENOMEM;
313 	}
314 
315 	/* initialize tlock table */
316 	for (k = 1; k < nTxLock - 1; k++)
317 		TxLock[k].next = k + 1;
318 	TxLock[k].next = 0;
319 	init_waitqueue_head(&TxAnchor.freelockwait);
320 	init_waitqueue_head(&TxAnchor.lowlockwait);
321 
322 	TxAnchor.freelock = 1;
323 	TxAnchor.tlocksInUse = 0;
324 	INIT_LIST_HEAD(&TxAnchor.anon_list);
325 	INIT_LIST_HEAD(&TxAnchor.anon_list2);
326 
327 	LAZY_LOCK_INIT();
328 	INIT_LIST_HEAD(&TxAnchor.unlock_queue);
329 
330 	stattx.maxlid = 1;	/* statistics */
331 
332 	return 0;
333 }
334 
335 /*
336  * NAME:	txExit()
337  *
338  * FUNCTION:	clean up when module is unloaded
339  */
340 void txExit(void)
341 {
342 	vfree(TxLock);
343 	TxLock = NULL;
344 	vfree(TxBlock);
345 	TxBlock = NULL;
346 }
347 
348 /*
349  * NAME:	txBegin()
350  *
351  * FUNCTION:	start a transaction.
352  *
353  * PARAMETER:	sb	- superblock
354  *		flag	- force for nested tx;
355  *
356  * RETURN:	tid	- transaction id
357  *
358  * note: the force flag allows starting a tx for a nested tx
359  * to prevent deadlock on the logsync barrier;
360  */
361 tid_t txBegin(struct super_block *sb, int flag)
362 {
363 	tid_t t;
364 	struct tblock *tblk;
365 	struct jfs_log *log;
366 
367 	jfs_info("txBegin: flag = 0x%x", flag);
368 	log = JFS_SBI(sb)->log;
369 
370 	if (!log) {
371 		jfs_error(sb, "read-only filesystem\n");
372 		return 0;
373 	}
374 
375 	TXN_LOCK();
376 
377 	INCREMENT(TxStat.txBegin);
378 
379       retry:
380 	if (!(flag & COMMIT_FORCE)) {
381 		/*
382 		 * synchronize with logsync barrier
383 		 */
384 		if (test_bit(log_SYNCBARRIER, &log->flag) ||
385 		    test_bit(log_QUIESCE, &log->flag)) {
386 			INCREMENT(TxStat.txBegin_barrier);
387 			TXN_SLEEP(&log->syncwait);
388 			goto retry;
389 		}
390 	}
391 	if (flag == 0) {
392 		/*
393 		 * Don't begin transaction if we're getting starved for tlocks
394 		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
395 		 * free tlocks)
396 		 */
397 		if (TxAnchor.tlocksInUse > TxLockVHWM) {
398 			INCREMENT(TxStat.txBegin_lockslow);
399 			TXN_SLEEP(&TxAnchor.lowlockwait);
400 			goto retry;
401 		}
402 	}
403 
404 	/*
405 	 * allocate transaction id/block
406 	 */
407 	if ((t = TxAnchor.freetid) == 0) {
408 		jfs_info("txBegin: waiting for free tid");
409 		INCREMENT(TxStat.txBegin_freetid);
410 		TXN_SLEEP(&TxAnchor.freewait);
411 		goto retry;
412 	}
413 
414 	tblk = tid_to_tblock(t);
415 
416 	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
417 		/* Don't let a non-forced transaction take the last tblk */
418 		jfs_info("txBegin: waiting for free tid");
419 		INCREMENT(TxStat.txBegin_freetid);
420 		TXN_SLEEP(&TxAnchor.freewait);
421 		goto retry;
422 	}
423 
424 	TxAnchor.freetid = tblk->next;
425 
426 	/*
427 	 * initialize transaction
428 	 */
429 
430 	/*
431 	 * We can't zero the whole thing or we screw up another thread being
432 	 * awakened after sleeping on tblk->waitor
433 	 *
434 	 * memset(tblk, 0, sizeof(struct tblock));
435 	 */
436 	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
437 
438 	tblk->sb = sb;
439 	++log->logtid;
440 	tblk->logtid = log->logtid;
441 
442 	++log->active;
443 
444 	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
445 	INCREMENT(stattx.ntid);	/* statistics */
446 
447 	TXN_UNLOCK();
448 
449 	jfs_info("txBegin: returning tid = %d", t);
450 
451 	return t;
452 }
453 
454 /*
455  * NAME:	txBeginAnon()
456  *
457  * FUNCTION:	start an anonymous transaction.
458  *		Blocks if logsync or available tlocks are low to prevent
459  *		anonymous tlocks from depleting supply.
460  *
461  * PARAMETER:	sb	- superblock
462  *
463  * RETURN:	none
464  */
465 void txBeginAnon(struct super_block *sb)
466 {
467 	struct jfs_log *log;
468 
469 	log = JFS_SBI(sb)->log;
470 
471 	TXN_LOCK();
472 	INCREMENT(TxStat.txBeginAnon);
473 
474       retry:
475 	/*
476 	 * synchronize with logsync barrier
477 	 */
478 	if (test_bit(log_SYNCBARRIER, &log->flag) ||
479 	    test_bit(log_QUIESCE, &log->flag)) {
480 		INCREMENT(TxStat.txBeginAnon_barrier);
481 		TXN_SLEEP(&log->syncwait);
482 		goto retry;
483 	}
484 
485 	/*
486 	 * Don't begin transaction if we're getting starved for tlocks
487 	 */
488 	if (TxAnchor.tlocksInUse > TxLockVHWM) {
489 		INCREMENT(TxStat.txBeginAnon_lockslow);
490 		TXN_SLEEP(&TxAnchor.lowlockwait);
491 		goto retry;
492 	}
493 	TXN_UNLOCK();
494 }
495 
496 /*
497  *	txEnd()
498  *
499  * function: free specified transaction block.
500  *
501  *	logsync barrier processing:
502  *
503  * serialization:
504  */
505 void txEnd(tid_t tid)
506 {
507 	struct tblock *tblk = tid_to_tblock(tid);
508 	struct jfs_log *log;
509 
510 	jfs_info("txEnd: tid = %d", tid);
511 	TXN_LOCK();
512 
513 	/*
514 	 * wakeup transactions waiting on the page locked
515 	 * by the current transaction
516 	 */
517 	TXN_WAKEUP(&tblk->waitor);
518 
519 	log = JFS_SBI(tblk->sb)->log;
520 
521 	/*
522 	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
523 	 * otherwise, we would be left with a transaction that may have been
524 	 * reused.
525 	 *
526 	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
527 	 * routine.
528 	 */
529 	if (tblk->flag & tblkGC_LAZY) {
530 		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
531 		TXN_UNLOCK();
532 
533 		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
534 		tblk->flag |= tblkGC_UNLOCKED;
535 		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
536 		return;
537 	}
538 
539 	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
540 
541 	assert(tblk->next == 0);
542 
543 	/*
544 	 * insert tblock back on freelist
545 	 */
546 	tblk->next = TxAnchor.freetid;
547 	TxAnchor.freetid = tid;
548 
549 	/*
550 	 * mark the tblock not active
551 	 */
552 	if (--log->active == 0) {
553 		clear_bit(log_FLUSH, &log->flag);
554 
555 		/*
556 		 * synchronize with logsync barrier
557 		 */
558 		if (test_bit(log_SYNCBARRIER, &log->flag)) {
559 			TXN_UNLOCK();
560 
561 			/* write dirty metadata & forward log syncpt */
562 			jfs_syncpt(log, 1);
563 
564 			jfs_info("log barrier off: 0x%x", log->lsn);
565 
566 			/* enable new transactions start */
567 			clear_bit(log_SYNCBARRIER, &log->flag);
568 
569 			/* wakeup all waitors for logsync barrier */
570 			TXN_WAKEUP(&log->syncwait);
571 
572 			goto wakeup;
573 		}
574 	}
575 
576 	TXN_UNLOCK();
577 wakeup:
578 	/*
579 	 * wakeup all waitors for a free tblock
580 	 */
581 	TXN_WAKEUP(&TxAnchor.freewait);
582 }
583 
584 /*
585  *	txLock()
586  *
587  * function: acquire a transaction lock on the specified <mp>
588  *
589  * parameter:
590  *
591  * return:	transaction lock id
592  *
593  * serialization:
594  */
595 struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
596 		     int type)
597 {
598 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
599 	int dir_xtree = 0;
600 	lid_t lid;
601 	tid_t xtid;
602 	struct tlock *tlck;
603 	struct xtlock *xtlck;
604 	struct linelock *linelock;
605 	xtpage_t *p;
606 	struct tblock *tblk;
607 
608 	TXN_LOCK();
609 
610 	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
611 	    !(mp->xflag & COMMIT_PAGE)) {
612 		/*
613 		 * Directory inode is special.  It can have both an xtree tlock
614 		 * and a dtree tlock associated with it.
615 		 */
616 		dir_xtree = 1;
617 		lid = jfs_ip->xtlid;
618 	} else
619 		lid = mp->lid;
620 
621 	/* is page not locked by a transaction ? */
622 	if (lid == 0)
623 		goto allocateLock;
624 
625 	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
626 
627 	/* is page locked by the requester transaction ? */
628 	tlck = lid_to_tlock(lid);
629 	if ((xtid = tlck->tid) == tid) {
630 		TXN_UNLOCK();
631 		goto grantLock;
632 	}
633 
634 	/*
635 	 * is page locked by anonymous transaction/lock ?
636 	 *
637 	 * (page update without transaction (i.e., file write) is
638 	 * locked under anonymous transaction tid = 0:
639 	 * anonymous tlocks maintained on anonymous tlock list of
640 	 * the inode of the page and available to all anonymous
641 	 * transactions until txCommit() time at which point
642 	 * they are transferred to the transaction tlock list of
643 	 * the committing transaction of the inode)
644 	 */
645 	if (xtid == 0) {
646 		tlck->tid = tid;
647 		TXN_UNLOCK();
648 		tblk = tid_to_tblock(tid);
649 		/*
650 		 * The order of the tlocks in the transaction is important
651 		 * (during truncate, child xtree pages must be freed before
652 		 * parent's tlocks change the working map).
653 		 * Take tlock off anonymous list and add to tail of
654 		 * transaction list
655 		 *
656 		 * Note:  We really need to get rid of the tid & lid and
657 		 * use list_head's.  This code is getting UGLY!
658 		 */
659 		if (jfs_ip->atlhead == lid) {
660 			if (jfs_ip->atltail == lid) {
661 				/* only anonymous txn.
662 				 * Remove from anon_list
663 				 */
664 				TXN_LOCK();
665 				list_del_init(&jfs_ip->anon_inode_list);
666 				TXN_UNLOCK();
667 			}
668 			jfs_ip->atlhead = tlck->next;
669 		} else {
670 			lid_t last;
671 			for (last = jfs_ip->atlhead;
672 			     lid_to_tlock(last)->next != lid;
673 			     last = lid_to_tlock(last)->next) {
674 				assert(last);
675 			}
676 			lid_to_tlock(last)->next = tlck->next;
677 			if (jfs_ip->atltail == lid)
678 				jfs_ip->atltail = last;
679 		}
680 
681 		/* insert the tlock at tail of transaction tlock list */
682 
683 		if (tblk->next)
684 			lid_to_tlock(tblk->last)->next = lid;
685 		else
686 			tblk->next = lid;
687 		tlck->next = 0;
688 		tblk->last = lid;
689 
690 		goto grantLock;
691 	}
692 
693 	goto waitLock;
694 
695 	/*
696 	 * allocate a tlock
697 	 */
698       allocateLock:
699 	lid = txLockAlloc();
700 	tlck = lid_to_tlock(lid);
701 
702 	/*
703 	 * initialize tlock
704 	 */
705 	tlck->tid = tid;
706 
707 	TXN_UNLOCK();
708 
709 	/* mark tlock for meta-data page */
710 	if (mp->xflag & COMMIT_PAGE) {
711 
712 		tlck->flag = tlckPAGELOCK;
713 
714 		/* mark the page dirty and nohomeok */
715 		metapage_nohomeok(mp);
716 
717 		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
718 			 mp, mp->nohomeok, tid, tlck);
719 
720 		/* if anonymous transaction, and buffer is on the group
721 		 * commit synclist, mark inode to show this.  This will
722 		 * prevent the buffer from being marked nohomeok for too
723 		 * long a time.
724 		 */
725 		if ((tid == 0) && mp->lsn)
726 			set_cflag(COMMIT_Synclist, ip);
727 	}
728 	/* mark tlock for in-memory inode */
729 	else
730 		tlck->flag = tlckINODELOCK;
731 
732 	if (S_ISDIR(ip->i_mode))
733 		tlck->flag |= tlckDIRECTORY;
734 
735 	tlck->type = 0;
736 
737 	/* bind the tlock and the page */
738 	tlck->ip = ip;
739 	tlck->mp = mp;
740 	if (dir_xtree)
741 		jfs_ip->xtlid = lid;
742 	else
743 		mp->lid = lid;
744 
745 	/*
746 	 * enqueue transaction lock to transaction/inode
747 	 */
748 	/* insert the tlock at tail of transaction tlock list */
749 	if (tid) {
750 		tblk = tid_to_tblock(tid);
751 		if (tblk->next)
752 			lid_to_tlock(tblk->last)->next = lid;
753 		else
754 			tblk->next = lid;
755 		tlck->next = 0;
756 		tblk->last = lid;
757 	}
758 	/* anonymous transaction:
759 	 * insert the tlock at head of inode anonymous tlock list
760 	 */
761 	else {
762 		tlck->next = jfs_ip->atlhead;
763 		jfs_ip->atlhead = lid;
764 		if (tlck->next == 0) {
765 			/* This inode's first anonymous transaction */
766 			jfs_ip->atltail = lid;
767 			TXN_LOCK();
768 			list_add_tail(&jfs_ip->anon_inode_list,
769 				      &TxAnchor.anon_list);
770 			TXN_UNLOCK();
771 		}
772 	}
773 
774 	/* initialize type dependent area for linelock */
775 	linelock = (struct linelock *) & tlck->lock;
776 	linelock->next = 0;
777 	linelock->flag = tlckLINELOCK;
778 	linelock->maxcnt = TLOCKSHORT;
779 	linelock->index = 0;
780 
781 	switch (type & tlckTYPE) {
782 	case tlckDTREE:
783 		linelock->l2linesize = L2DTSLOTSIZE;
784 		break;
785 
786 	case tlckXTREE:
787 		linelock->l2linesize = L2XTSLOTSIZE;
788 
789 		xtlck = (struct xtlock *) linelock;
790 		xtlck->header.offset = 0;
791 		xtlck->header.length = 2;
792 
793 		if (type & tlckNEW) {
794 			xtlck->lwm.offset = XTENTRYSTART;
795 		} else {
796 			if (mp->xflag & COMMIT_PAGE)
797 				p = (xtpage_t *) mp->data;
798 			else
799 				p = &jfs_ip->i_xtroot;
800 			xtlck->lwm.offset =
801 			    le16_to_cpu(p->header.nextindex);
802 		}
803 		xtlck->lwm.length = 0;	/* ! */
804 		xtlck->twm.offset = 0;
805 		xtlck->hwm.offset = 0;
806 
807 		xtlck->index = 2;
808 		break;
809 
810 	case tlckINODE:
811 		linelock->l2linesize = L2INODESLOTSIZE;
812 		break;
813 
814 	case tlckDATA:
815 		linelock->l2linesize = L2DATASLOTSIZE;
816 		break;
817 
818 	default:
819 		jfs_err("UFO tlock:0x%p", tlck);
820 	}
821 
822 	/*
823 	 * update tlock vector
824 	 */
825       grantLock:
826 	tlck->type |= type;
827 
828 	return tlck;
829 
830 	/*
831 	 * page is being locked by another transaction:
832 	 */
833       waitLock:
834 	/* Only locks on ipimap or ipaimap should reach here */
835 	/* assert(jfs_ip->fileset == AGGREGATE_I); */
836 	if (jfs_ip->fileset != AGGREGATE_I) {
837 		printk(KERN_ERR "txLock: trying to lock locked page!");
838 		print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
839 			       ip, sizeof(*ip), 0);
840 		print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
841 			       mp, sizeof(*mp), 0);
842 		print_hex_dump(KERN_ERR, "Locker's tblock: ",
843 			       DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
844 			       sizeof(struct tblock), 0);
845 		print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
846 			       tlck, sizeof(*tlck), 0);
847 		BUG();
848 	}
849 	INCREMENT(stattx.waitlock);	/* statistics */
850 	TXN_UNLOCK();
851 	release_metapage(mp);
852 	TXN_LOCK();
853 	xtid = tlck->tid;	/* reacquire after dropping TXN_LOCK */
854 
855 	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
856 		 tid, xtid, lid);
857 
858 	/* Recheck everything since dropping TXN_LOCK */
859 	if (xtid && (tlck->mp == mp) && (mp->lid == lid))
860 		TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
861 	else
862 		TXN_UNLOCK();
863 	jfs_info("txLock: awakened     tid = %d, lid = %d", tid, lid);
864 
865 	return NULL;
866 }
867 
868 /*
869  * NAME:	txRelease()
870  *
871  * FUNCTION:	Release buffers associated with transaction locks, but don't
872  *		mark homeok yet.  This allows other transactions to modify
873  *		buffers, but won't let them go to disk until the commit record
874  *		actually gets written.
875  *
876  * PARAMETER:
877  *		tblk	-
878  *
879  * RETURN:	Errors from subroutines.
880  */
881 static void txRelease(struct tblock * tblk)
882 {
883 	struct metapage *mp;
884 	lid_t lid;
885 	struct tlock *tlck;
886 
887 	TXN_LOCK();
888 
889 	for (lid = tblk->next; lid; lid = tlck->next) {
890 		tlck = lid_to_tlock(lid);
891 		if ((mp = tlck->mp) != NULL &&
892 		    (tlck->type & tlckBTROOT) == 0) {
893 			assert(mp->xflag & COMMIT_PAGE);
894 			mp->lid = 0;
895 		}
896 	}
897 
898 	/*
899 	 * wakeup transactions waiting on a page locked
900 	 * by the current transaction
901 	 */
902 	TXN_WAKEUP(&tblk->waitor);
903 
904 	TXN_UNLOCK();
905 }
906 
907 /*
908  * NAME:	txUnlock()
909  *
910  * FUNCTION:	Initiates pageout of pages modified by tid in journalled
911  *		objects and frees their lockwords.
912  */
913 static void txUnlock(struct tblock * tblk)
914 {
915 	struct tlock *tlck;
916 	struct linelock *linelock;
917 	lid_t lid, next, llid, k;
918 	struct metapage *mp;
919 	struct jfs_log *log;
920 	int difft, diffp;
921 	unsigned long flags;
922 
923 	jfs_info("txUnlock: tblk = 0x%p", tblk);
924 	log = JFS_SBI(tblk->sb)->log;
925 
926 	/*
927 	 * mark page under tlock homeok (its log has been written):
928 	 */
929 	for (lid = tblk->next; lid; lid = next) {
930 		tlck = lid_to_tlock(lid);
931 		next = tlck->next;
932 
933 		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
934 
935 		/* unbind page from tlock */
936 		if ((mp = tlck->mp) != NULL &&
937 		    (tlck->type & tlckBTROOT) == 0) {
938 			assert(mp->xflag & COMMIT_PAGE);
939 
940 			/* hold buffer
941 			 */
942 			hold_metapage(mp);
943 
944 			assert(mp->nohomeok > 0);
945 			_metapage_homeok(mp);
946 
947 			/* inherit younger/larger clsn */
948 			LOGSYNC_LOCK(log, flags);
949 			if (mp->clsn) {
950 				logdiff(difft, tblk->clsn, log);
951 				logdiff(diffp, mp->clsn, log);
952 				if (difft > diffp)
953 					mp->clsn = tblk->clsn;
954 			} else
955 				mp->clsn = tblk->clsn;
956 			LOGSYNC_UNLOCK(log, flags);
957 
958 			assert(!(tlck->flag & tlckFREEPAGE));
959 
960 			put_metapage(mp);
961 		}
962 
963 		/* insert tlock, and linelock(s) of the tlock if any,
964 		 * at head of freelist
965 		 */
966 		TXN_LOCK();
967 
968 		llid = ((struct linelock *) & tlck->lock)->next;
969 		while (llid) {
970 			linelock = (struct linelock *) lid_to_tlock(llid);
971 			k = linelock->next;
972 			txLockFree(llid);
973 			llid = k;
974 		}
975 		txLockFree(lid);
976 
977 		TXN_UNLOCK();
978 	}
979 	tblk->next = tblk->last = 0;
980 
981 	/*
982 	 * remove tblock from logsynclist
983 	 * (allocation map pages inherited lsn of tblk and
984 	 * have been inserted in logsync list at txUpdateMap())
985 	 */
986 	if (tblk->lsn) {
987 		LOGSYNC_LOCK(log, flags);
988 		log->count--;
989 		list_del(&tblk->synclist);
990 		LOGSYNC_UNLOCK(log, flags);
991 	}
992 }
993 
994 /*
995  *	txMaplock()
996  *
997  * function: allocate a transaction lock for freed page/entry;
998  *	for freed page, maplock is used as xtlock/dtlock type;
999  */
1000 struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
1001 {
1002 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
1003 	lid_t lid;
1004 	struct tblock *tblk;
1005 	struct tlock *tlck;
1006 	struct maplock *maplock;
1007 
1008 	TXN_LOCK();
1009 
1010 	/*
1011 	 * allocate a tlock
1012 	 */
1013 	lid = txLockAlloc();
1014 	tlck = lid_to_tlock(lid);
1015 
1016 	/*
1017 	 * initialize tlock
1018 	 */
1019 	tlck->tid = tid;
1020 
1021 	/* bind the tlock and the object */
1022 	tlck->flag = tlckINODELOCK;
1023 	if (S_ISDIR(ip->i_mode))
1024 		tlck->flag |= tlckDIRECTORY;
1025 	tlck->ip = ip;
1026 	tlck->mp = NULL;
1027 
1028 	tlck->type = type;
1029 
1030 	/*
1031 	 * enqueue transaction lock to transaction/inode
1032 	 */
1033 	/* insert the tlock at tail of transaction tlock list */
1034 	if (tid) {
1035 		tblk = tid_to_tblock(tid);
1036 		if (tblk->next)
1037 			lid_to_tlock(tblk->last)->next = lid;
1038 		else
1039 			tblk->next = lid;
1040 		tlck->next = 0;
1041 		tblk->last = lid;
1042 	}
1043 	/* anonymous transaction:
1044 	 * insert the tlock at head of inode anonymous tlock list
1045 	 */
1046 	else {
1047 		tlck->next = jfs_ip->atlhead;
1048 		jfs_ip->atlhead = lid;
1049 		if (tlck->next == 0) {
1050 			/* This inode's first anonymous transaction */
1051 			jfs_ip->atltail = lid;
1052 			list_add_tail(&jfs_ip->anon_inode_list,
1053 				      &TxAnchor.anon_list);
1054 		}
1055 	}
1056 
1057 	TXN_UNLOCK();
1058 
1059 	/* initialize type dependent area for maplock */
1060 	maplock = (struct maplock *) & tlck->lock;
1061 	maplock->next = 0;
1062 	maplock->maxcnt = 0;
1063 	maplock->index = 0;
1064 
1065 	return tlck;
1066 }
1067 
1068 /*
1069  *	txLinelock()
1070  *
1071  * function: allocate a transaction lock for log vector list
1072  */
1073 struct linelock *txLinelock(struct linelock * tlock)
1074 {
1075 	lid_t lid;
1076 	struct tlock *tlck;
1077 	struct linelock *linelock;
1078 
1079 	TXN_LOCK();
1080 
1081 	/* allocate a TxLock structure */
1082 	lid = txLockAlloc();
1083 	tlck = lid_to_tlock(lid);
1084 
1085 	TXN_UNLOCK();
1086 
1087 	/* initialize linelock */
1088 	linelock = (struct linelock *) tlck;
1089 	linelock->next = 0;
1090 	linelock->flag = tlckLINELOCK;
1091 	linelock->maxcnt = TLOCKLONG;
1092 	linelock->index = 0;
1093 	if (tlck->flag & tlckDIRECTORY)
1094 		linelock->flag |= tlckDIRECTORY;
1095 
1096 	/* append linelock after tlock */
1097 	linelock->next = tlock->next;
1098 	tlock->next = lid;
1099 
1100 	return linelock;
1101 }
1102 
1103 /*
1104  *		transaction commit management
1105  *		-----------------------------
1106  */
1107 
1108 /*
1109  * NAME:	txCommit()
1110  *
1111  * FUNCTION:	commit the changes to the objects specified in
1112  *		clist.  For journalled segments only the
1113  *		changes of the caller are committed, ie by tid.
1114  *		for non-journalled segments the data are flushed to
1115  *		disk and then the change to the disk inode and indirect
1116  *		blocks committed (so blocks newly allocated to the
1117  *		segment will be made a part of the segment atomically).
1118  *
1119  *		all of the segments specified in clist must be in
1120  *		one file system. no more than 6 segments are needed
1121  *		to handle all unix svcs.
1122  *
1123  *		if the i_nlink field (i.e. disk inode link count)
1124  *		is zero, and the type of inode is a regular file or
1125  *		directory, or symbolic link, the inode is truncated
1126  *		to zero length. the truncation is committed but the
1127  *		VM resources are unaffected until it is closed (see
1128  *		iput and iclose).
1129  *
1130  * PARAMETER:
1131  *
1132  * RETURN:
1133  *
1134  * serialization:
1135  *		on entry the inode lock on each segment is assumed
1136  *		to be held.
1137  *
1138  * i/o error:
1139  */
1140 int txCommit(tid_t tid,		/* transaction identifier */
1141 	     int nip,		/* number of inodes to commit */
1142 	     struct inode **iplist,	/* list of inode to commit */
1143 	     int flag)
1144 {
1145 	int rc = 0;
1146 	struct commit cd;
1147 	struct jfs_log *log;
1148 	struct tblock *tblk;
1149 	struct lrd *lrd;
1150 	struct inode *ip;
1151 	struct jfs_inode_info *jfs_ip;
1152 	int k, n;
1153 	ino_t top;
1154 	struct super_block *sb;
1155 
1156 	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
1157 	/* is read-only file system ? */
1158 	if (isReadOnly(iplist[0])) {
1159 		rc = -EROFS;
1160 		goto TheEnd;
1161 	}
1162 
1163 	sb = cd.sb = iplist[0]->i_sb;
1164 	cd.tid = tid;
1165 
1166 	if (tid == 0)
1167 		tid = txBegin(sb, 0);
1168 	tblk = tid_to_tblock(tid);
1169 
1170 	/*
1171 	 * initialize commit structure
1172 	 */
1173 	log = JFS_SBI(sb)->log;
1174 	cd.log = log;
1175 
1176 	/* initialize log record descriptor in commit */
1177 	lrd = &cd.lrd;
1178 	lrd->logtid = cpu_to_le32(tblk->logtid);
1179 	lrd->backchain = 0;
1180 
1181 	tblk->xflag |= flag;
1182 
1183 	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
1184 		tblk->xflag |= COMMIT_LAZY;
1185 	/*
1186 	 *	prepare non-journaled objects for commit
1187 	 *
1188 	 * flush data pages of non-journaled file
1189 	 * to prevent the file from getting uninitialized disk blocks
1190 	 * in case of a crash.
1191 	 * (new blocks - )
1192 	 */
1193 	cd.iplist = iplist;
1194 	cd.nip = nip;
1195 
1196 	/*
1197 	 *	acquire transaction lock on (on-disk) inodes
1198 	 *
1199 	 * update on-disk inode from in-memory inode
1200 	 * acquiring transaction locks for AFTER records
1201 	 * on the on-disk inode of file object
1202 	 *
1203 	 * sort the inodes array by inode number in descending order
1204 	 * to prevent deadlock when acquiring transaction lock
1205 	 * of on-disk inodes on multiple on-disk inode pages by
1206 	 * multiple concurrent transactions
1207 	 */
1208 	for (k = 0; k < cd.nip; k++) {
1209 		top = (cd.iplist[k])->i_ino;
1210 		for (n = k + 1; n < cd.nip; n++) {
1211 			ip = cd.iplist[n];
1212 			if (ip->i_ino > top) {
1213 				top = ip->i_ino;
1214 				cd.iplist[n] = cd.iplist[k];
1215 				cd.iplist[k] = ip;
1216 			}
1217 		}
1218 
1219 		ip = cd.iplist[k];
1220 		jfs_ip = JFS_IP(ip);
1221 
1222 		/*
1223 		 * BUGBUG - This code has temporarily been removed.  The
1224 		 * intent is to ensure that any file data is written before
1225 		 * the metadata is committed to the journal.  This prevents
1226 		 * uninitialized data from appearing in a file after the
1227 		 * journal has been replayed.  (The uninitialized data
1228 		 * could be sensitive data removed by another user.)
1229 		 *
1230 		 * The problem now is that we are holding the IWRITELOCK
1231 		 * on the inode, and calling filemap_fdatawrite on an
1232 		 * unmapped page will cause a deadlock in jfs_get_block.
1233 		 *
1234 		 * The long term solution is to pare down the use of
1235 		 * IWRITELOCK.  We are currently holding it too long.
1236 		 * We could also be smarter about which data pages need
1237 		 * to be written before the transaction is committed and
1238 		 * when we don't need to worry about it at all.
1239 		 *
1240 		 * if ((!S_ISDIR(ip->i_mode))
1241 		 *    && (tblk->flag & COMMIT_DELETE) == 0)
1242 		 *	filemap_write_and_wait(ip->i_mapping);
1243 		 */
1244 
1245 		/*
1246 		 * Mark inode as not dirty.  It will still be on the dirty
1247 		 * inode list, but we'll know not to commit it again unless
1248 		 * it gets marked dirty again
1249 		 */
1250 		clear_cflag(COMMIT_Dirty, ip);
1251 
1252 		/* inherit anonymous tlock(s) of inode */
1253 		if (jfs_ip->atlhead) {
1254 			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1255 			tblk->next = jfs_ip->atlhead;
1256 			if (!tblk->last)
1257 				tblk->last = jfs_ip->atltail;
1258 			jfs_ip->atlhead = jfs_ip->atltail = 0;
1259 			TXN_LOCK();
1260 			list_del_init(&jfs_ip->anon_inode_list);
1261 			TXN_UNLOCK();
1262 		}
1263 
1264 		/*
1265 		 * acquire transaction lock on on-disk inode page
1266 		 * (become first tlock of the tblk's tlock list)
1267 		 */
1268 		if (((rc = diWrite(tid, ip))))
1269 			goto out;
1270 	}
1271 
1272 	/*
1273 	 *	write log records from transaction locks
1274 	 *
1275 	 * txUpdateMap() resets XAD_NEW in XAD.
1276 	 */
1277 	if ((rc = txLog(log, tblk, &cd)))
1278 		goto TheEnd;
1279 
1280 	/*
1281 	 * Ensure that inode isn't reused before
1282 	 * lazy commit thread finishes processing
1283 	 */
1284 	if (tblk->xflag & COMMIT_DELETE) {
1285 		ihold(tblk->u.ip);
1286 		/*
1287 		 * Avoid a rare deadlock
1288 		 *
1289 		 * If the inode is locked, we may be blocked in
1290 		 * jfs_commit_inode.  If so, we don't want the
1291 		 * lazy_commit thread doing the last iput() on the inode
1292 		 * since that may block on the locked inode.  Instead,
1293 		 * commit the transaction synchronously, so the last iput
1294 		 * will be done by the calling thread (or later)
1295 		 */
1296 		/*
1297 		 * I believe this code is no longer needed.  Splitting I_LOCK
1298 		 * into two bits, I_NEW and I_SYNC should prevent this
1299 		 * deadlock as well.  But since I don't have a JFS testload
1300 		 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
1301 		 * Joern
1302 		 */
1303 		if (tblk->u.ip->i_state & I_SYNC)
1304 			tblk->xflag &= ~COMMIT_LAZY;
1305 	}
1306 
1307 	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1308 	       ((tblk->u.ip->i_nlink == 0) &&
1309 		!test_cflag(COMMIT_Nolink, tblk->u.ip)));
1310 
1311 	/*
1312 	 *	write COMMIT log record
1313 	 */
1314 	lrd->type = cpu_to_le16(LOG_COMMIT);
1315 	lrd->length = 0;
1316 	lmLog(log, tblk, lrd, NULL);
1317 
1318 	lmGroupCommit(log, tblk);
1319 
1320 	/*
1321 	 *	- transaction is now committed -
1322 	 */
1323 
1324 	/*
1325 	 * force pages in careful update
1326 	 * (imap addressing structure update)
1327 	 */
1328 	if (flag & COMMIT_FORCE)
1329 		txForce(tblk);
1330 
1331 	/*
1332 	 *	update allocation map.
1333 	 *
1334 	 * update inode allocation map and inode:
1335 	 * free pager lock on memory object of inode if any.
1336 	 * update block allocation map.
1337 	 *
1338 	 * txUpdateMap() resets XAD_NEW in XAD.
1339 	 */
1340 	if (tblk->xflag & COMMIT_FORCE)
1341 		txUpdateMap(tblk);
1342 
1343 	/*
1344 	 *	free transaction locks and pageout/free pages
1345 	 */
1346 	txRelease(tblk);
1347 
1348 	if ((tblk->flag & tblkGC_LAZY) == 0)
1349 		txUnlock(tblk);
1350 
1351 
1352 	/*
1353 	 *	reset in-memory object state
1354 	 */
1355 	for (k = 0; k < cd.nip; k++) {
1356 		ip = cd.iplist[k];
1357 		jfs_ip = JFS_IP(ip);
1358 
1359 		/*
1360 		 * reset in-memory inode state
1361 		 */
1362 		jfs_ip->bxflag = 0;
1363 		jfs_ip->blid = 0;
1364 	}
1365 
1366       out:
1367 	if (rc != 0)
1368 		txAbort(tid, 1);
1369 
1370       TheEnd:
1371 	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
1372 	return rc;
1373 }
1374 
1375 /*
1376  * NAME:	txLog()
1377  *
1378  * FUNCTION:	Writes AFTER log records for all lines modified
1379  *		by tid for segments specified by inodes in comdata.
1380  *		Code assumes only WRITELOCKS are recorded in lockwords.
1381  *
1382  * PARAMETERS:
1383  *
1384  * RETURN :
1385  */
1386 static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
1387 {
1388 	int rc = 0;
1389 	struct inode *ip;
1390 	lid_t lid;
1391 	struct tlock *tlck;
1392 	struct lrd *lrd = &cd->lrd;
1393 
1394 	/*
1395 	 * write log record(s) for each tlock of transaction,
1396 	 */
1397 	for (lid = tblk->next; lid; lid = tlck->next) {
1398 		tlck = lid_to_tlock(lid);
1399 
1400 		tlck->flag |= tlckLOG;
1401 
1402 		/* initialize lrd common */
1403 		ip = tlck->ip;
1404 		lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
1405 		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
1406 		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
1407 
1408 		/* write log record of page from the tlock */
1409 		switch (tlck->type & tlckTYPE) {
1410 		case tlckXTREE:
1411 			xtLog(log, tblk, lrd, tlck);
1412 			break;
1413 
1414 		case tlckDTREE:
1415 			dtLog(log, tblk, lrd, tlck);
1416 			break;
1417 
1418 		case tlckINODE:
1419 			diLog(log, tblk, lrd, tlck, cd);
1420 			break;
1421 
1422 		case tlckMAP:
1423 			mapLog(log, tblk, lrd, tlck);
1424 			break;
1425 
1426 		case tlckDATA:
1427 			dataLog(log, tblk, lrd, tlck);
1428 			break;
1429 
1430 		default:
1431 			jfs_err("UFO tlock:0x%p", tlck);
1432 		}
1433 	}
1434 
1435 	return rc;
1436 }
1437 
1438 /*
1439  *	diLog()
1440  *
1441  * function:	log inode tlock and format maplock to update bmap;
1442  */
1443 static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1444 		 struct tlock * tlck, struct commit * cd)
1445 {
1446 	int rc = 0;
1447 	struct metapage *mp;
1448 	pxd_t *pxd;
1449 	struct pxd_lock *pxdlock;
1450 
1451 	mp = tlck->mp;
1452 
1453 	/* initialize as REDOPAGE record format */
1454 	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
1455 	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
1456 
1457 	pxd = &lrd->log.redopage.pxd;
1458 
1459 	/*
1460 	 *	inode after image
1461 	 */
1462 	if (tlck->type & tlckENTRY) {
1463 		/* log after-image for logredo(): */
1464 		lrd->type = cpu_to_le16(LOG_REDOPAGE);
1465 		PXDaddress(pxd, mp->index);
1466 		PXDlength(pxd,
1467 			  mp->logical_size >> tblk->sb->s_blocksize_bits);
1468 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1469 
1470 		/* mark page as homeward bound */
1471 		tlck->flag |= tlckWRITEPAGE;
1472 	} else if (tlck->type & tlckFREE) {
1473 		/*
1474 		 *	free inode extent
1475 		 *
1476 		 * (pages of the freed inode extent have been invalidated and
1477 		 * a maplock for free of the extent has been formatted at
1478 		 * txLock() time);
1479 		 *
1480 		 * the tlock had been acquired on the inode allocation map page
1481 		 * (iag) that specifies the freed extent, even though the map
1482 		 * page is not itself logged, to prevent pageout of the map
1483 		 * page before the log;
1484 		 */
1485 
1486 		/* log LOG_NOREDOINOEXT of the freed inode extent for
1487 		 * logredo() to start NoRedoPage filters, and to update
1488 		 * imap and bmap for free of the extent;
1489 		 */
1490 		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
1491 		/*
1492 		 * For the LOG_NOREDOINOEXT record, we need
1493 		 * to pass the IAG number and inode extent
1494 		 * index (within that IAG) from which the
1495 		 * extent is being released.  These have been
1496 		 * passed to us in iplist[1] and iplist[2].
1497 		 */
1498 		lrd->log.noredoinoext.iagnum =
1499 		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
1500 		lrd->log.noredoinoext.inoext_idx =
1501 		    cpu_to_le32((u32) (size_t) cd->iplist[2]);
1502 
1503 		pxdlock = (struct pxd_lock *) & tlck->lock;
1504 		*pxd = pxdlock->pxd;
1505 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1506 
1507 		/* update bmap */
1508 		tlck->flag |= tlckUPDATEMAP;
1509 
1510 		/* mark page as homeward bound */
1511 		tlck->flag |= tlckWRITEPAGE;
1512 	} else
1513 		jfs_err("diLog: UFO type tlck:0x%p", tlck);
1514 #ifdef  _JFS_WIP
1515 	/*
1516 	 *	alloc/free external EA extent
1517 	 *
1518 	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
1519 	 * of the extent has been formatted at txLock() time;
1520 	 */
1521 	else {
1522 		assert(tlck->type & tlckEA);
1523 
1524 		/* log LOG_UPDATEMAP for logredo() to update bmap for
1525 		 * alloc of new (and free of old) external EA extent;
1526 		 */
1527 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1528 		pxdlock = (struct pxd_lock *) & tlck->lock;
1529 		nlock = pxdlock->index;
1530 		for (i = 0; i < nlock; i++, pxdlock++) {
1531 			if (pxdlock->flag & mlckALLOCPXD)
1532 				lrd->log.updatemap.type =
1533 				    cpu_to_le16(LOG_ALLOCPXD);
1534 			else
1535 				lrd->log.updatemap.type =
1536 				    cpu_to_le16(LOG_FREEPXD);
1537 			lrd->log.updatemap.nxd = cpu_to_le16(1);
1538 			lrd->log.updatemap.pxd = pxdlock->pxd;
1539 			lrd->backchain =
1540 			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1541 		}
1542 
1543 		/* update bmap */
1544 		tlck->flag |= tlckUPDATEMAP;
1545 	}
1546 #endif				/* _JFS_WIP */
1547 
1548 	return rc;
1549 }
1550 
1551 /*
1552  *	dataLog()
1553  *
1554  * function:	log data tlock
1555  */
1556 static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1557 	    struct tlock * tlck)
1558 {
1559 	struct metapage *mp;
1560 	pxd_t *pxd;
1561 
1562 	mp = tlck->mp;
1563 
1564 	/* initialize as REDOPAGE record format */
1565 	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
1566 	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
1567 
1568 	pxd = &lrd->log.redopage.pxd;
1569 
1570 	/* log after-image for logredo(): */
1571 	lrd->type = cpu_to_le16(LOG_REDOPAGE);
1572 
1573 	if (jfs_dirtable_inline(tlck->ip)) {
1574 		/*
1575 		 * The table has been truncated; we must have deleted
1576 		 * the last entry, so don't bother logging this
1577 		 */
1578 		mp->lid = 0;
1579 		grab_metapage(mp);
1580 		metapage_homeok(mp);
1581 		discard_metapage(mp);
1582 		tlck->mp = NULL;
1583 		return 0;
1584 	}
1585 
1586 	PXDaddress(pxd, mp->index);
1587 	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1588 
1589 	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1590 
1591 	/* mark page as homeward bound */
1592 	tlck->flag |= tlckWRITEPAGE;
1593 
1594 	return 0;
1595 }
1596 
1597 /*
1598  *	dtLog()
1599  *
1600  * function:	log dtree tlock and format maplock to update bmap;
1601  */
1602 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1603 	   struct tlock * tlck)
1604 {
1605 	struct metapage *mp;
1606 	struct pxd_lock *pxdlock;
1607 	pxd_t *pxd;
1608 
1609 	mp = tlck->mp;
1610 
1611 	/* initialize as REDOPAGE/NOREDOPAGE record format */
1612 	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
1613 	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
1614 
1615 	pxd = &lrd->log.redopage.pxd;
1616 
1617 	if (tlck->type & tlckBTROOT)
1618 		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1619 
1620 	/*
1621 	 *	page extension via relocation: entry insertion;
1622 	 *	page extension in-place: entry insertion;
1623 	 *	new right page from page split, reinitialized in-line
1624 	 *	root from root page split: entry insertion;
1625 	 */
1626 	if (tlck->type & (tlckNEW | tlckEXTEND)) {
1627 		/* log after-image of the new page for logredo():
1628 		 * mark log (LOG_NEW) for logredo() to initialize
1629 		 * freelist and update bmap for alloc of the new page;
1630 		 */
1631 		lrd->type = cpu_to_le16(LOG_REDOPAGE);
1632 		if (tlck->type & tlckEXTEND)
1633 			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
1634 		else
1635 			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
1636 		PXDaddress(pxd, mp->index);
1637 		PXDlength(pxd,
1638 			  mp->logical_size >> tblk->sb->s_blocksize_bits);
1639 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1640 
1641 		/* format a maplock for txUpdateMap() to update bPMAP for
1642 		 * alloc of the new page;
1643 		 */
1644 		if (tlck->type & tlckBTROOT)
1645 			return;
1646 		tlck->flag |= tlckUPDATEMAP;
1647 		pxdlock = (struct pxd_lock *) & tlck->lock;
1648 		pxdlock->flag = mlckALLOCPXD;
1649 		pxdlock->pxd = *pxd;
1650 
1651 		pxdlock->index = 1;
1652 
1653 		/* mark page as homeward bound */
1654 		tlck->flag |= tlckWRITEPAGE;
1655 		return;
1656 	}
1657 
1658 	/*
1659 	 *	entry insertion/deletion,
1660 	 *	sibling page link update (old right page before split);
1661 	 */
1662 	if (tlck->type & (tlckENTRY | tlckRELINK)) {
1663 		/* log after-image for logredo(): */
1664 		lrd->type = cpu_to_le16(LOG_REDOPAGE);
1665 		PXDaddress(pxd, mp->index);
1666 		PXDlength(pxd,
1667 			  mp->logical_size >> tblk->sb->s_blocksize_bits);
1668 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1669 
1670 		/* mark page as homeward bound */
1671 		tlck->flag |= tlckWRITEPAGE;
1672 		return;
1673 	}
1674 
1675 	/*
1676 	 *	page deletion: page has been invalidated
1677 	 *	page relocation: source extent
1678 	 *
1679 	 *	a maplock for free of the page has been formatted
1680 	 *	at txLock() time);
1681 	 */
1682 	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
1683 		/* log LOG_NOREDOPAGE of the deleted page for logredo()
1684 		 * to start NoRedoPage filter and to update bmap for free
1685 		 * of the deleted page
1686 		 */
1687 		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1688 		pxdlock = (struct pxd_lock *) & tlck->lock;
1689 		*pxd = pxdlock->pxd;
1690 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1691 
1692 		/* a maplock for txUpdateMap() for free of the page
1693 		 * has been formatted at txLock() time;
1694 		 */
1695 		tlck->flag |= tlckUPDATEMAP;
1696 	}
1697 	return;
1698 }
1699 
1700 /*
1701  *	xtLog()
1702  *
1703  * function:	log xtree tlock and format maplock to update bmap;
1704  */
1705 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1706 	   struct tlock * tlck)
1707 {
1708 	struct inode *ip;
1709 	struct metapage *mp;
1710 	xtpage_t *p;
1711 	struct xtlock *xtlck;
1712 	struct maplock *maplock;
1713 	struct xdlistlock *xadlock;
1714 	struct pxd_lock *pxdlock;
1715 	pxd_t *page_pxd;
1716 	int next, lwm, hwm;
1717 
1718 	ip = tlck->ip;
1719 	mp = tlck->mp;
1720 
1721 	/* initialize as REDOPAGE/NOREDOPAGE record format */
1722 	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
1723 	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
1724 
1725 	page_pxd = &lrd->log.redopage.pxd;
1726 
1727 	if (tlck->type & tlckBTROOT) {
1728 		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1729 		p = &JFS_IP(ip)->i_xtroot;
1730 		if (S_ISDIR(ip->i_mode))
1731 			lrd->log.redopage.type |=
1732 			    cpu_to_le16(LOG_DIR_XTREE);
1733 	} else
1734 		p = (xtpage_t *) mp->data;
1735 	next = le16_to_cpu(p->header.nextindex);
1736 
1737 	xtlck = (struct xtlock *) & tlck->lock;
1738 
1739 	maplock = (struct maplock *) & tlck->lock;
1740 	xadlock = (struct xdlistlock *) maplock;
1741 
1742 	/*
1743 	 *	entry insertion/extension;
1744 	 *	sibling page link update (old right page before split);
1745 	 */
1746 	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
1747 		/* log after-image for logredo():
1748 		 * logredo() will update bmap for alloc of new/extended
1749 		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1750 		 * after-image of XADlist;
1751 		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1752 		 * applying the after-image to the meta-data page.
1753 		 */
1754 		lrd->type = cpu_to_le16(LOG_REDOPAGE);
1755 		PXDaddress(page_pxd, mp->index);
1756 		PXDlength(page_pxd,
1757 			  mp->logical_size >> tblk->sb->s_blocksize_bits);
1758 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1759 
1760 		/* format a maplock for txUpdateMap() to update bPMAP
1761 		 * for alloc of new/extended extents of XAD[lwm:next)
1762 		 * from the page itself;
1763 		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
1764 		 */
1765 		lwm = xtlck->lwm.offset;
1766 		if (lwm == 0)
1767 			lwm = XTPAGEMAXSLOT;
1768 
1769 		if (lwm == next)
1770 			goto out;
1771 		if (lwm > next) {
1772 			jfs_err("xtLog: lwm > next");
1773 			goto out;
1774 		}
1775 		tlck->flag |= tlckUPDATEMAP;
1776 		xadlock->flag = mlckALLOCXADLIST;
1777 		xadlock->count = next - lwm;
1778 		if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1779 			int i;
1780 			pxd_t *pxd;
1781 			/*
1782 			 * Lazy commit may allow xtree to be modified before
1783 			 * txUpdateMap runs.  Copy xad into linelock to
1784 			 * preserve correct data.
1785 			 *
1786 			 * We can fit twice as many pxd's as xads in the lock
1787 			 */
1788 			xadlock->flag = mlckALLOCPXDLIST;
1789 			pxd = xadlock->xdlist = &xtlck->pxdlock;
1790 			for (i = 0; i < xadlock->count; i++) {
1791 				PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
1792 				PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
1793 				p->xad[lwm + i].flag &=
1794 				    ~(XAD_NEW | XAD_EXTENDED);
1795 				pxd++;
1796 			}
1797 		} else {
1798 			/*
1799 			 * xdlist will point into the inode's xtree; ensure
1800 			 * that the transaction is not committed lazily.
1801 			 */
1802 			xadlock->flag = mlckALLOCXADLIST;
1803 			xadlock->xdlist = &p->xad[lwm];
1804 			tblk->xflag &= ~COMMIT_LAZY;
1805 		}
1806 		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
1807 			 tlck->ip, mp, tlck, lwm, xadlock->count);
1808 
1809 		maplock->index = 1;
1810 
1811 	      out:
1812 		/* mark page as homeward bound */
1813 		tlck->flag |= tlckWRITEPAGE;
1814 
1815 		return;
1816 	}
1817 
1818 	/*
1819 	 *	page deletion: file deletion/truncation (ref. xtTruncate())
1820 	 *
1821 	 * (page will be invalidated after log is written and bmap
1822 	 * is updated from the page);
1823 	 */
1824 	if (tlck->type & tlckFREE) {
1825 		/* LOG_NOREDOPAGE log for NoRedoPage filter:
1826 		 * if page free from file delete, NoRedoFile filter from
1827 		 * inode image of zero link count will subsume NoRedoPage
1828 		 * filters for each page;
1829 		 * if page free from file truncation, write NoRedoPage
1830 		 * filter;
1831 		 *
1832 		 * update of block allocation map for the page itself:
1833 		 * if page free from deletion and truncation, LOG_UPDATEMAP
1834 		 * log for the page itself is generated from processing
1835 		 * its parent page xad entries;
1836 		 */
1837 		/* if page free from file truncation, log LOG_NOREDOPAGE
1838 		 * of the deleted page for logredo() to start NoRedoPage
1839 		 * filter for the page;
1840 		 */
1841 		if (tblk->xflag & COMMIT_TRUNCATE) {
1842 			/* write NOREDOPAGE for the page */
1843 			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1844 			PXDaddress(page_pxd, mp->index);
1845 			PXDlength(page_pxd,
1846 				  mp->logical_size >> tblk->sb->
1847 				  s_blocksize_bits);
1848 			lrd->backchain =
1849 			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1850 
1851 			if (tlck->type & tlckBTROOT) {
1852 				/* Empty xtree must be logged */
1853 				lrd->type = cpu_to_le16(LOG_REDOPAGE);
1854 				lrd->backchain =
1855 				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1856 			}
1857 		}
1858 
1859 		/* init LOG_UPDATEMAP of the freed extents
1860 		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
1861 		 * for logredo() to update bmap;
1862 		 */
1863 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1864 		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
1865 		xtlck = (struct xtlock *) & tlck->lock;
1866 		hwm = xtlck->hwm.offset;
1867 		lrd->log.updatemap.nxd =
1868 		    cpu_to_le16(hwm - XTENTRYSTART + 1);
1869 		/* reformat linelock for lmLog() */
1870 		xtlck->header.offset = XTENTRYSTART;
1871 		xtlck->header.length = hwm - XTENTRYSTART + 1;
1872 		xtlck->index = 1;
1873 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1874 
1875 		/* format a maplock for txUpdateMap() to update bmap
1876 		 * to free extents of XAD[XTENTRYSTART:hwm) from the
1877 		 * deleted page itself;
1878 		 */
1879 		tlck->flag |= tlckUPDATEMAP;
1880 		xadlock->count = hwm - XTENTRYSTART + 1;
1881 		if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1882 			int i;
1883 			pxd_t *pxd;
1884 			/*
1885 			 * Lazy commit may allow xtree to be modified before
1886 			 * txUpdateMap runs.  Copy xad into linelock to
1887 			 * preserve correct data.
1888 			 *
1889 			 * We can fit twice as many pxd's as xads in the lock
1890 			 */
1891 			xadlock->flag = mlckFREEPXDLIST;
1892 			pxd = xadlock->xdlist = &xtlck->pxdlock;
1893 			for (i = 0; i < xadlock->count; i++) {
1894 				PXDaddress(pxd,
1895 					addressXAD(&p->xad[XTENTRYSTART + i]));
1896 				PXDlength(pxd,
1897 					lengthXAD(&p->xad[XTENTRYSTART + i]));
1898 				pxd++;
1899 			}
1900 		} else {
1901 			/*
1902 			 * xdlist will point into the inode's xtree; ensure
1903 			 * that the transaction is not committed lazily.
1904 			 */
1905 			xadlock->flag = mlckFREEXADLIST;
1906 			xadlock->xdlist = &p->xad[XTENTRYSTART];
1907 			tblk->xflag &= ~COMMIT_LAZY;
1908 		}
1909 		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
1910 			 tlck->ip, mp, xadlock->count);
1911 
1912 		maplock->index = 1;
1913 
1914 		/* mark page as invalid */
1915 		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1916 		    && !(tlck->type & tlckBTROOT))
1917 			tlck->flag |= tlckFREEPAGE;
1918 		/*
1919 		   else (tblk->xflag & COMMIT_PMAP)
1920 		   ? release the page;
1921 		 */
1922 		return;
1923 	}
1924 
1925 	/*
1926 	 *	page/entry truncation: file truncation (ref. xtTruncate())
1927 	 *
1928 	 *	|----------+------+------+---------------|
1929 	 *		   |      |      |
1930 	 *		   |      |     hwm - hwm before truncation
1931 	 *		   |     next - truncation point
1932 	 *		  lwm - lwm before truncation
1933 	 * header ?
1934 	 */
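	/*
	 * Worked example (hypothetical values): with lwm = 5, next = 8,
	 * twm = 7 and hwm = 12, the cases handled below are:
	 *   XAD[5:8)  - extents allocated/extended by this transaction,
	 *   XAD[7]    - truncated entry whose trailing delta extent is freed,
	 *   XAD[8:12] - entries freed entirely by the truncation.
	 */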
1935 	if (tlck->type & tlckTRUNCATE) {
1936 		pxd_t pxd;	/* truncated extent of xad */
1937 		int twm;
1938 
1939 		/*
1940 		 * For truncation the entire linelock may be used, so it would
1941 		 * be difficult to store xad list in linelock itself.
1942 		 * Therefore, we'll just force transaction to be committed
1943 		 * synchronously, so that xtree pages won't be changed before
1944 		 * txUpdateMap runs.
1945 		 */
1946 		tblk->xflag &= ~COMMIT_LAZY;
1947 		lwm = xtlck->lwm.offset;
1948 		if (lwm == 0)
1949 			lwm = XTPAGEMAXSLOT;
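		/*
		 * lwm == 0 indicates that no slot on this page was newly
		 * allocated by this transaction; forcing lwm to XTPAGEMAXSLOT
		 * keeps lwm >= next, so the XAD[lwm:next) allocation cases
		 * below are skipped.
		 */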
1950 		hwm = xtlck->hwm.offset;
1951 		twm = xtlck->twm.offset;
1952 
1953 		/*
1954 		 *	write log records
1955 		 */
1956 		/* log after-image for logredo():
1957 		 *
1958 		 * logredo() will update bmap for alloc of new/extended
1959 		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1960 		 * after-image of XADlist;
1961 		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1962 		 * applying the after-image to the meta-data page.
1963 		 */
1964 		lrd->type = cpu_to_le16(LOG_REDOPAGE);
1965 		PXDaddress(page_pxd, mp->index);
1966 		PXDlength(page_pxd,
1967 			  mp->logical_size >> tblk->sb->s_blocksize_bits);
1968 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1969 
1970 		/*
1971 		 * truncate entry XAD[twm == next - 1]:
1972 		 */
1973 		if (twm == next - 1) {
1974 			/* init LOG_UPDATEMAP for logredo() to update bmap for
1975 			 * free of truncated delta extent of the truncated
1976 			 * entry XAD[next - 1]:
1977 			 * (xtlck->pxdlock = truncated delta extent);
1978 			 */
1979 			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
1980 			/* assert(pxdlock->type & tlckTRUNCATE); */
1981 			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1982 			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
1983 			lrd->log.updatemap.nxd = cpu_to_le16(1);
1984 			lrd->log.updatemap.pxd = pxdlock->pxd;
1985 			pxd = pxdlock->pxd;	/* save to format maplock */
1986 			lrd->backchain =
1987 			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1988 		}
1989 
1990 		/*
1991 		 * free entries XAD[next:hwm]:
1992 		 */
1993 		if (hwm >= next) {
1994 			/* init LOG_UPDATEMAP of the freed extents
1995 			 * XAD[next:hwm] from the deleted page itself
1996 			 * for logredo() to update bmap;
1997 			 */
1998 			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1999 			lrd->log.updatemap.type =
2000 			    cpu_to_le16(LOG_FREEXADLIST);
2001 			xtlck = (struct xtlock *) & tlck->lock;
2002 			hwm = xtlck->hwm.offset;
2003 			lrd->log.updatemap.nxd =
2004 			    cpu_to_le16(hwm - next + 1);
2005 			/* reformat linelock for lmLog() */
2006 			xtlck->header.offset = next;
2007 			xtlck->header.length = hwm - next + 1;
2008 			xtlck->index = 1;
2009 			lrd->backchain =
2010 			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
2011 		}
2012 
2013 		/*
2014 		 *	format maplock(s) for txUpdateMap() to update bmap
2015 		 */
2016 		maplock->index = 0;
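		/* index counts the maplocks formatted below: at most one each
		 * for the allocated range, the truncated entry and the freed
		 * range.
		 */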
2017 
2018 		/*
2019 		 * allocate entries XAD[lwm:next):
2020 		 */
2021 		if (lwm < next) {
2022 			/* format a maplock for txUpdateMap() to update bPMAP
2023 			 * for alloc of new/extended extents of XAD[lwm:next)
2024 			 * from the page itself;
2025 			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
2026 			 */
2027 			tlck->flag |= tlckUPDATEMAP;
2028 			xadlock->flag = mlckALLOCXADLIST;
2029 			xadlock->count = next - lwm;
2030 			xadlock->xdlist = &p->xad[lwm];
2031 
2032 			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
2033 				 tlck->ip, mp, xadlock->count, lwm, next);
2034 			maplock->index++;
2035 			xadlock++;
2036 		}
2037 
2038 		/*
2039 		 * truncate entry XAD[twm == next - 1]:
2040 		 */
2041 		if (twm == next - 1) {
2042 			/* format a maplock for txUpdateMap() to update bmap
2043 			 * to free truncated delta extent of the truncated
2044 			 * entry XAD[next - 1];
2045 			 * (xtlck->pxdlock = truncated delta extent);
2046 			 */
2047 			tlck->flag |= tlckUPDATEMAP;
2048 			pxdlock = (struct pxd_lock *) xadlock;
2049 			pxdlock->flag = mlckFREEPXD;
2050 			pxdlock->count = 1;
2051 			pxdlock->pxd = pxd;
2052 
2053 			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
2054 				 ip, mp, pxdlock->count, hwm);
2055 			maplock->index++;
2056 			xadlock++;
2057 		}
2058 
2059 		/*
2060 		 * free entries XAD[next:hwm]:
2061 		 */
2062 		if (hwm >= next) {
2063 			/* format a maplock for txUpdateMap() to update bmap
2064 			 * to free extents of XAD[next:hwm] from the deleted
2065 			 * page itself;
2066 			 */
2067 			tlck->flag |= tlckUPDATEMAP;
2068 			xadlock->flag = mlckFREEXADLIST;
2069 			xadlock->count = hwm - next + 1;
2070 			xadlock->xdlist = &p->xad[next];
2071 
2072 			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
2073 				 tlck->ip, mp, xadlock->count, next, hwm);
2074 			maplock->index++;
2075 		}
2076 
2077 		/* mark page as homeward bound */
2078 		tlck->flag |= tlckWRITEPAGE;
2079 	}
2080 	return;
2081 }
2082 
2083 /*
2084  *	mapLog()
2085  *
2086  * function:	log from maplock of freed data extents;
2087  */
2088 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2089 		   struct tlock * tlck)
2090 {
2091 	struct pxd_lock *pxdlock;
2092 	int i, nlock;
2093 	pxd_t *pxd;
2094 
2095 	/*
2096 	 *	page relocation: free the source page extent
2097 	 *
2098 	 * a maplock for txUpdateMap() for free of the page
2099 	 * has been formatted at txLock() time saving the src
2100 	 * relocated page address;
2101 	 */
2102 	if (tlck->type & tlckRELOCATE) {
2103 		/* log LOG_NOREDOPAGE of the old relocated page
2104 		 * for logredo() to start NoRedoPage filter;
2105 		 */
2106 		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2107 		pxdlock = (struct pxd_lock *) & tlck->lock;
2108 		pxd = &lrd->log.redopage.pxd;
2109 		*pxd = pxdlock->pxd;
2110 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2111 
2112 		/* (N.B. currently, logredo() does NOT update bmap
2113 		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
2114 		 * if page free from relocation, LOG_UPDATEMAP log is
2115 		 * specifically generated now for logredo()
2116 		 * to update bmap for free of src relocated page;
2117 		 * (new flag LOG_RELOCATE may be introduced which will
2118 		 * inform logredo() to start NORedoPage filter and also
2119 		 * update block allocation map at the same time, thus
2120 		 * avoiding an extra log write);
2121 		 */
2122 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2123 		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2124 		lrd->log.updatemap.nxd = cpu_to_le16(1);
2125 		lrd->log.updatemap.pxd = pxdlock->pxd;
2126 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2127 
2128 		/* a maplock for txUpdateMap() for free of the page
2129 		 * has been formatted at txLock() time;
2130 		 */
2131 		tlck->flag |= tlckUPDATEMAP;
2132 		return;
2133 	}
2134 	/*
2135 	 * Otherwise it's not a relocate request
2136 	 */
2139 	else {
2140 		/* log LOG_UPDATEMAP for logredo() to update bmap for
2141 		 * free of truncated/relocated delta extent of the data;
2142 		 * e.g.: external EA extent, relocated/truncated extent
2143 		 * from xtTailgate();
2144 		 */
2145 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2146 		pxdlock = (struct pxd_lock *) & tlck->lock;
2147 		nlock = pxdlock->index;
2148 		for (i = 0; i < nlock; i++, pxdlock++) {
2149 			if (pxdlock->flag & mlckALLOCPXD)
2150 				lrd->log.updatemap.type =
2151 				    cpu_to_le16(LOG_ALLOCPXD);
2152 			else
2153 				lrd->log.updatemap.type =
2154 				    cpu_to_le16(LOG_FREEPXD);
2155 			lrd->log.updatemap.nxd = cpu_to_le16(1);
2156 			lrd->log.updatemap.pxd = pxdlock->pxd;
2157 			lrd->backchain =
2158 			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2159 			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2160 				 (ulong) addressPXD(&pxdlock->pxd),
2161 				 lengthPXD(&pxdlock->pxd));
2162 		}
2163 
2164 		/* update bmap */
2165 		tlck->flag |= tlckUPDATEMAP;
2166 	}
2167 }
2168 
2169 /*
2170  *	txEA()
2171  *
2172  * function:	acquire maplock for EA/ACL extents or
2173  *		set COMMIT_INLINE flag;
2174  */
2175 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2176 {
2177 	struct tlock *tlck = NULL;
2178 	struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2179 
2180 	/*
2181 	 * format maplock for alloc of new EA extent
2182 	 */
2183 	if (newea) {
2184 		/* Since the newea could be a completely zeroed entry, we need to
2185 		 * check for the two flags which indicate we should actually
2186 		 * commit new EA data
2187 		 */
2188 		if (newea->flag & DXD_EXTENT) {
2189 			tlck = txMaplock(tid, ip, tlckMAP);
2190 			maplock = (struct pxd_lock *) & tlck->lock;
2191 			pxdlock = (struct pxd_lock *) maplock;
2192 			pxdlock->flag = mlckALLOCPXD;
2193 			PXDaddress(&pxdlock->pxd, addressDXD(newea));
2194 			PXDlength(&pxdlock->pxd, lengthDXD(newea));
2195 			pxdlock++;
2196 			maplock->index = 1;
2197 		} else if (newea->flag & DXD_INLINE) {
2198 			tlck = NULL;
2199 
2200 			set_cflag(COMMIT_Inlineea, ip);
2201 		}
2202 	}
2203 
2204 	/*
2205 	 * format maplock for free of old EA extent
2206 	 */
2207 	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2208 		if (tlck == NULL) {
2209 			tlck = txMaplock(tid, ip, tlckMAP);
2210 			maplock = (struct pxd_lock *) & tlck->lock;
2211 			pxdlock = (struct pxd_lock *) maplock;
2212 			maplock->index = 0;
2213 		}
2214 		pxdlock->flag = mlckFREEPXD;
2215 		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2216 		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
2217 		maplock->index++;
2218 	}
2219 }
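
/*
 * Illustrative use of txEA() (call site assumed, e.g. the EA write path in
 * xattr.c): the caller logs the old and new extent descriptors and then
 * installs the new descriptor in the in-memory inode:
 *
 *	txEA(tid, inode, &JFS_IP(inode)->ea, &new_ea);
 *	JFS_IP(inode)->ea = new_ea;
 */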
2220 
2221 /*
2222  *	txForce()
2223  *
2224  * function: synchronously write pages locked by transaction
2225  *	     after txLog() but before txUpdateMap();
2226  */
2227 static void txForce(struct tblock * tblk)
2228 {
2229 	struct tlock *tlck;
2230 	lid_t lid, next;
2231 	struct metapage *mp;
2232 
2233 	/*
2234 	 * reverse the order of transaction tlocks in
2235 	 * careful update order of address index pages
2236 	 * (right to left, bottom up)
2237 	 */
2238 	tlck = lid_to_tlock(tblk->next);
2239 	lid = tlck->next;
2240 	tlck->next = 0;
2241 	while (lid) {
2242 		tlck = lid_to_tlock(lid);
2243 		next = tlck->next;
2244 		tlck->next = tblk->next;
2245 		tblk->next = lid;
2246 		lid = next;
2247 	}
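	/*
	 * Illustration: if the transaction's tlock chain was A -> B -> C on
	 * entry, it is now C -> B -> A, so the pages below are forced in
	 * reverse (bottom-up, right-to-left) order.
	 */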
2248 
2249 	/*
2250 	 * synchronously write the page, and
2251 	 * hold the page for txUpdateMap();
2252 	 */
2253 	for (lid = tblk->next; lid; lid = next) {
2254 		tlck = lid_to_tlock(lid);
2255 		next = tlck->next;
2256 
2257 		if ((mp = tlck->mp) != NULL &&
2258 		    (tlck->type & tlckBTROOT) == 0) {
2259 			assert(mp->xflag & COMMIT_PAGE);
2260 
2261 			if (tlck->flag & tlckWRITEPAGE) {
2262 				tlck->flag &= ~tlckWRITEPAGE;
2263 
2264 				/* do not release page to freelist */
2265 				force_metapage(mp);
2266 #if 0
2267 				/*
2268 				 * The "right" thing to do here is to
2269 				 * synchronously write the metadata.
2270 				 * With the current implementation this
2271 				 * is hard since write_metapage requires
2272 				 * us to kunmap & remap the page.  If we
2273 				 * have tlocks pointing into the metadata
2274 				 * pages, we don't want to do this.  I think
2275 				 * we can get by with synchronously writing
2276 				 * the pages when they are released.
2277 				 */
2278 				assert(mp->nohomeok);
2279 				set_bit(META_dirty, &mp->flag);
2280 				set_bit(META_sync, &mp->flag);
2281 #endif
2282 			}
2283 		}
2284 	}
2285 }
2286 
2287 /*
2288  *	txUpdateMap()
2289  *
2290  * function:	update persistent allocation map (and working map
2291  *		if appropriate);
2292  *
2293  * parameter:
2294  */
2295 static void txUpdateMap(struct tblock * tblk)
2296 {
2297 	struct inode *ip;
2298 	struct inode *ipimap;
2299 	lid_t lid;
2300 	struct tlock *tlck;
2301 	struct maplock *maplock;
2302 	struct pxd_lock pxdlock;
2303 	int maptype;
2304 	int k, nlock;
2305 	struct metapage *mp = NULL;
2306 
2307 	ipimap = JFS_SBI(tblk->sb)->ipimap;
2308 
2309 	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
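	/*
	 * COMMIT_PMAP: update only the persistent map; the blocks stay
	 * allocated in the working map until the last reference to the object
	 * is released (see the free path below).  Otherwise update both the
	 * persistent and working maps.
	 */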
2310 
2311 
2312 	/*
2313 	 *	update block allocation map
2314 	 *
2315 	 * update allocation state in pmap (and wmap) and
2316 	 * update lsn of the pmap page;
2317 	 */
2318 	/*
2319 	 * scan each tlock/page of transaction for block allocation/free:
2320 	 *
2321 	 * for each tlock/page of transaction, update map.
2322 	 *  ? are there tlocks for pmap and pwmap at the same time ?
2323 	 */
2324 	for (lid = tblk->next; lid; lid = tlck->next) {
2325 		tlck = lid_to_tlock(lid);
2326 
2327 		if ((tlck->flag & tlckUPDATEMAP) == 0)
2328 			continue;
2329 
2330 		if (tlck->flag & tlckFREEPAGE) {
2331 			/*
2332 			 * Another thread may attempt to reuse freed space
2333 			 * immediately, so we want to get rid of the metapage
2334 			 * before anyone else has a chance to get it.
2335 			 * Lock metapage, update maps, then invalidate
2336 			 * the metapage.
2337 			 */
2338 			mp = tlck->mp;
2339 			ASSERT(mp->xflag & COMMIT_PAGE);
2340 			grab_metapage(mp);
2341 		}
2342 
2343 		/*
2344 		 * extent list:
2345 		 * . in-line PXD list:
2346 		 * . out-of-line XAD list:
2347 		 */
2348 		maplock = (struct maplock *) & tlck->lock;
2349 		nlock = maplock->index;
2350 
2351 		for (k = 0; k < nlock; k++, maplock++) {
2352 			/*
2353 			 * allocate blocks in persistent map:
2354 			 *
2355 			 * blocks have been allocated from wmap at alloc time;
2356 			 */
2357 			if (maplock->flag & mlckALLOC) {
2358 				txAllocPMap(ipimap, maplock, tblk);
2359 			}
2360 			/*
2361 			 * free blocks in persistent and working map:
2362 			 * blocks will be freed in pmap and then in wmap;
2363 			 *
2364 			 * ? tblock specifies the PMAP/PWMAP based upon
2365 			 * transaction
2366 			 *
2367 			 * free blocks in persistent map:
2368 			 * blocks will be freed from wmap at last reference
2369 			 * release of the object for regular files;
2370 			 *
2371 			 * Always free blocks from both persistent & working
2372 			 * maps for directories
2373 			 */
2374 			else {	/* (maplock->flag & mlckFREE) */
2375 
2376 				if (tlck->flag & tlckDIRECTORY)
2377 					txFreeMap(ipimap, maplock,
2378 						  tblk, COMMIT_PWMAP);
2379 				else
2380 					txFreeMap(ipimap, maplock,
2381 						  tblk, maptype);
2382 			}
2383 		}
2384 		if (tlck->flag & tlckFREEPAGE) {
2385 			if (!(tblk->flag & tblkGC_LAZY)) {
2386 				/* This is equivalent to txRelease */
2387 				ASSERT(mp->lid == lid);
2388 				tlck->mp->lid = 0;
2389 			}
2390 			assert(mp->nohomeok == 1);
2391 			metapage_homeok(mp);
2392 			discard_metapage(mp);
2393 			tlck->mp = NULL;
2394 		}
2395 	}
2396 	/*
2397 	 *	update inode allocation map
2398 	 *
2399 	 * update allocation state in pmap and
2400 	 * update lsn of the pmap page;
2401 	 * update in-memory inode flag/state
2402 	 *
2403 	 * unlock mapper/write lock
2404 	 */
2405 	if (tblk->xflag & COMMIT_CREATE) {
2406 		diUpdatePMap(ipimap, tblk->ino, false, tblk);
2407 		/* update persistent block allocation map
2408 		 * for the allocation of inode extent;
2409 		 */
2410 		pxdlock.flag = mlckALLOCPXD;
2411 		pxdlock.pxd = tblk->u.ixpxd;
2412 		pxdlock.index = 1;
2413 		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2414 	} else if (tblk->xflag & COMMIT_DELETE) {
2415 		ip = tblk->u.ip;
2416 		diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2417 		iput(ip);
2418 	}
2419 }
2420 
2421 /*
2422  *	txAllocPMap()
2423  *
2424  * function: allocate from persistent map;
2425  *
2426  * parameter:
2427  *	ipbmap	-
2428  *	maplock	-
2429  *		xad list:
2430  *		pxd:
2431  *
2432  *	maptype -
2433  *		allocate from persistent map;
2434  *		free from persistent map;
2435  *		(e.g., tmp file - free from working map at release
2436  *		 of last reference);
2437  *		free from persistent and working map;
2438  *
2439  *	lsn	- log sequence number;
2440  */
2441 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2442 			struct tblock * tblk)
2443 {
2444 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2445 	struct xdlistlock *xadlistlock;
2446 	xad_t *xad;
2447 	s64 xaddr;
2448 	int xlen;
2449 	struct pxd_lock *pxdlock;
2450 	struct xdlistlock *pxdlistlock;
2451 	pxd_t *pxd;
2452 	int n;
2453 
2454 	/*
2455 	 * allocate from persistent map;
2456 	 */
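	/* Only extents still marked XAD_NEW or XAD_EXTENDED need a pmap
	 * update here; the other entries were already made persistent by an
	 * earlier committed transaction (the flags are cleared below once the
	 * pmap has been updated).
	 */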
2457 	if (maplock->flag & mlckALLOCXADLIST) {
2458 		xadlistlock = (struct xdlistlock *) maplock;
2459 		xad = xadlistlock->xdlist;
2460 		for (n = 0; n < xadlistlock->count; n++, xad++) {
2461 			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2462 				xaddr = addressXAD(xad);
2463 				xlen = lengthXAD(xad);
2464 				dbUpdatePMap(ipbmap, false, xaddr,
2465 					     (s64) xlen, tblk);
2466 				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2467 				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2468 					 (ulong) xaddr, xlen);
2469 			}
2470 		}
2471 	} else if (maplock->flag & mlckALLOCPXD) {
2472 		pxdlock = (struct pxd_lock *) maplock;
2473 		xaddr = addressPXD(&pxdlock->pxd);
2474 		xlen = lengthPXD(&pxdlock->pxd);
2475 		dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2476 		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2477 	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */
2478 
2479 		pxdlistlock = (struct xdlistlock *) maplock;
2480 		pxd = pxdlistlock->xdlist;
2481 		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2482 			xaddr = addressPXD(pxd);
2483 			xlen = lengthPXD(pxd);
2484 			dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
2485 				     tblk);
2486 			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2487 				 (ulong) xaddr, xlen);
2488 		}
2489 	}
2490 }
2491 
2492 /*
2493  *	txFreeMap()
2494  *
2495  * function:	free from persistent and/or working map;
2496  *
2497  * todo: optimization
2498  */
2499 void txFreeMap(struct inode *ip,
2500 	       struct maplock * maplock, struct tblock * tblk, int maptype)
2501 {
2502 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2503 	struct xdlistlock *xadlistlock;
2504 	xad_t *xad;
2505 	s64 xaddr;
2506 	int xlen;
2507 	struct pxd_lock *pxdlock;
2508 	struct xdlistlock *pxdlistlock;
2509 	pxd_t *pxd;
2510 	int n;
2511 
2512 	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2513 		 tblk, maplock, maptype);
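
	/*
	 * For illustration (hypothetical extent): freeing a 4-block PXD at
	 * block 1000 under COMMIT_PWMAP results in
	 *	dbUpdatePMap(ipbmap, true, 1000, 4, tblk)	- persistent map
	 *	dbFree(ip, 1000, 4)				- working map
	 * under COMMIT_PMAP only the persistent map update is done here.
	 */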
2514 
2515 	/*
2516 	 * free from persistent map;
2517 	 */
2518 	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2519 		if (maplock->flag & mlckFREEXADLIST) {
2520 			xadlistlock = (struct xdlistlock *) maplock;
2521 			xad = xadlistlock->xdlist;
2522 			for (n = 0; n < xadlistlock->count; n++, xad++) {
2523 				if (!(xad->flag & XAD_NEW)) {
2524 					xaddr = addressXAD(xad);
2525 					xlen = lengthXAD(xad);
2526 					dbUpdatePMap(ipbmap, true, xaddr,
2527 						     (s64) xlen, tblk);
2528 					jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2529 						 (ulong) xaddr, xlen);
2530 				}
2531 			}
2532 		} else if (maplock->flag & mlckFREEPXD) {
2533 			pxdlock = (struct pxd_lock *) maplock;
2534 			xaddr = addressPXD(&pxdlock->pxd);
2535 			xlen = lengthPXD(&pxdlock->pxd);
2536 			dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
2537 				     tblk);
2538 			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2539 				 (ulong) xaddr, xlen);
2540 		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
2541 
2542 			pxdlistlock = (struct xdlistlock *) maplock;
2543 			pxd = pxdlistlock->xdlist;
2544 			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2545 				xaddr = addressPXD(pxd);
2546 				xlen = lengthPXD(pxd);
2547 				dbUpdatePMap(ipbmap, true, xaddr,
2548 					     (s64) xlen, tblk);
2549 				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2550 					 (ulong) xaddr, xlen);
2551 			}
2552 		}
2553 	}
2554 
2555 	/*
2556 	 * free from working map;
2557 	 */
2558 	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2559 		if (maplock->flag & mlckFREEXADLIST) {
2560 			xadlistlock = (struct xdlistlock *) maplock;
2561 			xad = xadlistlock->xdlist;
2562 			for (n = 0; n < xadlistlock->count; n++, xad++) {
2563 				xaddr = addressXAD(xad);
2564 				xlen = lengthXAD(xad);
2565 				dbFree(ip, xaddr, (s64) xlen);
2566 				xad->flag = 0;
2567 				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2568 					 (ulong) xaddr, xlen);
2569 			}
2570 		} else if (maplock->flag & mlckFREEPXD) {
2571 			pxdlock = (struct pxd_lock *) maplock;
2572 			xaddr = addressPXD(&pxdlock->pxd);
2573 			xlen = lengthPXD(&pxdlock->pxd);
2574 			dbFree(ip, xaddr, (s64) xlen);
2575 			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2576 				 (ulong) xaddr, xlen);
2577 		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
2578 
2579 			pxdlistlock = (struct xdlistlock *) maplock;
2580 			pxd = pxdlistlock->xdlist;
2581 			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2582 				xaddr = addressPXD(pxd);
2583 				xlen = lengthPXD(pxd);
2584 				dbFree(ip, xaddr, (s64) xlen);
2585 				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2586 					 (ulong) xaddr, xlen);
2587 			}
2588 		}
2589 	}
2590 }
2591 
2592 /*
2593  *	txFreelock()
2594  *
2595  * function:	remove tlock from inode anonymous locklist
2596  */
2597 void txFreelock(struct inode *ip)
2598 {
2599 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2600 	struct tlock *xtlck, *tlck;
2601 	lid_t xlid = 0, lid;
2602 
2603 	if (!jfs_ip->atlhead)
2604 		return;
2605 
2606 	TXN_LOCK();
2607 	xtlck = (struct tlock *) &jfs_ip->atlhead;
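	/*
	 * &jfs_ip->atlhead is treated as a dummy head tlock here so that
	 * xtlck->next aliases atlhead; this relies on 'next' being the first
	 * member of struct tlock (jfs_txnmgr.h).
	 */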
2608 
2609 	while ((lid = xtlck->next) != 0) {
2610 		tlck = lid_to_tlock(lid);
2611 		if (tlck->flag & tlckFREELOCK) {
2612 			xtlck->next = tlck->next;
2613 			txLockFree(lid);
2614 		} else {
2615 			xtlck = tlck;
2616 			xlid = lid;
2617 		}
2618 	}
2619 
2620 	if (jfs_ip->atlhead)
2621 		jfs_ip->atltail = xlid;
2622 	else {
2623 		jfs_ip->atltail = 0;
2624 		/*
2625 		 * If inode was on anon_list, remove it
2626 		 */
2627 		list_del_init(&jfs_ip->anon_inode_list);
2628 	}
2629 	TXN_UNLOCK();
2630 }
2631 
2632 /*
2633  *	txAbort()
2634  *
2635  * function: abort tx before commit;
2636  *
2637  * frees line-locks and segment locks for all
2638  * segments in comdata structure.
2639  * Optionally sets state of file-system to FM_DIRTY in super-block.
2640  * The log age of in-memory page-frames held by the caller
2641  * is reset to 0 (to avoid logwrap).
2642  */
2643 void txAbort(tid_t tid, int dirty)
2644 {
2645 	lid_t lid, next;
2646 	struct metapage *mp;
2647 	struct tblock *tblk = tid_to_tblock(tid);
2648 	struct tlock *tlck;
2649 
2650 	/*
2651 	 * free tlocks of the transaction
2652 	 */
2653 	for (lid = tblk->next; lid; lid = next) {
2654 		tlck = lid_to_tlock(lid);
2655 		next = tlck->next;
2656 		mp = tlck->mp;
2657 		JFS_IP(tlck->ip)->xtlid = 0;
2658 
2659 		if (mp) {
2660 			mp->lid = 0;
2661 
2662 			/*
2663 			 * reset lsn of page to avoid logwrap:
2664 			 *
2665 			 * (page may have been previously committed by another
2666 			 * transaction(s) but has not been paged, i.e.,
2667 			 * it may be on logsync list even though it has not
2668 			 * been logged for the current tx.)
2669 			 */
2670 			if (mp->xflag & COMMIT_PAGE && mp->lsn)
2671 				LogSyncRelease(mp);
2672 		}
2673 		/* insert tlock at head of freelist */
2674 		TXN_LOCK();
2675 		txLockFree(lid);
2676 		TXN_UNLOCK();
2677 	}
2678 
2679 	/* caller will free the transaction block */
2680 
2681 	tblk->next = tblk->last = 0;
2682 
2683 	/*
2684 	 * mark filesystem dirty
2685 	 */
2686 	if (dirty)
2687 		jfs_error(tblk->sb, "\n");
2688 
2689 	return;
2690 }
2691 
2692 /*
2693  *	txLazyCommit(void)
2694  *
2695  *	All transactions except those changing ipimap (COMMIT_FORCE) are
2696  *	processed by this routine.  This ensures that the inode and block
2697  *	allocation maps are updated in order.  For synchronous transactions,
2698  *	let the user thread finish processing after txUpdateMap() is called.
2699  */
2700 static void txLazyCommit(struct tblock * tblk)
2701 {
2702 	struct jfs_log *log;
2703 
2704 	while (((tblk->flag & tblkGC_READY) == 0) &&
2705 	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2706 		/* We must have gotten ahead of the user thread
2707 		 */
2708 		jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2709 		yield();
2710 	}
2711 
2712 	jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2713 
2714 	txUpdateMap(tblk);
2715 
2716 	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2717 
2718 	spin_lock_irq(&log->gclock);	// LOGGC_LOCK
2719 
2720 	tblk->flag |= tblkGC_COMMITTED;
2721 
2722 	if (tblk->flag & tblkGC_READY)
2723 		log->gcrtc--;
2724 
2725 	wake_up_all(&tblk->gcwait);	// LOGGC_WAKEUP
2726 
2727 	/*
2728 	 * Can't release log->gclock until we've tested tblk->flag
2729 	 */
2730 	if (tblk->flag & tblkGC_LAZY) {
2731 		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
2732 		txUnlock(tblk);
2733 		tblk->flag &= ~tblkGC_LAZY;
2734 		txEnd(tblk - TxBlock);	/* Convert back to tid */
2735 	} else
2736 		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
2737 
2738 	jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2739 }
2740 
2741 /*
2742  *	jfs_lazycommit(void)
2743  *
2744  *	To be run as a kernel daemon.  If lbmIODone is called in an interrupt
2745  *	context, or where blocking is not wanted, this routine will process
2746  *	committed transactions from the unlock queue.
2747  */
2748 int jfs_lazycommit(void *arg)
2749 {
2750 	int WorkDone;
2751 	struct tblock *tblk;
2752 	unsigned long flags;
2753 	struct jfs_sb_info *sbi;
2754 
2755 	do {
2756 		LAZY_LOCK(flags);
2757 		jfs_commit_thread_waking = 0;	/* OK to wake another thread */
2758 		while (!list_empty(&TxAnchor.unlock_queue)) {
2759 			WorkDone = 0;
2760 			list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2761 					    cqueue) {
2762 
2763 				sbi = JFS_SBI(tblk->sb);
2764 				/*
2765 				 * For each volume, the transactions must be
2766 				 * handled in order.  If another commit thread
2767 				 * is handling a tblk for this superblock,
2768 				 * skip it
2769 				 */
2770 				if (sbi->commit_state & IN_LAZYCOMMIT)
2771 					continue;
2772 
2773 				sbi->commit_state |= IN_LAZYCOMMIT;
2774 				WorkDone = 1;
2775 
2776 				/*
2777 				 * Remove transaction from queue
2778 				 */
2779 				list_del(&tblk->cqueue);
2780 
2781 				LAZY_UNLOCK(flags);
2782 				txLazyCommit(tblk);
2783 				LAZY_LOCK(flags);
2784 
2785 				sbi->commit_state &= ~IN_LAZYCOMMIT;
2786 				/*
2787 				 * Don't continue in the for loop.  (We can't
2788 				 * anyway, it's unsafe!)  We want to go back to
2789 				 * the beginning of the list.
2790 				 */
2791 				break;
2792 			}
2793 
2794 			/* If there was nothing to do, don't continue */
2795 			if (!WorkDone)
2796 				break;
2797 		}
2798 		/* In case a wakeup came while all threads were active */
2799 		jfs_commit_thread_waking = 0;
2800 
2801 		if (freezing(current)) {
2802 			LAZY_UNLOCK(flags);
2803 			try_to_freeze();
2804 		} else {
2805 			DECLARE_WAITQUEUE(wq, current);
2806 
2807 			add_wait_queue(&jfs_commit_thread_wait, &wq);
2808 			set_current_state(TASK_INTERRUPTIBLE);
2809 			LAZY_UNLOCK(flags);
2810 			schedule();
2811 			remove_wait_queue(&jfs_commit_thread_wait, &wq);
2812 		}
2813 	} while (!kthread_should_stop());
2814 
2815 	if (!list_empty(&TxAnchor.unlock_queue))
2816 		jfs_err("jfs_lazycommit being killed w/pending transactions!");
2817 	else
2818 		jfs_info("jfs_lazycommit being killed");
2819 	return 0;
2820 }
2821 
2822 void txLazyUnlock(struct tblock * tblk)
2823 {
2824 	unsigned long flags;
2825 
2826 	LAZY_LOCK(flags);
2827 
2828 	list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2829 	/*
2830 	 * Don't wake up a commit thread if there is already one servicing
2831 	 * this superblock, or if the last one we woke up hasn't started yet.
2832 	 */
2833 	if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2834 	    !jfs_commit_thread_waking) {
2835 		jfs_commit_thread_waking = 1;
2836 		wake_up(&jfs_commit_thread_wait);
2837 	}
2838 	LAZY_UNLOCK(flags);
2839 }
2840 
2841 static void LogSyncRelease(struct metapage * mp)
2842 {
2843 	struct jfs_log *log = mp->log;
2844 
2845 	assert(mp->nohomeok);
2846 	assert(log);
2847 	metapage_homeok(mp);
2848 }
2849 
2850 /*
2851  *	txQuiesce
2852  *
2853  *	Block all new transactions and push anonymous transactions to
2854  *	completion
2855  *
2856  *	This does almost the same thing as jfs_sync below.  We don't
2857  *	worry about deadlocking when jfs_tlocks_low is set, since we would
2858  *	expect jfs_sync to get us out of that jam.
2859  */
2860 void txQuiesce(struct super_block *sb)
2861 {
2862 	struct inode *ip;
2863 	struct jfs_inode_info *jfs_ip;
2864 	struct jfs_log *log = JFS_SBI(sb)->log;
2865 	tid_t tid;
2866 
2867 	set_bit(log_QUIESCE, &log->flag);
2868 
2869 	TXN_LOCK();
2870 restart:
2871 	while (!list_empty(&TxAnchor.anon_list)) {
2872 		jfs_ip = list_entry(TxAnchor.anon_list.next,
2873 				    struct jfs_inode_info,
2874 				    anon_inode_list);
2875 		ip = &jfs_ip->vfs_inode;
2876 
2877 		/*
2878 		 * inode will be removed from anonymous list
2879 		 * when it is committed
2880 		 */
2881 		TXN_UNLOCK();
2882 		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2883 		mutex_lock(&jfs_ip->commit_mutex);
2884 		txCommit(tid, 1, &ip, 0);
2885 		txEnd(tid);
2886 		mutex_unlock(&jfs_ip->commit_mutex);
2887 		/*
2888 		 * Just to be safe.  I don't know how
2889 		 * long we can run without blocking
2890 		 */
2891 		cond_resched();
2892 		TXN_LOCK();
2893 	}
2894 
2895 	/*
2896 	 * If jfs_sync is running in parallel, there could be some inodes
2897 	 * on anon_list2.  Let's check.
2898 	 */
2899 	if (!list_empty(&TxAnchor.anon_list2)) {
2900 		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2901 		goto restart;
2902 	}
2903 	TXN_UNLOCK();
2904 
2905 	/*
2906 	 * We may need to kick off the group commit
2907 	 */
2908 	jfs_flush_journal(log, 0);
2909 }
2910 
2911 /*
2912  * txResume()
2913  *
2914  * Allows transactions to start again following txQuiesce
2915  */
2916 void txResume(struct super_block *sb)
2917 {
2918 	struct jfs_log *log = JFS_SBI(sb)->log;
2919 
2920 	clear_bit(log_QUIESCE, &log->flag);
2921 	TXN_WAKEUP(&log->syncwait);
2922 }
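
/*
 * Illustrative pairing (call sites assumed, e.g. the filesystem freeze/thaw
 * path in super.c):
 *
 *	txQuiesce(sb);		- block new transactions, push anonymous ones
 *	...on-disk image is now stable...
 *	txResume(sb);		- let blocked writers continue
 */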
2923 
2924 /*
2925  *	jfs_sync(void)
2926  *
2927  *	To be run as a kernel daemon.  This is awakened when tlocks run low.
2928  *	We write any inodes that have anonymous tlocks so they will become
2929  *	available.
2930  */
2931 int jfs_sync(void *arg)
2932 {
2933 	struct inode *ip;
2934 	struct jfs_inode_info *jfs_ip;
2935 	tid_t tid;
2936 
2937 	do {
2938 		/*
2939 		 * write each inode on the anonymous inode list
2940 		 */
2941 		TXN_LOCK();
2942 		while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2943 			jfs_ip = list_entry(TxAnchor.anon_list.next,
2944 					    struct jfs_inode_info,
2945 					    anon_inode_list);
2946 			ip = &jfs_ip->vfs_inode;
2947 
2948 			if (! igrab(ip)) {
2949 				/*
2950 				 * Inode is being freed
2951 				 */
2952 				list_del_init(&jfs_ip->anon_inode_list);
2953 			} else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2954 				/*
2955 				 * inode will be removed from anonymous list
2956 				 * when it is committed
2957 				 */
2958 				TXN_UNLOCK();
2959 				tid = txBegin(ip->i_sb, COMMIT_INODE);
2960 				txCommit(tid, 1, &ip, 0);
2961 				txEnd(tid);
2962 				mutex_unlock(&jfs_ip->commit_mutex);
2963 
2964 				iput(ip);
2965 				/*
2966 				 * Just to be safe.  I don't know how
2967 				 * long we can run without blocking
2968 				 */
2969 				cond_resched();
2970 				TXN_LOCK();
2971 			} else {
2972 				/* We can't get the commit mutex.  It may
2973 				 * be held by a thread waiting for tlock's
2974 				 * so let's not block here.  Save it to
2975 				 * put back on the anon_list.
2976 				 */
2977 
2978 				/* Move from anon_list to anon_list2 */
2979 				list_move(&jfs_ip->anon_inode_list,
2980 					  &TxAnchor.anon_list2);
2981 
2982 				TXN_UNLOCK();
2983 				iput(ip);
2984 				TXN_LOCK();
2985 			}
2986 		}
2987 		/* Add anon_list2 back to anon_list */
2988 		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2989 
2990 		if (freezing(current)) {
2991 			TXN_UNLOCK();
2992 			try_to_freeze();
2993 		} else {
2994 			set_current_state(TASK_INTERRUPTIBLE);
2995 			TXN_UNLOCK();
2996 			schedule();
2997 		}
2998 	} while (!kthread_should_stop());
2999 
3000 	jfs_info("jfs_sync being killed");
3001 	return 0;
3002 }
3003 
3004 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
3005 int jfs_txanchor_proc_show(struct seq_file *m, void *v)
3006 {
3007 	char *freewait;
3008 	char *freelockwait;
3009 	char *lowlockwait;
3010 
3011 	freewait =
3012 	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
3013 	freelockwait =
3014 	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
3015 	lowlockwait =
3016 	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
3017 
3018 	seq_printf(m,
3019 		       "JFS TxAnchor\n"
3020 		       "============\n"
3021 		       "freetid = %d\n"
3022 		       "freewait = %s\n"
3023 		       "freelock = %d\n"
3024 		       "freelockwait = %s\n"
3025 		       "lowlockwait = %s\n"
3026 		       "tlocksInUse = %d\n"
3027 		       "jfs_tlocks_low = %d\n"
3028 		       "unlock_queue is %sempty\n",
3029 		       TxAnchor.freetid,
3030 		       freewait,
3031 		       TxAnchor.freelock,
3032 		       freelockwait,
3033 		       lowlockwait,
3034 		       TxAnchor.tlocksInUse,
3035 		       jfs_tlocks_low,
3036 		       list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
3037 	return 0;
3038 }
3039 #endif
3040 
3041 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
3042 int jfs_txstats_proc_show(struct seq_file *m, void *v)
3043 {
3044 	seq_printf(m,
3045 		       "JFS TxStats\n"
3046 		       "===========\n"
3047 		       "calls to txBegin = %d\n"
3048 		       "txBegin blocked by sync barrier = %d\n"
3049 		       "txBegin blocked by tlocks low = %d\n"
3050 		       "txBegin blocked by no free tid = %d\n"
3051 		       "calls to txBeginAnon = %d\n"
3052 		       "txBeginAnon blocked by sync barrier = %d\n"
3053 		       "txBeginAnon blocked by tlocks low = %d\n"
3054 		       "calls to txLockAlloc = %d\n"
3055 		       "txLockAlloc blocked by no free lock = %d\n",
3056 		       TxStat.txBegin,
3057 		       TxStat.txBegin_barrier,
3058 		       TxStat.txBegin_lockslow,
3059 		       TxStat.txBegin_freetid,
3060 		       TxStat.txBeginAnon,
3061 		       TxStat.txBeginAnon_barrier,
3062 		       TxStat.txBeginAnon_lockslow,
3063 		       TxStat.txLockAlloc,
3064 		       TxStat.txLockAlloc_freelock);
3065 	return 0;
3066 }
3067 #endif
3068