forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/fs/xfs/xfs_trans_dquot.c
@@ -11,11 +11,11 @@
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
 #include "xfs_inode.h"
-#include "xfs_error.h"
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_quota.h"
 #include "xfs_qm.h"
+#include "xfs_trace.h"
 
 STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
 
@@ -26,10 +26,9 @@
  */
 void
 xfs_trans_dqjoin(
-	xfs_trans_t	*tp,
-	xfs_dquot_t	*dqp)
+	struct xfs_trans	*tp,
+	struct xfs_dquot	*dqp)
 {
-	ASSERT(dqp->q_transp != tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 	ASSERT(dqp->q_logitem.qli_dquot == dqp);
 
@@ -37,14 +36,7 @@
 	 * Get a log_item_desc to point at the new item.
 	 */
 	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
-
-	/*
-	 * Initialize d_transp so we can later determine if this dquot is
-	 * associated with this transaction.
-	 */
-	dqp->q_transp = tp;
 }
-
 
 /*
  * This is called to mark the dquot as needing
@@ -58,11 +50,16 @@
  */
 void
 xfs_trans_log_dquot(
-	xfs_trans_t	*tp,
-	xfs_dquot_t	*dqp)
+	struct xfs_trans	*tp,
+	struct xfs_dquot	*dqp)
 {
-	ASSERT(dqp->q_transp == tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	/* Upgrade the dquot to bigtime format if possible. */
+	if (dqp->q_id != 0 &&
+	    xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
+	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
+		dqp->q_type |= XFS_DQTYPE_BIGTIME;
 
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
@@ -74,13 +71,13 @@
  */
 void
 xfs_trans_dup_dqinfo(
-	xfs_trans_t	*otp,
-	xfs_trans_t	*ntp)
+	struct xfs_trans	*otp,
+	struct xfs_trans	*ntp)
 {
-	xfs_dqtrx_t	*oq, *nq;
-	int		i, j;
-	xfs_dqtrx_t	*oqa, *nqa;
-	ulong		blk_res_used;
+	struct xfs_dqtrx	*oq, *nq;
+	int			i, j;
+	struct xfs_dqtrx	*oqa, *nqa;
+	uint64_t		blk_res_used;
 
 	if (!otp->t_dqinfo)
 		return;
@@ -137,7 +134,7 @@
 	xfs_trans_t	*tp,
 	xfs_inode_t	*ip,
 	uint		field,
-	long		delta)
+	int64_t		delta)
 {
 	xfs_mount_t	*mp = tp->t_mountp;
 
@@ -165,14 +162,19 @@
 	int			i;
 	struct xfs_dqtrx	*qa;
 
-	if (XFS_QM_ISUDQ(dqp))
+	switch (xfs_dquot_type(dqp)) {
+	case XFS_DQTYPE_USER:
 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
-	else if (XFS_QM_ISGDQ(dqp))
+		break;
+	case XFS_DQTYPE_GROUP:
 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
-	else if (XFS_QM_ISPDQ(dqp))
+		break;
+	case XFS_DQTYPE_PROJ:
 		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
-	else
+		break;
+	default:
 		return NULL;
+	}
 
 	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
 		if (qa[i].qt_dquot == NULL ||
@@ -191,12 +193,12 @@
  */
 void
 xfs_trans_mod_dquot(
-	xfs_trans_t	*tp,
-	xfs_dquot_t	*dqp,
-	uint		field,
-	long		delta)
+	struct xfs_trans	*tp,
+	struct xfs_dquot	*dqp,
+	uint			field,
+	int64_t			delta)
 {
-	xfs_dqtrx_t	*qtrx;
+	struct xfs_dqtrx	*qtrx;
 
 	ASSERT(tp);
 	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
@@ -213,69 +215,65 @@
 	if (qtrx->qt_dquot == NULL)
 		qtrx->qt_dquot = dqp;
 
+	if (delta) {
+		trace_xfs_trans_mod_dquot_before(qtrx);
+		trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
+	}
+
 	switch (field) {
-
-		/*
-		 * regular disk blk reservation
-		 */
-	      case XFS_TRANS_DQ_RES_BLKS:
-		qtrx->qt_blk_res += (ulong)delta;
+	/* regular disk blk reservation */
+	case XFS_TRANS_DQ_RES_BLKS:
+		qtrx->qt_blk_res += delta;
 		break;
 
-		/*
-		 * inode reservation
-		 */
-	      case XFS_TRANS_DQ_RES_INOS:
-		qtrx->qt_ino_res += (ulong)delta;
+	/* inode reservation */
+	case XFS_TRANS_DQ_RES_INOS:
+		qtrx->qt_ino_res += delta;
 		break;
 
-		/*
-		 * disk blocks used.
-		 */
-	      case XFS_TRANS_DQ_BCOUNT:
+	/* disk blocks used. */
+	case XFS_TRANS_DQ_BCOUNT:
 		qtrx->qt_bcount_delta += delta;
 		break;
 
-	      case XFS_TRANS_DQ_DELBCOUNT:
+	case XFS_TRANS_DQ_DELBCOUNT:
 		qtrx->qt_delbcnt_delta += delta;
 		break;
 
-		/*
-		 * Inode Count
-		 */
-	      case XFS_TRANS_DQ_ICOUNT:
+	/* Inode Count */
+	case XFS_TRANS_DQ_ICOUNT:
 		if (qtrx->qt_ino_res && delta > 0) {
-			qtrx->qt_ino_res_used += (ulong)delta;
+			qtrx->qt_ino_res_used += delta;
 			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
 		}
 		qtrx->qt_icount_delta += delta;
 		break;
 
-		/*
-		 * rtblk reservation
-		 */
-	      case XFS_TRANS_DQ_RES_RTBLKS:
-		qtrx->qt_rtblk_res += (ulong)delta;
+	/* rtblk reservation */
+	case XFS_TRANS_DQ_RES_RTBLKS:
+		qtrx->qt_rtblk_res += delta;
 		break;
 
-		/*
-		 * rtblk count
-		 */
-	      case XFS_TRANS_DQ_RTBCOUNT:
+	/* rtblk count */
+	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
-			qtrx->qt_rtblk_res_used += (ulong)delta;
+			qtrx->qt_rtblk_res_used += delta;
 			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
 		}
 		qtrx->qt_rtbcount_delta += delta;
 		break;
 
-	      case XFS_TRANS_DQ_DELRTBCOUNT:
+	case XFS_TRANS_DQ_DELRTBCOUNT:
 		qtrx->qt_delrtb_delta += delta;
 		break;
 
-	      default:
+	default:
 		ASSERT(0);
 	}
+
+	if (delta)
+		trace_xfs_trans_mod_dquot_after(qtrx);
+
 	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
 }
 
@@ -288,8 +286,8 @@
  */
 STATIC void
 xfs_trans_dqlockedjoin(
-	xfs_trans_t	*tp,
-	xfs_dqtrx_t	*q)
+	struct xfs_trans	*tp,
+	struct xfs_dqtrx	*q)
 {
 	ASSERT(q[0].qt_dquot != NULL);
 	if (q[1].qt_dquot == NULL) {
@@ -303,6 +301,37 @@
 	}
 }
 
+/* Apply dqtrx changes to the quota reservation counters. */
+static inline void
+xfs_apply_quota_reservation_deltas(
+	struct xfs_dquot_res	*res,
+	uint64_t		reserved,
+	int64_t			res_used,
+	int64_t			count_delta)
+{
+	if (reserved != 0) {
+		/*
+		 * Subtle math here: If reserved > res_used (the normal case),
+		 * we're simply subtracting the unused transaction quota
+		 * reservation from the dquot reservation.
+		 *
+		 * If, however, res_used > reserved, then we have allocated
+		 * more quota blocks than were reserved for the transaction.
+		 * We must add that excess to the dquot reservation since it
+		 * tracks (usage + resv) and by definition we didn't reserve
+		 * that excess.
+		 */
+		res->reserved -= abs(reserved - res_used);
+	} else if (count_delta != 0) {
+		/*
+		 * These blks were never reserved, either inside a transaction
+		 * or outside one (in a delayed allocation). Also, this isn't
+		 * always a negative number since we sometimes deliberately
+		 * skip quota reservations.
+		 */
+		res->reserved += count_delta;
+	}
+}
 
 /*
  * Called by xfs_trans_commit() and similar in spirit to
@@ -319,9 +348,8 @@
 	int			i, j;
 	struct xfs_dquot	*dqp;
 	struct xfs_dqtrx	*qtrx, *qa;
-	struct xfs_disk_dquot	*d;
-	long			totalbdelta;
-	long			totalrtbdelta;
+	int64_t			totalbdelta;
+	int64_t			totalrtbdelta;
 
 	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
 		return;
@@ -338,6 +366,8 @@
 		xfs_trans_dqlockedjoin(tp, qa);
 
 		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
+			uint64_t	blk_res_used;
+
 			qtrx = &qa[i];
 			/*
 			 * The array of dquots is filled
@@ -347,12 +377,10 @@
 				break;
 
 			ASSERT(XFS_DQ_IS_LOCKED(dqp));
-			ASSERT(dqp->q_transp == tp);
 
 			/*
 			 * adjust the actual number of blocks used
 			 */
-			d = &dqp->q_core;
 
 			/*
 			 * The issue here is - sometimes we don't make a blkquota
@@ -371,38 +399,46 @@
 				qtrx->qt_delbcnt_delta;
 			totalrtbdelta = qtrx->qt_rtbcount_delta +
 				qtrx->qt_delrtb_delta;
+
+			if (totalbdelta != 0 || totalrtbdelta != 0 ||
+			    qtrx->qt_icount_delta != 0) {
+				trace_xfs_trans_apply_dquot_deltas_before(dqp);
+				trace_xfs_trans_apply_dquot_deltas(qtrx);
+			}
+
 #ifdef DEBUG
 			if (totalbdelta < 0)
-				ASSERT(be64_to_cpu(d->d_bcount) >=
-				       -totalbdelta);
+				ASSERT(dqp->q_blk.count >= -totalbdelta);
 
 			if (totalrtbdelta < 0)
-				ASSERT(be64_to_cpu(d->d_rtbcount) >=
-				       -totalrtbdelta);
+				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
 
 			if (qtrx->qt_icount_delta < 0)
-				ASSERT(be64_to_cpu(d->d_icount) >=
-				       -qtrx->qt_icount_delta);
+				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
 #endif
 			if (totalbdelta)
-				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
+				dqp->q_blk.count += totalbdelta;
 
 			if (qtrx->qt_icount_delta)
-				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
+				dqp->q_ino.count += qtrx->qt_icount_delta;
 
 			if (totalrtbdelta)
-				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
+				dqp->q_rtb.count += totalrtbdelta;
+
+			if (totalbdelta != 0 || totalrtbdelta != 0 ||
+			    qtrx->qt_icount_delta != 0)
+				trace_xfs_trans_apply_dquot_deltas_after(dqp);
 
 			/*
 			 * Get any default limits in use.
 			 * Start/reset the timer(s) if needed.
 			 */
-			if (d->d_id) {
-				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
-				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
+			if (dqp->q_id) {
+				xfs_qm_adjust_dqlimits(dqp);
+				xfs_qm_adjust_dqtimers(dqp);
 			}
 
-			dqp->dq_flags |= XFS_DQ_DIRTY;
+			dqp->q_flags |= XFS_DQFLAG_DIRTY;
 			/*
 			 * add this to the list of items to get logged
 			 */
@@ -412,78 +448,31 @@
 			 * In case of delayed allocations, there's no
 			 * reservation that a transaction structure knows of.
 			 */
-			if (qtrx->qt_blk_res != 0) {
-				ulong blk_res_used = 0;
+			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
+			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
+					qtrx->qt_blk_res, blk_res_used,
+					qtrx->qt_bcount_delta);
 
-				if (qtrx->qt_bcount_delta > 0)
-					blk_res_used = qtrx->qt_bcount_delta;
-
-				if (qtrx->qt_blk_res != blk_res_used) {
-					if (qtrx->qt_blk_res > blk_res_used)
-						dqp->q_res_bcount -= (xfs_qcnt_t)
-							(qtrx->qt_blk_res -
-							 blk_res_used);
-					else
-						dqp->q_res_bcount -= (xfs_qcnt_t)
-							(blk_res_used -
-							 qtrx->qt_blk_res);
-				}
-			} else {
-				/*
-				 * These blks were never reserved, either inside
-				 * a transaction or outside one (in a delayed
-				 * allocation). Also, this isn't always a
-				 * negative number since we sometimes
-				 * deliberately skip quota reservations.
-				 */
-				if (qtrx->qt_bcount_delta) {
-					dqp->q_res_bcount +=
-						(xfs_qcnt_t)qtrx->qt_bcount_delta;
-				}
-			}
 			/*
 			 * Adjust the RT reservation.
 			 */
-			if (qtrx->qt_rtblk_res != 0) {
-				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
-					if (qtrx->qt_rtblk_res >
-					    qtrx->qt_rtblk_res_used)
-						dqp->q_res_rtbcount -= (xfs_qcnt_t)
-							(qtrx->qt_rtblk_res -
-							 qtrx->qt_rtblk_res_used);
-					else
-						dqp->q_res_rtbcount -= (xfs_qcnt_t)
-							(qtrx->qt_rtblk_res_used -
-							 qtrx->qt_rtblk_res);
-				}
-			} else {
-				if (qtrx->qt_rtbcount_delta)
-					dqp->q_res_rtbcount +=
-						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
-			}
+			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
+					qtrx->qt_rtblk_res,
+					qtrx->qt_rtblk_res_used,
+					qtrx->qt_rtbcount_delta);
 
 			/*
 			 * Adjust the inode reservation.
 			 */
-			if (qtrx->qt_ino_res != 0) {
-				ASSERT(qtrx->qt_ino_res >=
-				       qtrx->qt_ino_res_used);
-				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
-					dqp->q_res_icount -= (xfs_qcnt_t)
-						(qtrx->qt_ino_res -
-						 qtrx->qt_ino_res_used);
-			} else {
-				if (qtrx->qt_icount_delta)
-					dqp->q_res_icount +=
-						(xfs_qcnt_t)qtrx->qt_icount_delta;
-			}
+			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
+			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
+					qtrx->qt_ino_res,
+					qtrx->qt_ino_res_used,
+					qtrx->qt_icount_delta);
 
-			ASSERT(dqp->q_res_bcount >=
-				be64_to_cpu(dqp->q_core.d_bcount));
-			ASSERT(dqp->q_res_icount >=
-				be64_to_cpu(dqp->q_core.d_icount));
-			ASSERT(dqp->q_res_rtbcount >=
-				be64_to_cpu(dqp->q_core.d_rtbcount));
+			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
+			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
 		}
 	}
 }
@@ -497,12 +486,12 @@
  */
 void
 xfs_trans_unreserve_and_mod_dquots(
-	xfs_trans_t	*tp)
+	struct xfs_trans	*tp)
 {
 	int			i, j;
-	xfs_dquot_t		*dqp;
-	xfs_dqtrx_t		*qtrx, *qa;
-	bool			locked;
+	struct xfs_dquot	*dqp;
+	struct xfs_dqtrx	*qtrx, *qa;
+	bool			locked;
 
 	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
 		return;
@@ -527,7 +516,7 @@
 			if (qtrx->qt_blk_res) {
 				xfs_dqlock(dqp);
 				locked = true;
-				dqp->q_res_bcount -=
+				dqp->q_blk.reserved -=
 					(xfs_qcnt_t)qtrx->qt_blk_res;
 			}
 			if (qtrx->qt_ino_res) {
@@ -535,7 +524,7 @@
 					xfs_dqlock(dqp);
 					locked = true;
 				}
-				dqp->q_res_icount -=
+				dqp->q_ino.reserved -=
 					(xfs_qcnt_t)qtrx->qt_ino_res;
 			}
 
@@ -544,7 +533,7 @@
 					xfs_dqlock(dqp);
 					locked = true;
 				}
-				dqp->q_res_rtbcount -=
+				dqp->q_rtb.reserved -=
 					(xfs_qcnt_t)qtrx->qt_rtblk_res;
 			}
 			if (locked)
@@ -560,18 +549,76 @@
 	struct xfs_dquot	*dqp,
 	int			type)
 {
-	enum quota_type qtype;
+	enum quota_type		qtype;
 
-	if (dqp->dq_flags & XFS_DQ_PROJ)
+	switch (xfs_dquot_type(dqp)) {
+	case XFS_DQTYPE_PROJ:
 		qtype = PRJQUOTA;
-	else if (dqp->dq_flags & XFS_DQ_USER)
+		break;
+	case XFS_DQTYPE_USER:
 		qtype = USRQUOTA;
-	else
+		break;
+	case XFS_DQTYPE_GROUP:
 		qtype = GRPQUOTA;
+		break;
+	default:
+		return;
+	}
 
-	quota_send_warning(make_kqid(&init_user_ns, qtype,
-				     be32_to_cpu(dqp->q_core.d_id)),
+	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
 			   mp->m_super->s_dev, type);
+}
+
+/*
+ * Decide if we can make an additional reservation against a quota resource.
+ * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
+ *
+ * Note that we assume that the numeric difference between the inode and block
+ * warning codes will always be 3 since it's userspace ABI now, and will never
+ * decrease the quota reservation, so the *BELOW messages are irrelevant.
+ */
+static inline int
+xfs_dqresv_check(
+	struct xfs_dquot_res	*res,
+	struct xfs_quota_limits	*qlim,
+	int64_t			delta,
+	bool			*fatal)
+{
+	xfs_qcnt_t		hardlimit = res->hardlimit;
+	xfs_qcnt_t		softlimit = res->softlimit;
+	xfs_qcnt_t		total_count = res->reserved + delta;
+
+	BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3);
+	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
+	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3);
+
+	*fatal = false;
+	if (delta <= 0)
+		return QUOTA_NL_NOWARN;
+
+	if (!hardlimit)
+		hardlimit = qlim->hard;
+	if (!softlimit)
+		softlimit = qlim->soft;
+
+	if (hardlimit && total_count > hardlimit) {
+		*fatal = true;
+		return QUOTA_NL_IHARDWARN;
+	}
+
+	if (softlimit && total_count > softlimit) {
+		time64_t	now = ktime_get_real_seconds();
+
+		if ((res->timer != 0 && now > res->timer) ||
+		    (res->warnings != 0 && res->warnings >= qlim->warn)) {
+			*fatal = true;
+			return QUOTA_NL_ISOFTLONGWARN;
+		}
+
+		return QUOTA_NL_ISOFTWARN;
+	}
+
+	return QUOTA_NL_NOWARN;
 }
 
 /*
@@ -582,115 +629,65 @@
  */
 STATIC int
 xfs_trans_dqresv(
-	xfs_trans_t	*tp,
-	xfs_mount_t	*mp,
-	xfs_dquot_t	*dqp,
-	long		nblks,
-	long		ninos,
-	uint		flags)
+	struct xfs_trans	*tp,
+	struct xfs_mount	*mp,
+	struct xfs_dquot	*dqp,
+	int64_t			nblks,
+	long			ninos,
+	uint			flags)
 {
-	xfs_qcnt_t	hardlimit;
-	xfs_qcnt_t	softlimit;
-	time_t		timer;
-	xfs_qwarncnt_t	warns;
-	xfs_qwarncnt_t	warnlimit;
-	xfs_qcnt_t	total_count;
-	xfs_qcnt_t	*resbcountp;
-	xfs_quotainfo_t	*q = mp->m_quotainfo;
+	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_def_quota	*defq;
-
+	struct xfs_dquot_res	*blkres;
+	struct xfs_quota_limits	*qlim;
 
 	xfs_dqlock(dqp);
 
-	defq = xfs_get_defquota(dqp, q);
+	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
 
 	if (flags & XFS_TRANS_DQ_RES_BLKS) {
-		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
-		if (!hardlimit)
-			hardlimit = defq->bhardlimit;
-		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
-		if (!softlimit)
-			softlimit = defq->bsoftlimit;
-		timer = be32_to_cpu(dqp->q_core.d_btimer);
-		warns = be16_to_cpu(dqp->q_core.d_bwarns);
-		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
-		resbcountp = &dqp->q_res_bcount;
+		blkres = &dqp->q_blk;
+		qlim = &defq->blk;
 	} else {
-		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
-		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
-		if (!hardlimit)
-			hardlimit = defq->rtbhardlimit;
-		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
-		if (!softlimit)
-			softlimit = defq->rtbsoftlimit;
-		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
-		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
-		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
-		resbcountp = &dqp->q_res_rtbcount;
+		blkres = &dqp->q_rtb;
+		qlim = &defq->rtb;
 	}
 
-	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
-	    dqp->q_core.d_id &&
-	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
-	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
-	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
-		if (nblks > 0) {
+	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
+	    xfs_dquot_is_enforced(dqp)) {
+		int		quota_nl;
+		bool		fatal;
+
+		/*
+		 * dquot is locked already. See if we'd go over the hardlimit
+		 * or exceed the timelimit if we'd reserve resources.
+		 */
+		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
+		if (quota_nl != QUOTA_NL_NOWARN) {
 			/*
-			 * dquot is locked already. See if we'd go over the
-			 * hardlimit or exceed the timelimit if we allocate
-			 * nblks.
+			 * Quota block warning codes are 3 more than the inode
+			 * codes, which we check above.
 			 */
-			total_count = *resbcountp + nblks;
-			if (hardlimit && total_count > hardlimit) {
-				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
+			xfs_quota_warn(mp, dqp, quota_nl + 3);
+			if (fatal)
 				goto error_return;
-			}
-			if (softlimit && total_count > softlimit) {
-				if ((timer != 0 && get_seconds() > timer) ||
-				    (warns != 0 && warns >= warnlimit)) {
-					xfs_quota_warn(mp, dqp,
-						       QUOTA_NL_BSOFTLONGWARN);
-					goto error_return;
-				}
-
-				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
-			}
 		}
-		if (ninos > 0) {
-			total_count = dqp->q_res_icount + ninos;
-			timer = be32_to_cpu(dqp->q_core.d_itimer);
-			warns = be16_to_cpu(dqp->q_core.d_iwarns);
-			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
-			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
-			if (!hardlimit)
-				hardlimit = defq->ihardlimit;
-			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-			if (!softlimit)
-				softlimit = defq->isoftlimit;
 
-			if (hardlimit && total_count > hardlimit) {
-				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
+		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
+				&fatal);
+		if (quota_nl != QUOTA_NL_NOWARN) {
+			xfs_quota_warn(mp, dqp, quota_nl);
+			if (fatal)
 				goto error_return;
-			}
-			if (softlimit && total_count > softlimit) {
-				if ((timer != 0 && get_seconds() > timer) ||
-				    (warns != 0 && warns >= warnlimit)) {
-					xfs_quota_warn(mp, dqp,
-						       QUOTA_NL_ISOFTLONGWARN);
-					goto error_return;
-				}
-				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
-			}
 		}
 	}
 
 	/*
 	 * Change the reservation, but not the actual usage.
-	 * Note that q_res_bcount = q_core.d_bcount + resv
+	 * Note that q_blk.reserved = q_blk.count + resv
 	 */
-	(*resbcountp) += (xfs_qcnt_t)nblks;
-	if (ninos != 0)
-		dqp->q_res_icount += (xfs_qcnt_t)ninos;
+	blkres->reserved += (xfs_qcnt_t)nblks;
+	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;
 
 	/*
 	 * note the reservation amt in the trans struct too,
@@ -711,16 +708,16 @@
 					    XFS_TRANS_DQ_RES_INOS,
 					    ninos);
 	}
-	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
-	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
-	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
+	ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+	ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
+	ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
 
 	xfs_dqunlock(dqp);
 	return 0;
 
 error_return:
 	xfs_dqunlock(dqp);
-	if (flags & XFS_QMOPT_ENOSPC)
+	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
 		return -ENOSPC;
 	return -EDQUOT;
 }
@@ -745,7 +742,7 @@
 	struct xfs_dquot	*udqp,
 	struct xfs_dquot	*gdqp,
 	struct xfs_dquot	*pdqp,
-	long			nblks,
+	int64_t			nblks,
 	long			ninos,
 	uint			flags)
 {
@@ -760,8 +757,7 @@
 	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
 
 	if (udqp) {
-		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
-					 (flags & ~XFS_QMOPT_ENOSPC));
+		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
 		if (error)
 			return error;
 	}
@@ -804,7 +800,7 @@
 xfs_trans_reserve_quota_nblks(
 	struct xfs_trans	*tp,
 	struct xfs_inode	*ip,
-	long			nblks,
+	int64_t			nblks,
 	long			ninos,
 	uint			flags)
 {
@@ -812,16 +808,12 @@
 
 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
 		return 0;
-	if (XFS_IS_PQUOTA_ON(mp))
-		flags |= XFS_QMOPT_ENOSPC;
 
 	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
-	       XFS_TRANS_DQ_RES_RTBLKS ||
-	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
-	       XFS_TRANS_DQ_RES_BLKS);
+	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
+	       (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);
 
 	/*
 	 * Reserve nblks against these dquots, with trans as the mediator.
@@ -835,13 +827,13 @@
 /*
  * This routine is called to allocate a quotaoff log item.
  */
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
 xfs_trans_get_qoff_item(
-	xfs_trans_t		*tp,
-	xfs_qoff_logitem_t	*startqoff,
+	struct xfs_trans	*tp,
+	struct xfs_qoff_logitem	*startqoff,
 	uint			flags)
 {
-	xfs_qoff_logitem_t	*q;
+	struct xfs_qoff_logitem	*q;
 
 	ASSERT(tp != NULL);
 
@@ -863,8 +855,8 @@
  */
 void
 xfs_trans_log_quotaoff_item(
-	xfs_trans_t		*tp,
-	xfs_qoff_logitem_t	*qlp)
+	struct xfs_trans	*tp,
+	struct xfs_qoff_logitem	*qlp)
 {
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
@@ -874,7 +866,8 @@
 xfs_trans_alloc_dqinfo(
 	xfs_trans_t	*tp)
 {
-	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
+	tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
+			GFP_KERNEL | __GFP_NOFAIL);
 }
 
 void
@@ -883,6 +876,6 @@
 {
 	if (!tp->t_dqinfo)
 		return;
-	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
+	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
 	tp->t_dqinfo = NULL;
 }