dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
- err = dmu_tx_assign(tx, TXG_NOWAIT);
+ err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
- if (err == ERESTART)
- dmu_tx_wait(tx);
-
dmu_tx_abort(tx);
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
filemap_dirty_folio(page_mapping(pp), page_folio(pp));
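
In the hunk above, from zfs_putpage(): with TXG_WAIT, dmu_tx_assign() sleeps internally until the transaction can be assigned to an open txg, so the caller-side ERESTART check and dmu_tx_wait() call are no longer needed. Any error that still comes back is a hard failure; the page is simply redirtied (via filemap_dirty_folio() on newer kernels) and the error returned to writeback. A minimal sketch of the general pattern, assuming a generic DMU consumer; os, object, off and len are placeholders, not names from the patch:

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, TXG_WAIT);	/* blocks; never ERESTART */
	if (err != 0) {
		/* Hard failure only (e.g. pool suspended or ENOSPC). */
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... apply the change, then dmu_tx_commit(tx) ... */

The hunk that follows is the body of zpl_putpage() in zpl_file.c.
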
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
boolean_t *for_sync = data;
fstrans_cookie_t cookie;
+ int ret;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
cookie = spl_fstrans_mark();
- (void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
+ ret = zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
spl_fstrans_unmark(cookie);
- return (0);
+ return (ret);
}
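
zpl_putpage() is the writepage_t callback that zpl_writepages() hands to the kernel's write_cache_pages(); returning zfs_putpage()'s result instead of discarding it lets the writeback machinery observe the failure. A sketch of such a call site, assuming the usual shape of zpl_writepages(); my_writepages is a hypothetical name and the body is simplified:

static int
my_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);

	/*
	 * write_cache_pages() stops at, and returns, the first nonzero
	 * value its callback produces, so the error propagated from
	 * zfs_putpage() now reaches the VFS instead of being lost.
	 */
	return (write_cache_pages(mapping, wbc, zpl_putpage, &for_sync));
}
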
#ifdef HAVE_WRITEPAGE_T_FOLIO
static int
zpl_putfolio(struct folio *pp, struct writeback_control *wbc, void *data)
{
- (void) zpl_putpage(&pp->page, wbc, data);
- return (0);
+ return (zpl_putpage(&pp->page, wbc, data));
}
#endif
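
On kernels whose writepage_t takes a folio (HAVE_WRITEPAGE_T_FOLIO), zpl_putfolio() unwraps the head page and forwards to zpl_putpage(); forwarding the return value keeps error propagation identical on both kernel variants. The caller would then select the matching callback with the same ifdef, sketched below under that assumption (names as in the earlier sketch):

#ifdef HAVE_WRITEPAGE_T_FOLIO
	ret = write_cache_pages(mapping, wbc, zpl_putfolio, &for_sync);
#else
	ret = write_cache_pages(mapping, wbc, zpl_putpage, &for_sync);
#endif

The next hunk moves to the platform-independent code, in zfs_holey_common().
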
/* Flush any mmap()'d data to disk */
if (zn_has_cached_data(zp, 0, file_sz - 1))
- zn_flush_cached_data(zp, B_FALSE);
+ zn_flush_cached_data(zp, B_TRUE);
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
}
}
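
zfs_holey_common() answers SEEK_HOLE/SEEK_DATA through dmu_offset_next(), which only sees data that has reached the DMU; dirty mmap()'d pages live in the page cache. The flush therefore has to be synchronous (B_TRUE, i.e. wait for writeback to complete) before probing, or a hole can be reported where unwritten dirty data sits. A hypothetical userspace reproducer for the pre-fix behavior (not from the patch; assumes the file lives on a ZFS mount, error handling omitted):

#define _GNU_SOURCE	/* for SEEK_DATA */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("f", O_RDWR | O_CREAT, 0644);
	ftruncate(fd, 4096);		/* sparse file: all-hole on disk */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	p[0] = 'x';			/* dirty data exists only in the */
					/* page cache at this point      */
	/* With the synchronous flush, SEEK_DATA finds offset 0; before */
	/* the fix it could report the whole file as a hole (ENXIO).    */
	printf("%ld\n", (long)lseek(fd, 0, SEEK_DATA));
	return 0;
}

The final hunk applies the same rule to the block-cloning path.
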
+ /* Flush any mmap()'d data to disk */
+ if (zn_has_cached_data(inzp, inoff, inoff + len - 1))
+ zn_flush_cached_data(inzp, B_TRUE);
+
/*
* Maintain predictable lock order.
*/
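
The added hunk extends the flush-before-DMU rule to block cloning: judging by the inzp/inoff identifiers this is zfs_clone_range(), which clones records at the DMU level, so dirty mmap()'d data in the source range must reach disk before the clone. Note the inclusive end offset, inoff + len - 1, matching zn_has_cached_data()'s byte-range convention. Since the pattern now appears in several call sites, a hypothetical helper (illustration only, not in the patch) would look like:

static inline void
zfs_flush_mmap_range(znode_t *zp, uint64_t off, uint64_t len)
{
	if (len > 0 && zn_has_cached_data(zp, off, off + len - 1))
		zn_flush_cached_data(zp, B_TRUE);	/* synchronous */
}
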