git.proxmox.com Git - mirror_zfs.git/blobdiff - module/zfs/zio.c
OpenZFS 6531 - Provide mechanism to artificially limit disk performance
[mirror_zfs.git] / module / zfs / zio.c
index 523a924d67b0973a94549f69599af32ce7ff0087..4063703adf2dd576c9b5bd1bb93fb98e7eb02e30 100644 (file)
@@ -40,6 +40,7 @@
 #include <sys/blkptr.h>
 #include <sys/zfeature.h>
 #include <sys/time.h>
+#include <sys/trace_zio.h>
 
 /*
  * ==========================================================================
@@ -1390,6 +1391,76 @@ zio_interrupt(zio_t *zio)
        zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
 }
 
+/*
+ * Complete a zio whose completion may have been artificially delayed
+ * (OpenZFS 6531 disk-performance limiting).  If io_target_timestamp is
+ * set and still in the future, completion is deferred: sub-tick delays
+ * spin, longer delays are rescheduled through a delayed taskq dispatch.
+ * Otherwise (deadline already passed, no delay registered, or userspace
+ * build) the zio is handed to zio_interrupt() immediately.
+ */
+void
+zio_delay_interrupt(zio_t *zio)
+{
+       /*
+        * The timeout_generic() function isn't defined in userspace, so
+        * rather than trying to implement the function, the zio delay
+        * functionality has been disabled for userspace builds.
+        */
+
+#ifdef _KERNEL
+       /*
+        * If io_target_timestamp is zero, then no delay has been registered
+        * for this IO, thus jump to the end of this function and "skip" the
+        * delay; issuing it directly to the zio layer.
+        */
+       if (zio->io_target_timestamp != 0) {
+               hrtime_t now = gethrtime();
+
+               if (now >= zio->io_target_timestamp) {
+                       /*
+                        * This IO has already taken longer than the target
+                        * delay to complete, so we don't want to delay it
+                        * any longer; we "miss" the delay and issue it
+                        * directly to the zio layer. This is likely due to
+                        * the target latency being set to a value less than
+                        * the underlying hardware can satisfy (e.g. delay
+                        * set to 1ms, but the disks take 10ms to complete an
+                        * IO request).
+                        */
+
+                       DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
+                           hrtime_t, now);
+
+                       zio_interrupt(zio);
+               } else {
+                       taskqid_t tid;
+                       hrtime_t diff = zio->io_target_timestamp - now;
+                       /* Convert the remaining hrtime delay to lbolt ticks */
+                       clock_t expire_at_tick = ddi_get_lbolt() +
+                           NSEC_TO_TICK(diff);
+
+                       DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
+                           hrtime_t, now, hrtime_t, diff);
+
+                       if (NSEC_TO_TICK(diff) == 0) {
+                               /* Our delay is less than a jiffy - just spin */
+                               zfs_sleep_until(zio->io_target_timestamp);
+                       } else {
+                               /*
+                                * Use taskq_dispatch_delay() in the place of
+                                * OpenZFS's timeout_generic().  TQ_NOSLEEP:
+                                * on allocation failure we fall back to an
+                                * immediate completion rather than blocking
+                                * in this (interrupt) context.
+                                */
+                               tid = taskq_dispatch_delay(system_taskq,
+                                   (task_func_t *) zio_interrupt,
+                                   zio, TQ_NOSLEEP, expire_at_tick);
+                               if (!tid) {
+                                       /*
+                                        * Couldn't allocate a task.  Just
+                                        * finish the zio without a delay.
+                                        */
+                                       zio_interrupt(zio);
+                               }
+                       }
+               }
+               return;
+       }
+#endif
+       DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
+       zio_interrupt(zio);
+}
+
 /*
  * Execute the I/O pipeline until one of the following occurs:
  * (1) the I/O completes; (2) the pipeline stalls waiting for