Merge tag 'trace-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b5c4b9ade9603a4810dd4ee551d189f3fcdd6c17..fb0fe4c66b84a117cf9b8dab7cba79b1ce95581f 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -458,14 +458,9 @@ static const struct rchan_callbacks blk_relay_callbacks = {
 static void blk_trace_setup_lba(struct blk_trace *bt,
                                struct block_device *bdev)
 {
-       struct hd_struct *part = NULL;
-
-       if (bdev)
-               part = bdev->bd_part;
-
-       if (part) {
-               bt->start_lba = part->start_sect;
-               bt->end_lba = part->start_sect + part->nr_sects;
+       if (bdev) {
+               bt->start_lba = bdev->bd_start_sect;
+               bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
        } else {
                bt->start_lba = 0;
                bt->end_lba = -1ULL;
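
With hd_struct gone, the partition's offset and size are read straight off struct block_device. A minimal sketch of the same LBA-window computation using only the public helpers; the helper name is illustrative and not part of the patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative only: derive the absolute sector window of a partition. */
static void example_lba_window(struct block_device *bdev,
			       sector_t *start, sector_t *end)
{
	*start = get_start_sect(bdev);           /* bdev->bd_start_sect */
	*end   = *start + bdev_nr_sectors(bdev); /* partition size in sectors */
}
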
@@ -800,12 +795,12 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 #endif
 
 static u64
-blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
+blk_trace_request_get_cgid(struct request *rq)
 {
        if (!rq->bio)
                return 0;
        /* Use the first bio */
-       return blk_trace_bio_get_cgid(q, rq->bio);
+       return blk_trace_bio_get_cgid(rq->q, rq->bio);
 }
 
 /*
@@ -846,40 +841,35 @@ static void blk_add_trace_rq(struct request *rq, int error,
        rcu_read_unlock();
 }
 
-static void blk_add_trace_rq_insert(void *ignore,
-                                   struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
 {
        blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
-                        blk_trace_request_get_cgid(q, rq));
+                        blk_trace_request_get_cgid(rq));
 }
 
-static void blk_add_trace_rq_issue(void *ignore,
-                                  struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
 {
        blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
-                        blk_trace_request_get_cgid(q, rq));
+                        blk_trace_request_get_cgid(rq));
 }
 
-static void blk_add_trace_rq_merge(void *ignore,
-                                  struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
 {
        blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
-                        blk_trace_request_get_cgid(q, rq));
+                        blk_trace_request_get_cgid(rq));
 }
 
-static void blk_add_trace_rq_requeue(void *ignore,
-                                    struct request_queue *q,
-                                    struct request *rq)
+static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
 {
        blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
-                        blk_trace_request_get_cgid(q, rq));
+                        blk_trace_request_get_cgid(rq));
 }
 
 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
                        int error, unsigned int nr_bytes)
 {
        blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
-                        blk_trace_request_get_cgid(rq->q, rq));
+                        blk_trace_request_get_cgid(rq));
 }
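
All of the request-based probes now share the same two-argument shape. A hedged sketch, not taken from the patch, of a probe written against the reworked prototype; anything that needs the queue goes through rq->q:

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Illustrative probe: the tracepoint now hands over only the request. */
static void example_rq_insert_probe(void *ignore, struct request *rq)
{
	pr_info("insert: sector=%llu bytes=%u\n",
		(unsigned long long)blk_rq_pos(rq), blk_rq_bytes(rq));
}

/* Hooked up the same way blk_register_tracepoints() does further down: */
/*	register_trace_block_rq_insert(example_rq_insert_probe, NULL);	*/
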
 
 /**
@@ -911,10 +901,9 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
        rcu_read_unlock();
 }
 
-static void blk_add_trace_bio_bounce(void *ignore,
-                                    struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
+       blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
@@ -924,63 +913,24 @@ static void blk_add_trace_bio_complete(void *ignore,
                          blk_status_to_errno(bio->bi_status));
 }
 
-static void blk_add_trace_bio_backmerge(void *ignore,
-                                       struct request_queue *q,
-                                       struct request *rq,
-                                       struct bio *bio)
+static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
+       blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BACKMERGE, 0);
 }
 
-static void blk_add_trace_bio_frontmerge(void *ignore,
-                                        struct request_queue *q,
-                                        struct request *rq,
-                                        struct bio *bio)
+static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
+       blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_FRONTMERGE, 0);
 }
 
-static void blk_add_trace_bio_queue(void *ignore,
-                                   struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
+       blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_QUEUE, 0);
 }
 
-static void blk_add_trace_getrq(void *ignore,
-                               struct request_queue *q,
-                               struct bio *bio, int rw)
+static void blk_add_trace_getrq(void *ignore, struct bio *bio)
 {
-       if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
-       else {
-               struct blk_trace *bt;
-
-               rcu_read_lock();
-               bt = rcu_dereference(q->blk_trace);
-               if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
-                                       NULL, 0);
-               rcu_read_unlock();
-       }
-}
-
-
-static void blk_add_trace_sleeprq(void *ignore,
-                                 struct request_queue *q,
-                                 struct bio *bio, int rw)
-{
-       if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
-       else {
-               struct blk_trace *bt;
-
-               rcu_read_lock();
-               bt = rcu_dereference(q->blk_trace);
-               if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
-                                       0, 0, NULL, 0);
-               rcu_read_unlock();
-       }
+       blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_GETRQ, 0);
 }
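
The bio-side probes lose both the queue argument and the old "no bio" fallback paths; everything they need is reachable from the bio itself. A small illustrative helper (not in the patch) spelling out the lookup these handlers now rely on; note that later kernels reach the disk through bio->bi_bdev rather than bio->bi_disk:

/* Illustrative only: resolve the owning queue from a bio on this kernel. */
static inline struct request_queue *example_bio_queue(struct bio *bio)
{
	return bio->bi_disk->queue;
}
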
 
 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
@@ -1015,10 +965,9 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
        rcu_read_unlock();
 }
 
-static void blk_add_trace_split(void *ignore,
-                               struct request_queue *q, struct bio *bio,
-                               unsigned int pdu)
+static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 {
+       struct request_queue *q = bio->bi_disk->queue;
        struct blk_trace *bt;
 
        rcu_read_lock();
@@ -1039,20 +988,16 @@ static void blk_add_trace_split(void *ignore,
 /**
  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
  * @ignore:    trace callback data parameter (not used)
- * @q:         queue the io is for
  * @bio:       the source bio
- * @dev:       target device
+ * @dev:       source device
  * @from:      source sector
  *
- * Description:
- *     Device mapper or raid target sometimes need to split a bio because
- *     it spans a stripe (or similar). Add a trace for that action.
- *
+ * Called after a bio is remapped to a different device and/or sector.
  **/
-static void blk_add_trace_bio_remap(void *ignore,
-                                   struct request_queue *q, struct bio *bio,
-                                   dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
+                                   sector_t from)
 {
+       struct request_queue *q = bio->bi_disk->queue;
        struct blk_trace *bt;
        struct blk_io_trace_remap r;
 
@@ -1077,7 +1022,6 @@ static void blk_add_trace_bio_remap(void *ignore,
 /**
  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
  * @ignore:    trace callback data parameter (not used)
- * @q:         queue the io is for
  * @rq:                the source request
  * @dev:       target device
  * @from:      source sector
@@ -1087,16 +1031,14 @@ static void blk_add_trace_bio_remap(void *ignore,
  *     Add a trace for that action.
  *
  **/
-static void blk_add_trace_rq_remap(void *ignore,
-                                  struct request_queue *q,
-                                  struct request *rq, dev_t dev,
+static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
                                   sector_t from)
 {
        struct blk_trace *bt;
        struct blk_io_trace_remap r;
 
        rcu_read_lock();
-       bt = rcu_dereference(q->blk_trace);
+       bt = rcu_dereference(rq->q->blk_trace);
        if (likely(!bt)) {
                rcu_read_unlock();
                return;
@@ -1108,13 +1050,12 @@ static void blk_add_trace_rq_remap(void *ignore,
 
        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
                        rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
-                       sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+                       sizeof(r), &r, blk_trace_request_get_cgid(rq));
        rcu_read_unlock();
 }
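
Both remap probes now pull the queue from the object they are handed (bio->bi_disk->queue or rq->q). A hedged sketch, with invented names, of how a stacking driver might fire the bio-remap event under the new (bio, dev, from) prototype:

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Illustrative only: retarget a bio at a lower device, then trace the remap. */
static void example_stack_remap(struct bio *bio, struct block_device *lower,
				dev_t orig_dev, sector_t orig_sector)
{
	bio_set_dev(bio, lower);
	bio->bi_iter.bi_sector += get_start_sect(lower);
	trace_block_bio_remap(bio, orig_dev, orig_sector);
}
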
 
 /**
  * blk_add_driver_data - Add binary message with driver-specific data
- * @q:         queue the io is for
  * @rq:                io request
  * @data:      driver-specific data
  * @len:       length of driver-specific data
@@ -1123,14 +1064,12 @@ static void blk_add_trace_rq_remap(void *ignore,
  *     Some drivers might want to write driver-specific data per request.
  *
  **/
-void blk_add_driver_data(struct request_queue *q,
-                        struct request *rq,
-                        void *data, size_t len)
+void blk_add_driver_data(struct request *rq, void *data, size_t len)
 {
        struct blk_trace *bt;
 
        rcu_read_lock();
-       bt = rcu_dereference(q->blk_trace);
+       bt = rcu_dereference(rq->q->blk_trace);
        if (likely(!bt)) {
                rcu_read_unlock();
                return;
@@ -1138,7 +1077,7 @@ void blk_add_driver_data(struct request_queue *q,
 
        __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
                                BLK_TA_DRV_DATA, 0, len, data,
-                               blk_trace_request_get_cgid(q, rq));
+                               blk_trace_request_get_cgid(rq));
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
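
The exported helper drops its queue parameter as well. A hedged usage sketch with an invented payload struct (nothing below is from the patch) showing how a driver completion path could attach per-request data to the trace stream:

#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

struct example_drv_blob {		/* invented payload layout */
	u32 hw_queue;
	u32 status;
};

static void example_drv_trace_complete(struct request *rq, u32 hwq, u32 status)
{
	struct example_drv_blob blob = { .hw_queue = hwq, .status = status };

	/* New signature: the queue is taken from rq->q internally. */
	blk_add_driver_data(rq, &blob, sizeof(blob));
}
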
@@ -1169,8 +1108,6 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
-       WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
@@ -1190,7 +1127,6 @@ static void blk_unregister_tracepoints(void)
        unregister_trace_block_split(blk_add_trace_split, NULL);
        unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
-       unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
@@ -1343,7 +1279,7 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
                         * ones now use the 64bit ino as the whole ID and
                         * no longer use generation.
                         *
-                        * Regarldess of the content, always output
+                        * Regardless of the content, always output
                         * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
                         * be mapped back to @id on both 64 and 32bit ino
                         * setups.  See __kernfs_fh_to_dentry().
@@ -1385,7 +1321,7 @@ static void blk_log_dump_pdu(struct trace_seq *s,
                                 i == 0 ? "" : " ", pdu_buf[i]);
 
                /*
-                * stop when the rest is just zeroes and indicate so
+                * stop when the rest is just zeros and indicate so
                 * with a ".." appended
                 */
                if (i == end && end != pdu_len - 1) {
@@ -1815,30 +1751,15 @@ static ssize_t blk_trace_mask2str(char *buf, int mask)
        return p - buf;
 }
 
-static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
-{
-       if (bdev->bd_disk == NULL)
-               return NULL;
-
-       return bdev_get_queue(bdev);
-}
-
 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       struct block_device *bdev = bdget_part(dev_to_part(dev));
-       struct request_queue *q;
+       struct block_device *bdev = dev_to_bdev(dev);
+       struct request_queue *q = bdev_get_queue(bdev);
        struct blk_trace *bt;
        ssize_t ret = -ENXIO;
 
-       if (bdev == NULL)
-               goto out;
-
-       q = blk_trace_get_queue(bdev);
-       if (q == NULL)
-               goto out_bdput;
-
        mutex_lock(&q->debugfs_mutex);
 
        bt = rcu_dereference_protected(q->blk_trace,
@@ -1861,9 +1782,6 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 
 out_unlock_bdev:
        mutex_unlock(&q->debugfs_mutex);
-out_bdput:
-       bdput(bdev);
-out:
        return ret;
 }
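
The sysfs show/store paths no longer take a reference via bdget_part()/bdput(); the struct device embedded in the block_device is mapped straight back with dev_to_bdev(). A minimal illustrative attribute using the same pattern (the attribute itself is invented):

#include <linux/blkdev.h>
#include <linux/device.h>

static ssize_t example_sectors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)bdev_nr_sectors(bdev));
}
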
 
@@ -1871,8 +1789,8 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
 {
-       struct block_device *bdev;
-       struct request_queue *q;
+       struct block_device *bdev = dev_to_bdev(dev);
+       struct request_queue *q = bdev_get_queue(bdev);
        struct blk_trace *bt;
        u64 value;
        ssize_t ret = -EINVAL;
@@ -1888,17 +1806,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                goto out;
                        value = ret;
                }
-       } else if (kstrtoull(buf, 0, &value))
-               goto out;
-
-       ret = -ENXIO;
-       bdev = bdget_part(dev_to_part(dev));
-       if (bdev == NULL)
-               goto out;
-
-       q = blk_trace_get_queue(bdev);
-       if (q == NULL)
-               goto out_bdput;
+       } else {
+               if (kstrtoull(buf, 0, &value))
+                       goto out;
+       }
 
        mutex_lock(&q->debugfs_mutex);
 
@@ -1936,8 +1847,6 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 
 out_unlock_bdev:
        mutex_unlock(&q->debugfs_mutex);
-out_bdput:
-       bdput(bdev);
 out:
        return ret ? ret : count;
 }