1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2016, Avago Technologies
4 */
5
6 #ifndef _NVME_FC_DRIVER_H
7 #define _NVME_FC_DRIVER_H 1
8
9 #include <linux/scatterlist.h>
10
11
12 /*
13 * ********************** FC-NVME LS API ********************
14 *
15 * Data structures used by both FC-NVME hosts and FC-NVME
16 * targets to perform FC-NVME LS requests or transmit
17 * responses.
18 *
19 * ***********************************************************
20 */
21
22 /**
23 * struct nvmefc_ls_req - Request structure passed from the transport
24 * to the LLDD to perform a NVME-FC LS request and obtain
25 * a response.
26 * Used by nvme-fc transport (host) to send LS's such as
27 * Create Association, Create Connection and Disconnect
28 * Association.
29 * Used by the nvmet-fc transport (controller) to send
30 * LS's such as Disconnect Association.
31 *
32 * Values set by the requestor prior to calling the LLDD ls_req entrypoint:
33 * @rqstaddr: pointer to request buffer
34 * @rqstdma: PCI DMA address of request buffer
35 * @rqstlen: Length, in bytes, of request buffer
36 * @rspaddr: pointer to response buffer
37 * @rspdma: PCI DMA address of response buffer
38 * @rsplen: Length, in bytes, of response buffer
39 * @timeout: Maximum amount of time, in seconds, to wait for the LS response.
40 * If the timeout is exceeded, the LLDD is to abort the LS exchange
41 * and complete the LS request with error status.
42 * @private: pointer to memory allocated alongside the ls request structure
43 * that is specifically for the LLDD to use while processing the
44 * request. The length of the buffer corresponds to the
45 * lsrqst_priv_sz value specified in the xxx_template supplied
46 * by the LLDD.
47 * @done: The callback routine the LLDD is to invoke upon completion of
48 * the LS request. req argument is the pointer to the original LS
49 * request structure. Status argument must be 0 upon success, a
50 * negative errno on failure (example: -ENXIO).
51 */
52 struct nvmefc_ls_req {
53 void *rqstaddr;
54 dma_addr_t rqstdma;
55 u32 rqstlen;
56 void *rspaddr;
57 dma_addr_t rspdma;
58 u32 rsplen;
59 u32 timeout;
60
61 void *private;
62
63 void (*done)(struct nvmefc_ls_req *req, int status);
64
65 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
66
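/*
 * Illustrative sketch only (not part of this API): how an LLDD's ls_req()
 * handler might complete an LS exchange described by this structure. The
 * examplefc_* names and the lsop bookkeeping structure are hypothetical.
 *
 *	static void examplefc_ls_exchange_done(struct examplefc_lsop *lsop,
 *					       int hw_error)
 *	{
 *		struct nvmefc_ls_req *lsreq = lsop->lsreq;
 *
 *		// the LS response payload has already been placed by the
 *		// hardware in lsreq->rspaddr/rspdma (at most rsplen bytes)
 *		lsreq->done(lsreq, hw_error ? -ENXIO : 0);
 *	}
 *
 * The transport sizes lsreq->private per the lsrqst_priv_sz value in the
 * LLDD's template, so per-LS LLDD state can live there rather than in a
 * separate allocation.
 */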
67
68 /**
69 * struct nvmefc_ls_rsp - Structure passed from the transport to the LLDD
70 * to request the transmit of the NVME-FC LS response to a
71 * NVME-FC LS request. The structure originates in the LLDD
72 * and is given to the transport via the xxx_rcv_ls_req()
73 * transport routine. As such, the structure represents the
74 * FC exchange context for the NVME-FC LS request that was
75 * received and which the response is to be sent for.
76 * Used by the LLDD to pass the nvmet-fc transport (controller)
77 * received LS's such as Create Association, Create Connection
78 * and Disconnect Association.
79 * Used by the LLDD to pass the nvme-fc transport (host)
80 * received LS's such as Disconnect Association or Disconnect
81 * Connection.
82 *
83 * The structure is allocated by the LLDD whenever a LS Request is received
84 * from the FC link. The address of the structure is passed to the nvmet-fc
85 * or nvme-fc layer via the xxx_rcv_ls_req() transport routines.
86 *
87 * The address of the structure is to be passed back to the LLDD
88 * when the response is to be transmitted. The LLDD will use the address to
89 * map back to the LLDD exchange structure which maintains information such
90 * as the remote N_Port that sent the LS as well as any FC exchange context.
91 * Upon completion of the LS response transmit, the LLDD will pass the
92 * address of the structure back to the transport LS rsp done() routine,
93 * allowing the transport to release dma resources. Upon completion of
94 * the done() routine, no further access to the structure will be made by
95 * the transport and the LLDD can de-allocate the structure.
96 *
97 * Field initialization:
98 * At the time of the xxx_rcv_ls_req() call, there is no content that
99 * is valid in the structure.
100 *
101 * When the structure is used for the LLDD->xmt_ls_rsp() call, the
102 * transport layer will fully set the fields in order to specify the
103 * response payload buffer and its length as well as the done routine
104 * to be called upon completion of the transmit. The transport layer
105 * will also set a private pointer for its own use in the done routine.
106 *
107 * Values set by the transport layer prior to calling the LLDD xmt_ls_rsp
108 * entrypoint:
109 * @rspbuf: pointer to the LS response buffer
110 * @rspdma: PCI DMA address of the LS response buffer
111 * @rsplen: Length, in bytes, of the LS response buffer
112 * @done: The callback routine the LLDD is to invoke upon completion of
113 * transmitting the LS response. The rsp argument is the pointer
114 * to the original nvmefc_ls_rsp structure.
115 * @nvme_fc_private: pointer to an internal transport-specific structure
116 * used as part of the transport done() processing. The LLDD is
117 * not to access this pointer.
118 */
119 struct nvmefc_ls_rsp {
120 void *rspbuf;
121 dma_addr_t rspdma;
122 u16 rsplen;
123
124 void (*done)(struct nvmefc_ls_rsp *rsp);
125 void *nvme_fc_private; /* LLDD is not to access !! */
126 };
127
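/*
 * Illustrative sketch only (not part of this API): the LLDD side of an LS
 * response transmit completing. The examplefc_* names are hypothetical and
 * assume the nvmefc_ls_rsp is embedded in the LLDD's exchange context.
 *
 *	static void examplefc_ls_rsp_xmt_done(struct examplefc_exch *exch)
 *	{
 *		struct nvmefc_ls_rsp *lsrsp = &exch->lsrsp;
 *
 *		// transmit of lsrsp->rspbuf/rspdma (lsrsp->rsplen bytes) is
 *		// complete; let the transport release its dma resources ...
 *		lsrsp->done(lsrsp);
 *		// ... after which the LLDD may free or reuse the exchange
 *		examplefc_free_exchange(exch);
 *	}
 */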
128
129
130 /*
131 * ********************** LLDD FC-NVME Host API ********************
132 *
133 * For FC LLDD's that are the NVME Host role.
134 *
135 * ******************************************************************
136 */
137
138
139 /**
140 * struct nvme_fc_port_info - port-specific ids and FC connection-specific
141 * data element used during NVME Host role
142 * registrations
143 *
144 * Static fields describing the port being registered:
145 * @node_name: FC WWNN for the port
146 * @port_name: FC WWPN for the port
147 * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
148 * @dev_loss_tmo: maximum delay for reconnects to an association on
149 * this device. Used only on a remoteport.
150 *
151 * Initialization values for dynamic port fields:
152 * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
153 * be set to 0.
154 */
155 struct nvme_fc_port_info {
156 u64 node_name;
157 u64 port_name;
158 u32 port_role;
159 u32 port_id;
160 u32 dev_loss_tmo;
161 };
162
163 enum nvmefc_fcp_datadir {
164 NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
165 NVMEFC_FCP_WRITE,
166 NVMEFC_FCP_READ,
167 };
168
169
170 /**
171 * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
172 * to LLDD in order to perform a NVME FCP IO operation.
173 *
174 * Values set by the NVME-FC layer prior to calling the LLDD fcp_io
175 * entrypoint.
176 * @cmdaddr: pointer to the FCP CMD IU buffer
177 * @rspaddr: pointer to the FCP RSP IU buffer
178 * @cmddma: PCI DMA address of the FCP CMD IU buffer
179 * @rspdma: PCI DMA address of the FCP RSP IU buffer
180 * @cmdlen: Length, in bytes, of the FCP CMD IU buffer
181 * @rsplen: Length, in bytes, of the FCP RSP IU buffer
182 * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer
183 * @sg_table: scatter/gather structure for payload data
184 * @first_sgl: memory for 1st scatter/gather list segment for payload data
185 * @sg_cnt: number of elements in the scatter/gather list
186 * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
187 * @sqid: The nvme SQID the command is being issued on
188 * @done: The callback routine the LLDD is to invoke upon completion of
189 * the FCP operation. req argument is the pointer to the original
190 * FCP IO operation.
191 * @private: pointer to memory allocated alongside the FCP operation
192 * request structure that is specifically for the LLDD to use
193 * while processing the operation. The length of the buffer
194 * corresponds to the fcprqst_priv_sz value specified in the
195 * nvme_fc_port_template supplied by the LLDD.
196 *
197 * Values set by the LLDD indicating completion status of the FCP operation.
198 * Must be set prior to calling the done() callback.
199 * @transferred_length: amount of payload data, in bytes, that was
200 * transferred. Should equal payload_length on success.
201 * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
202 * @status: Completion status of the FCP operation. Must be 0 upon success,
203 * negative errno value upon failure (ex: -EIO). Note: this is
204 * NOT a reflection of the NVME CQE completion status. Only the
205 * status of the FCP operation at the NVME-FC level.
206 */
207 struct nvmefc_fcp_req {
208 void *cmdaddr;
209 void *rspaddr;
210 dma_addr_t cmddma;
211 dma_addr_t rspdma;
212 u16 cmdlen;
213 u16 rsplen;
214
215 u32 payload_length;
216 struct sg_table sg_table;
217 struct scatterlist *first_sgl;
218 int sg_cnt;
219 enum nvmefc_fcp_datadir io_dir;
220
221 __le16 sqid;
222
223 void (*done)(struct nvmefc_fcp_req *req);
224
225 void *private;
226
227 u32 transferred_length;
228 u16 rcv_rsplen;
229 u32 status;
230 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
231
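/*
 * Illustrative sketch only (not part of this API): how an LLDD might
 * complete an FCP operation. The examplefc_* names are hypothetical.
 *
 *	static void examplefc_fcp_io_done(struct examplefc_io *io, int hw_error,
 *					  u32 xfrd_len, u16 rsp_iu_len)
 *	{
 *		struct nvmefc_fcp_req *fcpreq = io->fcpreq;
 *
 *		// completion status must be set before calling done()
 *		fcpreq->transferred_length = xfrd_len;
 *		fcpreq->rcv_rsplen = rsp_iu_len;
 *		// FC-level status only, not the NVME CQE status
 *		fcpreq->status = hw_error ? -EIO : 0;
 *		fcpreq->done(fcpreq);
 *	}
 */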
232
233 /*
234 * Direct copy of fc_port_state enum. For later merging
235 */
236 enum nvme_fc_obj_state {
237 FC_OBJSTATE_UNKNOWN,
238 FC_OBJSTATE_NOTPRESENT,
239 FC_OBJSTATE_ONLINE,
240 FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */
241 FC_OBJSTATE_BLOCKED,
242 FC_OBJSTATE_BYPASSED,
243 FC_OBJSTATE_DIAGNOSTICS,
244 FC_OBJSTATE_LINKDOWN,
245 FC_OBJSTATE_ERROR,
246 FC_OBJSTATE_LOOPBACK,
247 FC_OBJSTATE_DELETED,
248 };
249
250
251 /**
252 * struct nvme_fc_local_port - structure used between NVME-FC transport and
253 * a LLDD to reference a local NVME host port.
254 * Allocated/created by the nvme_fc_register_localport()
255 * transport interface.
256 *
257 * Fields with static values for the port. Initialized by the
258 * port_info struct supplied to the registration call.
259 * @port_num: NVME-FC transport host port number
260 * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
261 * @node_name: FC WWNN for the port
262 * @port_name: FC WWPN for the port
263 * @private: pointer to memory allocated alongside the local port
264 * structure that is specifically for the LLDD to use.
265 * The length of the buffer corresponds to the local_priv_sz
266 * value specified in the nvme_fc_port_template supplied by
267 * the LLDD.
268 * @dev_loss_tmo: maximum delay for reconnects to an association on
269 * this device. To modify, lldd must call
270 * nvme_fc_set_remoteport_devloss().
271 *
272 * Fields with dynamic values. Values may change based on link state. LLDD
273 * may reference fields directly to change them. Initialized by the
274 * port_info struct supplied to the registration call.
275 * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
276 * be set to 0.
277 * @port_state: Operational state of the port.
278 */
279 struct nvme_fc_local_port {
280 /* static/read-only fields */
281 u32 port_num;
282 u32 port_role;
283 u64 node_name;
284 u64 port_name;
285
286 void *private;
287
288 /* dynamic fields */
289 u32 port_id;
290 enum nvme_fc_obj_state port_state;
291 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
292
293
294 /**
295 * struct nvme_fc_remote_port - structure used between NVME-FC transport and
296 * a LLDD to reference a remote NVME subsystem port.
297 * Allocated/created by the nvme_fc_register_remoteport()
298 * transport interface.
299 *
300 * Fields with static values for the port. Initialized by the
301 * port_info struct supplied to the registration call.
302 * @port_num: NVME-FC transport remote subsystem port number
303 * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
304 * @node_name: FC WWNN for the port
305 * @port_name: FC WWPN for the port
306 * @localport: pointer to the NVME-FC local host port the subsystem is
307 * connected to.
308 * @private: pointer to memory allocated alongside the remote port
309 * structure that is specifically for the LLDD to use.
310 * The length of the buffer corresponds to the remote_priv_sz
311 * value specified in the nvme_fc_port_template supplied by
312 * the LLDD.
313 *
314 * Fields with dynamic values. Values may change based on link or login
315 * state. LLDD may reference fields directly to change them. Initialized by
316 * the port_info struct supplied to the registration call.
317 * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
318 * be set to 0.
319 * @port_state: Operational state of the remote port. Valid values are
320 * ONLINE or UNKNOWN.
321 */
322 struct nvme_fc_remote_port {
323 /* static fields */
324 u32 port_num;
325 u32 port_role;
326 u64 node_name;
327 u64 port_name;
328 struct nvme_fc_local_port *localport;
329 void *private;
330 u32 dev_loss_tmo;
331
332 /* dynamic fields */
333 u32 port_id;
334 enum nvme_fc_obj_state port_state;
335 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
336
337
338 /**
339 * struct nvme_fc_port_template - structure containing static entrypoints and
340 * operational parameters for an LLDD that supports NVME host
341 * behavior. Passed by reference in port registrations.
342 * NVME-FC transport remembers template reference and may
343 * access it during runtime operation.
344 *
345 * Host/Initiator Transport Entrypoints/Parameters:
346 *
347 * @localport_delete: The LLDD initiates deletion of a localport via
348 * nvme_fc_deregister_localport(). However, the teardown is
349 * asynchronous. This routine is called upon the completion of the
350 * teardown to inform the LLDD that the localport has been deleted.
351 * Entrypoint is Mandatory.
352 *
353 * @remoteport_delete: The LLDD initiates deletion of a remoteport via
354 * nvme_fc_deregister_remoteport(). However, the teardown is
355 * asynchronous. This routine is called upon the completion of the
356 * teardown to inform the LLDD that the remoteport has been deleted.
357 * Entrypoint is Mandatory.
358 *
359 * @create_queue: Upon creating a host<->controller association, queues are
360 * created such that they can be affinitized to cpus/cores. This
361 * callback notifies the LLDD that a controller queue is being
362 * created. The LLDD may choose to allocate an associated hw queue
363 * or map it onto a shared hw queue. Upon return from the call, the
364 * LLDD specifies a handle that will be given back to it for any
365 * command that is posted to the controller queue. The handle can
366 * be used by the LLDD to map quickly to the proper hw queue for
367 * command execution. The mask of cpu's that will map to this queue
368 * at the block-level is also passed in. The LLDD should use the
369 * queue id and/or cpu masks to ensure proper affinitization of the
370 * controller queue to the hw queue.
371 * Entrypoint is Optional.
372 *
373 * @delete_queue: This is the inverse of the create_queue. During
374 * host<->controller association teardown, this routine is called
375 * when a controller queue is being terminated. Any association with
376 * a hw queue should be terminated. If there is a unique hw queue, the
377 * hw queue should be torn down.
378 * Entrypoint is Optional.
379 *
380 * @poll_queue: Called to poll for the completion of an io on a blk queue.
381 * Entrypoint is Optional.
382 *
383 * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
384 * The nvme_fc_ls_req structure will fully describe the buffers for
385 * the request payload and where to place the response payload. The
386 * LLDD is to allocate an exchange, issue the LS request, obtain the
387 * LS response, and call the "done" routine specified in the request
388 * structure (argument to done is the ls request structure itself).
389 * Entrypoint is Mandatory.
390 *
391 * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for
392 * an admin queue or an i/o queue. The nvmefc_fcp_req structure will
393 * fully describe the io: the buffer containing the FC-NVME CMD IU
394 * (which contains the SQE), the sg list for the payload if applicable,
395 * and the buffer to place the FC-NVME RSP IU into. The LLDD will
396 * complete the i/o, indicating the amount of data transferred or
397 * any transport error, and call the "done" routine specified in the
398 * request structure (argument to done is the fcp request structure
399 * itself).
400 * Entrypoint is Mandatory.
401 *
402 * @ls_abort: called to request the LLDD to abort the indicated ls request.
403 * The call may return before the abort has completed. After aborting
404 * the request, the LLDD must still call the ls request done routine
405 * indicating an FC transport Aborted status.
406 * Entrypoint is Mandatory.
407 *
408 * @fcp_abort: called to request the LLDD to abort the indicated fcp request.
409 * The call may return before the abort has completed. After aborting
410 * the request, the LLDD must still call the fcp request done routine
411 * indicating an FC transport Aborted status.
412 * Entrypoint is Mandatory.
413 *
414 * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
415 * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
416 * structure specified in the nvme_fc_rcv_ls_req() call made when
417 * the LS request was received. The structure will fully describe
418 * the buffers for the response payload and the dma address of the
419 * payload. The LLDD is to transmit the response (or return a
420 * non-zero errno status), and upon completion of the transmit, call
421 * the "done" routine specified in the nvmefc_ls_rsp structure
422 * (argument to done is the address of the nvmefc_ls_rsp structure
423 * itself). Upon the completion of the done routine, the LLDD shall
424 * consider the LS handling complete and the nvmefc_ls_rsp structure
425 * may be freed/released.
426 * Entrypoint is mandatory if the LLDD calls the nvme_fc_rcv_ls_req()
427 * entrypoint.
428 *
429 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
430 * supports for cpu affinitization.
431 * Value is Mandatory. Must be at least 1.
432 *
433 * @max_sgl_segments: indicates the maximum number of sgl segments supported
434 * by the LLDD
435 * Value is Mandatory. Must be at least 1. Recommend at least 256.
436 *
437 * @max_dif_sgl_segments: indicates the maximum number of sgl segments
438 * supported by the LLDD for DIF operations.
439 * Value is Mandatory. Must be at least 1. Recommend at least 256.
440 *
441 * @dma_boundary: indicates the dma address boundary where dma mappings
442 * will be split across.
443 * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
444 * 4Gig address boundaries
445 *
446 * @local_priv_sz: The LLDD sets this field to the amount of additional
447 * memory that it would like fc nvme layer to allocate on the LLDD's
448 * behalf whenever a localport is allocated. The additional memory
449 * area is solely for the use of the LLDD and its location is specified by
450 * the localport->private pointer.
451 * Value is Mandatory. Allowed to be zero.
452 *
453 * @remote_priv_sz: The LLDD sets this field to the amount of additional
454 * memory that it would like fc nvme layer to allocate on the LLDD's
455 * behalf whenever a remoteport is allocated. The additional memory
456 * area is solely for the use of the LLDD and its location is specified by
457 * the remoteport->private pointer.
458 * Value is Mandatory. Allowed to be zero.
459 *
460 * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
461 * memory that it would like fc nvme layer to allocate on the LLDD's
462 * behalf whenever a ls request structure is allocated. The additional
463 * memory area is solely for use by the LLDD and its location is
464 * specified by the ls_request->private pointer.
465 * Value is Mandatory. Allowed to be zero.
466 *
467 * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional
468 * memory that it would like fc nvme layer to allocate on the LLDD's
469 * behalf whenever a fcp request structure is allocated. The additional
470 * memory area is solely for the use of the LLDD and its location is
471 * specified by the fcp_request->private pointer.
472 * Value is Mandatory. Allowed to be zero.
473 */
474 struct nvme_fc_port_template {
475 /* initiator-based functions */
476 void (*localport_delete)(struct nvme_fc_local_port *);
477 void (*remoteport_delete)(struct nvme_fc_remote_port *);
478 int (*create_queue)(struct nvme_fc_local_port *,
479 unsigned int qidx, u16 qsize,
480 void **handle);
481 void (*delete_queue)(struct nvme_fc_local_port *,
482 unsigned int qidx, void *handle);
483 int (*ls_req)(struct nvme_fc_local_port *,
484 struct nvme_fc_remote_port *,
485 struct nvmefc_ls_req *);
486 int (*fcp_io)(struct nvme_fc_local_port *,
487 struct nvme_fc_remote_port *,
488 void *hw_queue_handle,
489 struct nvmefc_fcp_req *);
490 void (*ls_abort)(struct nvme_fc_local_port *,
491 struct nvme_fc_remote_port *,
492 struct nvmefc_ls_req *);
493 void (*fcp_abort)(struct nvme_fc_local_port *,
494 struct nvme_fc_remote_port *,
495 void *hw_queue_handle,
496 struct nvmefc_fcp_req *);
497 int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
498 struct nvme_fc_remote_port *rport,
499 struct nvmefc_ls_rsp *ls_rsp);
500
501 u32 max_hw_queues;
502 u16 max_sgl_segments;
503 u16 max_dif_sgl_segments;
504 u64 dma_boundary;
505
506 /* sizes of additional private data for data structures */
507 u32 local_priv_sz;
508 u32 remote_priv_sz;
509 u32 lsrqst_priv_sz;
510 u32 fcprqst_priv_sz;
511 };
512
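/*
 * Illustrative sketch only (not part of this API): a hypothetical LLDD
 * ("examplefc") declaring its host-side template. The handler functions,
 * private structures and sizing values are made up for the example.
 *
 *	static struct nvme_fc_port_template examplefc_nvme_template = {
 *		.localport_delete	= examplefc_nvme_localport_delete,
 *		.remoteport_delete	= examplefc_nvme_remoteport_delete,
 *		.create_queue		= examplefc_nvme_create_queue,
 *		.delete_queue		= examplefc_nvme_delete_queue,
 *		.ls_req			= examplefc_nvme_ls_req,
 *		.fcp_io			= examplefc_nvme_fcp_io,
 *		.ls_abort		= examplefc_nvme_ls_abort,
 *		.fcp_abort		= examplefc_nvme_fcp_abort,
 *		.max_hw_queues		= 8,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct examplefc_lport_priv),
 *		.remote_priv_sz		= sizeof(struct examplefc_rport_priv),
 *		.lsrqst_priv_sz		= sizeof(struct examplefc_lsop),
 *		.fcprqst_priv_sz	= sizeof(struct examplefc_io),
 *	};
 */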
513
514 /*
515 * Initiator/Host functions
516 */
517
518 int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
519 struct nvme_fc_port_template *template,
520 struct device *dev,
521 struct nvme_fc_local_port **lport_p);
522
523 int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport);
524
525 int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
526 struct nvme_fc_port_info *pinfo,
527 struct nvme_fc_remote_port **rport_p);
528
529 int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
530
531 void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
532
533 int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
534 u32 dev_loss_tmo);
535
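/*
 * Illustrative sketch only (not part of this API): registering a local
 * port and, once fabric login to a subsystem port completes, the matching
 * remote port. wwnn/wwpn/n_port_id/pdev and the examplefc_* names are
 * placeholders; FC_PORT_ROLE_NVME_INITIATOR comes from the FC transport
 * headers.
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name	= wwnn,
 *		.port_name	= wwpn,
 *		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
 *		.port_id	= n_port_id,	// upper 8 bits must be zero
 *	};
 *	struct nvme_fc_local_port *lport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &examplefc_nvme_template,
 *					 &pdev->dev, &lport);
 *	if (ret)
 *		return ret;
 *
 *	// later, for each discovered NVME subsystem port:
 *	// nvme_fc_register_remoteport(lport, &remote_pinfo, &rport);
 */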
536 /*
537 * Routine called to pass a NVME-FC LS request, received by the lldd,
538 * to the nvme-fc transport.
539 *
540 * If the return value is zero: the LS was successfully accepted by the
541 * transport.
542 * If the return value is non-zero: the transport has not accepted the
543 * LS. The lldd should ABTS-LS the LS.
544 *
545 * Note: if the LLDD receives an ABTS for the LS prior to the transport
546 * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
547 * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
548 * response shall not be transmitted and the struct nvmefc_ls_rsp done()
549 * routine shall be called. The LLDD may transmit the ABTS response as
550 * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
551 * made.
552 * Note: if an RCV LS was successfully posted to the transport and the
553 * remoteport is then unregistered before xmt_ls_rsp() was called for
554 * the lsrsp structure, the transport will still call xmt_ls_rsp()
555 * afterward to cleanup the outstanding lsrsp structure. The LLDD should
556 * noop the transmission of the rsp and call the lsrsp->done() routine
557 * to allow the lsrsp structure to be released.
558 */
559 int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
560 struct nvmefc_ls_rsp *lsrsp,
561 void *lsreqbuf, u32 lsreqbuf_len);
562
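/*
 * Illustrative sketch only (not part of this API): an LLDD handing a
 * received LS to the host-side transport. The examplefc_* names and the
 * exch structure are hypothetical.
 *
 *	if (nvme_fc_rcv_ls_req(rport, &exch->lsrsp,
 *			       exch->rqst_buf, exch->rqst_len)) {
 *		// transport did not accept the LS: ABTS-LS the exchange
 *		examplefc_abts_ls(exch);
 *	}
 */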
563
564
565 /*
566 * *************** LLDD FC-NVME Target/Subsystem API ***************
567 *
568 * For FC LLDD's that are the NVME Subsystem role
569 *
570 * ******************************************************************
571 */
572
573 /**
574 * struct nvmet_fc_port_info - port-specific ids and FC connection-specific
575 * data element used during NVME Subsystem role
576 * registrations
577 *
578 * Static fields describing the port being registered:
579 * @node_name: FC WWNN for the port
580 * @port_name: FC WWPN for the port
581 *
582 * Initialization values for dynamic port fields:
583 * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
584 * be set to 0.
585 */
586 struct nvmet_fc_port_info {
587 u64 node_name;
588 u64 port_name;
589 u32 port_id;
590 };
591
592
593 /* Operations that NVME-FC layer may request the LLDD to perform for FCP */
594 enum {
595 NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
596 NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */
597 NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send
598 * rsp as well
599 */
600 NVMET_FCOP_RSP = 4, /* send rsp frame */
601 };
602
603 /**
604 * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC
605 * layer to represent the exchange context and
606 * the specific FC-NVME IU operation(s) to perform
607 * for a FC-NVME FCP IO.
608 *
609 * Structure used between LLDD and nvmet-fc layer to represent the exchange
610 * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
611 * memory transfers, and its associated cqe transfer).
612 *
613 * The structure is allocated by the LLDD whenever a FCP CMD IU is received
614 * from the FC link. The address of the structure is passed to the nvmet-fc
615 * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure
616 * will be passed back to the LLDD for the data operations and transmit of
617 * the response. The LLDD is to use the address to map back to the LLDD
618 * exchange structure which maintains information such as the targetport
619 * the FCP I/O was received on, the remote FC NVME initiator that sent the
620 * FCP I/O, and any FC exchange context. Upon completion of the FCP target
621 * operation, the address of the structure will be passed back to the FCP
622 * op done() routine, allowing the nvmet-fc layer to release dma resources.
623 * Upon completion of the done() routine for either RSP or ABORT ops, no
624 * further access will be made by the nvmet-fc layer and the LLDD can
625 * de-allocate the structure.
626 *
627 * Field initialization:
628 * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that
629 * is valid in the structure.
630 *
631 * When the structure is used for an FCP target operation, the nvmet-fc
632 * layer will fully set the fields in order to specify the scattergather
633 * list, the transfer length, as well as the done routine to be called
634 * upon completion of the operation. The nvmet-fc layer will also set a
635 * private pointer for its own use in the done routine.
636 *
637 * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
638 * entrypoint.
639 * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
640 * @hwqid: Specifies the hw queue index (0..N-1, where N is the
641 * max_hw_queues value from the LLDD's nvmet_fc_target_template)
642 * that the operation is to use.
643 * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred.
644 * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops.
645 * @timeout: amount of time, in seconds, to wait for a response from the NVME
646 * host. A value of 0 is an infinite wait.
647 * Valid only for the following ops:
648 * WRITEDATA: caps the wait for data reception
649 * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used)
650 * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload
651 * that is to be transferred.
652 * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
653 * @ba_rjt: Contains the BA_RJT payload that is to be transferred.
654 * Valid only for the NVMET_FCOP_BA_RJT op.
655 * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data.
656 * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
657 * @sg_cnt: Number of valid entries in the scatter/gather list.
658 * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
659 * @rspaddr: pointer to the FCP RSP IU buffer to be transmit
660 * Used by RSP and READDATA_RSP ops
661 * @rspdma: PCI DMA address of the FCP RSP IU buffer
662 * Used by RSP and READDATA_RSP ops
663 * @rsplen: Length, in bytes, of the FCP RSP IU buffer
664 * Used by RSP and READDATA_RSP ops
665 * @done: The callback routine the LLDD is to invoke upon completion of
666 * the operation. req argument is the pointer to the original
667 * FCP subsystem op request.
668 * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
669 * as part of the NVMET-FC processing. The LLDD is not to
670 * reference this field.
671 *
672 * Values set by the LLDD indicating completion status of the FCP operation.
673 * Must be set prior to calling the done() callback.
674 * @transferred_length: amount of DATA_OUT payload data received by
675 * a WRITEDATA operation. If not a WRITEDATA operation, value must
676 * be set to 0. Should equal transfer_length on success.
677 * @fcp_error: status of the FCP operation. Must be 0 on success; on failure
678 * must be a NVME_SC_FC_xxxx value.
679 */
680 struct nvmefc_tgt_fcp_req {
681 u8 op;
682 u16 hwqid;
683 u32 offset;
684 u32 timeout;
685 u32 transfer_length;
686 struct fc_ba_rjt ba_rjt;
687 struct scatterlist *sg;
688 int sg_cnt;
689 void *rspaddr;
690 dma_addr_t rspdma;
691 u16 rsplen;
692
693 void (*done)(struct nvmefc_tgt_fcp_req *);
694
695 void *nvmet_fc_private; /* LLDD is not to access !! */
696
697 u32 transferred_length;
698 int fcp_error;
699 };
700
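/*
 * Illustrative sketch only (not part of this API): the general shape of a
 * target LLDD's fcp_op() handler. The examplefc_* names are hypothetical
 * and error handling is elided.
 *
 *	static int examplefc_tgt_fcp_op(struct nvmet_fc_target_port *tgtport,
 *					struct nvmefc_tgt_fcp_req *fcpreq)
 *	{
 *		switch (fcpreq->op) {
 *		case NVMET_FCOP_WRITEDATA:
 *			// generate XFER_RDY and receive fcpreq->transfer_length
 *			// bytes at fcpreq->offset into fcpreq->sg/sg_cnt
 *			return examplefc_recv_data(tgtport, fcpreq);
 *		case NVMET_FCOP_READDATA:
 *		case NVMET_FCOP_READDATA_RSP:
 *			// transmit the payload; for READDATA_RSP also follow
 *			// with the FCP_RSP IU at fcpreq->rspaddr/rspdma
 *			return examplefc_send_data(tgtport, fcpreq);
 *		case NVMET_FCOP_RSP:
 *			return examplefc_send_rsp(tgtport, fcpreq);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 * When the hardware operation finishes, the LLDD sets
 * fcpreq->transferred_length and fcpreq->fcp_error and calls fcpreq->done().
 */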
701
702 /* Target Features (Bit fields) LLDD supports */
703 enum {
704 NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0),
705 /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which
706 * sends (the last) Read Data sequence followed by the RSP
707 * sequence in one LLDD operation. Errors during Data
708 * sequence transmit must not allow RSP sequence to be sent.
709 */
710 };
711
712
713 /**
714 * struct nvmet_fc_target_port - structure used between NVME-FC transport and
715 * a LLDD to reference a local NVME subsystem port.
716 * Allocated/created by the nvme_fc_register_targetport()
717 * transport interface.
718 *
719 * Fields with static values for the port. Initialized by the
720 * port_info struct supplied to the registration call.
721 * @port_num: NVME-FC transport subsystem port number
722 * @node_name: FC WWNN for the port
723 * @port_name: FC WWPN for the port
724 * @private: pointer to memory allocated alongside the local port
725 * structure that is specifically for the LLDD to use.
726 * The length of the buffer corresponds to the target_priv_sz
727 * value specified in the nvmet_fc_target_template supplied by
728 * the LLDD.
729 *
730 * Fields with dynamic values. Values may change based on link state. LLDD
731 * may reference fields directly to change them. Initialized by the
732 * port_info struct supplied to the registration call.
733 * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
734 * be set to 0.
735 * @port_state: Operational state of the port.
736 */
737 struct nvmet_fc_target_port {
738 /* static/read-only fields */
739 u32 port_num;
740 u64 node_name;
741 u64 port_name;
742
743 void *private;
744
745 /* dynamic fields */
746 u32 port_id;
747 enum nvme_fc_obj_state port_state;
748 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
749
750
751 /**
752 * struct nvmet_fc_target_template - structure containing static entrypoints
753 * and operational parameters for an LLDD that supports NVME
754 * subsystem behavior. Passed by reference in port
755 * registrations. NVME-FC transport remembers template
756 * reference and may access it during runtime operation.
757 *
758 * Subsystem/Target Transport Entrypoints/Parameters:
759 *
760 * @targetport_delete: The LLDD initiates deletion of a targetport via
761 * nvmet_fc_unregister_targetport(). However, the teardown is
762 * asynchronous. This routine is called upon the completion of the
763 * teardown to inform the LLDD that the targetport has been deleted.
764 * Entrypoint is Mandatory.
765 *
766 * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
767 * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
768 * structure specified in the nvmet_fc_rcv_ls_req() call made when
769 * the LS request was received. The structure will fully describe
770 * the buffers for the response payload and the dma address of the
771 * payload. The LLDD is to transmit the response (or return a
772 * non-zero errno status), and upon completion of the transmit, call
773 * the "done" routine specified in the nvmefc_ls_rsp structure
774 * (argument to done is the address of the nvmefc_ls_rsp structure
775 * itself). Upon the completion of the done() routine, the LLDD shall
776 * consider the LS handling complete and the nvmefc_ls_rsp structure
777 * may be freed/released.
778 * The transport will always call the xmt_ls_rsp() routine for any
779 * LS received.
780 * Entrypoint is Mandatory.
781 *
782 * @fcp_op: Called to perform a data transfer or transmit a response.
783 * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
784 * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
785 * made when the FCP CMD IU was received. The op field in the
786 * structure shall indicate the operation for the LLDD to perform
787 * relative to the io.
788 * NVMET_FCOP_READDATA operation: the LLDD is to send the
789 * payload data (described by sglist) to the host in 1 or
790 * more FC sequences (preferably 1). Note: the fc-nvme layer
791 * may call the READDATA operation multiple times for longer
792 * payloads.
793 * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the
794 * payload data (described by sglist) from the host via 1 or
795 * more FC sequences (preferably 1). The LLDD is to generate
796 * the XFER_RDY IU(s) corresponding to the data being requested.
797 * Note: the FC-NVME layer may call the WRITEDATA operation
798 * multiple times for longer payloads.
799 * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the
800 * payload data (described by sglist) to the host in 1 or
801 * more FC sequences (preferably 1). If an error occurs during
802 * payload data transmission, the LLDD is to set the
803 * nvmefc_tgt_fcp_req fcp_error and transferred_length fields, then
804 * consider the operation complete. On error, the LLDD is to not
805 * transmit the FCP_RSP iu. If all payload data is transferred
806 * successfully, the LLDD is to update the nvmefc_tgt_fcp_req
807 * transferred_length field and may subsequently transmit the
808 * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
809 * If FCP_CONF is supported, the LLDD is to await FCP_CONF
810 * reception to confirm the RSP reception by the host. The LLDD
811 * may retransmit the FCP_RSP iu if necessary per FC-NVME. Upon
812 * transmission of the FCP_RSP iu if FCP_CONF is not supported,
813 * or upon success/failure of FCP_CONF if it is supported, the
814 * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
815 * consider the operation complete.
816 * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
817 * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
818 * supported, the LLDD is to await FCP_CONF reception to confirm
819 * the RSP reception by the host. The LLDD may retransmit the
820 * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
821 * transmission of the FCP_RSP iu if FCP_CONF is not supported,
822 * or upon success/failure of FCP_CONF if it is supported, the
823 * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
824 * consider the operation complete.
825 * Upon completing the indicated operation, the LLDD is to set the
826 * status fields for the operation (transferred_length and fcp_error
827 * status) in the request, then call the "done" routine
828 * indicated in the fcp request. After the operation completes,
829 * regardless of whether the FCP_RSP iu was successfully transmitted,
830 * the LLDD-supplied exchange structure must remain valid until the
831 * transport calls the fcp_req_release() callback to return ownership
832 * of the exchange structure back to the LLDD so that it may be used
833 * for another fcp command.
834 * Note: when calling the done routine for READDATA or WRITEDATA
835 * operations, the fc-nvme layer may immediately convert, in the same
836 * thread and before returning to the LLDD, the fcp operation to
837 * the next operation for the fcp io and call the LLDD's fcp_op
838 * call again. If fields in the fcp request are to be accessed post
839 * the done call, the LLDD should save their values prior to calling
840 * the done routine, and inspect the saved values after the done
841 * routine.
842 * Returns 0 on success, -<errno> on failure (Ex: -EIO)
843 * Entrypoint is Mandatory.
844 *
845 * @fcp_abort: Called by the transport to abort an active command.
846 * The command may be in-between operations (nothing active in LLDD)
847 * or may have an active WRITEDATA operation pending. The LLDD is to
848 * initiate the ABTS process for the command and return from the
849 * callback. The ABTS does not need to be complete on the command.
850 * The fcp_abort callback inherently cannot fail. After the
851 * fcp_abort() callback completes, the transport will wait for any
852 * outstanding operation (if there was one) to complete, then will
853 * call the fcp_req_release() callback to return the command's
854 * exchange context back to the LLDD.
855 * Entrypoint is Mandatory.
856 *
857 * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
858 * to the LLDD after all operations on the fcp operation are complete.
859 * This may be due to the command completing or upon completion of
860 * abort cleanup.
861 * Entrypoint is Mandatory.
862 *
863 * @defer_rcv: Called by the transport to signal the LLDD that it has
864 * begun processing of a previously received NVME CMD IU. The LLDD
865 * is now free to re-use the rcv buffer associated with the
866 * nvmefc_tgt_fcp_req.
867 * Entrypoint is Optional.
868 *
869 * @discovery_event: Called by the transport to generate RSCN
870 * change notifications to NVME initiators. The RSCN notifications
871 * should cause the initiator to rescan the discovery controller
872 * on the targetport.
873 *
874 * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
875 * The nvme_fc_ls_req structure will fully describe the buffers for
876 * the request payload and where to place the response payload.
877 * The targetport that is to issue the LS request is identified by
878 * the targetport argument. The remote port that is to receive the
879 * LS request is identified by the hosthandle argument. The nvmet-fc
880 * transport is only allowed to issue FC-NVME LS's on behalf of an
881 * association that was created prior by a Create Association LS.
882 * The hosthandle will originate from the LLDD in the struct
883 * nvmefc_ls_rsp structure for the Create Association LS that
884 * was delivered to the transport. The transport will save the
885 * hosthandle as an attribute of the association. If the LLDD
886 * loses connectivity with the remote port, it must call the
887 * nvmet_fc_invalidate_host() routine to remove any references to
888 * the remote port in the transport.
889 * The LLDD is to allocate an exchange, issue the LS request, obtain
890 * the LS response, and call the "done" routine specified in the
891 * request structure (argument to done is the ls request structure
892 * itself).
893 * Entrypoint is Optional - but highly recommended.
894 *
895 * @ls_abort: called to request the LLDD to abort the indicated ls request.
896 * The call may return before the abort has completed. After aborting
897 * the request, the LLDD must still call the ls request done routine
898 * indicating an FC transport Aborted status.
899 * Entrypoint is Mandatory if the ls_req entry point is specified.
900 *
901 * @host_release: called to inform the LLDD that the request to invalidate
902 * the host port indicated by the hosthandle has been fully completed.
903 * No associations exist with the host port and there will be no
904 * further references to hosthandle.
905 * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
906 *
907 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
908 * supports for cpu affinitization.
909 * Value is Mandatory. Must be at least 1.
910 *
911 * @max_sgl_segments: indicates the maximum number of sgl segments supported
912 * by the LLDD
913 * Value is Mandatory. Must be at least 1. Recommend at least 256.
914 *
915 * @max_dif_sgl_segments: indicates the maximum number of sgl segments
916 * supported by the LLDD for DIF operations.
917 * Value is Mandatory. Must be at least 1. Recommend at least 256.
918 *
919 * @dma_boundary: indicates the dma address boundary where dma mappings
920 * will be split across.
921 * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
922 * 4Gig address boundaries
923 *
924 * @target_features: The LLDD sets bits in this field to correspond to
925 * optional features that are supported by the LLDD.
926 * Refer to the NVMET_FCTGTFEAT_xxx values.
927 * Value is Mandatory. Allowed to be zero.
928 *
929 * @target_priv_sz: The LLDD sets this field to the amount of additional
930 * memory that it would like fc nvme layer to allocate on the LLDD's
931 * behalf whenever a targetport is allocated. The additional memory
932 * area is solely for the use of the LLDD and its location is specified by
933 * the targetport->private pointer.
934 * Value is Mandatory. Allowed to be zero.
935 *
936 * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
937 * memory that it would like nvmet-fc layer to allocate on the LLDD's
938 * behalf whenever a ls request structure is allocated. The additional
939 * memory area is solely for use by the LLDD and its location is
940 * specified by the ls_request->private pointer.
941 * Value is Mandatory. Allowed to be zero.
942 *
943 */
944 struct nvmet_fc_target_template {
945 void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
946 int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
947 struct nvmefc_ls_rsp *ls_rsp);
948 int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
949 struct nvmefc_tgt_fcp_req *fcpreq);
950 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
951 struct nvmefc_tgt_fcp_req *fcpreq);
952 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
953 struct nvmefc_tgt_fcp_req *fcpreq);
954 void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
955 struct nvmefc_tgt_fcp_req *fcpreq);
956 void (*discovery_event)(struct nvmet_fc_target_port *tgtport);
957 int (*ls_req)(struct nvmet_fc_target_port *targetport,
958 void *hosthandle, struct nvmefc_ls_req *lsreq);
959 void (*ls_abort)(struct nvmet_fc_target_port *targetport,
960 void *hosthandle, struct nvmefc_ls_req *lsreq);
961 void (*host_release)(void *hosthandle);
962
963 u32 max_hw_queues;
964 u16 max_sgl_segments;
965 u16 max_dif_sgl_segments;
966 u64 dma_boundary;
967
968 u32 target_features;
969
970 /* sizes of additional private data for data structures */
971 u32 target_priv_sz;
972 u32 lsrqst_priv_sz;
973 };
974
975
976 int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
977 struct nvmet_fc_target_template *template,
978 struct device *dev,
979 struct nvmet_fc_target_port **tgtport_p);
980
981 int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
982
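/*
 * Illustrative sketch only (not part of this API): a hypothetical target
 * LLDD declaring its template and registering a target port. The
 * examplefc_* names, private structure and wwnn/wwpn/n_port_id/pdev values
 * are placeholders.
 *
 *	static struct nvmet_fc_target_template examplefc_tgt_template = {
 *		.targetport_delete	= examplefc_tgt_delete,
 *		.xmt_ls_rsp		= examplefc_tgt_xmt_ls_rsp,
 *		.fcp_op			= examplefc_tgt_fcp_op,
 *		.fcp_abort		= examplefc_tgt_fcp_abort,
 *		.fcp_req_release	= examplefc_tgt_fcp_req_release,
 *		.max_hw_queues		= 8,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_features	= 0,
 *		.target_priv_sz		= sizeof(struct examplefc_tgtport_priv),
 *	};
 *
 *	struct nvmet_fc_port_info tinfo = {
 *		.node_name = wwnn, .port_name = wwpn, .port_id = n_port_id,
 *	};
 *	struct nvmet_fc_target_port *tgtport;
 *	int ret = nvmet_fc_register_targetport(&tinfo, &examplefc_tgt_template,
 *					       &pdev->dev, &tgtport);
 */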
983 /*
984 * Routine called to pass a NVME-FC LS request, received by the lldd,
985 * to the nvmet-fc transport.
986 *
987 * If the return value is zero: the LS was successfully accepted by the
988 * transport.
989 * If the return value is non-zero: the transport has not accepted the
990 * LS. The lldd should ABTS-LS the LS.
991 *
992 * Note: if the LLDD receives an ABTS for the LS prior to the transport
993 * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
994 * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
995 * response shall not be transmitted and the struct nvmefc_ls_rsp done()
996 * routine shall be called. The LLDD may transmit the ABTS response as
997 * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
998 * made.
999 * Note: if an RCV LS was successfully posted to the transport and the
1000 * targetport is then unregistered before xmt_ls_rsp() was called for
1001 * the lsrsp structure, the transport will still call xmt_ls_rsp()
1002 * afterward to cleanup the outstanding lsrsp structure. The LLDD should
1003 * noop the transmission of the rsp and call the lsrsp->done() routine
1004 * to allow the lsrsp structure to be released.
1005 */
1006 int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
1007 void *hosthandle,
1008 struct nvmefc_ls_rsp *rsp,
1009 void *lsreqbuf, u32 lsreqbuf_len);
1010
1011 /*
1012 * Routine called by the LLDD whenever it has a logout or loss of
1013 * connectivity to a NVME-FC host port for which there had been
1014 * active NVMe controllers. The host port is indicated by the
1015 * hosthandle. The hosthandle is given to the nvmet-fc transport
1016 * when a NVME LS was received, typically to create a new association.
1017 * The nvmet-fc transport will cache the hostport value with the
1018 * association for use in LS requests for the association.
1019 * When the LLDD calls this routine, the nvmet-fc transport will
1020 * immediately terminate all associations that were created with
1021 * the hosthandle host port.
1022 * The LLDD, after calling this routine and having control returned,
1023 * must assume the transport may subsequently utilize hosthandle as
1024 * part of sending LS's to terminate the association. The LLDD
1025 * should reject the LS's if they are attempted.
1026 * Once the last association has terminated for the hosthandle host
1027 * port, the nvmet-fc transport will call the ops->host_release()
1028 * callback. As of the callback, the nvmet-fc transport will no
1029 * longer reference hosthandle.
1030 */
1031 void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *tgtport,
1032 void *hosthandle);
1033
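/*
 * Illustrative sketch only (not part of this API): an LLDD reacting to a
 * LOGO/loss of connectivity for a previously seen NVME host port. The
 * rport/hosthandle names are placeholders.
 *
 *	nvmet_fc_invalidate_host(tgtport, rport->hosthandle);
 *	// from here on, reject any LS the transport attempts to send via
 *	// this hosthandle; once the last association is gone the template's
 *	// host_release(hosthandle) callback is invoked
 */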
1034 /*
1035 * If nvmet_fc_rcv_fcp_req returns non-zero, the transport has not accepted
1036 * the FCP cmd. The lldd should ABTS-LS the cmd.
1037 */
1038 int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
1039 struct nvmefc_tgt_fcp_req *fcpreq,
1040 void *cmdiubuf, u32 cmdiubuf_len);
1041
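/*
 * Illustrative sketch only (not part of this API): an LLDD posting a newly
 * received FCP CMD IU to the nvmet-fc transport. The examplefc_* names and
 * the exch structure are hypothetical.
 *
 *	struct nvmefc_tgt_fcp_req *fcpreq = &exch->tgt_fcp_req;
 *
 *	if (nvmet_fc_rcv_fcp_req(tgtport, fcpreq,
 *				 exch->cmdiu_buf, exch->cmdiu_len)) {
 *		// transport did not accept the command: abort the exchange
 *		examplefc_abts(exch);
 *	}
 */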
1042 void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
1043 struct nvmefc_tgt_fcp_req *fcpreq);
1044
1045 #endif /* _NVME_FC_DRIVER_H */