1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * FILE: megaraid_sas_fusion.h
21 *
22 * Authors: Avago Technologies
23 * Manoj Jose
24 * Sumant Patro
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #ifndef _MEGARAID_SAS_FUSION_H_
35 #define _MEGARAID_SAS_FUSION_H_
36
37 /* Fusion defines */
38 #define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
39 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
40 #define MEGASAS_MAX_CHAIN_SHIFT 5
41 #define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
42 #define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
43 #define MEGASAS_256K_IO 128
44 #define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
45 #define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
46 #define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
47 #define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
48 #define MEGASAS_LOAD_BALANCE_FLAG 0x1
49 #define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
50 #define HOST_DIAG_WRITE_ENABLE 0x80
51 #define HOST_DIAG_RESET_ADAPTER 0x4
52 #define MEGASAS_FUSION_MAX_RESET_TRIES 3
53 #define MAX_MSIX_QUEUES_FUSION 128
54
55 /* Invader defines */
56 #define MPI2_TYPE_CUDA 0x2
57 #define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
58 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
59 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
60 #define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
61 #define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
62 #define MR_RL_WRITE_THROUGH_MODE 0x00
63 #define MR_RL_WRITE_BACK_MODE 0x01
64
65 /* T10 PI defines */
66 #define MR_PROT_INFO_TYPE_CONTROLLER 0x8
67 #define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
68 #define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
69 #define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
70 #define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
71 #define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
72 #define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
73
74 #define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
75 #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
76
77 /*
78 * Raid context flags
79 */
80
81 #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
82 #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
83 enum MR_RAID_FLAGS_IO_SUB_TYPE {
84 MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
85 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
86 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
87 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
88 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
89 MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
90 MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
91 };
92
93 /*
94 * Request descriptor types
95 */
96 #define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
97 #define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
98 #define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
99 #define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
100
101 #define MEGASAS_FP_CMD_LEN 16
102 #define MEGASAS_FUSION_IN_RESET 0
103 #define THRESHOLD_REPLY_COUNT 50
104 #define RAID_1_PEER_CMDS 2
105 #define JBOD_MAPS_COUNT 2
106 #define MEGASAS_REDUCE_QD_COUNT 64
107 #define IOC_INIT_FRAME_SIZE 4096
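
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * MEGASAS_REDUCE_QD_COUNT is the step by which the driver can lower its
 * queue depth and retry when allocations for a higher queue depth fail.
 * The helper name below is hypothetical.
 */
static inline u32 example_reduce_queue_depth(u32 queue_depth)
{
	return (queue_depth > MEGASAS_REDUCE_QD_COUNT) ?
		queue_depth - MEGASAS_REDUCE_QD_COUNT : queue_depth;
}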
108
109 /*
110 * Raid Context structure which describes MegaRAID specific IO Parameters
111 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
112 */
113
114 struct RAID_CONTEXT {
115 #if defined(__BIG_ENDIAN_BITFIELD)
116 u8 nseg:4;
117 u8 type:4;
118 #else
119 u8 type:4;
120 u8 nseg:4;
121 #endif
122 u8 resvd0;
123 __le16 timeout_value;
124 u8 reg_lock_flags;
125 u8 resvd1;
126 __le16 virtual_disk_tgt_id;
127 __le64 reg_lock_row_lba;
128 __le32 reg_lock_length;
129 __le16 next_lmid;
130 u8 ex_status;
131 u8 status;
132 u8 raid_flags;
133 u8 num_sge;
134 __le16 config_seq_num;
135 u8 span_arm;
136 u8 priority;
137 u8 num_sge_ext;
138 u8 resvd2;
139 };
140
141 /*
142 * Raid Context structure which describes Ventura MegaRAID specific
143 * IO Parameters. This resides at offset 0x60 where the SGL normally
144 * starts in MPT IO Frames
145 */
146 struct RAID_CONTEXT_G35 {
147 #define RAID_CONTEXT_NSEG_MASK 0x00F0
148 #define RAID_CONTEXT_NSEG_SHIFT 4
149 #define RAID_CONTEXT_TYPE_MASK 0x000F
150 #define RAID_CONTEXT_TYPE_SHIFT 0
151 u16 nseg_type;
152 u16 timeout_value; /* 0x02 -0x03 */
153 u16 routing_flags; /* 0x04 -0x05 routing flags */
154 u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
155 u64 reg_lock_row_lba; /* 0x08 - 0x0F */
156 u32 reg_lock_length; /* 0x10 - 0x13 */
157 union {
158 u16 next_lmid; /* 0x14 - 0x15 */
159 u16 peer_smid; /* used for the raid 1/10 fp writes */
160 } smid;
161 u8 ex_status; /* 0x16 : OUT */
162 u8 status; /* 0x17 status */
163 u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
164 * resvd[3:1], preferredCpu[0]
165 */
166 u8 span_arm; /* 0x1C span[7:5], arm[4:0] */
167 u16 config_seq_num; /* 0x1A -0x1B */
168 union {
169 /*
170 * Bit format:
171 * ---------------------------------
172 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
173 * ---------------------------------
174 * Byte0 | numSGE[7]- numSGE[0] |
175 * ---------------------------------
176 * Byte1 |SD | resvd | numSGE 8-11 |
177 * --------------------------------
178 */
179 #define NUM_SGE_MASK_LOWER 0xFF
180 #define NUM_SGE_MASK_UPPER 0x0F
181 #define NUM_SGE_SHIFT_UPPER 8
182 #define STREAM_DETECT_SHIFT 7
183 #define STREAM_DETECT_MASK 0x80
184 struct {
185 #if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
186 u16 stream_detected:1;
187 u16 reserved:3;
188 u16 num_sge:12;
189 #else
190 u16 num_sge:12;
191 u16 reserved:3;
192 u16 stream_detected:1;
193 #endif
194 } bits;
195 u8 bytes[2];
196 } u;
197 u8 resvd2[2]; /* 0x1E-0x1F */
198 };
199
200 #define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
201 #define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
202 #define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
203 #define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
204 #define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
205 #define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
206 #define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
207 #define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
208 #define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
209 #define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
210 #define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000
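
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * composing a routing_flags value for struct RAID_CONTEXT_G35 from the
 * bit positions defined above. The cpu_sel argument is hypothetical.
 */
static inline u16 example_build_routing_flags(bool seq_num_enable, u8 cpu_sel)
{
	u16 flags = 0;

	if (seq_num_enable)
		flags |= 1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT;
	flags |= (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT) &
		  MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK;
	return flags;
}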
211
212 static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
213 u16 sge_count)
214 {
215 rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
216 rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
217 & NUM_SGE_MASK_UPPER);
218 }
219
220 static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
221 {
222 u16 sge_count;
223
224 sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
225 << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
226 return sge_count;
227 }
228
229 #define SET_STREAM_DETECTED(rctx_g35) \
230 (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
231
232 #define CLEAR_STREAM_DETECTED(rctx_g35) \
233 (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
234
235 static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
236 {
237 return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
238 }
239
240 union RAID_CONTEXT_UNION {
241 struct RAID_CONTEXT raid_context;
242 struct RAID_CONTEXT_G35 raid_context_g35;
243 };
244
245 #define RAID_CTX_SPANARM_ARM_SHIFT (0)
246 #define RAID_CTX_SPANARM_ARM_MASK (0x1f)
247
248 #define RAID_CTX_SPANARM_SPAN_SHIFT (5)
249 #define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
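
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * decoding the packed span_arm byte (span in bits [7:5], arm in bits [4:0])
 * with the masks and shifts defined above.
 */
static inline u8 example_spanarm_get_span(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
		RAID_CTX_SPANARM_SPAN_SHIFT;
}

static inline u8 example_spanarm_get_arm(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_ARM_MASK) >>
		RAID_CTX_SPANARM_ARM_SHIFT;
}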
250
251 /* number of bits per index in U32 TrackStream */
252 #define BITS_PER_INDEX_STREAM 4
253 #define INVALID_STREAM_NUM 16
254 #define MR_STREAM_BITMAP 0x76543210
255 #define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
256 #define ZERO_LAST_STREAM 0x0fffffff
257 #define MAX_STREAMS_TRACKED 8
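
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * up to MAX_STREAMS_TRACKED stream indices are packed into one u32,
 * BITS_PER_INDEX_STREAM bits each (initialised to MR_STREAM_BITMAP).
 * Reading the i-th index would look like:
 */
static inline u8 example_get_stream_index(u32 track_stream, u8 i)
{
	return (track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
}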
258
259 /*
260 * define region lock types
261 */
262 enum REGION_TYPE {
263 REGION_TYPE_UNUSED = 0,
264 REGION_TYPE_SHARED_READ = 1,
265 REGION_TYPE_SHARED_WRITE = 2,
266 REGION_TYPE_EXCLUSIVE = 3,
267 };
268
269 /* MPI2 defines */
270 #define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
271 #define MPI2_WHOINIT_HOST_DRIVER (0x04)
272 #define MPI2_VERSION_MAJOR (0x02)
273 #define MPI2_VERSION_MINOR (0x00)
274 #define MPI2_VERSION_MAJOR_MASK (0xFF00)
275 #define MPI2_VERSION_MAJOR_SHIFT (8)
276 #define MPI2_VERSION_MINOR_MASK (0x00FF)
277 #define MPI2_VERSION_MINOR_SHIFT (0)
278 #define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
279 MPI2_VERSION_MINOR)
280 #define MPI2_HEADER_VERSION_UNIT (0x10)
281 #define MPI2_HEADER_VERSION_DEV (0x00)
282 #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
283 #define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
284 #define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
285 #define MPI2_HEADER_VERSION_DEV_SHIFT (0)
286 #define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
287 MPI2_HEADER_VERSION_DEV)
288 #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
289 #define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
290 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
291 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
292 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
293 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
294 #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
295 /* EEDP escape mode */
296 #define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
297 #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
298 #define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
299 #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
300 #define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
301 #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
302 #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
303 #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
304 #define MPI2_SCSIIO_CONTROL_READ (0x02000000)
305 #define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
306 #define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
307 #define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
308 #define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
309 #define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
310 #define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
311 #define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
312 #define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
313 #define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
314 #define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
315 #define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
316 #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
317
318 struct MPI25_IEEE_SGE_CHAIN64 {
319 __le64 Address;
320 __le32 Length;
321 __le16 Reserved1;
322 u8 NextChainOffset;
323 u8 Flags;
324 };
325
326 struct MPI2_SGE_SIMPLE_UNION {
327 __le32 FlagsLength;
328 union {
329 __le32 Address32;
330 __le64 Address64;
331 } u;
332 };
333
334 struct MPI2_SCSI_IO_CDB_EEDP32 {
335 u8 CDB[20]; /* 0x00 */
336 __be32 PrimaryReferenceTag; /* 0x14 */
337 __be16 PrimaryApplicationTag; /* 0x18 */
338 __be16 PrimaryApplicationTagMask; /* 0x1A */
339 __le32 TransferLength; /* 0x1C */
340 };
341
342 struct MPI2_SGE_CHAIN_UNION {
343 __le16 Length;
344 u8 NextChainOffset;
345 u8 Flags;
346 union {
347 __le32 Address32;
348 __le64 Address64;
349 } u;
350 };
351
352 struct MPI2_IEEE_SGE_SIMPLE32 {
353 __le32 Address;
354 __le32 FlagsLength;
355 };
356
357 struct MPI2_IEEE_SGE_CHAIN32 {
358 __le32 Address;
359 __le32 FlagsLength;
360 };
361
362 struct MPI2_IEEE_SGE_SIMPLE64 {
363 __le64 Address;
364 __le32 Length;
365 __le16 Reserved1;
366 u8 Reserved2;
367 u8 Flags;
368 };
369
370 struct MPI2_IEEE_SGE_CHAIN64 {
371 __le64 Address;
372 __le32 Length;
373 __le16 Reserved1;
374 u8 Reserved2;
375 u8 Flags;
376 };
377
378 union MPI2_IEEE_SGE_SIMPLE_UNION {
379 struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
380 struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
381 };
382
383 union MPI2_IEEE_SGE_CHAIN_UNION {
384 struct MPI2_IEEE_SGE_CHAIN32 Chain32;
385 struct MPI2_IEEE_SGE_CHAIN64 Chain64;
386 };
387
388 union MPI2_SGE_IO_UNION {
389 struct MPI2_SGE_SIMPLE_UNION MpiSimple;
390 struct MPI2_SGE_CHAIN_UNION MpiChain;
391 union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
392 union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
393 };
394
395 union MPI2_SCSI_IO_CDB_UNION {
396 u8 CDB32[32];
397 struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
398 struct MPI2_SGE_SIMPLE_UNION SGE;
399 };
400
401 /****************************************************************************
402 * SCSI Task Management messages
403 ****************************************************************************/
404
405 /*SCSI Task Management Request Message */
406 struct MPI2_SCSI_TASK_MANAGE_REQUEST {
407 u16 DevHandle; /*0x00 */
408 u8 ChainOffset; /*0x02 */
409 u8 Function; /*0x03 */
410 u8 Reserved1; /*0x04 */
411 u8 TaskType; /*0x05 */
412 u8 Reserved2; /*0x06 */
413 u8 MsgFlags; /*0x07 */
414 u8 VP_ID; /*0x08 */
415 u8 VF_ID; /*0x09 */
416 u16 Reserved3; /*0x0A */
417 u8 LUN[8]; /*0x0C */
418 u32 Reserved4[7]; /*0x14 */
419 u16 TaskMID; /*0x30 */
420 u16 Reserved5; /*0x32 */
421 };
422
423
424 /*SCSI Task Management Reply Message */
425 struct MPI2_SCSI_TASK_MANAGE_REPLY {
426 u16 DevHandle; /*0x00 */
427 u8 MsgLength; /*0x02 */
428 u8 Function; /*0x03 */
429 u8 ResponseCode; /*0x04 */
430 u8 TaskType; /*0x05 */
431 u8 Reserved1; /*0x06 */
432 u8 MsgFlags; /*0x07 */
433 u8 VP_ID; /*0x08 */
434 u8 VF_ID; /*0x09 */
435 u16 Reserved2; /*0x0A */
436 u16 Reserved3; /*0x0C */
437 u16 IOCStatus; /*0x0E */
438 u32 IOCLogInfo; /*0x10 */
439 u32 TerminationCount; /*0x14 */
440 u32 ResponseInfo; /*0x18 */
441 };
442
443 struct MR_TM_REQUEST {
444 char request[128];
445 };
446
447 struct MR_TM_REPLY {
448 char reply[128];
449 };
450
451 /* SCSI Task Management Request Message */
452 struct MR_TASK_MANAGE_REQUEST {
453 /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
454 struct MR_TM_REQUEST TmRequest;
455 union {
456 struct {
457 #if defined(__BIG_ENDIAN_BITFIELD)
458 u32 reserved1:30;
459 u32 isTMForPD:1;
460 u32 isTMForLD:1;
461 #else
462 u32 isTMForLD:1;
463 u32 isTMForPD:1;
464 u32 reserved1:30;
465 #endif
466 u32 reserved2;
467 } tmReqFlags;
468 struct MR_TM_REPLY TMReply;
469 };
470 };
471
472 /* TaskType values */
473
474 #define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
475 #define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
476 #define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
477 #define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
478 #define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
479 #define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
480 #define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
481 #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
482 #define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
483
484 /* ResponseCode values */
485
486 #define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
487 #define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
488 #define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
489 #define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
490 #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
491 #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
492 #define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
493 #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
494
495 /*
496 * RAID SCSI IO Request Message
497 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
498 */
499 struct MPI2_RAID_SCSI_IO_REQUEST {
500 __le16 DevHandle; /* 0x00 */
501 u8 ChainOffset; /* 0x02 */
502 u8 Function; /* 0x03 */
503 __le16 Reserved1; /* 0x04 */
504 u8 Reserved2; /* 0x06 */
505 u8 MsgFlags; /* 0x07 */
506 u8 VP_ID; /* 0x08 */
507 u8 VF_ID; /* 0x09 */
508 __le16 Reserved3; /* 0x0A */
509 __le32 SenseBufferLowAddress; /* 0x0C */
510 __le16 SGLFlags; /* 0x10 */
511 u8 SenseBufferLength; /* 0x12 */
512 u8 Reserved4; /* 0x13 */
513 u8 SGLOffset0; /* 0x14 */
514 u8 SGLOffset1; /* 0x15 */
515 u8 SGLOffset2; /* 0x16 */
516 u8 SGLOffset3; /* 0x17 */
517 __le32 SkipCount; /* 0x18 */
518 __le32 DataLength; /* 0x1C */
519 __le32 BidirectionalDataLength; /* 0x20 */
520 __le16 IoFlags; /* 0x24 */
521 __le16 EEDPFlags; /* 0x26 */
522 __le32 EEDPBlockSize; /* 0x28 */
523 __le32 SecondaryReferenceTag; /* 0x2C */
524 __le16 SecondaryApplicationTag; /* 0x30 */
525 __le16 ApplicationTagTranslationMask; /* 0x32 */
526 u8 LUN[8]; /* 0x34 */
527 __le32 Control; /* 0x3C */
528 union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
529 union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
530 union MPI2_SGE_IO_UNION SGL; /* 0x80 */
531 };
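
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * the identity fields a driver-built LD IO frame would carry; the CDB,
 * RAID context and SGL are filled separately. Function and argument names
 * are hypothetical.
 */
static inline void example_init_ld_io(struct MPI2_RAID_SCSI_IO_REQUEST *io,
				      u16 ld_target_id)
{
	io->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
	io->DevHandle = cpu_to_le16(ld_target_id);
}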
532
533 /*
534 * MPT RAID MFA IO Descriptor.
535 */
536 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
537 u32 RequestFlags:8;
538 u32 MessageAddress1:24;
539 u32 MessageAddress2;
540 };
541
542 /* Default Request Descriptor */
543 struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
544 u8 RequestFlags; /* 0x00 */
545 u8 MSIxIndex; /* 0x01 */
546 __le16 SMID; /* 0x02 */
547 __le16 LMID; /* 0x04 */
548 __le16 DescriptorTypeDependent; /* 0x06 */
549 };
550
551 /* High Priority Request Descriptor */
552 struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
553 u8 RequestFlags; /* 0x00 */
554 u8 MSIxIndex; /* 0x01 */
555 __le16 SMID; /* 0x02 */
556 __le16 LMID; /* 0x04 */
557 __le16 Reserved1; /* 0x06 */
558 };
559
560 /* SCSI IO Request Descriptor */
561 struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
562 u8 RequestFlags; /* 0x00 */
563 u8 MSIxIndex; /* 0x01 */
564 __le16 SMID; /* 0x02 */
565 __le16 LMID; /* 0x04 */
566 __le16 DevHandle; /* 0x06 */
567 };
568
569 /* SCSI Target Request Descriptor */
570 struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
571 u8 RequestFlags; /* 0x00 */
572 u8 MSIxIndex; /* 0x01 */
573 __le16 SMID; /* 0x02 */
574 __le16 LMID; /* 0x04 */
575 __le16 IoIndex; /* 0x06 */
576 };
577
578 /* RAID Accelerator Request Descriptor */
579 struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
580 u8 RequestFlags; /* 0x00 */
581 u8 MSIxIndex; /* 0x01 */
582 __le16 SMID; /* 0x02 */
583 __le16 LMID; /* 0x04 */
584 __le16 Reserved; /* 0x06 */
585 };
586
587 /* union of Request Descriptors */
588 union MEGASAS_REQUEST_DESCRIPTOR_UNION {
589 struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
590 struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
591 struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
592 struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
593 struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
594 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
595 union {
596 struct {
597 __le32 low;
598 __le32 high;
599 } u;
600 __le64 Words;
601 };
602 };
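
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * filling a SCSI IO request descriptor; the anonymous u.low/u.high words
 * are what would be posted to the controller's request queue ports.
 * Argument names are hypothetical.
 */
static inline void example_build_scsi_io_desc(
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *desc,
	u16 smid, u8 msix_index, u16 dev_handle)
{
	desc->SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
				    MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
	desc->SCSIIO.MSIxIndex = msix_index;
	desc->SCSIIO.SMID = cpu_to_le16(smid);
	desc->SCSIIO.LMID = 0;
	desc->SCSIIO.DevHandle = cpu_to_le16(dev_handle);
}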
603
604 /* Default Reply Descriptor */
605 struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
606 u8 ReplyFlags; /* 0x00 */
607 u8 MSIxIndex; /* 0x01 */
608 __le16 DescriptorTypeDependent1; /* 0x02 */
609 __le32 DescriptorTypeDependent2; /* 0x04 */
610 };
611
612 /* Address Reply Descriptor */
613 struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
614 u8 ReplyFlags; /* 0x00 */
615 u8 MSIxIndex; /* 0x01 */
616 __le16 SMID; /* 0x02 */
617 __le32 ReplyFrameAddress; /* 0x04 */
618 };
619
620 /* SCSI IO Success Reply Descriptor */
621 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
622 u8 ReplyFlags; /* 0x00 */
623 u8 MSIxIndex; /* 0x01 */
624 __le16 SMID; /* 0x02 */
625 __le16 TaskTag; /* 0x04 */
626 __le16 Reserved1; /* 0x06 */
627 };
628
629 /* TargetAssist Success Reply Descriptor */
630 struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
631 u8 ReplyFlags; /* 0x00 */
632 u8 MSIxIndex; /* 0x01 */
633 __le16 SMID; /* 0x02 */
634 u8 SequenceNumber; /* 0x04 */
635 u8 Reserved1; /* 0x05 */
636 __le16 IoIndex; /* 0x06 */
637 };
638
639 /* Target Command Buffer Reply Descriptor */
640 struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
641 u8 ReplyFlags; /* 0x00 */
642 u8 MSIxIndex; /* 0x01 */
643 u8 VP_ID; /* 0x02 */
644 u8 Flags; /* 0x03 */
645 __le16 InitiatorDevHandle; /* 0x04 */
646 __le16 IoIndex; /* 0x06 */
647 };
648
649 /* RAID Accelerator Success Reply Descriptor */
650 struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
651 u8 ReplyFlags; /* 0x00 */
652 u8 MSIxIndex; /* 0x01 */
653 __le16 SMID; /* 0x02 */
654 __le32 Reserved; /* 0x04 */
655 };
656
657 /* union of Reply Descriptors */
658 union MPI2_REPLY_DESCRIPTORS_UNION {
659 struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
660 struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
661 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
662 struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
663 struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
664 struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
665 RAIDAcceleratorSuccess;
666 __le64 Words;
667 };
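
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * a reply-queue consumer would test the descriptor type against UNUSED to
 * decide whether firmware has written a new reply into the slot.
 */
static inline bool example_reply_desc_is_valid(
	union MPI2_REPLY_DESCRIPTORS_UNION *desc)
{
	u8 reply_type = desc->Default.ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	return reply_type != MPI2_RPY_DESCRIPT_FLAGS_UNUSED;
}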
668
669 /* IOCInit Request message */
670 struct MPI2_IOC_INIT_REQUEST {
671 u8 WhoInit; /* 0x00 */
672 u8 Reserved1; /* 0x01 */
673 u8 ChainOffset; /* 0x02 */
674 u8 Function; /* 0x03 */
675 __le16 Reserved2; /* 0x04 */
676 u8 Reserved3; /* 0x06 */
677 u8 MsgFlags; /* 0x07 */
678 u8 VP_ID; /* 0x08 */
679 u8 VF_ID; /* 0x09 */
680 __le16 Reserved4; /* 0x0A */
681 __le16 MsgVersion; /* 0x0C */
682 __le16 HeaderVersion; /* 0x0E */
683 u32 Reserved5; /* 0x10 */
684 __le16 Reserved6; /* 0x14 */
685 u8 HostPageSize; /* 0x16 */
686 u8 HostMSIxVectors; /* 0x17 */
687 __le16 Reserved8; /* 0x18 */
688 __le16 SystemRequestFrameSize; /* 0x1A */
689 __le16 ReplyDescriptorPostQueueDepth; /* 0x1C */
690 __le16 ReplyFreeQueueDepth; /* 0x1E */
691 __le32 SenseBufferAddressHigh; /* 0x20 */
692 __le32 SystemReplyAddressHigh; /* 0x24 */
693 __le64 SystemRequestFrameBaseAddress; /* 0x28 */
694 __le64 ReplyDescriptorPostQueueAddress;/* 0x30 */
695 __le64 ReplyFreeQueueAddress; /* 0x38 */
696 __le64 TimeStamp; /* 0x40 */
697 };
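
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * the identity/version fields an IOC INIT request carries, built from the
 * MPI2 defines above; queue sizes and addresses are omitted here.
 */
static inline void example_init_ioc_init_hdr(struct MPI2_IOC_INIT_REQUEST *req)
{
	req->Function = MPI2_FUNCTION_IOC_INIT;
	req->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	req->MsgVersion = cpu_to_le16(MPI2_VERSION);
	req->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
}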
698
699 /* mrpriv defines */
700 #define MR_PD_INVALID 0xFFFF
701 #define MR_DEVHANDLE_INVALID 0xFFFF
702 #define MAX_SPAN_DEPTH 8
703 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
704 #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
705 #define MAX_ROW_SIZE 32
706 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
707 #define MAX_LOGICAL_DRIVES 64
708 #define MAX_LOGICAL_DRIVES_EXT 256
709 #define MAX_LOGICAL_DRIVES_DYN 512
710 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
711 #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
712 #define MAX_ARRAYS 128
713 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
714 #define MAX_ARRAYS_EXT 256
715 #define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
716 #define MAX_API_ARRAYS_DYN 512
717 #define MAX_PHYSICAL_DEVICES 256
718 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
719 #define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
720 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
721 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
722 #define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
723 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
724 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
725 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
726
727 struct MR_DEV_HANDLE_INFO {
728 __le16 curDevHdl;
729 u8 validHandles;
730 u8 interfaceType;
731 __le16 devHandle[2];
732 };
733
734 struct MR_ARRAY_INFO {
735 __le16 pd[MAX_RAIDMAP_ROW_SIZE];
736 };
737
738 struct MR_QUAD_ELEMENT {
739 __le64 logStart;
740 __le64 logEnd;
741 __le64 offsetInSpan;
742 __le32 diff;
743 __le32 reserved1;
744 };
745
746 struct MR_SPAN_INFO {
747 __le32 noElements;
748 __le32 reserved1;
749 struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
750 };
751
752 struct MR_LD_SPAN {
753 __le64 startBlk;
754 __le64 numBlks;
755 __le16 arrayRef;
756 u8 spanRowSize;
757 u8 spanRowDataSize;
758 u8 reserved[4];
759 };
760
761 struct MR_SPAN_BLOCK_INFO {
762 __le64 num_rows;
763 struct MR_LD_SPAN span;
764 struct MR_SPAN_INFO block_span_info;
765 };
766
767 #define MR_RAID_CTX_CPUSEL_0 0
768 #define MR_RAID_CTX_CPUSEL_1 1
769 #define MR_RAID_CTX_CPUSEL_2 2
770 #define MR_RAID_CTX_CPUSEL_3 3
771 #define MR_RAID_CTX_CPUSEL_FCFS 0xF
772
773 struct MR_CPU_AFFINITY_MASK {
774 union {
775 struct {
776 #ifndef MFI_BIG_ENDIAN
777 u8 hw_path:1;
778 u8 cpu0:1;
779 u8 cpu1:1;
780 u8 cpu2:1;
781 u8 cpu3:1;
782 u8 reserved:3;
783 #else
784 u8 reserved:3;
785 u8 cpu3:1;
786 u8 cpu2:1;
787 u8 cpu1:1;
788 u8 cpu0:1;
789 u8 hw_path:1;
790 #endif
791 };
792 u8 core_mask;
793 };
794 };
795
796 struct MR_IO_AFFINITY {
797 union {
798 struct {
799 struct MR_CPU_AFFINITY_MASK pdRead;
800 struct MR_CPU_AFFINITY_MASK pdWrite;
801 struct MR_CPU_AFFINITY_MASK ldRead;
802 struct MR_CPU_AFFINITY_MASK ldWrite;
803 };
804 u32 word;
805 };
806 u8 maxCores; /* Total cores + HW Path in ROC */
807 u8 reserved[3];
808 };
809
810 struct MR_LD_RAID {
811 struct {
812 #if defined(__BIG_ENDIAN_BITFIELD)
813 u32 reserved4:2;
814 u32 fp_cache_bypass_capable:1;
815 u32 fp_rmw_capable:1;
816 u32 disable_coalescing:1;
817 u32 fpBypassRegionLock:1;
818 u32 tmCapable:1;
819 u32 fpNonRWCapable:1;
820 u32 fpReadAcrossStripe:1;
821 u32 fpWriteAcrossStripe:1;
822 u32 fpReadCapable:1;
823 u32 fpWriteCapable:1;
824 u32 encryptionType:8;
825 u32 pdPiMode:4;
826 u32 ldPiMode:4;
827 u32 reserved5:2;
828 u32 ra_capable:1;
829 u32 fpCapable:1;
830 #else
831 u32 fpCapable:1;
832 u32 ra_capable:1;
833 u32 reserved5:2;
834 u32 ldPiMode:4;
835 u32 pdPiMode:4;
836 u32 encryptionType:8;
837 u32 fpWriteCapable:1;
838 u32 fpReadCapable:1;
839 u32 fpWriteAcrossStripe:1;
840 u32 fpReadAcrossStripe:1;
841 u32 fpNonRWCapable:1;
842 u32 tmCapable:1;
843 u32 fpBypassRegionLock:1;
844 u32 disable_coalescing:1;
845 u32 fp_rmw_capable:1;
846 u32 fp_cache_bypass_capable:1;
847 u32 reserved4:2;
848 #endif
849 } capability;
850 __le32 reserved6;
851 __le64 size;
852 u8 spanDepth;
853 u8 level;
854 u8 stripeShift;
855 u8 rowSize;
856 u8 rowDataSize;
857 u8 writeMode;
858 u8 PRL;
859 u8 SRL;
860 __le16 targetId;
861 u8 ldState;
862 u8 regTypeReqOnWrite;
863 u8 modFactor;
864 u8 regTypeReqOnRead;
865 __le16 seqNum;
866
867 struct {
868 u32 ldSyncRequired:1;
869 u32 reserved:31;
870 } flags;
871
872 u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
873 u8 fpIoTimeoutForLd; /* 0x2C timeout value used by driver in FP IO */
874 /* 0x2D This LD accepts priority boost of this type */
875 u8 ld_accept_priority_type;
876 u8 reserved2[2]; /* 0x2E - 0x2F */
877 /* 0x30 - 0x33, Logical block size for the LD */
878 u32 logical_block_length;
879 struct {
880 #ifndef MFI_BIG_ENDIAN
881 /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
882 u32 ld_pi_exp:4;
883 /* 0x34, LOGICAL BLOCKS PER PHYSICAL
884 * BLOCK EXPONENT from READ CAPACITY 16
885 */
886 u32 ld_logical_block_exp:4;
887 u32 reserved1:24; /* 0x34 */
888 #else
889 u32 reserved1:24; /* 0x34 */
890 /* 0x34, LOGICAL BLOCKS PER PHYSICAL
891 * BLOCK EXPONENT from READ CAPACITY 16
892 */
893 u32 ld_logical_block_exp:4;
894 /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
895 u32 ld_pi_exp:4;
896 #endif
897 }; /* 0x34 - 0x37 */
898 /* 0x38 - 0x3f, This will determine which
899 * core will process LD IO and PD IO.
900 */
901 struct MR_IO_AFFINITY cpuAffinity;
902 /* Bit definitions are specified by MR_IO_AFFINITY */
903 u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
904 };
905
906 struct MR_LD_SPAN_MAP {
907 struct MR_LD_RAID ldRaid;
908 u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
909 struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
910 };
911
912 struct MR_FW_RAID_MAP {
913 __le32 totalSize;
914 union {
915 struct {
916 __le32 maxLd;
917 __le32 maxSpanDepth;
918 __le32 maxRowSize;
919 __le32 maxPdCount;
920 __le32 maxArrays;
921 } validationInfo;
922 __le32 version[5];
923 };
924
925 __le32 ldCount;
926 __le32 Reserved1;
927 u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
928 MAX_RAIDMAP_VIEWS];
929 u8 fpPdIoTimeoutSec;
930 u8 reserved2[7];
931 struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
932 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
933 struct MR_LD_SPAN_MAP ldSpanMap[1];
934 };
935
936 struct IO_REQUEST_INFO {
937 u64 ldStartBlock;
938 u32 numBlocks;
939 u16 ldTgtId;
940 u8 isRead;
941 __le16 devHandle;
942 u8 pd_interface;
943 u64 pdBlock;
944 u8 fpOkForIo;
945 u8 IoforUnevenSpan;
946 u8 start_span;
947 u8 do_fp_rlbypass;
948 u64 start_row;
949 u8 span_arm; /* span[7:5], arm[4:0] */
950 u8 pd_after_lb;
951 u16 r1_alt_dev_handle; /* raid 1/10 only */
952 bool ra_capable;
953 };
954
955 struct MR_LD_TARGET_SYNC {
956 u8 targetId;
957 u8 reserved;
958 __le16 seqNum;
959 };
960
961 /*
962 * RAID Map descriptor Types.
963 * Each element should uniquely identify one data structure in the RAID map
964 */
965 enum MR_RAID_MAP_DESC_TYPE {
966 /* MR_DEV_HANDLE_INFO data */
967 RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
968 /* target to Ld num Index map */
969 RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
970 /* MR_ARRAY_INFO data */
971 RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
972 /* MR_LD_SPAN_MAP data */
973 RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
974 RAID_MAP_DESC_TYPE_COUNT,
975 };
976
977 /*
978 * This table defines the offset, size and num elements of each descriptor
979 * type in the RAID Map buffer
980 */
981 struct MR_RAID_MAP_DESC_TABLE {
982 /* Raid map descriptor type */
983 u32 raid_map_desc_type;
984 /* Offset into the RAID map buffer where
985 * descriptor data is saved
986 */
987 u32 raid_map_desc_offset;
988 /* total size of the
989 * descriptor buffer
990 */
991 u32 raid_map_desc_buffer_size;
992 /* Number of elements contained in the
993 * descriptor buffer
994 */
995 u32 raid_map_desc_elements;
996 };
997
998 /*
999 * Dynamic Raid Map Structure.
1000 */
1001 struct MR_FW_RAID_MAP_DYNAMIC {
1002 u32 raid_map_size; /* total size of RAID Map structure */
1003 u32 desc_table_offset;/* Offset of desc table into RAID map*/
1004 u32 desc_table_size; /* Total Size of desc table */
1005 /* Total Number of elements in the desc table */
1006 u32 desc_table_num_elements;
1007 u64 reserved1;
1008 u32 reserved2[3]; /*future use */
1009 /* timeout value used by driver in FP IOs */
1010 u8 fp_pd_io_timeout_sec;
1011 u8 reserved3[3];
1012 /* when this seqNum increments, driver needs to
1013 * release RMW buffers asap
1014 */
1015 u32 rmw_fp_seq_num;
1016 u16 ld_count; /* count of lds. */
1017 u16 ar_count; /* count of arrays */
1018 u16 span_count; /* count of spans */
1019 u16 reserved4[3];
1020 /*
1021 * The below structure of pointers is only to be used by the driver.
1022 * This is added in the API to reduce the amount of code changes
1023 * needed in the driver to support the dynamic RAID map. Firmware
1024 * should not update these pointers while preparing the raid map.
1025 */
1026 union {
1027 struct {
1028 struct MR_DEV_HANDLE_INFO *dev_hndl_info;
1029 u16 *ld_tgt_id_to_ld;
1030 struct MR_ARRAY_INFO *ar_map_info;
1031 struct MR_LD_SPAN_MAP *ld_span_map;
1032 };
1033 u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
1034 };
1035 /*
1036 * RAID Map descriptor table defines the layout of data in the RAID Map.
1037 * The size of the descriptor table itself could change.
1038 */
1039 /* Variable Size descriptor Table. */
1040 struct MR_RAID_MAP_DESC_TABLE
1041 raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
1042 /* Variable Size buffer containing all data */
1043 u32 raid_map_desc_data[1];
1044 }; /* Dynamically sized RAID Map structure */
1045
1046 #define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
1047 #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
1048 #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
1049 #define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
1050 #define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
1051 #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
1052 #define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
1053
1054 #define MPI2_SGE_FLAGS_SHIFT (0x02)
1055 #define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
1056 #define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
1057 #define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)
1058
1059 #define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
1060 #define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
1061 #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
1062 #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
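
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * filling one MPI2.5 IEEE simple SGE and marking it as the last element
 * of the scatter-gather list with the flag above.
 */
static inline void example_fill_last_ieee_sge(struct MPI25_IEEE_SGE_CHAIN64 *sge,
					      dma_addr_t addr, u32 len)
{
	sge->Address = cpu_to_le64(addr);
	sge->Length = cpu_to_le32(len);
	sge->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
}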
1063
1064 struct megasas_register_set;
1065 struct megasas_instance;
1066
1067 union desc_word {
1068 u64 word;
1069 struct {
1070 u32 low;
1071 u32 high;
1072 } u;
1073 };
1074
1075 struct megasas_cmd_fusion {
1076 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1077 dma_addr_t io_request_phys_addr;
1078
1079 union MPI2_SGE_IO_UNION *sg_frame;
1080 dma_addr_t sg_frame_phys_addr;
1081
1082 u8 *sense;
1083 dma_addr_t sense_phys_addr;
1084
1085 struct list_head list;
1086 struct scsi_cmnd *scmd;
1087 struct megasas_instance *instance;
1088
1089 u8 retry_for_fw_reset;
1090 union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
1091
1092 /*
1093 * Context for a MFI frame.
1094 * Used to get the mfi cmd from list when a MFI cmd is completed
1095 */
1096 u32 sync_cmd_idx;
1097 u32 index;
1098 u8 pd_r1_lb;
1099 struct completion done;
1100 u8 pd_interface;
1101 u16 r1_alt_dev_handle; /* raid 1/10 only*/
1102 bool cmd_completed; /* raid 1/10 fp writes status holder */
1103
1104 };
1105
1106 struct LD_LOAD_BALANCE_INFO {
1107 u8 loadBalanceFlag;
1108 u8 reserved1;
1109 atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
1110 u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
1111 };
1112
1113 /* SPAN_SET is info calculated from the span info in the RAID map, per LD */
1114 typedef struct _LD_SPAN_SET {
1115 u64 log_start_lba;
1116 u64 log_end_lba;
1117 u64 span_row_start;
1118 u64 span_row_end;
1119 u64 data_strip_start;
1120 u64 data_strip_end;
1121 u64 data_row_start;
1122 u64 data_row_end;
1123 u8 strip_offset[MAX_SPAN_DEPTH];
1124 u32 span_row_data_width;
1125 u32 diff;
1126 u32 reserved[2];
1127 } LD_SPAN_SET, *PLD_SPAN_SET;
1128
1129 typedef struct LOG_BLOCK_SPAN_INFO {
1130 LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
1131 } LD_SPAN_INFO, *PLD_SPAN_INFO;
1132
1133 struct MR_FW_RAID_MAP_ALL {
1134 struct MR_FW_RAID_MAP raidMap;
1135 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
1136 } __attribute__ ((packed));
1137
1138 struct MR_DRV_RAID_MAP {
1139 /* total size of this structure, including this field.
1140 * This field will be manipulated by the driver for the ext raid map;
1141 * otherwise the value from the firmware raid map is used.
1142 */
1143 __le32 totalSize;
1144
1145 union {
1146 struct {
1147 __le32 maxLd;
1148 __le32 maxSpanDepth;
1149 __le32 maxRowSize;
1150 __le32 maxPdCount;
1151 __le32 maxArrays;
1152 } validationInfo;
1153 __le32 version[5];
1154 };
1155
1156 /* timeout value used by driver in FP IOs*/
1157 u8 fpPdIoTimeoutSec;
1158 u8 reserved2[7];
1159
1160 __le16 ldCount;
1161 __le16 arCount;
1162 __le16 spanCount;
1163 __le16 reserve3;
1164
1165 struct MR_DEV_HANDLE_INFO
1166 devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
1167 u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
1168 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
1169 struct MR_LD_SPAN_MAP ldSpanMap[1];
1170
1171 };
1172
1173 /* Driver raid map size is the same as the ext raid map.
1174 * MR_DRV_RAID_MAP_ALL is created to stay in sync with the old raid map,
1175 * mainly for code re-use.
1176 */
1177 struct MR_DRV_RAID_MAP_ALL {
1178
1179 struct MR_DRV_RAID_MAP raidMap;
1180 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
1181 } __packed;
1182
1183
1184
1185 struct MR_FW_RAID_MAP_EXT {
1186 /* Not used in the new map */
1187 u32 reserved;
1188
1189 union {
1190 struct {
1191 u32 maxLd;
1192 u32 maxSpanDepth;
1193 u32 maxRowSize;
1194 u32 maxPdCount;
1195 u32 maxArrays;
1196 } validationInfo;
1197 u32 version[5];
1198 };
1199
1200 u8 fpPdIoTimeoutSec;
1201 u8 reserved2[7];
1202
1203 __le16 ldCount;
1204 __le16 arCount;
1205 __le16 spanCount;
1206 __le16 reserve3;
1207
1208 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
1209 u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
1210 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
1211 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
1212 };
1213
1214 /*
1215 * define MR_PD_CFG_SEQ structure for system PDs
1216 */
1217 struct MR_PD_CFG_SEQ {
1218 u16 seqNum;
1219 u16 devHandle;
1220 struct {
1221 #if defined(__BIG_ENDIAN_BITFIELD)
1222 u8 reserved:7;
1223 u8 tmCapable:1;
1224 #else
1225 u8 tmCapable:1;
1226 u8 reserved:7;
1227 #endif
1228 } capability;
1229 u8 reserved;
1230 u16 pd_target_id;
1231 } __packed;
1232
1233 struct MR_PD_CFG_SEQ_NUM_SYNC {
1234 __le32 size;
1235 __le32 count;
1236 struct MR_PD_CFG_SEQ seq[1];
1237 } __packed;
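
/*
 * Illustrative sketch (editor addition, not part of the upstream header):
 * because seq[] is declared with a single element, a sync buffer holding
 * 'count' entries is sized as the base structure plus (count - 1) extra
 * MR_PD_CFG_SEQ elements.
 */
static inline size_t example_pd_seq_map_size(u32 count)
{
	return sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	       (sizeof(struct MR_PD_CFG_SEQ) * (count - 1));
}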
1238
1239 /* stream detection */
1240 struct STREAM_DETECT {
1241 u64 next_seq_lba; /* next LBA to match sequential access */
1242 struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
1243 struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
1244 u32 count_cmds_in_stream; /* count of host commands in this stream */
1245 u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
1246 u8 is_read; /* SCSI OpCode for this stream */
1247 u8 group_depth; /* total number of host commands in group */
1248 /* TRUE if cannot add any more commands to this group */
1249 bool group_flush;
1250 u8 reserved[7]; /* pad to 64-bit alignment */
1251 };
1252
1253 struct LD_STREAM_DETECT {
1254 bool write_back; /* TRUE if WB, FALSE if WT */
1255 bool fp_write_enabled;
1256 bool members_ssds;
1257 bool fp_cache_bypass_capable;
1258 u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
1259 /* this is the array of stream detect structures (one per stream) */
1260 struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
1261 };
1262
1263 struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
1264 u64 RDPQBaseAddress;
1265 u32 Reserved1;
1266 u32 Reserved2;
1267 };
1268
1269 struct fusion_context {
1270 struct megasas_cmd_fusion **cmd_list;
1271 dma_addr_t req_frames_desc_phys;
1272 u8 *req_frames_desc;
1273
1274 struct dma_pool *io_request_frames_pool;
1275 dma_addr_t io_request_frames_phys;
1276 u8 *io_request_frames;
1277
1278 struct dma_pool *sg_dma_pool;
1279 struct dma_pool *sense_dma_pool;
1280
1281 dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
1282 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
1283 struct dma_pool *reply_frames_desc_pool;
1284
1285 u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
1286
1287 u32 reply_q_depth;
1288 u32 request_alloc_sz;
1289 u32 reply_alloc_sz;
1290 u32 io_frames_alloc_sz;
1291
1292 struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
1293 dma_addr_t rdpq_phys;
1294 u16 max_sge_in_main_msg;
1295 u16 max_sge_in_chain;
1296
1297 u8 chain_offset_io_request;
1298 u8 chain_offset_mfi_pthru;
1299
1300 struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
1301 dma_addr_t ld_map_phys[2];
1302
1303 /* Non dma-able memory. Driver local copy. */
1304 struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
1305
1306 u32 max_map_sz;
1307 u32 current_map_sz;
1308 u32 old_map_sz;
1309 u32 new_map_sz;
1310 u32 drv_map_sz;
1311 u32 drv_map_pages;
1312 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
1313 dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
1314 u8 fast_path_io;
1315 struct LD_LOAD_BALANCE_INFO *load_balance_info;
1316 u32 load_balance_info_pages;
1317 LD_SPAN_INFO *log_to_span;
1318 u32 log_to_span_pages;
1319 struct LD_STREAM_DETECT **stream_detect_by_ld;
1320 dma_addr_t ioc_init_request_phys;
1321 struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
1322 struct megasas_cmd *ioc_init_cmd;
1323
1324 };
1325
1326 union desc_value {
1327 __le64 word;
1328 struct {
1329 __le32 low;
1330 __le32 high;
1331 } u;
1332 };
1333
1334 void megasas_free_cmds_fusion(struct megasas_instance *instance);
1335 int megasas_ioc_init_fusion(struct megasas_instance *instance);
1336 u8 megasas_get_map_info(struct megasas_instance *instance);
1337 int megasas_sync_map_info(struct megasas_instance *instance);
1338 void megasas_release_fusion(struct megasas_instance *instance);
1339 void megasas_reset_reply_desc(struct megasas_instance *instance);
1340 int megasas_check_mpio_paths(struct megasas_instance *instance,
1341 struct scsi_cmnd *scmd);
1342 void megasas_fusion_ocr_wq(struct work_struct *work);
1343
1344 #endif /* _MEGARAID_SAS_FUSION_H_ */