]>
Commit | Line | Data |
---|---|---|
9c915a8c AR |
1 | /* |
2 | * Linux MegaRAID driver for SAS based RAID controllers | |
3 | * | |
e399065b SS |
4 | * Copyright (c) 2009-2013 LSI Corporation |
5 | * Copyright (c) 2013-2014 Avago Technologies | |
9c915a8c AR |
6 | * |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License | |
9 | * as published by the Free Software Foundation; either version 2 | |
10 | * of the License, or (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
e399065b | 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
9c915a8c AR |
19 | * |
20 | * FILE: megaraid_sas_fp.c | |
21 | * | |
e399065b | 22 | * Authors: Avago Technologies |
9c915a8c AR |
23 | * Sumant Patro |
24 | * Varad Talamacki | |
25 | * Manoj Jose | |
e399065b SS |
26 | * Kashyap Desai <kashyap.desai@avagotech.com> |
27 | * Sumit Saxena <sumit.saxena@avagotech.com> | |
9c915a8c | 28 | * |
e399065b | 29 | * Send feedback to: megaraidlinux.pdl@avagotech.com |
9c915a8c | 30 | * |
e399065b SS |
31 | * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, |
32 | * San Jose, California 95131 | |
9c915a8c AR |
33 | */ |
34 | ||
35 | #include <linux/kernel.h> | |
36 | #include <linux/types.h> | |
37 | #include <linux/pci.h> | |
38 | #include <linux/list.h> | |
39 | #include <linux/moduleparam.h> | |
40 | #include <linux/module.h> | |
41 | #include <linux/spinlock.h> | |
42 | #include <linux/interrupt.h> | |
43 | #include <linux/delay.h> | |
9c915a8c AR |
44 | #include <linux/uio.h> |
45 | #include <linux/uaccess.h> | |
46 | #include <linux/fs.h> | |
47 | #include <linux/compat.h> | |
48 | #include <linux/blkdev.h> | |
49 | #include <linux/poll.h> | |
50 | ||
51 | #include <scsi/scsi.h> | |
52 | #include <scsi/scsi_cmnd.h> | |
53 | #include <scsi/scsi_device.h> | |
54 | #include <scsi/scsi_host.h> | |
55 | ||
56 | #include "megaraid_sas_fusion.h" | |
36807e67 | 57 | #include "megaraid_sas.h" |
9c915a8c AR |
58 | #include <asm/div64.h> |
59 | ||
d2552ebe SS |
60 | #define LB_PENDING_CMDS_DEFAULT 4 |
61 | static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; | |
62 | module_param(lb_pending_cmds, int, S_IRUGO); | |
63 | MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " | |
64 | "threshold. Valid Values are 1-128. Default: 4"); | |
65 | ||
66 | ||
9c915a8c AR |
67 | #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) |
68 | #define MR_LD_STATE_OPTIMAL 3 | |
69 | #define FALSE 0 | |
70 | #define TRUE 1 | |
71 | ||
bc93d425 SS |
72 | #define SPAN_DEBUG 0 |
73 | #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) | |
74 | #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) | |
75 | #define SPAN_INVALID 0xff | |
76 | ||
9c915a8c | 77 | /* Prototypes */ |
51087a86 | 78 | static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, |
bc93d425 SS |
79 | PLD_SPAN_INFO ldSpanInfo); |
80 | static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, | |
81 | u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, | |
51087a86 | 82 | struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map); |
bc93d425 | 83 | static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, |
51087a86 | 84 | u64 strip, struct MR_DRV_RAID_MAP_ALL *map); |
9c915a8c AR |
85 | |
86 | u32 mega_mod64(u64 dividend, u32 divisor) | |
87 | { | |
88 | u64 d; | |
89 | u32 remainder; | |
90 | ||
91 | if (!divisor) | |
92 | printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n"); | |
93 | d = dividend; | |
94 | remainder = do_div(d, divisor); | |
95 | return remainder; | |
96 | } | |
97 | ||
98 | /** | |
99 | * @param dividend : Dividend | |
100 | * @param divisor : Divisor | |
101 | * | |
102 | * @return quotient | |
103 | **/ | |
104 | u64 mega_div64_32(uint64_t dividend, uint32_t divisor) | |
105 | { | |
106 | u32 remainder; | |
107 | u64 d; | |
108 | ||
109 | if (!divisor) | |
110 | printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n"); | |
111 | ||
112 | d = dividend; | |
113 | remainder = do_div(d, divisor); | |
114 | ||
115 | return d; | |
116 | } | |
117 | ||
51087a86 | 118 | struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c AR |
119 | { |
120 | return &map->raidMap.ldSpanMap[ld].ldRaid; | |
121 | } | |
122 | ||
123 | static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, | |
51087a86 | 124 | struct MR_DRV_RAID_MAP_ALL |
9c915a8c AR |
125 | *map) |
126 | { | |
127 | return &map->raidMap.ldSpanMap[ld].spanBlock[0]; | |
128 | } | |
129 | ||
51087a86 | 130 | static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c AR |
131 | { |
132 | return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; | |
133 | } | |
134 | ||
51087a86 | 135 | u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c | 136 | { |
94cd65dd | 137 | return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); |
9c915a8c AR |
138 | } |
139 | ||
51087a86 | 140 | u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c | 141 | { |
94cd65dd | 142 | return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); |
9c915a8c AR |
143 | } |
144 | ||
51087a86 | 145 | u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c AR |
146 | { |
147 | return map->raidMap.devHndlInfo[pd].curDevHdl; | |
148 | } | |
149 | ||
51087a86 | 150 | u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c | 151 | { |
be26374b | 152 | return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); |
9c915a8c AR |
153 | } |
154 | ||
51087a86 | 155 | u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c | 156 | { |
be26374b | 157 | return map->raidMap.ldTgtIdToLd[ldTgtId]; |
9c915a8c AR |
158 | } |
159 | ||
160 | static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, | |
51087a86 | 161 | struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c AR |
162 | { |
163 | return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; | |
164 | } | |
165 | ||
/*
 * This function will Populate Driver Map using firmware raid map
 *
 * Copies the firmware raid map for the current map_id into the
 * driver-private map.  Controllers supporting 256 VDs ship a map in
 * the driver layout already, so it is copied wholesale; older
 * firmware uses the legacy MR_FW_RAID_MAP layout, which is converted
 * field by field (with endian fixups on ldCount).
 */
void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
	int i;
	u16 ld_count;


	/* Two map buffers are ping-ponged via the low bit of map_id. */
	struct MR_DRV_RAID_MAP_ALL *drv_map =
		fusion->ld_drv_map[(instance->map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (instance->supportmax256vd) {
		/* New-format firmware map: same layout, straight copy. */
		memcpy(fusion->ld_drv_map[instance->map_id & 1],
			fusion->ld_map[instance->map_id & 1],
			fusion->current_map_sz);
		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		/* Legacy firmware map: convert into the driver layout. */
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(instance->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);

#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
				"Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
				instance->unique_id, i,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
				fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, fusion->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		/* ldCount narrows from __le32 to __le16 in the driver map. */
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++) {
			/* Struct assignment copies the whole span map entry. */
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
			dev_dbg(&instance->pdev->dev,
				"pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				"pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
				"size 0x%x\n", i, i,
				pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
				pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
				(u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			dev_dbg(&instance->pdev->dev,
				"pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				"pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
				"size 0x%x\n", i, i,
				pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
				pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
				(u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
				"raid map %p LD RAID MAP %p/%p\n", drv_map,
				pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
				&pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
			sizeof(struct MR_DEV_HANDLE_INFO) *
			MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
}
245 | ||
9c915a8c AR |
/*
 * This function will validate Map info data provided by FW
 *
 * Populates the driver map from the firmware map, checks that the
 * reported totalSize matches the size expected for the LD count, then
 * refreshes the span-set and load-balance bookkeeping and converts
 * each LD's capability bits to CPU byte order.
 *
 * Returns 1 when the map is consistent, 0 on a size mismatch.
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID *raid;
	u16 ldCount, num_lds;
	u16 ld;
	u32 expected_size;


	MR_PopulateDrvRaidMap(instance);

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	/* Legacy maps size one MR_LD_SPAN_MAP per LD; the extended map
	 * has a fixed size. */
	if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
		       (unsigned int) expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	/*Convert Raid capability values to CPU arch */
	for (ldCount = 0; ldCount < num_lds; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);
	}

	return 1;
}
303 | ||
/*
 * MR_GetSpanBlock - locate the span covering @row of logical drive @ld
 *
 * Walks each span's quad elements looking for one whose
 * [logStart, logEnd] range contains @row with the row aligned to the
 * quad's diff stride.  On a match, optionally computes the absolute
 * span-relative block through *@span_blk (row offset within the quad,
 * shifted by the stripe size) and returns the span number.
 *
 * Returns SPAN_INVALID when no quad matches or a quad has diff == 0.
 */
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			/* diff of zero would divide by zero below. */
			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk, debugBlk;
					blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					debugBlk = blk;

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
336 | ||
/*
 ******************************************************************************
 *
 * Function to print info about span set created in driver from FW raid map
 *
 * Inputs :
 * map    - LD map
 * ldSpanInfo - ldSpanInfo per HBA instance
 *
 * NOTE(review): this debug-only routine references `instance`, which is
 * not a parameter or global visible here — it looks like it would not
 * compile with SPAN_DEBUG enabled; confirm before turning the flag on.
 */
#if SPAN_DEBUG
static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{

	u8 span;
	u32 element;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		/* Skip target ids not mapped to a logical drive. */
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
			ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			dev_dbg(&instance->pdev->dev, "Span=%x,"
			" number of quads=%x\n", span,
			le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
			block_span_info.noElements));
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			/* A zero width marks the end of the populated sets. */
			if (span_set->span_row_data_width == 0)
				break;

			dev_dbg(&instance->pdev->dev, "Span Set %x:"
				"width=%x, diff=%x\n", element,
				(unsigned int)span_set->span_row_data_width,
				(unsigned int)span_set->diff);
			dev_dbg(&instance->pdev->dev, "logical LBA"
				"start=0x%08lx, end=0x%08lx\n",
				(long unsigned int)span_set->log_start_lba,
				(long unsigned int)span_set->log_end_lba);
			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->span_row_start,
				(long unsigned int)span_set->span_row_end);
			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_row_start,
				(long unsigned int)span_set->data_row_end);
			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
				" end=0x%08lx\n",
				(long unsigned int)span_set->data_strip_start,
				(long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) >=
					element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
						spanBlock[span].block_span_info.
						quad[element];
					dev_dbg(&instance->pdev->dev, "Span=%x,"
						"Quad=%x, diff=%x\n", span,
						element, le32_to_cpu(quad->diff));
					dev_dbg(&instance->pdev->dev,
						"offset_in_span=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->offsetInSpan));
					dev_dbg(&instance->pdev->dev,
						"logical start=0x%08lx, end=0x%08lx\n",
						(long unsigned int)le64_to_cpu(quad->logStart),
						(long unsigned int)le64_to_cpu(quad->logEnd));
				}
			}
		}
	}
	return 0;
}
#endif
421 | ||
/*
 ******************************************************************************
 *
 * This routine calculates the Span block for given row using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld   - Logical drive number
 *    row        - Row number
 *    map    - LD map
 *
 * Outputs :
 *
 *    span          - Span number
 *    block         - Absolute Block number in the physical disk
 *    div_error	   - Devide error code.
 *
 * Returns SPAN_INVALID if no span-set quad covers @row (or a quad has a
 * zero diff, which would otherwise divide by zero).
 */

u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	u32 span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* Zero width marks the end of the populated span sets. */
		if (span_set->span_row_data_width == 0)
			break;

		/* This set ends before @row; try the next one. */
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				/* Row must lie in the quad's range and be
				 * aligned to its diff stride. */
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64 blk;
						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							 << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
486 | ||
/*
 ******************************************************************************
 *
 * This routine calculates the row for given strip using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld   - Logical drive number
 *    Strip        - Strip
 *    map    - LD map
 *
 * Outputs :
 *
 *    row         - row associated with strip
 *
 * Returns -1LLU if no span set covers @strip.
 */

static u64  get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET	*span_set;
	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset;
	u64		span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* Zero width marks the end of the populated span sets. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Offset of the strip within this span set, then split into
		 * position within a row (strip_offset) and whole rows. */
		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		/* Count how many spans the strip offset walks past. */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "Strip 0x%llx,"
			"span_set_Strip 0x%llx, span_set_Row 0x%llx"
			"data width 0x%llx span offset 0x%x\n", strip,
			(unsigned long long)span_set_Strip,
			(unsigned long long)span_set_Row,
			(unsigned long long)span_set->span_row_data_width,
			span_offset);
		dev_info(&instance->pdev->dev, "For strip 0x%llx"
			"row is 0x%llx\n", strip,
			(unsigned long long) span_set->data_row_start +
			(unsigned long long) span_set_Row + (span_offset - 1));
#endif
		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}
554 | ||
555 | ||
/*
 ******************************************************************************
 *
 * This routine calculates the Start Strip for given row using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld   - Logical drive number
 *    row        - Row number
 *    map    - LD map
 *
 * Outputs :
 *
 *    Strip         - Start strip associated with row
 *
 * Returns -1 (all bits set) and logs an error when no quad covers @row.
 */

static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	 *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32		span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* Zero width marks the end of the populated span sets. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				/* Row must lie in the quad's range, aligned
				 * to the quad's diff stride. */
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row"
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}
616 | ||
/*
 ******************************************************************************
 *
 * This routine calculates the Physical Arm for given strip using spanset.
 *
 * Inputs :
 *    instance - HBA instance
 *    ld   - Logical drive number
 *    strip      - Strip
 *    map    - LD map
 *
 * Outputs :
 *
 *    Phys Arm         - Phys Arm associated with strip
 *
 * Returns -1 (all bits set) and logs an error when no span set covers
 * @strip.
 */

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	 *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* Zero width marks the end of the populated span sets. */
		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		/* Position of the strip within a span-set row. */
		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		/* Find the base offset of the span this strip falls in. */
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}
#if SPAN_DEBUG
		dev_info(&instance->pdev->dev, "get_arm_from_strip:"
			"for ld=0x%x strip=0x%lx arm is  0x%x\n", ld,
			(long unsigned int)strip, (strip_offset - span_offset));
#endif
		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip"
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}
679 | ||
680 | /* This Function will return Phys arm */ | |
681 | u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, | |
51087a86 | 682 | struct MR_DRV_RAID_MAP_ALL *map) |
bc93d425 SS |
683 | { |
684 | struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); | |
685 | /* Need to check correct default value */ | |
686 | u32 arm = 0; | |
687 | ||
688 | switch (raid->level) { | |
689 | case 0: | |
690 | case 5: | |
691 | case 6: | |
692 | arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); | |
693 | break; | |
694 | case 1: | |
695 | /* start with logical arm */ | |
696 | arm = get_arm_from_strip(instance, ld, stripe, map); | |
fec3c1b4 | 697 | if (arm != -1U) |
bc93d425 SS |
698 | arm *= 2; |
699 | break; | |
700 | } | |
701 | ||
702 | return arm; | |
703 | } | |
704 | ||
705 | ||
/*
 ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe using spanset
 *
 * Inputs :
 *
 *    ld   - Logical drive number
 *    stripRow        - Stripe number
 *    stripRef    - Reference in stripe
 *
 * Outputs :
 *
 *    span          - Span number
 *    block         - Absolute Block number in the physical disk
 *
 * Returns TRUE on success; FALSE when the arm cannot be resolved.
 * Fills io_info->pdBlock / devHandle / span_arm and the RAID context's
 * region-lock flags and spanArm as side effects.
 */
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef;
	u8      physArm, span;
	u64     row;
	u8	retval = TRUE;
	u8	do_invader = 0;
	u64	*pdBlock = &io_info->pdBlock;
	u16	*pDevHandle = &io_info->devHandle;
	u32	logArm, rowMod, armQ, arm;

	/* Invader/Fury controllers get different region-lock handling. */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		do_invader = 1;

	/*Get row and span from io_info for Uneven Span IO.*/
	row	    = io_info->start_row;
	span	    = io_info->start_span;


	if (raid->level == 6) {
		/* RAID 6 rotates parity: data logically follows the Q
		 * drive, so offset the logical arm past Q and wrap. */
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return FALSE;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return FALSE;

	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		/* Arm's disk is missing: force a region lock for parity
		 * RAID (except on Invader with unused read region type),
		 * or fall back to the mirror arm for RAID 1. */
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) &&
			(!do_invader  || (do_invader &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
					physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
787 | ||
788 | /* | |
789 | ****************************************************************************** | |
790 | * | |
791 | * This routine calculates the arm, span and block for the specified stripe and | |
792 | * reference in stripe. | |
793 | * | |
794 | * Inputs : | |
795 | * | |
796 | * ld - Logical drive number | |
797 | * stripRow - Stripe number | |
798 | * stripRef - Reference in stripe | |
799 | * | |
800 | * Outputs : | |
801 | * | |
802 | * span - Span number | |
803 | * block - Absolute Block number in the physical disk | |
804 | */ | |
36807e67 | 805 | u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, |
bc93d425 SS |
806 | u16 stripRef, struct IO_REQUEST_INFO *io_info, |
807 | struct RAID_CONTEXT *pRAID_Context, | |
51087a86 | 808 | struct MR_DRV_RAID_MAP_ALL *map) |
9c915a8c AR |
809 | { |
810 | struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); | |
811 | u32 pd, arRef; | |
812 | u8 physArm, span; | |
813 | u64 row; | |
814 | u8 retval = TRUE; | |
21d3c710 | 815 | u8 do_invader = 0; |
bc93d425 SS |
816 | u64 *pdBlock = &io_info->pdBlock; |
817 | u16 *pDevHandle = &io_info->devHandle; | |
21d3c710 SS |
818 | |
819 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || | |
820 | instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | |
821 | do_invader = 1; | |
9c915a8c AR |
822 | |
823 | row = mega_div64_32(stripRow, raid->rowDataSize); | |
824 | ||
825 | if (raid->level == 6) { | |
826 | /* logical arm within row */ | |
827 | u32 logArm = mega_mod64(stripRow, raid->rowDataSize); | |
828 | u32 rowMod, armQ, arm; | |
829 | ||
830 | if (raid->rowSize == 0) | |
831 | return FALSE; | |
832 | /* get logical row mod */ | |
833 | rowMod = mega_mod64(row, raid->rowSize); | |
834 | armQ = raid->rowSize-1-rowMod; /* index of Q drive */ | |
835 | arm = armQ+1+logArm; /* data always logically follows Q */ | |
836 | if (arm >= raid->rowSize) /* handle wrap condition */ | |
837 | arm -= raid->rowSize; | |
838 | physArm = (u8)arm; | |
839 | } else { | |
840 | if (raid->modFactor == 0) | |
841 | return FALSE; | |
842 | physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, | |
843 | raid->modFactor), | |
844 | map); | |
845 | } | |
846 | ||
847 | if (raid->spanDepth == 1) { | |
848 | span = 0; | |
849 | *pdBlock = row << raid->stripeShift; | |
850 | } else { | |
bc93d425 SS |
851 | span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); |
852 | if (span == SPAN_INVALID) | |
9c915a8c AR |
853 | return FALSE; |
854 | } | |
855 | ||
856 | /* Get the array on which this span is present */ | |
857 | arRef = MR_LdSpanArrayGet(ld, span, map); | |
858 | pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ | |
859 | ||
860 | if (pd != MR_PD_INVALID) | |
861 | /* Get dev handle from Pd. */ | |
862 | *pDevHandle = MR_PdDevHandleGet(pd, map); | |
863 | else { | |
864 | *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ | |
36807e67 | 865 | if ((raid->level >= 5) && |
21d3c710 SS |
866 | (!do_invader || (do_invader && |
867 | (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) | |
9c915a8c AR |
868 | pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; |
869 | else if (raid->level == 1) { | |
870 | /* Get alternate Pd. */ | |
871 | pd = MR_ArPdGet(arRef, physArm + 1, map); | |
872 | if (pd != MR_PD_INVALID) | |
873 | /* Get dev handle from Pd */ | |
874 | *pDevHandle = MR_PdDevHandleGet(pd, map); | |
875 | } | |
9c915a8c AR |
876 | } |
877 | ||
94cd65dd | 878 | *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); |
9c915a8c AR |
879 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | |
880 | physArm; | |
d2552ebe | 881 | io_info->span_arm = pRAID_Context->spanArm; |
9c915a8c AR |
882 | return retval; |
883 | } | |
884 | ||
/*
 ******************************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing. The start/end row and strip
 * information is calculated then the lock is acquired.
 * This function will return 0 if region lock was acquired OR return num strips
 *
 * Inputs:
 *	instance	- adapter instance (controller type and span support)
 *	io_info		- I/O description (start block, block count, target id,
 *			  read/write); fastpath/span fields are written back
 *	pRAID_Context	- RAID context populated here (region lock, timeout,
 *			  target id, config sequence number)
 *	map		- driver copy of the firmware RAID map
 *	raidLUN		- out: pointer to this LD's LUN array inside the map
 */
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct MR_LD_RAID	*raid;
	u32 ld, stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (instance->UnevenSpanSupport) {
			/* rowDataSize==0 but span data exists: uneven-span LD */
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but has SPAN[0]"
				"rowDataSize = 0x%0x,"
				"but there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;


	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		/*
		 * Uneven spans: strip-to-row mapping must consult the per-span
		 * tables; -1ULL signals the strip could not be mapped.
		 */
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d."
				"Send IO w/o region lock.\n",
				__func__, __LINE__);
			return FALSE;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d"
				"for row 0x%llx,start strip %llx"
				"endSrip %llx\n", __func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
#if SPAN_DEBUG
		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
			" span 0x%x\n", __func__, __LINE__,
			(unsigned long long)start_row,
			(unsigned long long)start_strip,
			(unsigned long long)endStrip, startlba_span);
		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
			"Start span 0x%x\n", (unsigned long long)start_row,
			(unsigned long long)endRow, startlba_span);
#endif
	} else {
		/* even spans: rows are uniform, simple division suffices */
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need to full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row*/
		if (endStrip == endRow*raid->rowDataSize)
			regSize += ref_in_end_stripe+1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For Uneven span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* Add complete rows in the middle of the transfer*/

		if (numRows > 2)
			/* Add complete rows in the middle of the transfer*/
			regSize += (numRows-2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	/* fall back to the map-wide PD timeout if the LD has none */
	pRAID_Context->timeoutValue =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	/*
	 * Invader/Fury take the region-lock type straight from the map;
	 * older controllers use shared-read locking for reads.
	 */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		pRAID_Context->regLockFlags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
	pRAID_Context->regLockLength = cpu_to_le32(regSize);
	pRAID_Context->configSeqNum = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;


	/*Get Phy Params only if FP capable, or else leave it to MR firmware
	  to do the calculation.*/
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible.*/
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		/*
		 * Non-FP read: still resolve phy params per strip so load
		 * balancing has valid span/arm data; bail on first failure.
		 */
		uint stripIdx;
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
				    start_strip + stripIdx,
				    ref_in_start_stripe, io_info,
				    pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
				    start_strip + stripIdx, ref_in_start_stripe,
				    io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}

#if SPAN_DEBUG
	/* Just for testing what arm we get for strip.*/
	if (io_info->IoforUnevenSpan)
		get_arm_from_strip(instance, ld, start_strip, map);
#endif
	return TRUE;
}
1135 | ||
bc93d425 SS |
/*
 ******************************************************************************
 *
 * This routine prepares spanset info from a valid RAID map and stores it into
 * the local copy of ldSpanInfo in the per-instance data structure.
 *
 * For every LD and every quad-depth element it computes the cumulative
 * strip offsets, logical/physical row boundaries and data-strip ranges
 * that the uneven-span lookup helpers consume.
 *
 * Inputs :
 * map    - LD map
 * ldSpanInfo - ldSpanInfo per HBA instance
 *
 */
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{
	u8 span, count;
	u32 element, span_row_width;
	u64 span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		/* skip target ids that do not map to a valid LD slot */
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				/* only spans that define this quad element */
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				/*
				 * Accumulate per-span strip offsets; the total
				 * is the data width (in strips) of one row
				 * across all spans holding this element.
				 */
				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
						printk(KERN_INFO "megasas:"
							"span %x rowDataSize %x\n",
							count, MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize);
					}
				}

				span_set->span_row_data_width = span_row_width;
				/* number of rows covered by this quad element */
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					/* first element starts all ranges at 0 */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					/* later elements continue where the
					 * previous element's ranges ended */
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
						span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			/* no span defined this element: deeper ones won't either */
			if (span == raid->spanDepth)
				break;
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);
#endif

}
1255 | ||
d2552ebe SS |
1256 | void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, |
1257 | struct LD_LOAD_BALANCE_INFO *lbInfo) | |
9c915a8c AR |
1258 | { |
1259 | int ldCount; | |
1260 | u16 ld; | |
1261 | struct MR_LD_RAID *raid; | |
1262 | ||
d2552ebe SS |
1263 | if (lb_pending_cmds > 128 || lb_pending_cmds < 1) |
1264 | lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; | |
1265 | ||
51087a86 | 1266 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { |
d2552ebe | 1267 | ld = MR_TargetIdToLdGet(ldCount, drv_map); |
51087a86 | 1268 | if (ld >= MAX_LOGICAL_DRIVES_EXT) { |
9c915a8c AR |
1269 | lbInfo[ldCount].loadBalanceFlag = 0; |
1270 | continue; | |
1271 | } | |
1272 | ||
d2552ebe SS |
1273 | raid = MR_LdRaidGet(ld, drv_map); |
1274 | if ((raid->level != 1) || | |
1275 | (raid->ldState != MR_LD_STATE_OPTIMAL)) { | |
9c915a8c | 1276 | lbInfo[ldCount].loadBalanceFlag = 0; |
d2552ebe SS |
1277 | continue; |
1278 | } | |
1279 | lbInfo[ldCount].loadBalanceFlag = 1; | |
9c915a8c AR |
1280 | } |
1281 | } | |
1282 | ||
d2552ebe SS |
/*
 * megasas_get_best_arm_pd - pick the better RAID1 mirror arm for an I/O
 *
 * Chooses between the currently mapped arm and its mirror by comparing
 * (a) the distance of each disk's last accessed block from the requested
 * block and (b) the outstanding command counts, allowing a switch only
 * when the pending-command imbalance exceeds lb_pending_cmds.  Updates
 * io_info->pd_after_lb / span_arm and the last-accessed-block state.
 *
 * Return: physical disk index (PD) of the selected arm.
 */
u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	u16 pend0, pend1, ld;
	u64 diff0, diff1;
	u8 bestArm, pd0, pd1, span, arm;
	u32 arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	/* decode span and arm from the packed span_arm byte */
	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);


	fusion = instance->ctrl_context;
	/* map_id's low bit selects the active copy of the driver RAID map */
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	/* pd0 is the current arm; pd1 the mirror (wraps at row size) */
	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* get the pending cmds for the data and mirror arms */
	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

	/* override the proximity choice if the winner is too backlogged */
	if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
			(bestArm != arm && pend1 > pend0 + lb_pending_cmds))
		bestArm ^= 1;

	/* Update the last accessed block on the correct pd */
	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
#if SPAN_DEBUG
	if (arm != bestArm)
		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
			"occur - span 0x%x arm 0x%x bestArm 0x%x "
			"io_info->span_arm 0x%x\n",
			span, arm, bestArm, io_info->span_arm);
#endif
	return io_info->pd_after_lb;
}
1340 | ||
d2552ebe SS |
1341 | u16 get_updated_dev_handle(struct megasas_instance *instance, |
1342 | struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) | |
9c915a8c | 1343 | { |
d2552ebe | 1344 | u8 arm_pd; |
9c915a8c | 1345 | u16 devHandle; |
d2552ebe SS |
1346 | struct fusion_context *fusion; |
1347 | struct MR_DRV_RAID_MAP_ALL *drv_map; | |
9c915a8c | 1348 | |
d2552ebe SS |
1349 | fusion = instance->ctrl_context; |
1350 | drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; | |
9c915a8c | 1351 | |
d2552ebe SS |
1352 | /* get best new arm (PD ID) */ |
1353 | arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info); | |
1354 | devHandle = MR_PdDevHandleGet(arm_pd, drv_map); | |
1355 | atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); | |
9c915a8c AR |
1356 | return devHandle; |
1357 | } |