 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *		Swaminathan K S
 *		Arun Chandrashekhar
 *		Manju R
 *		Rasheed
 *		Shakeel Bukhari
 *
 *
 * This module contains functions for device drivers
 * to get pd-ld mapping information.
 *
 * **********************************************************************
 */

#include <sys/scsi/scsi.h>
#include "mr_sas.h"
#include "ld_pd_map.h"

/*
 * The routines in this file check whether fast-path I/O is possible on
 * a logical drive, using the RAID map and event information available
 * in the driver.
 */
#define	MR_LD_STATE_OPTIMAL	3
#define	ABS_DIFF(a, b)	(((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
    PLD_LOAD_BALANCE_INFO);

#define	FALSE	0
#define	TRUE	1

typedef	U64	REGION_KEY;
typedef	U32	REGION_LEN;
extern int	debug_level_g;

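/*
 * The helpers below are thin accessors into the firmware RAID map
 * (MR_FW_RAID_MAP_ALL), translating a logical drive, array, arm, span,
 * or physical drive index into the corresponding map entry.  They do
 * no bounds checking of their own; callers are expected to range-check
 * indices (e.g. ld against MAX_LOGICAL_DRIVES) first.
 */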
MR_LD_RAID
*MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

U16
MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}


static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
}

static U8
MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
}

static U16
MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.arMapInfo[ar].pd[arm]);
}

static U16
MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

static U16
MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.devHndlInfo[pd].curDevHdl);
}

U16
MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldTgtIdToLd[ldTgtId]);
}

U16
MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	return (raid->capability.ldPiMode == 0x8);
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
}

/*
 * This function validates the map info data provided by FW.  The map is
 * variable-sized: it ends with one MR_LD_SPAN_MAP entry per logical
 * drive, so the expected size is the base structure size minus the one
 * span map declared inline, plus ldCount span maps.
 */
U8
MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
	U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);

	if (pFwRaidMap->totalSize != fwsize) {
		con_log(CL_ANN1, (CE_NOTE,
		    "map info structure size 0x%x does "
		    "not match the ld count\n", fwsize));
		/* sizeof (foo) returns size_t, which is *LONG*. */
		con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",
		    (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));

		return (0);
	}

	mr_update_load_balance_params(map, lbInfo);

	return (1);
}
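/*
 * Each span is described by a list of quad elements.  A quad covers
 * rows logStart through logEnd in steps of diff, so a row belongs to a
 * quad when it lies in that range and (row - logStart) is a multiple
 * of diff.  A diff of zero would mean dividing by zero below, so it is
 * flagged to the caller through div_error instead.
 */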
U32
MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
    int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT	*qe;
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			qe = &pSpanBlock->block_span_info.quads[j];
			if (qe->diff == 0) {
				*div_error = 1;
				return (span);
			}
			if (qe->logStart <= row && row <= qe->logEnd &&
			    (((row - qe->logStart) % qe->diff)) == 0) {
				if (span_blk != NULL) {
					U64	blk;

					blk = ((row - qe->logStart) /
					    (qe->diff));
					blk = (blk + qe->offsetInSpan) <<
					    raid->stripeShift;
					*span_blk = blk;
				}
				return (span);
			}
		}
	}
	return (span);
}


/*
 * *************************************************************
 *
 * This routine calculates the arm, span and block for
 * the specified stripe and reference in stripe.
 *
 * Inputs :
 *	ld	 - Logical drive number
 *	stripRow - Stripe number
 *	stripRef - Reference in stripe
 *
 * Outputs :
 *	span	 - Span number
 *	block	 - Absolute Block number in the physical disk
 */
U8
MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
    U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		pd, arRef;
	U8		physArm, span;
	U64		row;
	int		error_code = 0;
	U8		retval = TRUE;
	U32		rowMod;
	U32		armQ;
	U32		arm;

	ASSERT(raid->rowDataSize != 0);

	row = (stripRow / raid->rowDataSize);

	if (raid->level == 6) {
		U32 logArm = (stripRow % (raid->rowDataSize));

		if (raid->rowSize == 0) {
			return (FALSE);
		}
		/*
		 * RAID-6 rotates its parity from row to row: armQ is the
		 * arm holding the Q parity for this row, and the data
		 * arms are counted from the arm after Q, wrapping around
		 * at rowSize.
		 */
		rowMod = (row % (raid->rowSize));
		armQ = raid->rowSize - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= raid->rowSize)
			arm -= raid->rowSize;
		physArm = (U8)arm;
	} else {
		if (raid->modFactor == 0)
			return (FALSE);
		physArm = MR_LdDataArmGet(ld,
		    (stripRow % (raid->modFactor)), map);
	}
	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else
		span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);

	if (error_code == 1)
		return (FALSE);

	/* Get the array on which this span is present. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	/* Get the Pd. */
	pd = MR_ArPdGet(arRef, physArm, map);
	/* Get dev handle from Pd. */
	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	} else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		if ((raid->level >= 5) &&
		    ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
		    (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		} else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			/* Get dev handle from Pd. */
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;

	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
	    physArm;

	return (retval);
}
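/*
 * Worked example of the RAID-6 arm rotation above, with hypothetical
 * numbers: assume a span with rowSize = 4 arms, of which rowDataSize =
 * 2 carry data.  For stripRow = 5: row = 5 / 2 = 2 and logArm = 5 % 2
 * = 1.  Then rowMod = 2 % 4 = 2, armQ = 4 - 1 - 2 = 1, and arm =
 * 1 + 1 + 1 = 3, which is below rowSize, so physArm = 3.
 */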
/*
 * ***********************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row
 * and strip information is calculated, then the region lock is set up.
 * It returns TRUE when the RAID context was built successfully, and
 * FALSE when the map, the target logical drive, or its geometry is
 * invalid.
 */

U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld, stripSize, stripe_mask;
	U64		endLba, endStrip, endRow;
	U64		start_row, start_strip;
	REGION_KEY	regStart;
	REGION_LEN	regSize;
	U8		num_strips, numRows;
	U16		ref_in_start_stripe;
	U16		ref_in_end_stripe;

	U64		ldStartBlock;
	U32		numBlocks, ldTgtId;
	U8		isRead;
	U8		retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure we are not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);
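	/*
	 * A strip is one stripe-unit-sized chunk on a single arm; a row
	 * is one stripe across all data arms (rowDataSize strips).
	 * num_strips and numRows bound the request in those units and
	 * drive both the fast-path decision and the region-lock sizing
	 * below.
	 */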
	/*
	 * calculate region info.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	/*
	 * Size the region lock.  A single-strip I/O locks only the
	 * blocks it touches; larger I/Os widen the region to cover the
	 * partial first strip, the rows in between, and the portion of
	 * the final strip, in units of the stripe.
	 */
	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);

	} else if (isRead) {
		uint_t stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}
	return (TRUE);
}


static void
mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
    PLD_LOAD_BALANCE_INFO lbInfo)
{
	int		ldCount;
	U16		ld;
	MR_LD_RAID	*raid;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);

		if (ld >= MAX_LOGICAL_DRIVES) {
			con_log(CL_ANN1,
			    (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
			continue;
		}

		raid = MR_LdRaidGet(ld, map);

		/* Two drive Optimal RAID 1 */
		if ((raid->level == 1) && (raid->rowSize == 2) &&
		    (raid->spanDepth == 1) &&
		    raid->ldState == MR_LD_STATE_OPTIMAL) {
			U32 pd, arRef;

			lbInfo[ldCount].loadBalanceFlag = 1;

			/* Get the array on which this span is present. */
			arRef = MR_LdSpanArrayGet(ld, 0, map);

			pd = MR_ArPdGet(arRef, 0, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[0] =
			    MR_PdDevHandleGet(pd, map);

			pd = MR_ArPdGet(arRef, 1, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[1] =
			    MR_PdDevHandleGet(pd, map);
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas: ld=%d load balancing enabled \n",
			    ldCount));
		} else {
			lbInfo[ldCount].loadBalanceFlag = 0;
		}
	}
}
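/*
 * RAID-1 read load balancing.  megasas_get_best_arm() prefers the arm
 * whose last accessed block is nearest the new request (the shorter
 * seek), then flips the choice when the pending-command counts of the
 * two arms are imbalanced by more than 16, so a busy arm is not
 * flooded further.
 */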
U8
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
    U32 count)
{
	U16	pend0, pend1;
	U64	diff0, diff1;
	U8	bestArm;

	/* get the pending cmds for the data and mirror arms */
	pend0 = lbInfo->scsi_pending_cmds[0];
	pend1 = lbInfo->scsi_pending_cmds[1];

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
	bestArm = (diff0 <= diff1 ? 0 : 1);

	if ((bestArm == arm && pend0 > pend1 + 16) ||
	    (bestArm != arm && pend1 > pend0 + 16)) {
		bestArm ^= 1;
	}

	/* Update the last accessed block on the correct pd */
	lbInfo->last_accessed_block[bestArm] = block + count - 1;
	return (bestArm);
}

U16
get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
    struct IO_REQUEST_INFO *io_info)
{
	U8	arm, old_arm;
	U16	devHandle;

	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;

	/* get best new arm */
	arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
	    io_info->numBlocks);

	devHandle = lbInfo->raid1DevHandle[arm];

	lbInfo->scsi_pending_cmds[arm]++;

	return (devHandle);
}
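
/*
 * Usage sketch (assumed caller behaviour, for illustration only): when
 * MR_BuildRaidContext() approves fast path for a read and
 * lbInfo[ld].loadBalanceFlag is set, the issuing path would swap
 * io_info->devHandle for the handle returned by
 * get_updated_dev_handle(), and decrement scsi_pending_cmds[] for that
 * arm on command completion to keep the pending counts accurate.
 */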