Print this page
| Split |
Close |
| Expand all |
| Collapse all |
--- old/./ld_pd_map.c
+++ new/./ld_pd_map.c
1 1 /*
2 2 * **********************************************************************
3 3 *
4 4 * ld_pd_map.c
5 5 *
6 6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 7 * Copyright (c) 2008-2012, LSI Logic Corporation.
8 8 * All rights reserved.
9 9 *
10 10 * Version:
11 11 * Author:
12 12 * Swaminathan K S
13 13 * Arun Chandrashekhar
14 14 * Manju R
15 15 * Rasheed
16 16 * Shakeel Bukhari
17 17 *
18 18 *
19 19 * This module contains functions for device drivers
20 20 * to get pd-ld mapping information.
21 21 *
22 22 * **********************************************************************
23 23 */
24 24
25 25 #include <sys/scsi/scsi.h>
26 26 #include "ld_pd_map.h"
27 27 #include "mr_sas.h"
28 28
29 29 /*
30 30 * This function will check if FAST IO is possible on this logical drive
31 31 * by checking the EVENT information availabe in the driver
32 32 */
33 33 #define MR_LD_STATE_OPTIMAL 3
34 34 #define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
35 35
36 36 void
37 37 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo);
38 38
39 39 U8
40 40 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
41 41 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
42 42
43 43 #define FALSE 0
44 44 #define TRUE 1
45 45
46 46 typedef U64 REGION_KEY;
47 47 typedef U32 REGION_LEN;
48 48 extern int debug_level_g;
49 49
50 50
51 51 MR_LD_RAID
52 52 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
53 53 {
54 54 return (&map->raidMap.ldSpanMap[ld].ldRaid);
55 55 }
56 56
57 57 U16 MR_GetLDTgtId
58 58 (U32 ld, MR_FW_RAID_MAP_ALL *map)
59 59 {
60 60 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
61 61 }
62 62
63 63
64 64 static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
65 65 {
66 66 return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
67 67 }
68 68
69 69 static U8 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
70 70 {
71 71 return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
72 72 }
73 73
74 74 static U16 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
75 75 {
76 76 return (map->raidMap.arMapInfo[ar].pd[arm]);
77 77 }
78 78
79 79 static U16 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
80 80 {
81 81 return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
82 82 }
83 83
84 84 static U16 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
85 85 {
86 86 return (map->raidMap.devHndlInfo[pd].curDevHdl);
87 87 }
88 88
89 89 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
90 90 {
91 91 return (map->raidMap.ldTgtIdToLd[ldTgtId]);
92 92 }
93 93
94 94 U16 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
95 95 {
96 96 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
97 97 MR_LD_RAID *raid;
98 98 U32 ld;
99 99
100 100 ld = MR_TargetIdToLdGet(ldTgtId, map);
101 101
102 102 if (ld >= MAX_LOGICAL_DRIVES) {
103 103 return (FALSE);
104 104 }
105 105
106 106 raid = MR_LdRaidGet(ld, map);
107 107
108 108 return (raid->capability.ldPiMode == 0x8);
109 109 }
110 110
111 111 static MR_LD_SPAN *MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
112 112 {
113 113 return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
114 114 }
115 115
/*
 * Validate the RAID map data provided by firmware: the map's
 * self-reported totalSize must equal the size computed from its
 * logical-drive count (base structure plus one MR_LD_SPAN_MAP per
 * LD beyond the first).  On success the load-balance parameters in
 * 'lbInfo' are refreshed from the new map.
 *
 * Returns 1 when the map is consistent, 0 otherwise.
 */
U8 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;


	/* Expected size: sizeof map with exactly ldCount span maps. */
	if (pFwRaidMap->totalSize !=
	    (sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))) {

		con_log(CL_ANN1, (CE_NOTE,\
		    "map info structure size 0x%x\
		    is not matching with ld count\n",\
		    (int)((sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP)) +\
		    (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))));

		con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
		    (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));

		return (0);
	}

	/* Map accepted: recompute RAID-1 load balancing state. */
	mr_update_load_balance_params(map, lbInfo);

	return (1);
}
144 144
/*
 * Find the span of logical drive 'ld' that contains logical 'row'
 * and, when 'span_blk' is non-NULL, compute the starting block of
 * that row within the span (scaled by the strip size).
 *
 * Sets *div_error to 1 and returns early if a quad element carries
 * a zero 'diff', which would otherwise divide by zero below.
 *
 * Returns the matching span index.  If no quad matches, the value
 * returned equals raid->spanDepth (one past the last span) and
 * *div_error is NOT set -- callers must range-check the result.
 */
U32
MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT *quad;
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	U32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];
			/* Guard the '%' and '/' below against diff == 0. */
			if(quad->diff == 0) {
				*div_error=1;
				return (span);
			}
			/*
			 * The row belongs to this quad when it lies in
			 * [logStart, logEnd] and is aligned on 'diff'.
			 */
			if (quad->logStart <= row &&
			    row <= quad->logEnd &&
			    (((row-quad->logStart) % quad->diff)) == 0) {
				if (span_blk != NULL) {
					U64 blk;
					blk = ((row-quad->logStart) /
					    (quad->diff));

					/*
					 * Block offset of the row inside the
					 * span, in units of the strip size.
					 */
					blk = (blk + quad->offsetInSpan) << raid->stripeShift;
					*span_blk = blk;
				}
				return (span);
			}
		}
	}
	return (span);
}
177 177
178 178
179 179 /*
180 180 * *************************************************************
181 181 *
182 182 * This routine calculates the arm, span and block for
183 183 * the specified stripe and reference in stripe.
184 184 *
185 185 * Inputs :
186 186 *
187 187 * ld - Logical drive number
188 188 * stripRow - Stripe number
189 189 * stripRef - Reference in stripe
190 190 *
191 191 * Outputs :
192 192 *
193 193 * span - Span number
194 194 * block - Absolute Block number in the physical disk
195 195 */
196 196 U8
197 197 MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow, U16 stripRef,
198 198 U64 *pdBlock, U16 *pDevHandle, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
199 199 MR_FW_RAID_MAP_ALL *map)
200 200 {
201 201 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
202 202 U32 pd, arRef;
203 203 U8 physArm, span;
204 204 U64 row;
205 205 int error_code=0;
206 206 U8 retval = TRUE;
207 207 U32 rowMod;
208 208 U32 armQ;
209 209 U32 arm;
210 210
211 211 row = (stripRow / raid->rowDataSize);
212 212
213 213 if (raid->level == 6) {
214 214 U32 logArm = (stripRow % (raid->rowDataSize));
215 215
216 216 if (raid->rowSize == 0) {
217 217 return (FALSE);
218 218 }
219 219 rowMod = (row % (raid->rowSize));
220 220 armQ = raid->rowSize-1-rowMod;
221 221 arm = armQ+1+logArm;
222 222 if (arm >= raid->rowSize)
223 223 arm -= raid->rowSize;
224 224 physArm = (U8)arm;
225 225 } else {
226 226 if (raid->modFactor == 0)
227 227 return FALSE;
228 228 physArm = MR_LdDataArmGet(ld,
229 229 (stripRow % (raid->modFactor)), map);
230 230 }
231 231 if (raid->spanDepth == 1) {
232 232 span = 0;
233 233 *pdBlock = row << raid->stripeShift;
234 234 } else
235 235 span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
236 236
237 237 if (error_code == 1)
238 238 return FALSE;
239 239
240 240 // Get the array on which this span is present.
241 241 arRef = MR_LdSpanArrayGet(ld, span, map);
242 242 // Get the Pd.
243 243 pd = MR_ArPdGet(arRef, physArm, map);
244 244 // Get dev handle from Pd.
245 245 if (pd != MR_PD_INVALID) {
246 246 *pDevHandle = MR_PdDevHandleGet(pd, map);
247 247 }
248 248 else {
249 249 *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
250 250 if ( (raid->level >= 5) &&
251 251 ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
252 252 (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
253 253 raid->regTypeReqOnRead != REGION_TYPE_UNUSED)) )
254 254 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
255 255 else if (raid->level == 1) {
256 256 pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
257 257 if (pd != MR_PD_INVALID)
258 258 *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
259 259 }
260 260 }
261 261
262 262 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
263 263
264 264 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
265 265
266 266 return retval;
267 267 }
268 268
269 269
270 270
/*
 * ***********************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row
 * and strip information is calculated, the region-lock parameters are
 * filled into pRAID_Context, and, when the I/O is FastPath capable,
 * the physical parameters are resolved via MR_GetPhyParams().
 *
 * Returns FALSE (0) when no map is available, the target id is
 * invalid, rowDataSize is 0, or MR_GetPhyParams() fails for a
 * FastPath I/O; otherwise returns TRUE.
 */
U8
MR_BuildRaidContext(struct mrsas_instance *instance, struct IO_REQUEST_INFO *io_info,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid;
	U32 ld, stripSize, stripe_mask;
	U64 endLba, endStrip, endRow;
	U64 start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	U8 num_strips, numRows;
	U16 ref_in_start_stripe;
	U16 ref_in_end_stripe;

	U64 ldStartBlock;
	U32 numBlocks, ldTgtId;
	U8 isRead;
	U8 retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* Without a RAID map nothing can be resolved. */
	if ( map == NULL ) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/* No logical drive behind this target id. */
	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure is not deviding by zero */
	if (raid->rowDataSize == 0)
		return FALSE;
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info: assume a full first row to start with,
	 * then widen/narrow the region below.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support
	 */
	if ( !raid->capability.ldPiMode ) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		/* Single strip in a single row: lock only the blocks
		 * actually accessed. */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/* Start strip is the last of its row: the region begins
		 * at the offset inside that strip. */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		/* Account for whole middle rows. */
		if (numRows > 2) {
			regSize += (numRows-2) << raid->stripeShift;
		}

		/* End strip is the first of its row: only the accessed
		 * part counts; otherwise a whole strip is added. */
		if (endStrip == endRow*raid->rowDataSize) {
			regSize += ref_in_end_stripe+1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	/* Invader honours the per-LD read lock type; others use a
	 * shared read lock. */
	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
		pRAID_Context->regLockFlags =
		    (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags =
		    (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip, ref_in_start_stripe,
		    &io_info->pdBlock, &io_info->devHandle,
		    pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return retval;

	} else if (isRead) {
		uint stripIdx;
		/* Resolve every strip of the read; give up (firmware will
		 * process the I/O) on the first failure. */
		for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld, start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle, pRAID_Context, map))
				return TRUE;
		}
	}
	return (TRUE);
}
429 429
430 430
431 431 void
432 432 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
433 433 {
434 434 int ldCount;
435 435 U16 ld;
436 436 MR_LD_RAID *raid;
437 437
438 438 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
439 439 {
440 440 ld = MR_TargetIdToLdGet(ldCount, map);
441 441
442 442 if (ld >= MAX_LOGICAL_DRIVES) {
443 443 con_log(CL_ANN1, (CE_NOTE,
444 444 "mrsas: ld=%d Invalid ld \n", ld));
445 445 continue;
446 446 }
447 447
448 448 raid = MR_LdRaidGet(ld, map);
449 449
450 450 /* Two drive Optimal RAID 1 */
451 451 if ((raid->level == 1) && (raid->rowSize == 2) && (raid->spanDepth == 1)
452 452 && raid->ldState == MR_LD_STATE_OPTIMAL) {
453 453 U32 pd, arRef;
454 454
455 455 lbInfo[ldCount].loadBalanceFlag = 1;
456 456 //U8 physArm = 0;
457 457
458 458 arRef = MR_LdSpanArrayGet(ld, 0, map); // Get the array on which this span is present.
459 459
460 460 pd = MR_ArPdGet(arRef, 0, map); // Get the Pd.
461 461 lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
462 462
463 463 pd = MR_ArPdGet(arRef, 1, map); // Get the Pd.
464 464 lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
465 465 con_log(CL_ANN1, (CE_NOTE,\
466 466 "mrsas: ld=%d load balancing enabled \n", ldCount));
467 467 } else {
468 468 lbInfo[ldCount].loadBalanceFlag = 0;
469 469 }
470 470 }
471 471 }
472 472
473 473
474 474 U8 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block, U32 count)
475 475 {
476 476 U16 pend0, pend1;
477 477 U64 diff0, diff1;
478 478 U8 bestArm;
479 479
480 480 /* get the pending cmds for the data and mirror arms */
481 481 pend0 = lbInfo->scsi_pending_cmds[0];
482 482 pend1 = lbInfo->scsi_pending_cmds[1];
483 483
484 484 /* Determine the disk whose head is nearer to the req. block */
485 485 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
486 486 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
487 487 bestArm = (diff0 <= diff1 ? 0 : 1);
488 488
489 489 if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
490 490 bestArm ^= 1;
491 491
492 492 /* Update the last accessed block on the correct pd */
493 493 lbInfo->last_accessed_block[bestArm] = block + count - 1;
494 494 return bestArm;
495 495 }
496 496
497 497 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
498 498 {
499 499 U8 arm, old_arm;
500 500 U16 devHandle;
501 501
502 502 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
503 503
504 504 /* get best new arm */
505 505 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
506 506
507 507 devHandle = lbInfo->raid1DevHandle[arm];
508 508
509 509 lbInfo->scsi_pending_cmds[arm]++;
510 510
511 511 return devHandle;
512 512 }
|
↓ open down ↓ |
367 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX