Print this page
Code review comments
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/mr_sas/ld_pd_map.c
+++ new/usr/src/uts/common/io/mr_sas/ld_pd_map.c
1 1 /*
2 2 * **********************************************************************
3 3 *
4 4 * ld_pd_map.c
5 5 *
6 6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 7 * Copyright (c) 2008-2012, LSI Logic Corporation.
8 8 * All rights reserved.
9 9 *
10 10 * Version:
11 11 * Author:
12 12 * Swaminathan K S
13 13 * Arun Chandrashekhar
14 14 * Manju R
15 15 * Rasheed
16 16 * Shakeel Bukhari
17 17 *
18 18 *
19 19 * This module contains functions for device drivers
20 20 * to get pd-ld mapping information.
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
21 21 *
22 22 * **********************************************************************
23 23 */
24 24
25 25 #include <sys/scsi/scsi.h>
26 26 #include "mr_sas.h"
27 27 #include "ld_pd_map.h"
28 28
29 29 /*
30 30 * This function will check if FAST IO is possible on this logical drive
31 - * by checking the EVENT information availabe in the driver
31 + * by checking the EVENT information available in the driver
32 32 */
33 33 #define MR_LD_STATE_OPTIMAL 3
34 34 #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
35 35
36 36 static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
37 37 PLD_LOAD_BALANCE_INFO);
38 38
39 39 #define FALSE 0
40 40 #define TRUE 1
41 41
42 42 typedef U64 REGION_KEY;
43 43 typedef U32 REGION_LEN;
44 44 extern int debug_level_g;
45 45
46 46
47 47 MR_LD_RAID
48 48 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
49 49 {
50 50 return (&map->raidMap.ldSpanMap[ld].ldRaid);
51 51 }
52 52
53 53 U16
54 54 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
55 55 {
56 56 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
57 57 }
58 58
59 59
60 60 static MR_SPAN_BLOCK_INFO *
61 61 MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
62 62 {
63 63 return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
64 64 }
65 65
66 66 static U8
67 67 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
68 68 {
69 69 return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
70 70 }
71 71
72 72 static U16
73 73 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
74 74 {
75 75 return (map->raidMap.arMapInfo[ar].pd[arm]);
76 76 }
77 77
78 78 static U16
79 79 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
80 80 {
81 81 return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
82 82 }
83 83
84 84 static U16
85 85 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
86 86 {
87 87 return (map->raidMap.devHndlInfo[pd].curDevHdl);
88 88 }
89 89
90 90 U16
91 91 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
92 92 {
93 93 return (map->raidMap.ldTgtIdToLd[ldTgtId]);
94 94 }
95 95
96 96 U16
97 97 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
98 98 {
99 99 MR_LD_RAID *raid;
100 100 U32 ld;
101 101
102 102 ld = MR_TargetIdToLdGet(ldTgtId, map);
103 103
104 104 if (ld >= MAX_LOGICAL_DRIVES) {
105 105 return (FALSE);
106 106 }
107 107
108 108 raid = MR_LdRaidGet(ld, map);
109 109
110 110 return (raid->capability.ldPiMode == 0x8);
111 111 }
112 112
113 113 static MR_LD_SPAN *
114 114 MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
115 115 {
116 116 return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
117 117 }
118 118
119 119 /*
120 120 * This function will validate Map info data provided by FW
121 121 */
122 122 U8
123 123 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
124 124 {
125 125 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
126 126 U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
127 127 (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);
128 128
129 129 if (pFwRaidMap->totalSize != fwsize) {
130 130
131 131 con_log(CL_ANN1, (CE_NOTE,
132 132 "map info structure size 0x%x is "
133 133 "not matching with ld count\n", fwsize));
134 134 /* sizeof (foo) returns size_t, which is *LONG*. */
135 135 con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
136 136 (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
137 137
138 138 return (0);
139 139 }
140 140
141 141 mr_update_load_balance_params(map, lbInfo);
142 142
143 143 return (1);
144 144 }
145 145
146 146 U32
147 147 MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
148 148 int *div_error)
149 149 {
150 150 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
151 151 MR_QUAD_ELEMENT *qe;
152 152 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
153 153 U32 span, j;
154 154
155 155 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
156 156 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
157 157 qe = &pSpanBlock->block_span_info.quads[j];
158 158 if (qe->diff == 0) {
159 159 *div_error = 1;
160 160 return (span);
161 161 }
162 162 if (qe->logStart <= row && row <= qe->logEnd &&
163 163 (((row - qe->logStart) % qe->diff)) == 0) {
164 164 if (span_blk != NULL) {
165 165 U64 blk;
166 166 blk = ((row - qe->logStart) /
167 167 (qe->diff));
168 168
169 169 blk = (blk + qe->offsetInSpan) <<
170 170 raid->stripeShift;
171 171 *span_blk = blk;
172 172 }
173 173 return (span);
174 174 }
175 175 }
176 176 }
177 177 return (span);
178 178 }
179 179
180 180
181 181 /*
182 182 * *************************************************************
183 183 *
184 184 * This routine calculates the arm, span and block for
185 185 * the specified stripe and reference in stripe.
186 186 *
187 187 * Inputs :
188 188 *
189 189 * ld - Logical drive number
190 190 * stripRow - Stripe number
191 191 * stripRef - Reference in stripe
192 192 *
193 193 * Outputs :
194 194 *
195 195 * span - Span number
196 196 * block - Absolute Block number in the physical disk
197 197 */
198 198 U8
199 199 MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
200 200 U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
201 201 MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
202 202 {
|
↓ open down ↓ |
161 lines elided |
↑ open up ↑ |
203 203 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
204 204 U32 pd, arRef;
205 205 U8 physArm, span;
206 206 U64 row;
207 207 int error_code = 0;
208 208 U8 retval = TRUE;
209 209 U32 rowMod;
210 210 U32 armQ;
211 211 U32 arm;
212 212
213 + ASSERT(raid->rowDataSize != 0);
214 +
213 215 row = (stripRow / raid->rowDataSize);
214 216
215 217 if (raid->level == 6) {
216 218 U32 logArm = (stripRow % (raid->rowDataSize));
217 219
218 220 if (raid->rowSize == 0) {
219 221 return (FALSE);
220 222 }
221 223 rowMod = (row % (raid->rowSize));
222 224 armQ = raid->rowSize-1-rowMod;
223 - arm = armQ+1+logArm;
225 + arm = armQ + 1 + logArm;
224 226 if (arm >= raid->rowSize)
225 227 arm -= raid->rowSize;
226 228 physArm = (U8)arm;
227 229 } else {
228 230 if (raid->modFactor == 0)
229 231 return (FALSE);
230 232 physArm = MR_LdDataArmGet(ld,
231 233 (stripRow % (raid->modFactor)), map);
232 234 }
233 235 if (raid->spanDepth == 1) {
234 236 span = 0;
235 237 *pdBlock = row << raid->stripeShift;
236 238 } else
237 239 span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
238 240
239 241 if (error_code == 1)
240 242 return (FALSE);
241 243
242 244 /* Get the array on which this span is present. */
243 245 arRef = MR_LdSpanArrayGet(ld, span, map);
244 246 /* Get the Pd. */
245 247 pd = MR_ArPdGet(arRef, physArm, map);
246 248 /* Get dev handle from Pd. */
247 249 if (pd != MR_PD_INVALID) {
248 250 *pDevHandle = MR_PdDevHandleGet(pd, map);
249 251 } else {
250 252 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
251 253 if ((raid->level >= 5) &&
252 254 ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
253 255 (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
254 256 raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
255 257 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
256 258 } else if (raid->level == 1) {
257 259 /* Get Alternate Pd. */
258 260 pd = MR_ArPdGet(arRef, physArm + 1, map);
259 261 /* Get dev handle from Pd. */
260 262 if (pd != MR_PD_INVALID)
261 263 *pDevHandle = MR_PdDevHandleGet(pd, map);
262 264 }
263 265 }
264 266
265 267 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
266 268
267 269 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
268 270 physArm;
269 271
270 272 return (retval);
271 273 }
272 274
273 275
274 276
/*
 * ***********************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row and strip
 * information is calculated then the lock is acquired.
 * This function will return 0 if region lock
 * was acquired OR return num strips ???
 *
 * NOTE(review): despite the comment above, the visible code returns
 * TRUE/FALSE (and MR_GetPhyParams's retval on the fastpath branch), never
 * a strip count — confirm against callers.
 */

U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld, stripSize, stripe_mask;
	U64		endLba, endStrip, endRow;
	U64		start_row, start_strip;
	REGION_KEY	regStart;
	REGION_LEN	regSize;
	U8		num_strips, numRows;
	U16		ref_in_start_stripe;
	U16		ref_in_end_stripe;

	U64		ldStartBlock;
	U32		numBlocks, ldTgtId;
	U8		isRead;
	U8		retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* no RAID map yet: the I/O cannot take the fast path */
	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure is not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info: default region is one full stripe
	 * starting at the first row; narrowed/extended below.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		/* single-strip I/O locks exactly the accessed blocks */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/* I/O starts in the last strip of the first row */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		/* whole middle rows contribute a full stripe each */
		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		/* I/O ends in the first strip of the last row */
		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	/* Invader honors per-LD read lock type; others share reads */
	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);

	} else if (isRead) {
		uint_t stripIdx;

		/*
		 * Non-fastpath read: walk every strip so load-balance
		 * state stays current; bail out on the first failure.
		 */
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}
	return (TRUE);
}
441 444
442 445
443 446 void
444 447 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
445 448 PLD_LOAD_BALANCE_INFO lbInfo)
446 449 {
447 450 int ldCount;
448 451 U16 ld;
449 452 MR_LD_RAID *raid;
450 453
451 454 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
452 455 ld = MR_TargetIdToLdGet(ldCount, map);
453 456
454 457 if (ld >= MAX_LOGICAL_DRIVES) {
455 458 con_log(CL_ANN1,
456 459 (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
457 460 continue;
458 461 }
459 462
460 463 raid = MR_LdRaidGet(ld, map);
461 464
462 465 /* Two drive Optimal RAID 1 */
463 466 if ((raid->level == 1) && (raid->rowSize == 2) &&
464 467 (raid->spanDepth == 1) &&
465 468 raid->ldState == MR_LD_STATE_OPTIMAL) {
466 469 U32 pd, arRef;
467 470
468 471 lbInfo[ldCount].loadBalanceFlag = 1;
469 472
470 473 /* Get the array on which this span is present. */
471 474 arRef = MR_LdSpanArrayGet(ld, 0, map);
472 475
473 476 pd = MR_ArPdGet(arRef, 0, map); /* Get the Pd. */
474 477 /* Get dev handle from Pd. */
475 478 lbInfo[ldCount].raid1DevHandle[0] =
476 479 MR_PdDevHandleGet(pd, map);
477 480
478 481 pd = MR_ArPdGet(arRef, 1, map); /* Get the Pd. */
479 482 /* Get dev handle from Pd. */
480 483 lbInfo[ldCount].raid1DevHandle[1] =
481 484 MR_PdDevHandleGet(pd, map);
482 485 con_log(CL_ANN1, (CE_NOTE,
483 486 "mrsas: ld=%d load balancing enabled \n", ldCount));
484 487 } else {
485 488 lbInfo[ldCount].loadBalanceFlag = 0;
486 489 }
487 490 }
488 491 }
489 492
490 493
491 494 U8
492 495 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
493 496 U32 count)
494 497 {
495 498 U16 pend0, pend1;
496 499 U64 diff0, diff1;
497 500 U8 bestArm;
498 501
499 502 /* get the pending cmds for the data and mirror arms */
500 503 pend0 = lbInfo->scsi_pending_cmds[0];
501 504 pend1 = lbInfo->scsi_pending_cmds[1];
502 505
503 506 /* Determine the disk whose head is nearer to the req. block */
504 507 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
505 508 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
506 509 bestArm = (diff0 <= diff1 ? 0 : 1);
507 510
508 511 if ((bestArm == arm && pend0 > pend1 + 16) ||
509 512 (bestArm != arm && pend1 > pend0 + 16)) {
510 513 bestArm ^= 1;
511 514 }
512 515
513 516 /* Update the last accessed block on the correct pd */
514 517 lbInfo->last_accessed_block[bestArm] = block + count - 1;
515 518 return (bestArm);
516 519 }
517 520
518 521 U16
519 522 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
520 523 struct IO_REQUEST_INFO *io_info)
521 524 {
522 525 U8 arm, old_arm;
523 526 U16 devHandle;
524 527
525 528 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
526 529
527 530 /* get best new arm */
528 531 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
529 532 io_info->numBlocks);
530 533
531 534 devHandle = lbInfo->raid1DevHandle[arm];
532 535
533 536 lbInfo->scsi_pending_cmds[arm]++;
534 537
535 538 return (devHandle);
536 539 }
|
↓ open down ↓ |
136 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX