1 /*
2 * **********************************************************************
3 *
4 * ld_pd_map.c
5 *
6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 * Copyright (c) 2008-2012, LSI Logic Corporation.
8 * All rights reserved.
9 *
10 * Version:
11 * Author:
12 * Swaminathan K S
13 * Arun Chandrashekhar
14 * Manju R
15 * Rasheed
16 * Shakeel Bukhari
17 *
18 *
19 * This module contains functions for device drivers
20 * to get pd-ld mapping information.
21 *
22 * **********************************************************************
23 */
24
25 #include <sys/scsi/scsi.h>
26 #include "ld_pd_map.h"
27 #include "mr_sas.h"
28
29 /*
30 * This function will check if FAST IO is possible on this logical drive
31 * by checking the EVENT information availabe in the driver
32 */
/* Firmware LD state value for a fully optimal (non-degraded) volume */
#define MR_LD_STATE_OPTIMAL 3

/*
 * Absolute difference of two unsigned values.
 * NOTE(review): each argument is evaluated twice — do not pass
 * expressions with side effects.
 */
#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )

/* Forward declarations; both functions are defined later in this file. */
void
mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo);

U8
MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);

#define FALSE 0
#define TRUE 1

typedef U64 REGION_KEY;		/* start block of a region lock */
typedef U32 REGION_LEN;		/* length (in blocks) of a region lock */
extern int debug_level_g;
49
50
51 MR_LD_RAID
52 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
53 {
54 return (&map->raidMap.ldSpanMap[ld].ldRaid);
55 }
56
57 U16 MR_GetLDTgtId
58 (U32 ld, MR_FW_RAID_MAP_ALL *map)
59 {
60 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
61 }
62
63
64 static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
65 {
66 return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
67 }
68
69 static U8 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
70 {
71 return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
72 }
73
74 static U16 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
75 {
76 return (map->raidMap.arMapInfo[ar].pd[arm]);
77 }
78
79 static U16 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
80 {
81 return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
82 }
83
84 static U16 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
85 {
86 return (map->raidMap.devHndlInfo[pd].curDevHdl);
87 }
88
89 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
90 {
91 return (map->raidMap.ldTgtIdToLd[ldTgtId]);
92 }
93
94 U16 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
95 {
96 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
97 MR_LD_RAID *raid;
98 U32 ld;
99
100 ld = MR_TargetIdToLdGet(ldTgtId, map);
101
102 if (ld >= MAX_LOGICAL_DRIVES) {
103 return (FALSE);
104 }
105
106 raid = MR_LdRaidGet(ld, map);
107
108 return (raid->capability.ldPiMode == 0x8);
109 }
110
111 static MR_LD_SPAN *MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
112 {
113 return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
114 }
115
116 /*
117 * This function will validate Map info data provided by FW
118 */
119 U8 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
120 {
121 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
122
123
124 if (pFwRaidMap->totalSize !=
125 (sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
126 (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))) {
127
128 con_log(CL_ANN1, (CE_NOTE,\
129 "map info structure size 0x%x\
130 is not matching with ld count\n",\
131 (int)((sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP)) +\
132 (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))));
133
134 con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
135 (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
136
137 return (0);
138 }
139
140 mr_update_load_balance_params(map, lbInfo);
141
142 return (1);
143 }
144
145 U32
146 MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
147 {
148 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
149 MR_QUAD_ELEMENT *quad;
150 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
151 U32 span, j;
152
153 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
154 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
155 quad = &pSpanBlock->block_span_info.quad[j];
156 if(quad->diff == 0) {
157 *div_error=1;
158 return (span);
159 }
160 if (quad->logStart <= row &&
161 row <= quad->logEnd &&
162 (((row-quad->logStart) % quad->diff)) == 0) {
163 if (span_blk != NULL) {
164 U64 blk;
165 blk = ((row-quad->logStart) /
166 (quad->diff));
167
168 blk = (blk + quad->offsetInSpan) << raid->stripeShift;
169 *span_blk = blk;
170 }
171 return (span);
172 }
173 }
174 }
175 return (span);
176 }
177
178
179 /*
180 * *************************************************************
181 *
182 * This routine calculates the arm, span and block for
183 * the specified stripe and reference in stripe.
184 *
185 * Inputs :
186 *
187 * ld - Logical drive number
188 * stripRow - Stripe number
189 * stripRef - Reference in stripe
190 *
191 * Outputs :
192 *
193 * span - Span number
194 * block - Absolute Block number in the physical disk
195 */
/*
 * MR_GetPhyParams
 *
 * Resolve the physical placement of one strip of a logical drive:
 * compute the physical arm, span, absolute physical block, and device
 * handle for the given strip number (stripRow) and reference within the
 * strip (stripRef).  Also records the span/arm pair and, for some error
 * paths, the region-lock type into pRAID_Context.
 *
 * Returns TRUE on success, FALSE when a division by zero would occur
 * (rowSize/modFactor of zero, or a zero quad diff found by
 * MR_GetSpanBlock()).
 *
 * NOTE(review): stripRow / raid->rowDataSize below is not guarded
 * against rowDataSize == 0; callers appear to check this first
 * (MR_BuildRaidContext does) — confirm all call sites.
 */
U8
MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow, U16 stripRef,
    U64 *pdBlock, U16 *pDevHandle, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	U32 pd, arRef;
	U8 physArm, span;
	U64 row;
	int error_code=0;
	U8 retval = TRUE;
	U32 rowMod;
	U32 armQ;
	U32 arm;

	/* row on which this strip resides */
	row = (stripRow / raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within the row */
		U32 logArm = (stripRow % (raid->rowDataSize));

		if (raid->rowSize == 0) {
			return (FALSE);
		}
		/*
		 * RAID-6: rotate the arm by the row number so the Q parity
		 * arm moves each row; wrap around the row width.
		 */
		rowMod = (row % (raid->rowSize));
		armQ = raid->rowSize-1-rowMod;
		arm = armQ+1+logArm;
		if (arm >= raid->rowSize)
			arm -= raid->rowSize;
		physArm = (U8)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		/* non-RAID-6: arm comes straight from the data-arm table */
		physArm = MR_LdDataArmGet(ld,
		    (stripRow % (raid->modFactor)), map);
	}
	if (raid->spanDepth == 1) {
		/* single span: block offset is just the row scaled by stripe */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else
		span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);

	/* MR_GetSpanBlock() found a zero quad diff — abort */
	if (error_code == 1)
		return FALSE;

	/* Get the array on which this span is present. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	/* Get the physical drive backing this arm of the array. */
	pd = MR_ArPdGet(arRef, physArm, map);
	/* Get dev handle from the physical drive. */
	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	}
	else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid */
		/*
		 * RAID 5/6 with a missing drive: force an exclusive region
		 * lock, except on INVADER controllers whose read region
		 * type is REGION_TYPE_UNUSED.
		 */
		if ( (raid->level >= 5) &&
		    ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
		    (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)) )
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* RAID 1: try the mirror (alternate) arm instead */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* absolute physical block = span start + strip offset */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;

	/* pack span and arm for the RAID context */
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;

	return retval;
}
268
269
270
271 /*
272 * ***********************************************************************
273 *
274 * MR_BuildRaidContext function
275 *
276 * This function will initiate command processing. The start/end row and strip
277 * information is calculated then the lock is acquired.
278 * This function will return 0 if region lock
279 * was acquired OR return num strips ???
280 */
281
/*
 * MR_BuildRaidContext
 *
 * Build the RAID context for an I/O request: compute the start/end strip
 * and row, derive the region-lock start and length, decide whether the
 * request is eligible for fast-path I/O (io_info->fpOkForIo), and — when
 * it is — resolve the physical parameters via MR_GetPhyParams().
 *
 * Returns FALSE for a NULL map or out-of-range target id; otherwise the
 * MR_GetPhyParams() result for fast-path I/O, or TRUE.
 */
U8
MR_BuildRaidContext(struct mrsas_instance *instance, struct IO_REQUEST_INFO *io_info,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid;
	U32 ld, stripSize, stripe_mask;
	U64 endLba, endStrip, endRow;
	U64 start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	U8 num_strips, numRows;
	U16 ref_in_start_stripe;
	U16 ref_in_end_stripe;

	U64 ldStartBlock;
	U32 numBlocks, ldTgtId;
	U8 isRead;
	U8 retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* no RAID map yet — firmware must handle the I/O */
	if ( map == NULL ) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure we are not dividing by zero */
	if (raid->rowDataSize == 0)
		return FALSE;
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info: the lock initially covers one stripe
	 * starting at the first row; widened below as needed.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support (any non-zero PI mode)
	 */
	if ( !raid->capability.ldPiMode ) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		/* single-strip I/O locks exactly the blocks accessed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/* I/O starts in the last strip of the first row */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		/* full middle rows contribute a whole stripe each */
		if (numRows > 2) {
			regSize += (numRows-2) << raid->stripeShift;
		}

		/* last row: partial if the I/O ends in its first strip */
		if (endStrip == endRow*raid->rowDataSize) {
			regSize += ref_in_end_stripe+1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
		pRAID_Context->regLockFlags =
		    (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags =
		    (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip, ref_in_start_stripe,
		    &io_info->pdBlock, &io_info->devHandle,
		    pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return retval;

	} else if (isRead) {
		/*
		 * Non-FP read: still resolve phy params for each strip so
		 * load balancing can pick an arm; bail out on first failure.
		 */
		uint stripIdx;
		for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld, start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle, pRAID_Context, map))
				return TRUE;
		}
	}
	return (TRUE);
}
429
430
431 void
432 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
433 {
434 int ldCount;
435 U16 ld;
436 MR_LD_RAID *raid;
437
438 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
439 {
440 ld = MR_TargetIdToLdGet(ldCount, map);
441
442 if (ld >= MAX_LOGICAL_DRIVES) {
443 con_log(CL_ANN1, (CE_NOTE,
444 "mrsas: ld=%d Invalid ld \n", ld));
445 continue;
446 }
447
448 raid = MR_LdRaidGet(ld, map);
449
450 /* Two drive Optimal RAID 1 */
451 if ((raid->level == 1) && (raid->rowSize == 2) && (raid->spanDepth == 1)
452 && raid->ldState == MR_LD_STATE_OPTIMAL) {
453 U32 pd, arRef;
454
455 lbInfo[ldCount].loadBalanceFlag = 1;
456 //U8 physArm = 0;
457
458 arRef = MR_LdSpanArrayGet(ld, 0, map); // Get the array on which this span is present.
459
460 pd = MR_ArPdGet(arRef, 0, map); // Get the Pd.
461 lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
462
463 pd = MR_ArPdGet(arRef, 1, map); // Get the Pd.
464 lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
465 con_log(CL_ANN1, (CE_NOTE,\
466 "mrsas: ld=%d load balancing enabled \n", ldCount));
467 } else {
468 lbInfo[ldCount].loadBalanceFlag = 0;
469 }
470 }
471 }
472
473
474 U8 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block, U32 count)
475 {
476 U16 pend0, pend1;
477 U64 diff0, diff1;
478 U8 bestArm;
479
480 /* get the pending cmds for the data and mirror arms */
481 pend0 = lbInfo->scsi_pending_cmds[0];
482 pend1 = lbInfo->scsi_pending_cmds[1];
483
484 /* Determine the disk whose head is nearer to the req. block */
485 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
486 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
487 bestArm = (diff0 <= diff1 ? 0 : 1);
488
489 if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
490 bestArm ^= 1;
491
492 /* Update the last accessed block on the correct pd */
493 lbInfo->last_accessed_block[bestArm] = block + count - 1;
494 return bestArm;
495 }
496
497 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
498 {
499 U8 arm, old_arm;
500 U16 devHandle;
501
502 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
503
504 /* get best new arm */
505 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
506
507 devHandle = lbInfo->raid1DevHandle[arm];
508
509 lbInfo->scsi_pending_cmds[arm]++;
510
511 return devHandle;
512 }