[webrev page navigation: Print this page | Split | Close | Expand all | Collapse all]
--- old/./ld_pd_map.c
+++ new/./ld_pd_map.c
1 1 /*
2 2 * **********************************************************************
3 3 *
4 4 * ld_pd_map.c
5 5 *
6 6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 7 * Copyright (c) 2008-2012, LSI Logic Corporation.
8 8 * All rights reserved.
9 9 *
10 10 * Version:
11 11 * Author:
12 12 * Swaminathan K S
13 13 * Arun Chandrashekhar
14 14 * Manju R
15 15 * Rasheed
[... 15 unchanged lines elided — expand in the webrev viewer to see them ...]
16 16 * Shakeel Bukhari
17 17 *
18 18 *
19 19 * This module contains functions for device drivers
20 20 * to get pd-ld mapping information.
21 21 *
22 22 * **********************************************************************
23 23 */
24 24
25 25 #include <sys/scsi/scsi.h>
26 -#include "ld_pd_map.h"
27 26 #include "mr_sas.h"
27 +#include "ld_pd_map.h"
28 28
29 29 /*
30 30 * This function will check if FAST IO is possible on this logical drive
31 - * by checking the EVENT information availabe in the driver
31 + * by checking the EVENT information available in the driver
32 32 */
33 -#define MR_LD_STATE_OPTIMAL 3
34 -#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
33 +#define MR_LD_STATE_OPTIMAL 3
34 +#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
35 35
36 -void
37 -mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo);
36 +static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
37 + PLD_LOAD_BALANCE_INFO);
38 38
39 -U8
40 -MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
41 - MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
42 -
43 39 #define FALSE 0
44 40 #define TRUE 1
45 41
46 42 typedef U64 REGION_KEY;
47 43 typedef U32 REGION_LEN;
48 44 extern int debug_level_g;
49 45
50 46
51 47 MR_LD_RAID
52 48 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
53 49 {
54 50 return (&map->raidMap.ldSpanMap[ld].ldRaid);
55 51 }
56 52
57 -U16 MR_GetLDTgtId
58 -(U32 ld, MR_FW_RAID_MAP_ALL *map)
53 +U16
54 +MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
59 55 {
60 56 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
61 57 }
62 58
63 59
64 -static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
60 +static MR_SPAN_BLOCK_INFO *
61 +MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
65 62 {
66 63 return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
67 64 }
68 65
69 -static U8 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
66 +static U8
67 +MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
70 68 {
71 69 return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
72 70 }
73 71
74 -static U16 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
72 +static U16
73 +MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
75 74 {
76 75 return (map->raidMap.arMapInfo[ar].pd[arm]);
77 76 }
78 77
79 -static U16 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
78 +static U16
79 +MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
80 80 {
81 81 return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
82 82 }
83 83
84 -static U16 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
84 +static U16
85 +MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
85 86 {
86 87 return (map->raidMap.devHndlInfo[pd].curDevHdl);
87 88 }
88 89
89 -U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
90 +U16
91 +MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
90 92 {
91 93 return (map->raidMap.ldTgtIdToLd[ldTgtId]);
92 94 }
93 95
94 -U16 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
96 +U16
97 +MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
95 98 {
96 - MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
97 99 MR_LD_RAID *raid;
98 100 U32 ld;
99 -
101 +
100 102 ld = MR_TargetIdToLdGet(ldTgtId, map);
101 103
102 104 if (ld >= MAX_LOGICAL_DRIVES) {
103 105 return (FALSE);
104 106 }
105 107
106 108 raid = MR_LdRaidGet(ld, map);
107 109
108 110 return (raid->capability.ldPiMode == 0x8);
109 111 }
110 112
111 -static MR_LD_SPAN *MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
113 +static MR_LD_SPAN *
114 +MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
112 115 {
113 116 return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
114 117 }
115 118
116 119 /*
117 120 * This function will validate Map info data provided by FW
118 121 */
119 -U8 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
122 +U8
123 +MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
120 124 {
121 125 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
126 + U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
127 + (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);
122 128
129 + if (pFwRaidMap->totalSize != fwsize) {
123 130
124 - if (pFwRaidMap->totalSize !=
125 - (sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
126 - (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))) {
127 -
128 - con_log(CL_ANN1, (CE_NOTE,\
129 - "map info structure size 0x%x\
130 - is not matching with ld count\n",\
131 - ((sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP)) +\
132 - (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount))));
133 -
131 + con_log(CL_ANN1, (CE_NOTE,
132 + "map info structure size 0x%x is "
133 + "not matching with ld count\n", fwsize));
134 + /* sizeof (foo) returns size_t, which is *LONG*. */
134 135 con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
135 - sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
136 + (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
136 137
137 138 return (0);
138 139 }
139 140
140 141 mr_update_load_balance_params(map, lbInfo);
141 142
142 143 return (1);
143 144 }
144 145
145 146 U32
146 -MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
147 +MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
148 + int *div_error)
147 149 {
148 150 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
149 - MR_QUAD_ELEMENT *quad;
151 + MR_QUAD_ELEMENT *qe;
150 152 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
151 153 U32 span, j;
152 154
153 155 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
154 156 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
155 - quad = &pSpanBlock->block_span_info.quad[j];
156 - if(quad->diff == 0) {
157 - *div_error=1;
157 + qe = &pSpanBlock->block_span_info.quads[j];
158 + if (qe->diff == 0) {
159 + *div_error = 1;
158 160 return (span);
159 161 }
160 - if (quad->logStart <= row &&
161 - row <= quad->logEnd &&
162 - (((row-quad->logStart) % quad->diff)) == 0) {
162 + if (qe->logStart <= row && row <= qe->logEnd &&
163 + (((row - qe->logStart) % qe->diff)) == 0) {
163 164 if (span_blk != NULL) {
164 165 U64 blk;
165 - blk = ((row-quad->logStart) /
166 - (quad->diff));
166 + blk = ((row - qe->logStart) /
167 + (qe->diff));
167 168
168 - blk = (blk + quad->offsetInSpan) << raid->stripeShift;
169 + blk = (blk + qe->offsetInSpan) <<
170 + raid->stripeShift;
169 171 *span_blk = blk;
170 172 }
171 173 return (span);
172 174 }
173 175 }
174 176 }
175 177 return (span);
176 178 }
177 179
178 180
179 181 /*
180 182 * *************************************************************
181 183 *
182 184 * This routine calculates the arm, span and block for
183 185 * the specified stripe and reference in stripe.
184 186 *
185 187 * Inputs :
186 188 *
[... 8 unchanged lines elided — expand in the webrev viewer to see them ...]
187 189 * ld - Logical drive number
188 190 * stripRow - Stripe number
189 191 * stripRef - Reference in stripe
190 192 *
191 193 * Outputs :
192 194 *
193 195 * span - Span number
194 196 * block - Absolute Block number in the physical disk
195 197 */
196 198 U8
197 -MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow, U16 stripRef,
198 -U64 *pdBlock, U16 *pDevHandle, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
199 -MR_FW_RAID_MAP_ALL *map)
199 +MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
200 + U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
201 + MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
200 202 {
201 203 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
202 204 U32 pd, arRef;
203 205 U8 physArm, span;
204 206 U64 row;
205 - int error_code=0;
207 + int error_code = 0;
206 208 U8 retval = TRUE;
207 209 U32 rowMod;
208 210 U32 armQ;
209 211 U32 arm;
210 212
211 - row = (stripRow / raid->rowDataSize);
213 + ASSERT(raid->rowDataSize != 0);
212 214
215 + row = (stripRow / raid->rowDataSize);
216 +
213 217 if (raid->level == 6) {
214 218 U32 logArm = (stripRow % (raid->rowDataSize));
215 219
216 220 if (raid->rowSize == 0) {
217 221 return (FALSE);
218 222 }
219 223 rowMod = (row % (raid->rowSize));
220 224 armQ = raid->rowSize-1-rowMod;
221 - arm = armQ+1+logArm;
225 + arm = armQ + 1 + logArm;
222 226 if (arm >= raid->rowSize)
223 227 arm -= raid->rowSize;
224 228 physArm = (U8)arm;
225 229 } else {
226 230 if (raid->modFactor == 0)
227 - return FALSE;
231 + return (FALSE);
228 232 physArm = MR_LdDataArmGet(ld,
229 233 (stripRow % (raid->modFactor)), map);
230 234 }
231 235 if (raid->spanDepth == 1) {
232 236 span = 0;
233 237 *pdBlock = row << raid->stripeShift;
234 238 } else
235 239 span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
236 -
240 +
237 241 if (error_code == 1)
238 - return FALSE;
242 + return (FALSE);
239 243
240 - // Get the array on which this span is present.
244 + /* Get the array on which this span is present. */
241 245 arRef = MR_LdSpanArrayGet(ld, span, map);
242 - // Get the Pd.
246 + /* Get the Pd. */
243 247 pd = MR_ArPdGet(arRef, physArm, map);
244 - // Get dev handle from Pd.
248 + /* Get dev handle from Pd. */
245 249 if (pd != MR_PD_INVALID) {
246 - *pDevHandle = MR_PdDevHandleGet(pd, map);
250 + *pDevHandle = MR_PdDevHandleGet(pd, map);
251 + } else {
252 + *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
253 + if ((raid->level >= 5) &&
254 + ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
255 + (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
256 + raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
257 + pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
258 + } else if (raid->level == 1) {
259 + /* Get Alternate Pd. */
260 + pd = MR_ArPdGet(arRef, physArm + 1, map);
261 + /* Get dev handle from Pd. */
262 + if (pd != MR_PD_INVALID)
263 + *pDevHandle = MR_PdDevHandleGet(pd, map);
264 + }
247 265 }
248 - else {
249 - *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
250 - if ( (raid->level >= 5) &&
251 - ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
252 - (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
253 - raid->regTypeReqOnRead != REGION_TYPE_UNUSED)) )
254 - pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
255 - else if (raid->level == 1) {
256 - pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
257 - if (pd != MR_PD_INVALID)
258 - *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
259 - }
260 - }
261 266
262 267 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
263 268
264 - pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
269 + pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
270 + physArm;
265 271
266 - return retval;
272 + return (retval);
267 273 }
268 274
269 275
270 276
271 277 /*
272 278 * ***********************************************************************
273 279 *
274 280 * MR_BuildRaidContext function
275 281 *
276 282 * This function will initiate command processing. The start/end row and strip
277 283 * information is calculated then the lock is acquired.
278 284 * This function will return 0 if region lock
279 285 * was acquired OR return num strips ???
280 286 */
281 287
282 288 U8
283 -MR_BuildRaidContext(struct mrsas_instance *instance, struct IO_REQUEST_INFO *io_info,
284 -MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
289 +MR_BuildRaidContext(struct mrsas_instance *instance,
290 + struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
291 + MR_FW_RAID_MAP_ALL *map)
285 292 {
286 293 MR_LD_RAID *raid;
287 294 U32 ld, stripSize, stripe_mask;
288 295 U64 endLba, endStrip, endRow;
289 296 U64 start_row, start_strip;
290 297 REGION_KEY regStart;
291 298 REGION_LEN regSize;
292 299 U8 num_strips, numRows;
293 300 U16 ref_in_start_stripe;
294 301 U16 ref_in_end_stripe;
295 302
296 303 U64 ldStartBlock;
297 304 U32 numBlocks, ldTgtId;
298 305 U8 isRead;
299 - U8 retval = 0;
306 + U8 retval = 0;
300 307
301 308 ldStartBlock = io_info->ldStartBlock;
302 309 numBlocks = io_info->numBlocks;
303 310 ldTgtId = io_info->ldTgtId;
304 311 isRead = io_info->isRead;
305 -
306 - if ( map == NULL ) {
312 +
313 + if (map == NULL) {
307 314 io_info->fpOkForIo = FALSE;
308 315 return (FALSE);
309 316 }
310 317
311 318 ld = MR_TargetIdToLdGet(ldTgtId, map);
312 319
313 320 if (ld >= MAX_LOGICAL_DRIVES) {
314 321 io_info->fpOkForIo = FALSE;
315 322 return (FALSE);
316 323 }
317 324
318 325 raid = MR_LdRaidGet(ld, map);
319 326
320 327 stripSize = 1 << raid->stripeShift;
[... 4 unchanged lines elided — expand in the webrev viewer to see them ...]
321 328 stripe_mask = stripSize-1;
322 329 /*
323 330 * calculate starting row and stripe, and number of strips and rows
324 331 */
325 332 start_strip = ldStartBlock >> raid->stripeShift;
326 333 ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
327 334 endLba = ldStartBlock + numBlocks - 1;
328 335 ref_in_end_stripe = (U16)(endLba & stripe_mask);
329 336 endStrip = endLba >> raid->stripeShift;
330 337 num_strips = (U8)(endStrip - start_strip + 1);
331 - /* Check to make sure is not deviding by zero */
332 - if (raid->rowDataSize == 0)
333 - return FALSE;
338 + /* Check to make sure is not dividing by zero */
339 + if (raid->rowDataSize == 0)
340 + return (FALSE);
334 341 start_row = (start_strip / raid->rowDataSize);
335 342 endRow = (endStrip / raid->rowDataSize);
336 - // get the row count
343 + /* get the row count */
337 344 numRows = (U8)(endRow - start_row + 1);
338 345
339 346 /*
340 347 * calculate region info.
341 348 */
342 349 regStart = start_row << raid->stripeShift;
343 350 regSize = stripSize;
344 351
345 - /* Check if we can send this I/O via FastPath */
346 - if (raid->capability.fpCapable) {
347 - if (isRead)
348 - io_info->fpOkForIo = (raid->capability.fpReadCapable &&
349 - ((num_strips == 1) ||
350 - raid->capability.fpReadAcrossStripe));
351 - else
352 - io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
353 - ((num_strips == 1) ||
354 - raid->capability.fpWriteAcrossStripe));
355 - } else
356 - io_info->fpOkForIo = FALSE;
352 + /* Check if we can send this I/O via FastPath */
353 + if (raid->capability.fpCapable) {
354 + if (isRead) {
355 + io_info->fpOkForIo = (raid->capability.fpReadCapable &&
356 + ((num_strips == 1) ||
357 + raid->capability.fpReadAcrossStripe));
358 + } else {
359 + io_info->fpOkForIo =
360 + (raid->capability.fpWriteCapable &&
361 + ((num_strips == 1) ||
362 + raid->capability.fpWriteAcrossStripe));
363 + }
364 + } else
365 + io_info->fpOkForIo = FALSE;
357 366
358 367
359 368 /*
360 369 * Check for DIF support
361 370 */
362 - if ( !raid->capability.ldPiMode ) {
371 + if (!raid->capability.ldPiMode) {
363 372 io_info->ldPI = FALSE;
364 373 } else {
365 374 io_info->ldPI = TRUE;
366 375 }
367 376
368 377 if (numRows == 1) {
369 378 if (num_strips == 1) {
370 379 regStart += ref_in_start_stripe;
371 380 regSize = numBlocks;
372 381 }
373 382 } else {
374 383 if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
375 384 regStart += ref_in_start_stripe;
376 385 regSize = stripSize - ref_in_start_stripe;
377 386 }
378 387
379 388 if (numRows > 2) {
380 - regSize += (numRows-2) << raid->stripeShift;
389 + regSize += (numRows - 2) << raid->stripeShift;
381 390 }
382 391
383 - if (endStrip == endRow*raid->rowDataSize) {
384 - regSize += ref_in_end_stripe+1;
392 + if (endStrip == endRow * raid->rowDataSize) {
393 + regSize += ref_in_end_stripe + 1;
385 394 } else {
386 395 regSize += stripSize;
387 396 }
388 397 }
389 -
390 - pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
391 398
392 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
393 - pRAID_Context->regLockFlags =
394 - (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
395 - else
396 - pRAID_Context->regLockFlags =
397 - (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
399 + pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
398 400
399 - pRAID_Context->ldTargetId = raid->targetId;
400 - pRAID_Context->regLockRowLBA = regStart;
401 - pRAID_Context->regLockLength = regSize;
402 - pRAID_Context->configSeqNum = raid->seqNum;
401 + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
402 + pRAID_Context->regLockFlags = (isRead) ?
403 + raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
404 + } else {
405 + pRAID_Context->regLockFlags = (isRead) ?
406 + REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
407 + }
403 408
409 + pRAID_Context->ldTargetId = raid->targetId;
410 + pRAID_Context->regLockRowLBA = regStart;
411 + pRAID_Context->regLockLength = regSize;
412 + pRAID_Context->configSeqNum = raid->seqNum;
413 +
404 414 /*
405 415 * Get Phy Params only if FP capable,
406 416 * or else leave it to MR firmware to do the calculation.
407 417 */
408 418 if (io_info->fpOkForIo) {
409 - /* if fast path possible then get the physical parameters */
410 - retval = MR_GetPhyParams(instance, ld, start_strip, ref_in_start_stripe,
411 - &io_info->pdBlock, &io_info->devHandle,
412 - pRAID_Context, map);
419 + /* if fast path possible then get the physical parameters */
420 + retval = MR_GetPhyParams(instance, ld, start_strip,
421 + ref_in_start_stripe, &io_info->pdBlock,
422 + &io_info->devHandle, pRAID_Context, map);
413 423
414 - if (io_info->devHandle == MR_PD_INVALID) // If IO on an invalid Pd, then FP is not possible.
424 + /* If IO on an invalid Pd, then FP is not possible. */
425 + if (io_info->devHandle == MR_PD_INVALID)
415 426 io_info->fpOkForIo = FALSE;
416 427
417 - return retval;
428 + return (retval);
418 429
419 430 } else if (isRead) {
420 - uint stripIdx;
421 - for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
422 - if (!MR_GetPhyParams(instance, ld, start_strip + stripIdx, ref_in_start_stripe,
423 - &io_info->pdBlock, &io_info->devHandle, pRAID_Context, map))
424 - return TRUE;
431 + uint_t stripIdx;
432 +
433 + for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
434 + if (!MR_GetPhyParams(instance, ld,
435 + start_strip + stripIdx, ref_in_start_stripe,
436 + &io_info->pdBlock, &io_info->devHandle,
437 + pRAID_Context, map)) {
438 + return (TRUE);
439 + }
425 440 }
426 441 }
427 442 return (TRUE);
428 443 }
429 444
430 445
431 446 void
432 -mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
447 +mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
448 + PLD_LOAD_BALANCE_INFO lbInfo)
433 449 {
434 450 int ldCount;
435 451 U16 ld;
436 452 MR_LD_RAID *raid;
437 453
438 - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
439 - {
454 + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
440 455 ld = MR_TargetIdToLdGet(ldCount, map);
441 456
442 - if (ld >= MAX_LOGICAL_DRIVES) {
443 - con_log(CL_ANN1, (CE_NOTE,
444 - "mrsas: ld=%d Invalid ld \n", ld));
445 - continue;
446 - }
457 + if (ld >= MAX_LOGICAL_DRIVES) {
458 + con_log(CL_ANN1,
459 + (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
460 + continue;
461 + }
447 462
448 463 raid = MR_LdRaidGet(ld, map);
449 -
464 +
450 465 /* Two drive Optimal RAID 1 */
451 - if ((raid->level == 1) && (raid->rowSize == 2) && (raid->spanDepth == 1)
452 - && raid->ldState == MR_LD_STATE_OPTIMAL) {
466 + if ((raid->level == 1) && (raid->rowSize == 2) &&
467 + (raid->spanDepth == 1) &&
468 + raid->ldState == MR_LD_STATE_OPTIMAL) {
453 469 U32 pd, arRef;
454 470
455 471 lbInfo[ldCount].loadBalanceFlag = 1;
456 - //U8 physArm = 0;
457 -
458 - arRef = MR_LdSpanArrayGet(ld, 0, map); // Get the array on which this span is present.
459 472
460 - pd = MR_ArPdGet(arRef, 0, map); // Get the Pd.
461 - lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
473 + /* Get the array on which this span is present. */
474 + arRef = MR_LdSpanArrayGet(ld, 0, map);
462 475
463 - pd = MR_ArPdGet(arRef, 1, map); // Get the Pd.
464 - lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
465 - con_log(CL_ANN1, (CE_NOTE,\
466 - "mrsas: ld=%d load balancing enabled \n", ldCount));
476 + pd = MR_ArPdGet(arRef, 0, map); /* Get the Pd. */
477 + /* Get dev handle from Pd. */
478 + lbInfo[ldCount].raid1DevHandle[0] =
479 + MR_PdDevHandleGet(pd, map);
480 +
481 + pd = MR_ArPdGet(arRef, 1, map); /* Get the Pd. */
482 + /* Get dev handle from Pd. */
483 + lbInfo[ldCount].raid1DevHandle[1] =
484 + MR_PdDevHandleGet(pd, map);
485 + con_log(CL_ANN1, (CE_NOTE,
486 + "mrsas: ld=%d load balancing enabled \n", ldCount));
467 487 } else {
468 488 lbInfo[ldCount].loadBalanceFlag = 0;
469 489 }
470 490 }
471 491 }
472 492
473 493
474 -U8 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block, U32 count)
494 +U8
495 +megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
496 + U32 count)
475 497 {
476 - U16 pend0, pend1;
477 - U64 diff0, diff1;
478 - U8 bestArm;
498 + U16 pend0, pend1;
499 + U64 diff0, diff1;
500 + U8 bestArm;
479 501
480 - /* get the pending cmds for the data and mirror arms */
481 - pend0 = lbInfo->scsi_pending_cmds[0];
482 - pend1 = lbInfo->scsi_pending_cmds[1];
502 + /* get the pending cmds for the data and mirror arms */
503 + pend0 = lbInfo->scsi_pending_cmds[0];
504 + pend1 = lbInfo->scsi_pending_cmds[1];
483 505
484 - /* Determine the disk whose head is nearer to the req. block */
485 - diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
486 - diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
487 - bestArm = (diff0 <= diff1 ? 0 : 1);
506 + /* Determine the disk whose head is nearer to the req. block */
507 + diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
508 + diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
509 + bestArm = (diff0 <= diff1 ? 0 : 1);
488 510
489 - if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
490 - bestArm ^= 1;
511 + if ((bestArm == arm && pend0 > pend1 + 16) ||
512 + (bestArm != arm && pend1 > pend0 + 16)) {
513 + bestArm ^= 1;
514 + }
491 515
492 - /* Update the last accessed block on the correct pd */
493 - lbInfo->last_accessed_block[bestArm] = block + count - 1;
494 - return bestArm;
516 + /* Update the last accessed block on the correct pd */
517 + lbInfo->last_accessed_block[bestArm] = block + count - 1;
518 + return (bestArm);
495 519 }
496 520
497 -U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
521 +U16
522 +get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
523 + struct IO_REQUEST_INFO *io_info)
498 524 {
499 525 U8 arm, old_arm;
500 526 U16 devHandle;
501 527
502 528 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
503 529
504 530 /* get best new arm */
505 - arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
531 + arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
532 + io_info->numBlocks);
506 533
507 534 devHandle = lbInfo->raid1DevHandle[arm];
508 535
509 536 lbInfo->scsi_pending_cmds[arm]++;
510 537
511 - return devHandle;
538 + return (devHandle);
512 539 }
[end of diff]