1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2009-2012  LSI Corporation.
5  *
6  *  This program is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU General Public License
8  *  as published by the Free Software Foundation; either version 2
9  *  of the License, or (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *  FILE: megaraid_sas_fp.c
21  *
22  *  Authors: LSI Corporation
23  *           Sumant Patro
24  *           Varad Talamacki
25  *           Manoj Jose
26  *
27  *  Send feedback to: <megaraidlinux@lsi.com>
28  *
29  *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
30  *     ATTN: Linuxraid
31  */
32
33 #include <linux/kernel.h>
34 #include <linux/types.h>
35 #include <linux/pci.h>
36 #include <linux/list.h>
37 #include <linux/moduleparam.h>
38 #include <linux/module.h>
39 #include <linux/spinlock.h>
40 #include <linux/interrupt.h>
41 #include <linux/delay.h>
42 #include <linux/uio.h>
43 #include <linux/uaccess.h>
44 #include <linux/fs.h>
45 #include <linux/compat.h>
46 #include <linux/blkdev.h>
47 #include <linux/poll.h>
48
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53
54 #include "megaraid_sas_fusion.h"
55 #include "megaraid_sas.h"
56 #include <asm/div64.h>
57
58 #define LB_PENDING_CMDS_DEFAULT 4
59 static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
60 module_param(lb_pending_cmds, int, S_IRUGO);
61 MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
62         "threshold. Valid Values are 1-128. Default: 4");
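/*
 * Illustrative usage only (not part of the driver): the RAID-1 load
 * balancing threshold above can be set at module load time, e.g.
 *
 *     modprobe megaraid_sas lb_pending_cmds=8
 *
 * Out-of-range values (outside 1-128) are reset to
 * LB_PENDING_CMDS_DEFAULT by mr_update_load_balance_params() below.
 */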
63
64
65 #define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
66 #define MR_LD_STATE_OPTIMAL 3
67 #define FALSE 0
68 #define TRUE 1
69
70 #define SPAN_DEBUG 0
71 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
72 #define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
73 #define SPAN_INVALID  0xff
74
75 /* Prototypes */
76 static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
77         PLD_SPAN_INFO ldSpanInfo);
78 static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
79         u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
80         struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
81 static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
82         u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
83
84 u32 mega_mod64(u64 dividend, u32 divisor)
85 {
86         u64 d;
87         u32 remainder;
88
89         if (!divisor)
90                 printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
91         d = dividend;
92         remainder = do_div(d, divisor);
93         return remainder;
94 }
95
96 /**
97  * @param dividend    : Dividend
98  * @param divisor    : Divisor
99  *
100  * @return quotient
101  **/
102 u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
103 {
104         u32 remainder;
105         u64 d;
106
107         if (!divisor)
108                 printk(KERN_ERR "megasas : DIVISOR is zero in div fn\n");
109
110         d = dividend;
111         remainder = do_div(d, divisor);
112
113         return d;
114 }
115
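/*
 * Worked example for the two helpers above (illustrative numbers only):
 * with dividend = 1000003 and divisor = 512, do_div() leaves the
 * quotient in the 64-bit variable and returns the remainder, so
 *
 *     mega_div64_32(1000003, 512) == 1953
 *     mega_mod64(1000003, 512)    == 67
 *
 * The wrappers exist because a plain 64-bit '/' or '%' is not available
 * on all 32-bit architectures the kernel supports.
 */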
116 struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
117 {
118         return &map->raidMap.ldSpanMap[ld].ldRaid;
119 }
120
121 static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
122                                                    struct MR_DRV_RAID_MAP_ALL
123                                                    *map)
124 {
125         return &map->raidMap.ldSpanMap[ld].spanBlock[0];
126 }
127
128 static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
129 {
130         return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
131 }
132
133 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
134 {
135         return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
136 }
137
138 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
139 {
140         return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
141 }
142
143 u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
144 {
145         return map->raidMap.devHndlInfo[pd].curDevHdl;
146 }
147
148 u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
149 {
150         return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
151 }
152
153 u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
154 {
155         return map->raidMap.ldTgtIdToLd[ldTgtId];
156 }
157
158 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
159                                           struct MR_DRV_RAID_MAP_ALL *map)
160 {
161         return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
162 }
163
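/*
 * Rough sketch of how the accessors above are chained by the FastPath
 * code later in this file (illustration only):
 *
 *     ld     = MR_TargetIdToLdGet(ldTgtId, map);
 *     arRef  = MR_LdSpanArrayGet(ld, span, map);
 *     pd     = MR_ArPdGet(arRef, physArm, map);
 *     devhdl = MR_PdDevHandleGet(pd, map);
 *
 * i.e. target id -> logical drive -> span's array -> physical arm ->
 * physical drive -> device handle used for the FastPath request.
 */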
164 /*
165  * This function populates the driver RAID map from the firmware RAID map
166  */
167 void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
168 {
169         struct fusion_context *fusion = instance->ctrl_context;
170         struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
171         struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
172         int i;
173
174
175         struct MR_DRV_RAID_MAP_ALL *drv_map =
176                         fusion->ld_drv_map[(instance->map_id & 1)];
177         struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
178
179         if (instance->supportmax256vd) {
180                 memcpy(fusion->ld_drv_map[instance->map_id & 1],
181                         fusion->ld_map[instance->map_id & 1],
182                         fusion->current_map_sz);
183                 /* New Raid map will not set totalSize, so keep expected value
184                  * for legacy code in ValidateMapInfo
185                  */
186                 pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
187         } else {
188                 fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
189                         fusion->ld_map[(instance->map_id & 1)];
190                 pFwRaidMap = &fw_map_old->raidMap;
191
192 #if VD_EXT_DEBUG
193                 for (i = 0; i < pFwRaidMap->ldCount; i++) {
194                         dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
195                                 "Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
196                                 instance->unique_id, i,
197                                 fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
198                                 fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
199                                 fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
200                 }
201 #endif
202
203                 memset(drv_map, 0, fusion->drv_map_sz);
204                 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
205                 pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
206                 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
207                 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
208                         pDrvRaidMap->ldTgtIdToLd[i] =
209                                 (u8)pFwRaidMap->ldTgtIdToLd[i];
210                 for (i = 0; i < pDrvRaidMap->ldCount; i++) {
211                         pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
212 #if VD_EXT_DEBUG
213                         dev_dbg(&instance->pdev->dev,
214                                 "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
215                                 "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
216                                 "size 0x%x\n", i, i,
217                                 pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
218                                 pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
219                                 (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
220                         dev_dbg(&instance->pdev->dev,
221                                 "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
222                                 "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
223                                 "size 0x%x\n", i, i,
224                                 pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
225                                 pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
226                                 (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
227                         dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
228                                 "raid map %p LD RAID MAP %p/%p\n", drv_map,
229                                 pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
230                                 &pDrvRaidMap->ldSpanMap[i].ldRaid);
231 #endif
232                 }
233                 memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
234                         sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
235                 memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
236                         sizeof(struct MR_DEV_HANDLE_INFO) *
237                         MAX_RAIDMAP_PHYSICAL_DEVICES);
238         }
239 }
240
241 /*
242  * This function validates the RAID map info provided by the FW
243  */
244 u8 MR_ValidateMapInfo(struct megasas_instance *instance)
245 {
246         struct fusion_context *fusion;
247         struct MR_DRV_RAID_MAP_ALL *drv_map;
248         struct MR_DRV_RAID_MAP *pDrvRaidMap;
249         struct LD_LOAD_BALANCE_INFO *lbInfo;
250         PLD_SPAN_INFO ldSpanInfo;
251         struct MR_LD_RAID         *raid;
252         int ldCount, num_lds;
253         u16 ld;
254         u32 expected_size;
255
256
257         MR_PopulateDrvRaidMap(instance);
258
259         fusion = instance->ctrl_context;
260         drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
261         pDrvRaidMap = &drv_map->raidMap;
262
263         lbInfo = fusion->load_balance_info;
264         ldSpanInfo = fusion->log_to_span;
265
266         if (instance->supportmax256vd)
267                 expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
268         else
269                 expected_size =
270                         (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
271                         (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
272
273         if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
274                 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
275                        (unsigned int) expected_size);
276                 dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
277                         (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
278                         le32_to_cpu(pDrvRaidMap->totalSize));
279                 return 0;
280         }
281
282         if (instance->UnevenSpanSupport)
283                 mr_update_span_set(drv_map, ldSpanInfo);
284
285         mr_update_load_balance_params(drv_map, lbInfo);
286
287         num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
288
289         /* Convert RAID capability values to CPU byte order */
290         for (ldCount = 0; ldCount < num_lds; ldCount++) {
291                 ld = MR_TargetIdToLdGet(ldCount, drv_map);
292                 raid = MR_LdRaidGet(ld, drv_map);
293                 le32_to_cpus((u32 *)&raid->capability);
294         }
295
296         return 1;
297 }
298
299 u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
300                     struct MR_DRV_RAID_MAP_ALL *map)
301 {
302         struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
303         struct MR_QUAD_ELEMENT    *quad;
304         struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
305         u32                span, j;
306
307         for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
308
309                 for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
310                         quad = &pSpanBlock->block_span_info.quad[j];
311
312                         if (le32_to_cpu(quad->diff) == 0)
313                                 return SPAN_INVALID;
314                         if (le64_to_cpu(quad->logStart) <= row && row <=
315                                 le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
316                                 le32_to_cpu(quad->diff))) == 0) {
317                                 if (span_blk != NULL) {
318                                         u64  blk, debugBlk;
319                                         blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
320                                         debugBlk = blk;
321
322                                         blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
323                                         *span_blk = blk;
324                                 }
325                                 return span;
326                         }
327                 }
328         }
329         return SPAN_INVALID;
330 }
331
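/*
 * Worked example for the quad test in MR_GetSpanBlock() (illustrative
 * numbers, not from a real map): with logStart = 0, logEnd = 1023,
 * diff = 2, offsetInSpan = 0 and stripeShift = 7, row = 10 matches the
 * quad because 0 <= 10 <= 1023 and (10 - 0) % 2 == 0, giving
 *
 *     *span_blk = ((10 - 0) / 2 + 0) << 7 = 640
 */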
332 /*
333 ******************************************************************************
334 *
335 * Function to print info about span set created in driver from FW raid map
336 *
337 * Inputs :
338 * map    - LD map
339 * ldSpanInfo - ldSpanInfo per HBA instance
340 */
341 #if SPAN_DEBUG
342 static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
343         PLD_SPAN_INFO ldSpanInfo)
344 {
345
346         u8   span;
347         u32    element;
348         struct MR_LD_RAID *raid;
349         LD_SPAN_SET *span_set;
350         struct MR_QUAD_ELEMENT    *quad;
351         int ldCount;
352         u16 ld;
353
354         for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
355                 ld = MR_TargetIdToLdGet(ldCount, map);
356                         if (ld >= MAX_LOGICAL_DRIVES_EXT)
357                                 continue;
358                 raid = MR_LdRaidGet(ld, map);
359                 dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
360                         ld, raid->spanDepth);
361                 for (span = 0; span < raid->spanDepth; span++)
362                         dev_dbg(&instance->pdev->dev, "Span=%x,"
363                         " number of quads=%x\n", span,
364                         le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
365                         block_span_info.noElements));
366                 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
367                         span_set = &(ldSpanInfo[ld].span_set[element]);
368                         if (span_set->span_row_data_width == 0)
369                                 break;
370
371                         dev_dbg(&instance->pdev->dev, "Span Set %x:"
372                                 "width=%x, diff=%x\n", element,
373                                 (unsigned int)span_set->span_row_data_width,
374                                 (unsigned int)span_set->diff);
375                         dev_dbg(&instance->pdev->dev, "logical LBA"
376                                 "start=0x%08lx, end=0x%08lx\n",
377                                 (long unsigned int)span_set->log_start_lba,
378                                 (long unsigned int)span_set->log_end_lba);
379                         dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
380                                 " end=0x%08lx\n",
381                                 (long unsigned int)span_set->span_row_start,
382                                 (long unsigned int)span_set->span_row_end);
383                         dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
384                                 " end=0x%08lx\n",
385                                 (long unsigned int)span_set->data_row_start,
386                                 (long unsigned int)span_set->data_row_end);
387                         dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
388                                 " end=0x%08lx\n",
389                                 (long unsigned int)span_set->data_strip_start,
390                                 (long unsigned int)span_set->data_strip_end);
391
392                         for (span = 0; span < raid->spanDepth; span++) {
393                                 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
394                                         block_span_info.noElements) >=
395                                         element + 1) {
396                                         quad = &map->raidMap.ldSpanMap[ld].
397                                                 spanBlock[span].block_span_info.
398                                                 quad[element];
399                                 dev_dbg(&instance->pdev->dev, "Span=%x,"
400                                         "Quad=%x, diff=%x\n", span,
401                                         element, le32_to_cpu(quad->diff));
402                                 dev_dbg(&instance->pdev->dev,
403                                         "offset_in_span=0x%08lx\n",
404                                         (long unsigned int)le64_to_cpu(quad->offsetInSpan));
405                                 dev_dbg(&instance->pdev->dev,
406                                         "logical start=0x%08lx, end=0x%08lx\n",
407                                         (long unsigned int)le64_to_cpu(quad->logStart),
408                                         (long unsigned int)le64_to_cpu(quad->logEnd));
409                                 }
410                         }
411                 }
412         }
413         return 0;
414 }
415 #endif
416
417 /*
418 ******************************************************************************
419 *
420 * This routine calculates the Span block for given row using spanset.
421 *
422 * Inputs :
423 *    instance - HBA instance
424 *    ld   - Logical drive number
425 *    row        - Row number
426 *    map    - LD map
427 *
428 * Outputs :
429 *
430 *    span          - Span number
431 *    block         - Absolute Block number in the physical disk
432 *    div_error     - Divide error code.
433 */
434
435 u32 mr_spanset_get_span_block(struct megasas_instance *instance,
436                 u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
437 {
438         struct fusion_context *fusion = instance->ctrl_context;
439         struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
440         LD_SPAN_SET *span_set;
441         struct MR_QUAD_ELEMENT    *quad;
442         u32    span, info;
443         PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
444
445         for (info = 0; info < MAX_QUAD_DEPTH; info++) {
446                 span_set = &(ldSpanInfo[ld].span_set[info]);
447
448                 if (span_set->span_row_data_width == 0)
449                         break;
450
451                 if (row > span_set->data_row_end)
452                         continue;
453
454                 for (span = 0; span < raid->spanDepth; span++)
455                         if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
456                                 block_span_info.noElements) >= info+1) {
457                                 quad = &map->raidMap.ldSpanMap[ld].
458                                         spanBlock[span].
459                                         block_span_info.quad[info];
460                                 if (le32_to_cpu(quad->diff) == 0)
461                                         return SPAN_INVALID;
462                                 if (le64_to_cpu(quad->logStart) <= row  &&
463                                         row <= le64_to_cpu(quad->logEnd)  &&
464                                         (mega_mod64(row - le64_to_cpu(quad->logStart),
465                                                 le32_to_cpu(quad->diff))) == 0) {
466                                         if (span_blk != NULL) {
467                                                 u64  blk;
468                                                 blk = mega_div64_32
469                                                     ((row - le64_to_cpu(quad->logStart)),
470                                                     le32_to_cpu(quad->diff));
471                                                 blk = (blk + le64_to_cpu(quad->offsetInSpan))
472                                                          << raid->stripeShift;
473                                                 *span_blk = blk;
474                                         }
475                                         return span;
476                                 }
477                         }
478         }
479         return SPAN_INVALID;
480 }
481
482 /*
483 ******************************************************************************
484 *
485 * This routine calculates the row for given strip using spanset.
486 *
487 * Inputs :
488 *    instance - HBA instance
489 *    ld   - Logical drive number
490 *    Strip        - Strip
491 *    map    - LD map
492 *
493 * Outputs :
494 *
495 *    row         - row associated with strip
496 */
497
498 static u64  get_row_from_strip(struct megasas_instance *instance,
499         u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
500 {
501         struct fusion_context *fusion = instance->ctrl_context;
502         struct MR_LD_RAID       *raid = MR_LdRaidGet(ld, map);
503         LD_SPAN_SET     *span_set;
504         PLD_SPAN_INFO   ldSpanInfo = fusion->log_to_span;
505         u32             info, strip_offset, span, span_offset;
506         u64             span_set_Strip, span_set_Row, retval;
507
508         for (info = 0; info < MAX_QUAD_DEPTH; info++) {
509                 span_set = &(ldSpanInfo[ld].span_set[info]);
510
511                 if (span_set->span_row_data_width == 0)
512                         break;
513                 if (strip > span_set->data_strip_end)
514                         continue;
515
516                 span_set_Strip = strip - span_set->data_strip_start;
517                 strip_offset = mega_mod64(span_set_Strip,
518                                 span_set->span_row_data_width);
519                 span_set_Row = mega_div64_32(span_set_Strip,
520                                 span_set->span_row_data_width) * span_set->diff;
521                 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
522                         if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
523                                 block_span_info.noElements) >= info+1) {
524                                 if (strip_offset >=
525                                         span_set->strip_offset[span])
526                                         span_offset++;
527                                 else
528                                         break;
529                         }
530 #if SPAN_DEBUG
531                 dev_info(&instance->pdev->dev, "Strip 0x%llx,"
532                         " span_set_Strip 0x%llx, span_set_Row 0x%llx"
533                         " data width 0x%llx span offset 0x%x\n", strip,
534                         (unsigned long long)span_set_Strip,
535                         (unsigned long long)span_set_Row,
536                         (unsigned long long)span_set->span_row_data_width,
537                         span_offset);
538                 dev_info(&instance->pdev->dev, "For strip 0x%llx"
539                         " row is 0x%llx\n", strip,
540                         (unsigned long long) span_set->data_row_start +
541                         (unsigned long long) span_set_Row + (span_offset - 1));
542 #endif
543                 retval = (span_set->data_row_start + span_set_Row +
544                                 (span_offset - 1));
545                 return retval;
546         }
547         return -1LLU;
548 }
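/*
 * Worked example for get_row_from_strip() (illustrative span set, not
 * from a real map): with data_strip_start = 0, data_row_start = 0,
 * diff = 1 and span_row_data_width = 3, strip 7 gives
 *
 *     span_set_Strip = 7, strip_offset = 7 % 3 = 1
 *     span_set_Row   = (7 / 3) * 1 = 2
 *
 * so the strip lands on data row 2 (strips 6, 7 and 8 form that row).
 */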
549
550
551 /*
552 ******************************************************************************
553 *
554 * This routine calculates the Start Strip for given row using spanset.
555 *
556 * Inputs :
557 *    instance - HBA instance
558 *    ld   - Logical drive number
559 *    row        - Row number
560 *    map    - LD map
561 *
562 * Outputs :
563 *
564 *    Strip         - Start strip associated with row
565 */
566
567 static u64 get_strip_from_row(struct megasas_instance *instance,
568                 u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
569 {
570         struct fusion_context *fusion = instance->ctrl_context;
571         struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
572         LD_SPAN_SET *span_set;
573         struct MR_QUAD_ELEMENT    *quad;
574         PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
575         u32    span, info;
576         u64  strip;
577
578         for (info = 0; info < MAX_QUAD_DEPTH; info++) {
579                 span_set = &(ldSpanInfo[ld].span_set[info]);
580
581                 if (span_set->span_row_data_width == 0)
582                         break;
583                 if (row > span_set->data_row_end)
584                         continue;
585
586                 for (span = 0; span < raid->spanDepth; span++)
587                         if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
588                                 block_span_info.noElements) >= info+1) {
589                                 quad = &map->raidMap.ldSpanMap[ld].
590                                         spanBlock[span].block_span_info.quad[info];
591                                 if (le64_to_cpu(quad->logStart) <= row  &&
592                                         row <= le64_to_cpu(quad->logEnd)  &&
593                                         mega_mod64((row - le64_to_cpu(quad->logStart)),
594                                         le32_to_cpu(quad->diff)) == 0) {
595                                         strip = mega_div64_32
596                                                 (((row - span_set->data_row_start)
597                                                         - le64_to_cpu(quad->logStart)),
598                                                         le32_to_cpu(quad->diff));
599                                         strip *= span_set->span_row_data_width;
600                                         strip += span_set->data_strip_start;
601                                         strip += span_set->strip_offset[span];
602                                         return strip;
603                                 }
604                         }
605         }
606         dev_err(&instance->pdev->dev, "get_strip_from_row"
607                 " returns invalid strip for ld=%x, row=%lx\n",
608                 ld, (long unsigned int)row);
609         return -1;
610 }
611
612 /*
613 ******************************************************************************
614 *
615 * This routine calculates the Physical Arm for given strip using spanset.
616 *
617 * Inputs :
618 *    instance - HBA instance
619 *    ld   - Logical drive number
620 *    strip      - Strip
621 *    map    - LD map
622 *
623 * Outputs :
624 *
625 *    Phys Arm         - Phys Arm associated with strip
626 */
627
628 static u32 get_arm_from_strip(struct megasas_instance *instance,
629         u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
630 {
631         struct fusion_context *fusion = instance->ctrl_context;
632         struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
633         LD_SPAN_SET *span_set;
634         PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
635         u32    info, strip_offset, span, span_offset, retval;
636
637         for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
638                 span_set = &(ldSpanInfo[ld].span_set[info]);
639
640                 if (span_set->span_row_data_width == 0)
641                         break;
642                 if (strip > span_set->data_strip_end)
643                         continue;
644
645                 strip_offset = (uint)mega_mod64
646                                 ((strip - span_set->data_strip_start),
647                                 span_set->span_row_data_width);
648
649                 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
650                         if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
651                                 block_span_info.noElements) >= info+1) {
652                                 if (strip_offset >=
653                                         span_set->strip_offset[span])
654                                         span_offset =
655                                                 span_set->strip_offset[span];
656                                 else
657                                         break;
658                         }
659 #if SPAN_DEBUG
660                 dev_info(&instance->pdev->dev, "get_arm_from_strip:"
661                         " for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
662                         (long unsigned int)strip, (strip_offset - span_offset));
663 #endif
664                 retval = (strip_offset - span_offset);
665                 return retval;
666         }
667
668         dev_err(&instance->pdev->dev, "get_arm_from_strip"
669                 " returns invalid arm for ld=%x strip=%lx\n",
670                 ld, (long unsigned int)strip);
671
672         return -1;
673 }
674
675 /* This Function will return Phys arm */
676 u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
677                 struct MR_DRV_RAID_MAP_ALL *map)
678 {
679         struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
680         /* Need to check correct default value */
681         u32    arm = 0;
682
683         switch (raid->level) {
684         case 0:
685         case 5:
686         case 6:
687                 arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
688                 break;
689         case 1:
690                 /* start with logical arm */
691                 arm = get_arm_from_strip(instance, ld, stripe, map);
692                 if (arm != -1U)
693                         arm *= 2;
694                 break;
695         }
696
697         return arm;
698 }
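/*
 * RAID-1 note (illustration only): get_arm_from_strip() returns the
 * logical arm, and each RAID-1 logical arm maps to a data/mirror pair
 * in the array, so logical arm 1 becomes physical arm 2 with its mirror
 * at arm 3.  megasas_get_best_arm_pd() below may later redirect a read
 * to the mirror arm.
 */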
699
700
701 /*
702 ******************************************************************************
703 *
704 * This routine calculates the arm, span and block for the specified stripe and
705 * reference in stripe using spanset
706 *
707 * Inputs :
708 *
709 *    ld   - Logical drive number
710 *    stripRow        - Stripe number
711 *    stripRef    - Reference in stripe
712 *
713 * Outputs :
714 *
715 *    span          - Span number
716 *    block         - Absolute Block number in the physical disk
717 */
718 static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
719                 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
720                 struct RAID_CONTEXT *pRAID_Context,
721                 struct MR_DRV_RAID_MAP_ALL *map)
722 {
723         struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
724         u32     pd, arRef;
725         u8      physArm, span;
726         u64     row;
727         u8      retval = TRUE;
728         u8      do_invader = 0;
729         u64     *pdBlock = &io_info->pdBlock;
730         u16     *pDevHandle = &io_info->devHandle;
731         u32     logArm, rowMod, armQ, arm;
732
733         if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
734                 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
735                 do_invader = 1;
736
737         /*Get row and span from io_info for Uneven Span IO.*/
738         row         = io_info->start_row;
739         span        = io_info->start_span;
740
741
742         if (raid->level == 6) {
743                 logArm = get_arm_from_strip(instance, ld, stripRow, map);
744                 if (logArm == -1U)
745                         return FALSE;
746                 rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
747                 armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
748                 arm = armQ + 1 + logArm;
749                 if (arm >= SPAN_ROW_SIZE(map, ld, span))
750                         arm -= SPAN_ROW_SIZE(map, ld, span);
751                 physArm = (u8)arm;
752         } else
753                 /* Calculate the arm */
754                 physArm = get_arm(instance, ld, span, stripRow, map);
755         if (physArm == 0xFF)
756                 return FALSE;
757
758         arRef       = MR_LdSpanArrayGet(ld, span, map);
759         pd          = MR_ArPdGet(arRef, physArm, map);
760
761         if (pd != MR_PD_INVALID)
762                 *pDevHandle = MR_PdDevHandleGet(pd, map);
763         else {
764                 *pDevHandle = MR_PD_INVALID;
765                 if ((raid->level >= 5) &&
766                         (!do_invader  || (do_invader &&
767                         (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
768                         pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
769                 else if (raid->level == 1) {
770                         pd = MR_ArPdGet(arRef, physArm + 1, map);
771                         if (pd != MR_PD_INVALID)
772                                 *pDevHandle = MR_PdDevHandleGet(pd, map);
773                 }
774         }
775
776         *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
777         pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
778                                         physArm;
779         io_info->span_arm = pRAID_Context->spanArm;
780         return retval;
781 }
782
783 /*
784 ******************************************************************************
785 *
786 * This routine calculates the arm, span and block for the specified stripe and
787 * reference in stripe.
788 *
789 * Inputs :
790 *
791 *    ld   - Logical drive number
792 *    stripRow        - Stripe number
793 *    stripRef    - Reference in stripe
794 *
795 * Outputs :
796 *
797 *    span          - Span number
798 *    block         - Absolute Block number in the physical disk
799 */
800 u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
801                 u16 stripRef, struct IO_REQUEST_INFO *io_info,
802                 struct RAID_CONTEXT *pRAID_Context,
803                 struct MR_DRV_RAID_MAP_ALL *map)
804 {
805         struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
806         u32         pd, arRef;
807         u8          physArm, span;
808         u64         row;
809         u8          retval = TRUE;
810         u8          do_invader = 0;
811         u64         *pdBlock = &io_info->pdBlock;
812         u16         *pDevHandle = &io_info->devHandle;
813
814         if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
815                 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
816                 do_invader = 1;
817
818         row =  mega_div64_32(stripRow, raid->rowDataSize);
819
820         if (raid->level == 6) {
821                 /* logical arm within row */
822                 u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
823                 u32 rowMod, armQ, arm;
824
825                 if (raid->rowSize == 0)
826                         return FALSE;
827                 /* get logical row mod */
828                 rowMod = mega_mod64(row, raid->rowSize);
829                 armQ = raid->rowSize-1-rowMod; /* index of Q drive */
830                 arm = armQ+1+logArm; /* data always logically follows Q */
831                 if (arm >= raid->rowSize) /* handle wrap condition */
832                         arm -= raid->rowSize;
833                 physArm = (u8)arm;
834         } else  {
835                 if (raid->modFactor == 0)
836                         return FALSE;
837                 physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
838                                                           raid->modFactor),
839                                           map);
840         }
841
842         if (raid->spanDepth == 1) {
843                 span = 0;
844                 *pdBlock = row << raid->stripeShift;
845         } else {
846                 span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
847                 if (span == SPAN_INVALID)
848                         return FALSE;
849         }
850
851         /* Get the array on which this span is present */
852         arRef       = MR_LdSpanArrayGet(ld, span, map);
853         pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
854
855         if (pd != MR_PD_INVALID)
856                 /* Get dev handle from Pd. */
857                 *pDevHandle = MR_PdDevHandleGet(pd, map);
858         else {
859                 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
860                 if ((raid->level >= 5) &&
861                         (!do_invader  || (do_invader &&
862                         (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
863                         pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
864                 else if (raid->level == 1) {
865                         /* Get alternate Pd. */
866                         pd = MR_ArPdGet(arRef, physArm + 1, map);
867                         if (pd != MR_PD_INVALID)
868                                 /* Get dev handle from Pd */
869                                 *pDevHandle = MR_PdDevHandleGet(pd, map);
870                 }
871         }
872
873         *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
874         pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
875                 physArm;
876         io_info->span_arm = pRAID_Context->spanArm;
877         return retval;
878 }
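/*
 * RAID-6 rotation example for MR_GetPhyParams() above (illustrative
 * numbers only): with rowSize = 4, rowDataSize = 2 and stripRow = 5,
 *
 *     row    = 5 / 2 = 2        logArm = 5 % 2 = 1
 *     rowMod = 2 % 4 = 2        armQ   = 4 - 1 - 2 = 1
 *     arm    = armQ + 1 + logArm = 3   (< rowSize, so no wrap)
 *
 * giving physArm = 3 for that strip; the data arms logically follow the
 * rotating Q arm, as noted in the code.
 */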
879
880 /*
881 ******************************************************************************
882 *
883 * MR_BuildRaidContext function
884 *
885 * This function will initiate command processing.  The start/end row and strip
886  * information is calculated and the region lock parameters are set up.
887  * It returns TRUE when the RAID context was built successfully and FALSE otherwise.
888 */
889 u8
890 MR_BuildRaidContext(struct megasas_instance *instance,
891                     struct IO_REQUEST_INFO *io_info,
892                     struct RAID_CONTEXT *pRAID_Context,
893                     struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
894 {
895         struct MR_LD_RAID  *raid;
896         u32         ld, stripSize, stripe_mask;
897         u64         endLba, endStrip, endRow, start_row, start_strip;
898         u64         regStart;
899         u32         regSize;
900         u8          num_strips, numRows;
901         u16         ref_in_start_stripe, ref_in_end_stripe;
902         u64         ldStartBlock;
903         u32         numBlocks, ldTgtId;
904         u8          isRead;
905         u8          retval = 0;
906         u8          startlba_span = SPAN_INVALID;
907         u64 *pdBlock = &io_info->pdBlock;
908
909         ldStartBlock = io_info->ldStartBlock;
910         numBlocks = io_info->numBlocks;
911         ldTgtId = io_info->ldTgtId;
912         isRead = io_info->isRead;
913         io_info->IoforUnevenSpan = 0;
914         io_info->start_span     = SPAN_INVALID;
915
916         ld = MR_TargetIdToLdGet(ldTgtId, map);
917         raid = MR_LdRaidGet(ld, map);
918
919         /*
920          * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
921          * return FALSE
922          */
923         if (raid->rowDataSize == 0) {
924                 if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
925                         return FALSE;
926                 else if (instance->UnevenSpanSupport) {
927                         io_info->IoforUnevenSpan = 1;
928                 } else {
929                         dev_info(&instance->pdev->dev,
930                                 "raid->rowDataSize is 0, but has SPAN[0]"
931                                 " rowDataSize = 0x%0x,"
932                                 " but there is _NO_ UnevenSpanSupport\n",
933                                 MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
934                         return FALSE;
935                 }
936         }
937
938         stripSize = 1 << raid->stripeShift;
939         stripe_mask = stripSize-1;
940
941
942         /*
943          * calculate starting row and stripe, and number of strips and rows
944          */
945         start_strip         = ldStartBlock >> raid->stripeShift;
946         ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
947         endLba              = ldStartBlock + numBlocks - 1;
948         ref_in_end_stripe   = (u16)(endLba & stripe_mask);
949         endStrip            = endLba >> raid->stripeShift;
950         num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
951
952         if (io_info->IoforUnevenSpan) {
953                 start_row = get_row_from_strip(instance, ld, start_strip, map);
954                 endRow    = get_row_from_strip(instance, ld, endStrip, map);
955                 if (start_row == -1ULL || endRow == -1ULL) {
956                         dev_info(&instance->pdev->dev, "return from %s %d."
957                                 " Send IO w/o region lock.\n",
958                                 __func__, __LINE__);
959                         return FALSE;
960                 }
961
962                 if (raid->spanDepth == 1) {
963                         startlba_span = 0;
964                         *pdBlock = start_row << raid->stripeShift;
965                 } else
966                         startlba_span = (u8)mr_spanset_get_span_block(instance,
967                                                 ld, start_row, pdBlock, map);
968                 if (startlba_span == SPAN_INVALID) {
969                         dev_info(&instance->pdev->dev, "return from %s %d"
970                                 " for row 0x%llx, start strip %llx"
971                                 " end strip %llx\n", __func__, __LINE__,
972                                 (unsigned long long)start_row,
973                                 (unsigned long long)start_strip,
974                                 (unsigned long long)endStrip);
975                         return FALSE;
976                 }
977                 io_info->start_span     = startlba_span;
978                 io_info->start_row      = start_row;
979 #if SPAN_DEBUG
980                 dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
981                         " for row 0x%llx, start strip 0x%llx end strip 0x%llx"
982                         " span 0x%x\n", __func__, __LINE__,
983                         (unsigned long long)start_row,
984                         (unsigned long long)start_strip,
985                         (unsigned long long)endStrip, startlba_span);
986                 dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
987                         " Start span 0x%x\n", (unsigned long long)start_row,
988                         (unsigned long long)endRow, startlba_span);
989 #endif
990         } else {
991                 start_row = mega_div64_32(start_strip, raid->rowDataSize);
992                 endRow    = mega_div64_32(endStrip, raid->rowDataSize);
993         }
994         numRows = (u8)(endRow - start_row + 1);
995
996         /*
997          * calculate region info.
998          */
999
1000         /* assume region is at the start of the first row */
1001         regStart            = start_row << raid->stripeShift;
1002         /* assume this IO needs the full row - we'll adjust if not true */
1003         regSize             = stripSize;
1004
1005         /* Check if we can send this I/O via FastPath */
1006         if (raid->capability.fpCapable) {
1007                 if (isRead)
1008                         io_info->fpOkForIo = (raid->capability.fpReadCapable &&
1009                                               ((num_strips == 1) ||
1010                                                raid->capability.
1011                                                fpReadAcrossStripe));
1012                 else
1013                         io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
1014                                               ((num_strips == 1) ||
1015                                                raid->capability.
1016                                                fpWriteAcrossStripe));
1017         } else
1018                 io_info->fpOkForIo = FALSE;
1019
1020         if (numRows == 1) {
1021                 /* single-strip IOs can always lock only the data needed */
1022                 if (num_strips == 1) {
1023                         regStart += ref_in_start_stripe;
1024                         regSize = numBlocks;
1025                 }
1026                 /* multi-strip IOs always need the full stripe locked */
1027         } else if (io_info->IoforUnevenSpan == 0) {
1028                 /*
1029                  * For Even span region lock optimization.
1030                  * If the start strip is the last in the start row
1031                  */
1032                 if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
1033                         regStart += ref_in_start_stripe;
1034                         /* initialize count to sectors from startref to end
1035                            of strip */
1036                         regSize = stripSize - ref_in_start_stripe;
1037                 }
1038
1039                 /* add complete rows in the middle of the transfer */
1040                 if (numRows > 2)
1041                         regSize += (numRows-2) << raid->stripeShift;
1042
1043                 /* if IO ends within first strip of last row*/
1044                 if (endStrip == endRow*raid->rowDataSize)
1045                         regSize += ref_in_end_stripe+1;
1046                 else
1047                         regSize += stripSize;
1048         } else {
1049                 /*
1050                  * For Uneven span region lock optimization.
1051                  * If the start strip is the last in the start row
1052                  */
1053                 if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
1054                                 SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
1055                         regStart += ref_in_start_stripe;
1056                         /* initialize count to sectors from
1057                          * startRef to end of strip
1058                          */
1059                         regSize = stripSize - ref_in_start_stripe;
1060                 }
1061                 /* Add complete rows in the middle of the transfer*/
1062
1063                 if (numRows > 2)
1064                         /* Add complete rows in the middle of the transfer*/
1065                         regSize += (numRows-2) << raid->stripeShift;
1066
1067                 /* if IO ends within first strip of last row */
1068                 if (endStrip == get_strip_from_row(instance, ld, endRow, map))
1069                         regSize += ref_in_end_stripe + 1;
1070                 else
1071                         regSize += stripSize;
1072         }
1073
1074         pRAID_Context->timeoutValue =
1075                 cpu_to_le16(raid->fpIoTimeoutForLd ?
1076                             raid->fpIoTimeoutForLd :
1077                             map->raidMap.fpPdIoTimeoutSec);
1078         if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
1079                 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
1080                 pRAID_Context->regLockFlags = (isRead) ?
1081                         raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
1082         else
1083                 pRAID_Context->regLockFlags = (isRead) ?
1084                         REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
1085         pRAID_Context->VirtualDiskTgtId = raid->targetId;
1086         pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
1087         pRAID_Context->regLockLength    = cpu_to_le32(regSize);
1088         pRAID_Context->configSeqNum     = raid->seqNum;
1089         /* save pointer to raid->LUN array */
1090         *raidLUN = raid->LUN;
1091
1092
1093         /*Get Phy Params only if FP capable, or else leave it to MR firmware
1094           to do the calculation.*/
1095         if (io_info->fpOkForIo) {
1096                 retval = io_info->IoforUnevenSpan ?
1097                                 mr_spanset_get_phy_params(instance, ld,
1098                                         start_strip, ref_in_start_stripe,
1099                                         io_info, pRAID_Context, map) :
1100                                 MR_GetPhyParams(instance, ld, start_strip,
1101                                         ref_in_start_stripe, io_info,
1102                                         pRAID_Context, map);
1103                 /* If IO on an invalid Pd, then FP is not possible.*/
1104                 if (io_info->devHandle == MR_PD_INVALID)
1105                         io_info->fpOkForIo = FALSE;
1106                 return retval;
1107         } else if (isRead) {
1108                 uint stripIdx;
1109                 for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
1110                         retval = io_info->IoforUnevenSpan ?
1111                                 mr_spanset_get_phy_params(instance, ld,
1112                                     start_strip + stripIdx,
1113                                     ref_in_start_stripe, io_info,
1114                                     pRAID_Context, map) :
1115                                 MR_GetPhyParams(instance, ld,
1116                                     start_strip + stripIdx, ref_in_start_stripe,
1117                                     io_info, pRAID_Context, map);
1118                         if (!retval)
1119                                 return TRUE;
1120                 }
1121         }
1122
1123 #if SPAN_DEBUG
1124         /* Just for testing what arm we get for strip.*/
1125         if (io_info->IoforUnevenSpan)
1126                 get_arm_from_strip(instance, ld, start_strip, map);
1127 #endif
1128         return TRUE;
1129 }
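/*
 * Region lock example for MR_BuildRaidContext() (illustrative numbers
 * only): with stripeShift = 7 (128-block strips), rowDataSize = 2,
 * ldStartBlock = 300 and numBlocks = 8,
 *
 *     start_strip = 300 >> 7 = 2   ref_in_start_stripe = 300 & 127 = 44
 *     endStrip    = 307 >> 7 = 2   num_strips = 1, numRows = 1
 *     start_row   = 2 / 2 = 1
 *
 * so the single-strip case applies and the region lock covers only
 * regStart = (1 << 7) + 44 = 172 for regSize = 8 blocks.
 */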
1130
1131 /*
1132 ******************************************************************************
1133 *
1134 * This routine prepares span set info from a valid RAID map and stores it
1135 * into the local per-instance copy of ldSpanInfo.
1136 *
1137 * Inputs :
1138 * map    - LD map
1139 * ldSpanInfo - ldSpanInfo per HBA instance
1140 *
1141 */
1142 void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
1143         PLD_SPAN_INFO ldSpanInfo)
1144 {
1145         u8   span, count;
1146         u32  element, span_row_width;
1147         u64  span_row;
1148         struct MR_LD_RAID *raid;
1149         LD_SPAN_SET *span_set, *span_set_prev;
1150         struct MR_QUAD_ELEMENT    *quad;
1151         int ldCount;
1152         u16 ld;
1153
1154
1155         for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1156                 ld = MR_TargetIdToLdGet(ldCount, map);
1157                 if (ld >= MAX_LOGICAL_DRIVES_EXT)
1158                         continue;
1159                 raid = MR_LdRaidGet(ld, map);
1160                 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
1161                         for (span = 0; span < raid->spanDepth; span++) {
1162                                 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
1163                                         block_span_info.noElements) <
1164                                         element + 1)
1165                                         continue;
1166                                 span_set = &(ldSpanInfo[ld].span_set[element]);
1167                                 quad = &map->raidMap.ldSpanMap[ld].
1168                                         spanBlock[span].block_span_info.
1169                                         quad[element];
1170
1171                                 span_set->diff = le32_to_cpu(quad->diff);
1172
1173                                 for (count = 0, span_row_width = 0;
1174                                         count < raid->spanDepth; count++) {
1175                                         if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
1176                                                 spanBlock[count].
1177                                                 block_span_info.
1178                                                 noElements) >= element + 1) {
1179                                                 span_set->strip_offset[count] =
1180                                                         span_row_width;
1181                                                 span_row_width +=
1182                                                         MR_LdSpanPtrGet
1183                                                         (ld, count, map)->spanRowDataSize;
1184                                                 printk(KERN_INFO "megasas:"
1185                                                         " span %x rowDataSize %x\n",
1186                                                         count, MR_LdSpanPtrGet
1187                                                         (ld, count, map)->spanRowDataSize);
1188                                         }
1189                                 }
1190
1191                                 span_set->span_row_data_width = span_row_width;
1192                                 span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
1193                                         le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
1194                                         le32_to_cpu(quad->diff));
1195
1196                                 if (element == 0) {
1197                                         span_set->log_start_lba = 0;
1198                                         span_set->log_end_lba =
1199                                                 ((span_row << raid->stripeShift)
1200                                                 * span_row_width) - 1;
1201
1202                                         span_set->span_row_start = 0;
1203                                         span_set->span_row_end = span_row - 1;
1204
1205                                         span_set->data_strip_start = 0;
1206                                         span_set->data_strip_end =
1207                                                 (span_row * span_row_width) - 1;
1208
1209                                         span_set->data_row_start = 0;
1210                                         span_set->data_row_end =
1211                                                 (span_row * le32_to_cpu(quad->diff)) - 1;
1212                                 } else {
1213                                         span_set_prev = &(ldSpanInfo[ld].
1214                                                         span_set[element - 1]);
1215                                         span_set->log_start_lba =
1216                                                 span_set_prev->log_end_lba + 1;
1217                                         span_set->log_end_lba =
1218                                                 span_set->log_start_lba +
1219                                                 ((span_row << raid->stripeShift)
1220                                                 * span_row_width) - 1;
1221
1222                                         span_set->span_row_start =
1223                                                 span_set_prev->span_row_end + 1;
1224                                         span_set->span_row_end =
1225                                         span_set->span_row_start + span_row - 1;
1226
1227                                         span_set->data_strip_start =
1228                                         span_set_prev->data_strip_end + 1;
1229                                         span_set->data_strip_end =
1230                                                 span_set->data_strip_start +
1231                                                 (span_row * span_row_width) - 1;
1232
1233                                         span_set->data_row_start =
1234                                                 span_set_prev->data_row_end + 1;
1235                                         span_set->data_row_end =
1236                                                 span_set->data_row_start +
1237                                                 (span_row * le32_to_cpu(quad->diff)) - 1;
1238                                 }
1239                                 break;
1240                 }
1241                 if (span == raid->spanDepth)
1242                         break;
1243             }
1244         }
1245 #if SPAN_DEBUG
1246         getSpanInfo(map, ldSpanInfo);
1247 #endif
1248
1249 }
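/*
 * Span set example for mr_update_span_set() (illustrative numbers, not
 * from a real map): for an uneven-span LD with two spans whose
 * spanRowDataSize is 3 and 2, quad element 0 ends up with
 *
 *     strip_offset[0] = 0, strip_offset[1] = 3, span_row_data_width = 5
 *
 * and, with quad->diff = 1, logStart = 0 and logEnd = 99,
 *
 *     span_row = (99 - 0 + 1) / 1 = 100
 *     data_strip_end = 100 * 5 - 1 = 499, data_row_end = 100 * 1 - 1 = 99
 */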
1250
1251 void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
1252         struct LD_LOAD_BALANCE_INFO *lbInfo)
1253 {
1254         int ldCount;
1255         u16 ld;
1256         struct MR_LD_RAID *raid;
1257
1258         if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
1259                 lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
1260
1261         for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1262                 ld = MR_TargetIdToLdGet(ldCount, drv_map);
1263                 if (ld >= MAX_LOGICAL_DRIVES_EXT) {
1264                         lbInfo[ldCount].loadBalanceFlag = 0;
1265                         continue;
1266                 }
1267
1268                 raid = MR_LdRaidGet(ld, drv_map);
1269                 if ((raid->level != 1) ||
1270                         (raid->ldState != MR_LD_STATE_OPTIMAL)) {
1271                         lbInfo[ldCount].loadBalanceFlag = 0;
1272                         continue;
1273                 }
1274                 lbInfo[ldCount].loadBalanceFlag = 1;
1275         }
1276 }
1277
1278 u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1279         struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
1280 {
1281         struct fusion_context *fusion;
1282         struct MR_LD_RAID  *raid;
1283         struct MR_DRV_RAID_MAP_ALL *drv_map;
1284         u16     pend0, pend1, ld;
1285         u64     diff0, diff1;
1286         u8      bestArm, pd0, pd1, span, arm;
1287         u32     arRef, span_row_size;
1288
1289         u64 block = io_info->ldStartBlock;
1290         u32 count = io_info->numBlocks;
1291
1292         span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
1293                         >> RAID_CTX_SPANARM_SPAN_SHIFT);
1294         arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
1295
1296
1297         fusion = instance->ctrl_context;
1298         drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1299         ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
1300         raid = MR_LdRaidGet(ld, drv_map);
1301         span_row_size = instance->UnevenSpanSupport ?
1302                         SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
1303
1304         arRef = MR_LdSpanArrayGet(ld, span, drv_map);
1305         pd0 = MR_ArPdGet(arRef, arm, drv_map);
1306         pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
1307                 (arm + 1 - span_row_size) : arm + 1, drv_map);
1308
1309         /* get the pending cmds for the data and mirror arms */
1310         pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
1311         pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
1312
1313         /* Determine the disk whose head is nearer to the req. block */
1314         diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
1315         diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
1316         bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
1317
1318         if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
1319                         (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
1320                 bestArm ^= 1;
1321
1322         /* Update the last accessed block on the correct pd */
1323         io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
1324         lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
1325         io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
1326 #if SPAN_DEBUG
1327         if (arm != bestArm)
1328                 dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
1329                         "occur - span 0x%x arm 0x%x bestArm 0x%x "
1330                         "io_info->span_arm 0x%x\n",
1331                         span, arm, bestArm, io_info->span_arm);
1332 #endif
1333         return io_info->pd_after_lb;
1334 }
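/*
 * Load balance example for megasas_get_best_arm_pd() (illustrative
 * numbers only): with lb_pending_cmds = 4, block = 1000,
 * last_accessed_block[pd0] = 990, last_accessed_block[pd1] = 5000,
 * pend0 = 9 and pend1 = 2,
 *
 *     diff0 = 10, diff1 = 4000  ->  bestArm starts as the original arm (pd0)
 *     pend0 (9) > pend1 + lb_pending_cmds (6)  ->  bestArm is flipped
 *
 * so the IO is sent to the alternate arm (pd1) even though its head is
 * farther from the requested block.
 */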
1335
1336 u16 get_updated_dev_handle(struct megasas_instance *instance,
1337         struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
1338 {
1339         u8 arm_pd;
1340         u16 devHandle;
1341         struct fusion_context *fusion;
1342         struct MR_DRV_RAID_MAP_ALL *drv_map;
1343
1344         fusion = instance->ctrl_context;
1345         drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1346
1347         /* get best new arm (PD ID) */
1348         arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info);
1349         devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
1350         atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
1351         return devHandle;
1352 }