Index: sys/dev/mrsas/mrsas.h =================================================================== --- sys/dev/mrsas/mrsas.h +++ sys/dev/mrsas/mrsas.h @@ -165,14 +165,22 @@ device_printf(sc->mrsas_dev, msg, ##args); \ } while (0) +#define le32_to_cpus(x) do { *((u_int32_t *)(x)) = le32toh((*(u_int32_t *)x)); } while (0) +#define le16_to_cpus(x) do { *((u_int16_t *)(x)) = le16toh((*(u_int16_t *)x)); } while (0) + /**************************************************************************** * Raid Context structure which describes MegaRAID specific IO Paramenters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames ****************************************************************************/ typedef struct _RAID_CONTEXT { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int8_t Type:4; u_int8_t nseg:4; +#else + u_int8_t nseg:4; + u_int8_t Type:4; +#endif u_int8_t resvd0; u_int16_t timeoutValue; u_int8_t regLockFlags; @@ -197,12 +205,19 @@ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames */ typedef struct _RAID_CONTEXT_G35 { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t Type:4; u_int16_t nseg:4; u_int16_t resvd0:8; +#else + u_int16_t resvd0:8; + u_int16_t nseg:4; + u_int16_t Type:4; +#endif u_int16_t timeoutValue; union { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t reserved:1; u_int16_t sld:1; u_int16_t c2f:1; @@ -213,6 +228,18 @@ u_int16_t log:1; u_int16_t cpuSel:4; u_int16_t setDivert:4; +#else + u_int16_t setDivert:4; + u_int16_t cpuSel:4; + u_int16_t log:1; + u_int16_t rw:1; + u_int16_t sbs:1; + u_int16_t sqn:1; + u_int16_t fwn:1; + u_int16_t c2f:1; + u_int16_t sld:1; + u_int16_t reserved:1; +#endif } bits; u_int16_t s; } routingFlags; @@ -228,9 +255,15 @@ u_int8_t RAIDFlags; u_int8_t spanArm; u_int16_t configSeqNum; +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t numSGE:12; u_int16_t reserved:3; u_int16_t streamDetected:1; +#else + u_int16_t streamDetected:1; + u_int16_t reserved:3; + u_int16_t numSGE:12; +#endif u_int8_t resvd2[2]; } RAID_CONTEXT_G35; @@ -433,9 +466,15 @@ MR_TM_REQUEST TmRequest; union { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t isTMForLD:1; u_int32_t isTMForPD:1; u_int32_t reserved1:30; +#else + u_int32_t reserved1:30; + u_int32_t isTMForPD:1; + u_int32_t isTMForLD:1; +#endif u_int32_t reserved2; } tmReqFlags; MR_TM_REPLY TMReply; @@ -808,6 +847,7 @@ typedef struct _MR_LD_RAID { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t fpCapable:1; u_int32_t raCapable:1; u_int32_t reserved5:2; @@ -822,6 +862,22 @@ u_int32_t tmCapable:1; u_int32_t fpCacheBypassCapable:1; u_int32_t reserved4:5; +#else + u_int32_t reserved4:5; + u_int32_t fpCacheBypassCapable:1; + u_int32_t tmCapable:1; + u_int32_t fpNonRWCapable:1; + u_int32_t fpReadAcrossStripe:1; + u_int32_t fpWriteAcrossStripe:1; + u_int32_t fpReadCapable:1; + u_int32_t fpWriteCapable:1; + u_int32_t encryptionType:8; + u_int32_t pdPiMode:4; + u_int32_t ldPiMode:4; + u_int32_t reserved5:2; + u_int32_t raCapable:1; + u_int32_t fpCapable:1; +#endif } capability; u_int32_t reserved6; u_int64_t size; @@ -844,9 +900,15 @@ u_int16_t seqNum; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN + u_int32_t reserved:30; + u_int32_t regTypeReqOnReadLsValid:1; + u_int32_t ldSyncRequired:1; +#else u_int32_t ldSyncRequired:1; u_int32_t regTypeReqOnReadLsValid:1; u_int32_t reserved:30; +#endif } flags; u_int8_t LUN[8]; @@ -854,9 +916,15 @@ u_int8_t reserved2[3]; u_int32_t logicalBlockLength; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN + u_int32_t reserved1:24; + u_int32_t 
LdLogicalBlockExp:4; + u_int32_t LdPiExp:4; +#else u_int32_t LdPiExp:4; u_int32_t LdLogicalBlockExp:4; u_int32_t reserved1:24; +#endif } exponent; u_int8_t reserved3[0x80 - 0x38]; } MR_LD_RAID; @@ -1039,8 +1107,13 @@ u_int16_t seqNum; u_int16_t devHandle; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int8_t tmCapable:1; u_int8_t reserved:7; +#else + u_int8_t reserved:7; + u_int8_t tmCapable:1; +#endif } capability; u_int8_t reserved; u_int16_t pdTargetId; @@ -1868,6 +1941,7 @@ * structure. */ struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t copyBackDisabled:1; u_int32_t SMARTerEnabled:1; u_int32_t prCorrectUnconfiguredAreas:1; @@ -1899,6 +1973,39 @@ u_int32_t enableSwZone:1; u_int32_t limitMaxRateSATA3G:1; u_int32_t reserved:2; +#else + u_int32_t reserved:2; + u_int32_t limitMaxRateSATA3G:1; + u_int32_t enableSwZone:1; + u_int32_t ignore64ldRestriction:1; + u_int32_t disableT10RebuildAssist:1; + u_int32_t disableImmediateIO:1; + u_int32_t enableAutoLockRecovery:1; + u_int32_t enableVirtualCache:1; + u_int32_t enableConfigAutoBalance:1; + u_int32_t forceSGPIOForQuadOnly:1; + u_int32_t useEmergencySparesforSMARTer:1; + u_int32_t useUnconfGoodForEmergency:1; + u_int32_t useGlobalSparesForEmergency:1; + u_int32_t preventPIImport:1; + u_int32_t enablePI:1; + u_int32_t useDiskActivityForLocate:1; + u_int32_t disableCacheBypass:1; + u_int32_t enableJBOD:1; + u_int32_t disableSpinDownHS:1; + u_int32_t allowBootWithPinnedCache:1; + u_int32_t disableOnlineCtrlReset:1; + u_int32_t enableSecretKeyControl:1; + u_int32_t autoEnhancedImport:1; + u_int32_t enableSpinDownUnconfigured:1; + u_int32_t SSDPatrolReadEnabled:1; + u_int32_t SSDSMARTerEnabled:1; + u_int32_t disableNCQ:1; + u_int32_t useFdeOnly:1; + u_int32_t prCorrectUnconfiguredAreas:1; + u_int32_t SMARTerEnabled:1; + u_int32_t copyBackDisabled:1; +#endif } OnOffProperties; u_int8_t autoSnapVDSpace; u_int8_t viewSpace; @@ -2170,6 +2277,7 @@ u_int16_t cacheMemorySize; /* 0x7A2 */ struct { /* 0x7A4 */ +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t supportPIcontroller:1; u_int32_t supportLdPIType1:1; u_int32_t supportLdPIType2:1; @@ -2194,6 +2302,30 @@ u_int32_t supportUnevenSpans:1; u_int32_t reserved:11; +#else + u_int32_t reserved:11; + u_int32_t supportUnevenSpans:1; + u_int32_t dedicatedHotSparesLimited:1; + u_int32_t headlessMode:1; + u_int32_t supportEmulatedDrives:1; + u_int32_t supportResetNow:1; + u_int32_t realTimeScheduler:1; + u_int32_t supportSSDPatrolRead:1; + u_int32_t supportPerfTuning:1; + u_int32_t disableOnlinePFKChange:1; + u_int32_t supportJBOD:1; + u_int32_t supportBootTimePFKChange:1; + u_int32_t supportSetLinkSpeed:1; + u_int32_t supportEmergencySpares:1; + u_int32_t supportSuspendResumeBGops:1; + u_int32_t blockSSDWriteCacheChange:1; + u_int32_t supportShieldState:1; + u_int32_t supportLdBBMInfo:1; + u_int32_t supportLdPIType3:1; + u_int32_t supportLdPIType2:1; + u_int32_t supportLdPIType1:1; + u_int32_t supportPIcontroller:1; +#endif } adapterOperations2; u_int8_t driverVersion[32]; /* 0x7A8 */ @@ -2206,6 +2338,7 @@ u_int8_t reserved5[2]; /* 0x7CD reserved */ struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t peerIsPresent:1; u_int32_t peerIsIncompatible:1; @@ -2214,6 +2347,15 @@ u_int32_t ctrlPropIncompatible:1; u_int32_t premiumFeatureMismatch:1; u_int32_t reserved:26; +#else + u_int32_t reserved:26; + u_int32_t premiumFeatureMismatch:1; + u_int32_t ctrlPropIncompatible:1; + u_int32_t fwVersionMismatch:1; + u_int32_t hwIncompatible:1; + u_int32_t peerIsIncompatible:1; + u_int32_t peerIsPresent:1; +#endif } cluster; 
char clusterId[16]; /* 0x7D4 */ @@ -2221,6 +2363,7 @@ char reserved6[4]; /* 0x7E4 RESERVED FOR IOV */ struct { /* 0x7E8 */ +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t supportPersonalityChange:2; u_int32_t supportThermalPollInterval:1; u_int32_t supportDisableImmediateIO:1; @@ -2246,11 +2389,39 @@ u_int32_t supportExtendedSSCSize:1; u_int32_t useSeqNumJbodFP:1; u_int32_t reserved:7; +#else + u_int32_t reserved:7; + u_int32_t useSeqNumJbodFP:1; + u_int32_t supportExtendedSSCSize:1; + u_int32_t supportDiskCacheSettingForSysPDs:1; + u_int32_t supportCPLDUpdate:1; + u_int32_t supportTTYLogCompression:1; + u_int32_t discardCacheDuringLDDelete:1; + u_int32_t supportSecurityonJBOD:1; + u_int32_t supportCacheBypassModes:1; + u_int32_t supportDisableSESMonitoring:1; + u_int32_t supportForceFlash:1; + u_int32_t supportNVDRAM:1; + u_int32_t supportDrvActivityLEDSetting:1; + u_int32_t supportAllowedOpsforDrvRemoval:1; + u_int32_t supportHOQRebuild:1; + u_int32_t supportForceTo512e:1; + u_int32_t supportNVCacheErase:1; + u_int32_t supportDebugQueue:1; + u_int32_t supportSwZone:1; + u_int32_t supportCrashDump:1; + u_int32_t supportMaxExtLDs:1; + u_int32_t supportT10RebuildAssist:1; + u_int32_t supportDisableImmediateIO:1; + u_int32_t supportThermalPollInterval:1; + u_int32_t supportPersonalityChange:2; +#endif } adapterOperations3; u_int8_t pad_cpld[16]; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t ctrlInfoExtSupported:1; u_int16_t supportIbuttonLess:1; u_int16_t supportedEncAlgo:1; @@ -2260,6 +2431,17 @@ u_int16_t supportPdMapTargetId:1; u_int16_t FWSwapsBBUVPDInfo:1; u_int16_t reserved:8; +#else + u_int16_t reserved:8; + u_int16_t FWSwapsBBUVPDInfo:1; + u_int16_t supportPdMapTargetId:1; + u_int16_t supportSESCtrlInMultipathCfg:1; + u_int16_t imageUploadSupported:1; + u_int16_t supportEncryptedMfc:1; + u_int16_t supportedEncAlgo:1; + u_int16_t supportIbuttonLess:1; + u_int16_t ctrlInfoExtSupported:1; +#endif } adapterOperations4; u_int8_t pad[0x800 - 0x7FE]; /* 0x7FE */ @@ -2332,6 +2514,7 @@ typedef union _MFI_CAPABILITIES { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t support_fp_remote_lun:1; u_int32_t support_additional_msix:1; u_int32_t support_fastpath_wb:1; @@ -2342,6 +2525,18 @@ u_int32_t support_ext_queue_depth:1; u_int32_t support_ext_io_size:1; u_int32_t reserved:23; +#else + u_int32_t reserved:23; + u_int32_t support_ext_io_size:1; + u_int32_t support_ext_queue_depth:1; + u_int32_t security_protocol_cmds_fw:1; + u_int32_t support_core_affinity:1; + u_int32_t support_ndrive_r1_lb:1; + u_int32_t support_max_255lds:1; + u_int32_t support_fastpath_wb:1; + u_int32_t support_additional_msix:1; + u_int32_t support_fp_remote_lun:1; +#endif } mfi_capabilities; u_int32_t reg; } MFI_CAPABILITIES; @@ -2602,9 +2797,15 @@ #pragma pack(1) union mrsas_evt_class_locale { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t locale; u_int8_t reserved; int8_t class; +#else + int8_t class; + u_int8_t reserved; + u_int16_t locale; +#endif } __packed members; u_int32_t word; @@ -2890,6 +3091,7 @@ typedef struct _MRSAS_DRV_PCI_LINK_CAPABILITY { union { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t linkSpeed:4; u_int32_t linkWidth:6; u_int32_t aspmSupport:2; @@ -2897,6 +3099,15 @@ u_int32_t l1ExitLatency:3; u_int32_t rsvdp:6; u_int32_t portNumber:8; +#else + u_int32_t portNumber:8; + u_int32_t rsvdp:6; + u_int32_t l1ExitLatency:3; + u_int32_t losExitLatency:3; + u_int32_t aspmSupport:2; + u_int32_t linkWidth:6; + u_int32_t linkSpeed:4; +#endif } bits; u_int32_t asUlong; @@ -2908,12 +3119,21 
@@ typedef struct _MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY { union { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t linkSpeed:4; u_int16_t negotiatedLinkWidth:6; u_int16_t linkTrainingError:1; u_int16_t linkTraning:1; u_int16_t slotClockConfig:1; u_int16_t rsvdZ:3; +#else + u_int16_t rsvdZ:3; + u_int16_t slotClockConfig:1; + u_int16_t linkTraning:1; + u_int16_t linkTrainingError:1; + u_int16_t negotiatedLinkWidth:6; + u_int16_t linkSpeed:4; +#endif } bits; u_int16_t asUshort; @@ -2967,6 +3187,7 @@ struct { union { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t forcedPDGUID:1; u_int16_t inVD:1; u_int16_t isGlobalSpare:1; @@ -2974,6 +3195,15 @@ u_int16_t isForeign:1; u_int16_t reserved:7; u_int16_t intf:4; +#else + u_int16_t intf:4; + u_int16_t reserved:7; + u_int16_t isForeign:1; + u_int16_t isSpare:1; + u_int16_t isGlobalSpare:1; + u_int16_t inVD:1; + u_int16_t forcedPDGUID:1; +#endif } pdType; u_int16_t type; }; @@ -3004,6 +3234,7 @@ */ struct MR_PD_PROGRESS { struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t rbld:1; u_int32_t patrol:1; u_int32_t clear:1; @@ -3011,6 +3242,15 @@ u_int32_t erase:1; u_int32_t locate:1; u_int32_t reserved:26; +#else + u_int32_t reserved:26; + u_int32_t locate:1; + u_int32_t erase:1; + u_int32_t copyBack:1; + u_int32_t clear:1; + u_int32_t patrol:1; + u_int32_t rbld:1; +#endif } active; union MR_PROGRESS rbld; union MR_PROGRESS patrol; @@ -3020,12 +3260,21 @@ }; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t rbld:1; u_int32_t patrol:1; u_int32_t clear:1; u_int32_t copyBack:1; u_int32_t erase:1; u_int32_t reserved:27; +#else + u_int32_t reserved:27; + u_int32_t erase:1; + u_int32_t copyBack:1; + u_int32_t clear:1; + u_int32_t patrol:1; + u_int32_t rbld:1; +#endif } pause; union MR_PROGRESS reserved[3]; @@ -3057,13 +3306,18 @@ struct { u_int8_t count; +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int8_t isPathBroken:4; u_int8_t reserved3:3; u_int8_t widePortCapable:1; - +#else + u_int8_t widePortCapable:1; + u_int8_t reserved3:3; + u_int8_t isPathBroken:4; +#endif u_int8_t connectorIndex[2]; u_int8_t reserved[4]; - u_int64_t sasAddr[2]; + u_int64_t sasAddr[2]; u_int8_t reserved2[16]; } pathInfo; @@ -3088,6 +3342,7 @@ u_int16_t copyBackPartnerId; u_int16_t enclPartnerDeviceId; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int16_t fdeCapable:1; u_int16_t fdeEnabled:1; u_int16_t secured:1; @@ -3095,6 +3350,15 @@ u_int16_t foreign:1; u_int16_t needsEKM:1; u_int16_t reserved:10; +#else + u_int16_t reserved:10; + u_int16_t needsEKM:1; + u_int16_t foreign:1; + u_int16_t locked:1; + u_int16_t secured:1; + u_int16_t fdeEnabled:1; + u_int16_t fdeCapable:1; +#endif } security; u_int8_t mediaType; u_int8_t notCertified; @@ -3110,6 +3374,7 @@ u_int16_t reserved2; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t piType:3; u_int32_t piFormatted:1; u_int32_t piEligible:1; @@ -3123,6 +3388,21 @@ u_int32_t wceUnchanged:1; u_int32_t supportScsiUnmap:1; u_int32_t reserved:18; +#else + u_int32_t reserved:18; + u_int32_t supportScsiUnmap:1; + u_int32_t wceUnchanged:1; + u_int32_t useSSEraseType:1; + u_int32_t ineligibleForLd:1; + u_int32_t ineligibleForSSCD:1; + u_int32_t emergencySpare:1; + u_int32_t commissionedSpare:1; + u_int32_t WCE:1; + u_int32_t NCQ:1; + u_int32_t piEligible:1; + u_int32_t piFormatted:1; + u_int32_t piType:3; +#endif } properties; u_int64_t shieldDiagCompletionTime; @@ -3132,8 +3412,13 @@ u_int8_t reserved4[2]; struct { +#if _BYTE_ORDER == _LITTLE_ENDIAN u_int32_t bbmErrCountSupported:1; u_int32_t bbmErrCount:31; +#else + u_int32_t 
bbmErrCount:31; + u_int32_t bbmErrCountSupported:1; +#endif } bbmErr; u_int8_t reserved1[512-428]; Index: sys/dev/mrsas/mrsas.c =================================================================== --- sys/dev/mrsas/mrsas.c +++ sys/dev/mrsas/mrsas.c @@ -52,6 +52,7 @@ #include #include #include +#include /* * Function prototypes @@ -619,13 +620,13 @@ dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); - dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; - dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); + dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info)); + dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info)); retcode = mrsas_issue_blocked_cmd(sc, cmd); if (retcode == ETIMEDOUT) @@ -681,7 +682,7 @@ curr_aen.word = class_locale_word; if (sc->aen_cmd) { - prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; + prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]); /* * A class whose enum value is smaller is inclusive of all @@ -732,16 +733,16 @@ dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); - dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; - dcmd->mbox.w[0] = seq_num; + dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail)); + dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT); + dcmd->mbox.w[0] = htole32(seq_num); sc->last_seq_num = seq_num; - dcmd->mbox.w[1] = curr_aen.word; - dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; - dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); + dcmd->mbox.w[1] = htole32(curr_aen.word); + dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail)); if (sc->aen_cmd != NULL) { mrsas_release_mfi_cmd(cmd); @@ -907,9 +908,6 @@ * Set up PCI and registers */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); - if ((cmd & PCIM_CMD_PORTEN) == 0) { - return (ENXIO); - } /* Force the busmaster enable bit on. 
*/ cmd |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); @@ -1704,7 +1702,7 @@ /* Find our reply descriptor for the command and process */ while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { - smid = reply_desc->SMID; + smid = le16toh(reply_desc->SMID); cmd_mpt = sc->mpt_cmd_list[smid - 1]; scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; @@ -1736,7 +1734,7 @@ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, - extStatus, data_length, sense); + extStatus, le32toh(data_length), sense); mrsas_cmd_done(sc, cmd_mpt); mrsas_atomic_dec(&sc->fw_outstanding); } else { @@ -1764,7 +1762,7 @@ mrsas_release_mpt_cmd(r1_cmd); mrsas_atomic_dec(&sc->fw_outstanding); mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, - extStatus, data_length, sense); + extStatus, le32toh(data_length), sense); mrsas_cmd_done(sc, cmd_mpt); mrsas_atomic_dec(&sc->fw_outstanding); } @@ -1778,7 +1776,7 @@ * And also make sure that the issue_polled call should only be * used if INTERRUPT IS DISABLED. */ - if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) + if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) mrsas_release_mfi_cmd(cmd_mfi); else mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); @@ -2593,6 +2591,13 @@ (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1)); scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); + + mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x," + "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x," + "sc->io_frames_alloc_sz 0x%x\n", __func__, + sc->reply_q_depth, sc->request_alloc_sz, + sc->reply_alloc_sz, sc->io_frames_alloc_sz); + /* * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, * Firmware support extended IO chain frame which is 4 time more @@ -2617,8 +2622,10 @@ mrsas_dprint(sc, MRSAS_INFO, "max sge: 0x%x, max chain frame size: 0x%x, " - "max fw cmd: 0x%x\n", sc->max_num_sge, - sc->max_chain_frame_sz, sc->max_fw_cmds); + "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n", + sc->max_num_sge, + sc->max_chain_frame_sz, sc->max_fw_cmds, + sc->chain_frames_alloc_sz); /* Used for pass thru MFI frame (DCMD) */ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; @@ -2738,19 +2745,19 @@ IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; - IOCInitMsg->MsgVersion = MPI2_VERSION; - IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION; - IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; - IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth; - IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr; - IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr; + IOCInitMsg->MsgVersion = htole16(MPI2_VERSION); + IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION); + IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); + IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth); + IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr); + IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr); IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? 
sc->msix_vectors : 0); IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; - init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); /* driver support Extended MSIX */ if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { @@ -2768,11 +2775,16 @@ init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; + + init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg); + phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; - init_frame->queue_info_new_phys_addr_lo = phys_addr; - init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); + init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr); + init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t)); + + req_desc.addr.u.low = htole32((bus_addr_t)sc->ioc_init_phys_mem & 0xFFFFFFFF); + req_desc.addr.u.high = htole32((bus_addr_t)sc->ioc_init_phys_mem >> 32); - req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; req_desc.MFAIo.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -2923,9 +2935,9 @@ { mtx_lock(&sc->pci_lock); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), - req_desc_lo); + le32toh(req_desc_lo)); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), - req_desc_hi); + le32toh(req_desc_hi)); mtx_unlock(&sc->pci_lock); } @@ -2944,7 +2956,7 @@ { if (sc->atomic_desc_support) mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port), - req_desc_lo); + le32toh(req_desc_lo)); else mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi); } @@ -3103,7 +3115,6 @@ sc = (struct mrsas_softc *)arg; mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); - sc->ocr_thread_active = 1; mtx_lock(&sc->sim_lock); for (;;) { @@ -3642,10 +3653,10 @@ dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); - dcmd->opcode = MR_DCMD_CTRL_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; - dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); + dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info)); + dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); @@ -3654,8 +3665,13 @@ if (retcode == ETIMEDOUT) goto dcmd_timeout; - else + else { memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); + le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties); + le32_to_cpus(&sc->ctrl_info->adapterOperations2); + le32_to_cpus(&sc->ctrl_info->adapterOperations3); + le16_to_cpus(&sc->ctrl_info->adapterOperations4); + } do_ocr = 0; mrsas_update_ext_vd_details(sc); @@ -3813,7 +3829,7 @@ int i, retcode = SUCCESS; frame_hdr->cmd_status = 0xFF; - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); /* Issue the frame using inbound queue port */ if (mrsas_issue_dcmd(sc, cmd)) { @@ -3892,7 +3908,7 @@ req_desc->addr.Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - req_desc->SCSIIO.SMID = index; + req_desc->SCSIIO.SMID = htole16(index); return (req_desc); } @@ -3927,7 +3943,7 @@ * mrsas_complete_cmd. */ - if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) + if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; io_req = mpt_cmd->io_request; @@ -3944,12 +3960,12 @@ io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = sc->chain_offset_mfi_pthru; - mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; + mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr); mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; - mpi25_ieee_chain->Length = sc->max_chain_frame_sz; + mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz); return (0); } @@ -4100,7 +4116,7 @@ break; } /* See if got an event notification */ - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) + if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT) mrsas_complete_aen(sc, cmd); else mrsas_wakeup(sc, cmd); @@ -4264,14 +4280,14 @@ dcmd->sge_count = 1; dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = (pd_seq_map_sz); - dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO); - dcmd->sgl.sge32[0].phys_addr = (pd_seq_h); - dcmd->sgl.sge32[0].length = (pd_seq_map_sz); + dcmd->data_xfer_len = htole32(pd_seq_map_sz); + dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz); if (pend) { dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; - dcmd->flags = (MFI_FRAME_DIR_WRITE); + dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); sc->jbod_seq_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, @@ -4280,13 +4296,13 @@ } else return 0; } else - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = htole16(MFI_FRAME_DIR_READ); retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; - if (pd_sync->count > MAX_PHYSICAL_DEVICES) { + if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) { device_printf(sc->mrsas_dev, "driver supports max %d JBOD, but FW reports %d\n", MAX_PHYSICAL_DEVICES, pd_sync->count); @@ -4364,13 +4380,13 @@ dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sc->current_map_sz; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = map_phys_addr; - dcmd->sgl.sge32[0].length = sc->current_map_sz; + dcmd->data_xfer_len = htole32(sc->current_map_sz); + dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) @@ -4427,15 +4443,15 @@ dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_WRITE; + dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sc->current_map_sz; + dcmd->data_xfer_len = htole32(sc->current_map_sz); dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = map_phys_addr; - dcmd->sgl.sge32[0].length = sc->current_map_sz; + dcmd->opcode = 
htole32(MR_DCMD_LD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); sc->map_update_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { @@ -4472,17 +4488,17 @@ memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); - dcmd->mbox.s[0] = device_id; + dcmd->mbox.s[0] = htole16(device_id); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct mrsas_pd_info); - dcmd->opcode = MR_DCMD_PD_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr; - dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info); + dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info)); + dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); @@ -4493,7 +4509,7 @@ goto dcmd_timeout; sc->target_list[device_id].interface_type = - sc->pd_info_mem->state.ddf.pdType.intf; + le16toh(sc->pd_info_mem->state.ddf.pdType.intf); do_ocr = 0; @@ -4572,6 +4588,7 @@ struct MR_PD_ADDRESS *pd_addr; bus_addr_t pd_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; + u_int16_t dev_id; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { @@ -4601,13 +4618,13 @@ dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = htole16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); - dcmd->opcode = MR_DCMD_PD_LIST_QUERY; - dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; - dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); + dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); + dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY); + dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF); + dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); @@ -4620,17 +4637,18 @@ /* Get the instance PD list */ pd_count = MRSAS_MAX_PD; pd_addr = pd_list_mem->addr; - if (pd_list_mem->count < pd_count) { + if (le32toh(pd_list_mem->count) < pd_count) { memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); - for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { - sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; - sc->local_pd_list[pd_addr->deviceId].driveType = - pd_addr->scsiDevType; - sc->local_pd_list[pd_addr->deviceId].driveState = + for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) { + dev_id = le16toh(pd_addr->deviceId); + sc->local_pd_list[dev_id].tid = dev_id; + sc->local_pd_list[dev_id].driveType = + le16toh(pd_addr->scsiDevType); + sc->local_pd_list[dev_id].driveState = MR_PD_STATE_SYSTEM; - if (sc->target_list[pd_addr->deviceId].target_id == 0xffff) - mrsas_add_target(sc, pd_addr->deviceId); + if (sc->target_list[dev_id].target_id == 0xffff) + mrsas_add_target(sc, dev_id); pd_addr++; } for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) { @@ -4711,10 +4729,10 @@ dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; - dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); - dcmd->opcode = MR_DCMD_LD_GET_LIST; - 
dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; - dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); + dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST)); + dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST); + dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr); + dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; if (!sc->mask_interrupts) @@ -4730,10 +4748,10 @@ #endif /* Get the instance LD list */ - if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { - sc->CurLdCount = ld_list_mem->ldCount; + if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) { + sc->CurLdCount = le32toh(ld_list_mem->ldCount); memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); - for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { + for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) { ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; drv_tgt_id = ids + MRSAS_MAX_PD; if (ld_list_mem->ldList[ld_index].state != 0) { Index: sys/dev/mrsas/mrsas_cam.c =================================================================== --- sys/dev/mrsas/mrsas_cam.c +++ sys/dev/mrsas/mrsas_cam.c @@ -623,17 +623,17 @@ mtx_unlock(&sc->raidmap_lock); if (cmd->flags == MRSAS_DIR_IN) /* from device */ - cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ; + cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_READ); else if (cmd->flags == MRSAS_DIR_OUT) /* to device */ - cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; + cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_WRITE); - cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; + cmd->io_request->SGLFlags = htole16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; - cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr; + cmd->io_request->SenseBufferLowAddress = htole32(cmd->sense_phys_addr & 0xFFFFFFFF); cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE; req_desc = cmd->request_desc; - req_desc->SCSIIO.SMID = cmd->index; + req_desc->SCSIIO.SMID = htole16(cmd->index); /* * Start timer for IO timeout. Default timeout value is 90 second. 
@@ -807,7 +807,7 @@ (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION))); /* sense buffer is different for r1 command */ - r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr; + r1_cmd->io_request->SenseBufferLowAddress = htole32(r1_cmd->sense_phys_addr & 0xFFFFFFFF); r1_cmd->ccb_ptr = cmd->ccb_ptr; req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1); @@ -854,17 +854,17 @@ device_id = ccb_h->target_id; io_request = cmd->io_request; - io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; + io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id); io_request->RaidContext.raid_context.status = 0; io_request->RaidContext.raid_context.exStatus = 0; /* just the cdb len, other flags zero, and ORed-in later for FP */ - io_request->IoFlags = csio->cdb_len; + io_request->IoFlags = htole16(csio->cdb_len); if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS) device_printf(sc->mrsas_dev, "Build ldio or fpio error\n"); - io_request->DataLength = cmd->length; + io_request->DataLength = htole32(cmd->length); if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { if (cmd->sge_count > sc->max_num_sge) { @@ -978,7 +978,7 @@ u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld; u_int32_t datalength = 0; - io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; + io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id); start_lba_lo = 0; start_lba_hi = 0; @@ -1041,7 +1041,7 @@ io_info.ldTgtId = device_id; io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; - io_request->DataLength = cmd->length; + io_request->DataLength = htole32(cmd->length); switch (ccb_h->flags & CAM_DIR_MASK) { case CAM_DIR_IN: @@ -1127,7 +1127,7 @@ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context.nseg = 0x1; - io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); io_request->RaidContext.raid_context.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); @@ -1135,7 +1135,7 @@ io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context_g35.nseg = 0x1; io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; - io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) { io_request->RaidContext.raid_context_g35.RAIDFlags = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS @@ -1166,7 +1166,7 @@ cmd->pdInterface = io_info.pdInterface; } else { /* Not FP IO */ - io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; + io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -1186,7 +1186,7 @@ io_request->RaidContext.raid_context_g35.nseg = 0x1; } io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; + io_request->DevHandle = htole16(device_id); } return (0); } @@ -1301,7 +1301,7 @@ pd_sync->seq[device_id].pdTargetId; else io_request->RaidContext.raid_context.VirtualDiskTgtId = - device_id + 255; + htole16(device_id + 255); io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum; io_request->DevHandle = 
pd_sync->seq[device_id].devHandle; if (sc->is_ventura || sc->is_aero) @@ -1316,7 +1316,7 @@ io_request->RaidContext.raid_context.nseg = 0x1; } else if (sc->fast_path_io) { //printf("Using LD RAID map\n"); - io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; + io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id); io_request->RaidContext.raid_context.configSeqNum = 0; local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; io_request->DevHandle = @@ -1324,7 +1324,7 @@ } else { //printf("Using FW PATH\n"); /* Want to send all IO via FW path */ - io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; + io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id); io_request->RaidContext.raid_context.configSeqNum = 0; io_request->DevHandle = MR_DEVHANDLE_INVALID; } @@ -1340,12 +1340,12 @@ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.raid_context.timeoutValue = - local_map_ptr->raidMap.fpPdIoTimeoutSec; - io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; + htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec); + io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id); } else { /* system pd fast path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; - io_request->RaidContext.raid_context.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec; + io_request->RaidContext.raid_context.timeoutValue = htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec); /* * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH @@ -1353,7 +1353,7 @@ * and not the Exception queue */ if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) - io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << @@ -1361,7 +1361,7 @@ } io_request->LUN[1] = ccb_h->target_lun & 0xF; - io_request->DataLength = cmd->length; + io_request->DataLength = htole32(cmd->length); if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { if (cmd->sge_count > sc->max_num_sge) { @@ -1539,8 +1539,8 @@ } if (nseg != 0) { for (i = 0; i < nseg; i++) { - sgl_ptr->Address = segs[i].ds_addr; - sgl_ptr->Length = segs[i].ds_len; + sgl_ptr->Address = htole64(segs[i].ds_addr); + sgl_ptr->Length = htole32(segs[i].ds_len); sgl_ptr->Flags = 0; if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { if (i == nseg - 1) @@ -1565,8 +1565,8 @@ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); - sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)); - sg_chain->Address = cmd->chain_frame_phys_addr; + sg_chain->Length = htole32((sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed))); + sg_chain->Address = htole64(cmd->chain_frame_phys_addr); sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame; } } Index: sys/dev/mrsas/mrsas_fp.c =================================================================== --- sys/dev/mrsas/mrsas_fp.c +++ sys/dev/mrsas/mrsas_fp.c @@ -192,13 +192,13 @@ u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map) { - return (map->raidMap.ldSpanMap[ld].ldRaid.targetId); + return le16toh(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map) { - return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; + return 
le16toh(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } static u_int8_t @@ -221,7 +221,7 @@ static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map) { - return map->raidMap.arMapInfo[ar].pd[arm]; + return le16toh(map->raidMap.arMapInfo[ar].pd[arm]); } static MR_LD_SPAN * @@ -291,11 +291,11 @@ device_printf(sc->mrsas_dev, " raidMapSize 0x%x, descTableOffset 0x%x, " " descTableSize 0x%x, descTableNumElements 0x%x \n", - fw_map_dyn->raidMapSize, fw_map_dyn->descTableOffset, + fw_map_dyn->raidMapSize, le32toh(fw_map_dyn->descTableOffset), fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements); #endif desc_table = (MR_RAID_MAP_DESC_TABLE *) ((char *)fw_map_dyn + - fw_map_dyn->descTableOffset); + le32toh(fw_map_dyn->descTableOffset)); if (desc_table != fw_map_dyn->raidMapDescTable) { device_printf(sc->mrsas_dev, "offsets of desc table are not matching returning " @@ -303,15 +303,16 @@ desc_table, fw_map_dyn->raidMapDescTable); } memset(drv_map, 0, sc->drv_map_sz); - ld_count = fw_map_dyn->ldCount; - pDrvRaidMap->ldCount = ld_count; + ld_count = le16toh(fw_map_dyn->ldCount); + pDrvRaidMap->ldCount = htole16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec; - pDrvRaidMap->totalSize = sizeof(MR_DRV_RAID_MAP_ALL); + pDrvRaidMap->totalSize = htole32(sizeof(MR_DRV_RAID_MAP_ALL)); /* point to actual data starting point */ raid_map_data = (char *)fw_map_dyn + - fw_map_dyn->descTableOffset + fw_map_dyn->descTableSize; + le32toh(fw_map_dyn->descTableOffset) + + le32toh(fw_map_dyn->descTableSize); - for (i = 0; i < fw_map_dyn->descTableNumElements; ++i) { + for (i = 0; i < le32toh(fw_map_dyn->descTableNumElements); ++i) { if (!desc_table) { device_printf(sc->mrsas_dev, "desc table is null, coming out %p \n", desc_table); @@ -324,28 +325,29 @@ device_printf(sc->mrsas_dev, "raidmap type %d, raidmapOffset 0x%x, " " raid map number of elements 0%x, raidmapsize 0x%x\n", - desc_table->raidMapDescType, desc_table->raidMapDescOffset, - desc_table->raidMapDescElements, desc_table->raidMapDescBufferSize); + le32toh(desc_table->raidMapDescType), desc_table->raidMapDescOffset, + le32toh(desc_table->raidMapDescElements), desc_table->raidMapDescBufferSize); #endif - switch (desc_table->raidMapDescType) { + switch (le32toh(desc_table->raidMapDescType)) { case RAID_MAP_DESC_TYPE_DEVHDL_INFO: fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *) - ((char *)raid_map_data + desc_table->raidMapDescOffset); + ((char *)raid_map_data + le32toh(desc_table->raidMapDescOffset)); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo); #endif memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo, - sizeof(MR_DEV_HANDLE_INFO) * desc_table->raidMapDescElements); + sizeof(MR_DEV_HANDLE_INFO) * le32toh(desc_table->raidMapDescElements)); break; case RAID_MAP_DESC_TYPE_TGTID_INFO: fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *) - ((char *)raid_map_data + desc_table->raidMapDescOffset); + ((char *)raid_map_data + + le32toh(desc_table->raidMapDescOffset)); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd); #endif - for (j = 0; j < desc_table->raidMapDescElements; j++) { + for (j = 0; j < le32toh(desc_table->raidMapDescElements); j++) { pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j]; #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, 
@@ -355,19 +357,20 @@ break; case RAID_MAP_DESC_TYPE_ARRAY_INFO: fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *) ((char *)raid_map_data + - desc_table->raidMapDescOffset); + le32toh(desc_table->raidMapDescOffset)); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo); #endif memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo, - sizeof(MR_ARRAY_INFO) * desc_table->raidMapDescElements); + sizeof(MR_ARRAY_INFO) * le32toh(desc_table->raidMapDescElements)); break; case RAID_MAP_DESC_TYPE_SPAN_INFO: fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *) ((char *)raid_map_data + - desc_table->raidMapDescOffset); + le32toh(desc_table->raidMapDescOffset)); memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap, - sizeof(MR_LD_SPAN_MAP) * desc_table->raidMapDescElements); + sizeof(MR_LD_SPAN_MAP) * + le32toh(desc_table->raidMapDescElements)); #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap); @@ -419,7 +422,7 @@ return MR_PopulateDrvRaidMapVentura(sc); } else if (sc->max256vdSupport) { fw_map_ext = (MR_FW_RAID_MAP_EXT *) sc->raidmap_mem[(sc->map_id & 1)]; - ld_count = (u_int16_t)(fw_map_ext->ldCount); + ld_count = (u_int16_t)le16toh(fw_map_ext->ldCount); if (ld_count > MAX_LOGICAL_DRIVES_EXT) { device_printf(sc->mrsas_dev, "mrsas: LD count exposed in RAID map in not valid\n"); @@ -434,7 +437,7 @@ } #endif memset(drv_map, 0, sc->drv_map_sz); - pDrvRaidMap->ldCount = ld_count; + pDrvRaidMap->ldCount = htole16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) { pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i]; @@ -462,13 +465,13 @@ memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); - pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT); + pDrvRaidMap->totalSize = htole32(sizeof(MR_FW_RAID_MAP_EXT)); } else { fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; #if VD_EXT_DEBUG - for (i = 0; i < pFwRaidMap->ldCount; i++) { + for (i = 0; i < le32toh(pFwRaidMap->ldCount); i++) { device_printf(sc->mrsas_dev, "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i, fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId, @@ -550,12 +553,12 @@ else expected_map_size = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) + - (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount); + (sizeof(MR_LD_SPAN_MAP) * le16toh(pDrvRaidMap->ldCount)); - if (pDrvRaidMap->totalSize != expected_map_size) { + if (le32toh(pDrvRaidMap->totalSize) != expected_map_size) { device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size); device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP)); - device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize); + device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", le32toh(pDrvRaidMap->totalSize)); return 1; } if (sc->UnevenSpanSupport) { @@ -597,8 +600,8 @@ printf("LD %x: span_depth=%x\n", ld, raid->spanDepth); for (span = 0; span < raid->spanDepth; span++) printf("Span=%x, number of quads=%x\n", span, - map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements); + le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
+ block_span_info.noElements)); for (element = 0; element < MAX_QUAD_DEPTH; element++) { span_set = &(ldSpanInfo[ld].span_set[element]); if (span_set->span_row_data_width == 0) @@ -627,12 +630,12 @@ spanBlock[span].block_span_info. quad[element]; printf("Span=%x, Quad=%x, diff=%x\n", span, - element, quad->diff); + element, le32toh(quad->diff)); printf("offset_in_span=0x%08lx\n", - (long unsigned int)quad->offsetInSpan); + (long unsigned int)le64toh(quad->offsetInSpan)); printf("logical start=0x%08lx, end=0x%08lx\n", - (long unsigned int)quad->logStart, - (long unsigned int)quad->logEnd); + (long unsigned int)le64toh(quad->logStart), + (long unsigned int)le64toh(quad->logEnd)); } } } @@ -674,8 +677,8 @@ continue; for (span = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info + 1) { + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span]. block_span_info.quad[info]; @@ -683,17 +686,17 @@ *div_error = 1; return span; } - if (quad->logStart <= row && - row <= quad->logEnd && - (mega_mod64(row - quad->logStart, - quad->diff)) == 0) { + if (le64toh(quad->logStart) <= row && + row <= le64toh(quad->logEnd) && + (mega_mod64(row - le64toh(quad->logStart), + le32toh(quad->diff))) == 0) { if (span_blk != NULL) { u_int64_t blk; blk = mega_div64_32 - ((row - quad->logStart), - quad->diff); - blk = (blk + quad->offsetInSpan) + ((row - le64toh(quad->logStart)), + le32toh(quad->diff)); + blk = (blk + le64toh(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } @@ -740,8 +743,8 @@ span_set_Row = mega_div64_32(span_set_Strip, span_set->span_row_data_width) * span_set->diff; for (span = 0, span_offset = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info + 1) { + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info + 1) { if (strip_offset >= span_set->strip_offset[span]) span_offset++; @@ -793,18 +796,18 @@ continue; for (span = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info + 1) { + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[info]; - if (quad->logStart <= row && - row <= quad->logEnd && - mega_mod64((row - quad->logStart), - quad->diff) == 0) { + if (le64toh(quad->logStart) <= row && + row <= le64toh(quad->logEnd) && + mega_mod64((row - le64toh(quad->logStart)), + le32toh(quad->diff)) == 0) { strip = mega_div64_32 (((row - span_set->data_row_start) - - quad->logStart), - quad->diff); + - le64toh(quad->logStart)), + le32toh(quad->diff)); strip *= span_set->span_row_data_width; strip += span_set->data_strip_start; strip += span_set->strip_offset[span]; @@ -853,8 +856,8 @@ span_set->span_row_data_width); for (span = 0, span_offset = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info + 1) { + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
+ block_span_info.noElements) >= info + 1) { if (strip_offset >= span_set->strip_offset[span]) span_offset = span_set->strip_offset[span]; else @@ -957,7 +960,7 @@ io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); } } else { - *pDevHandle = MR_DEVHANDLE_INVALID; + *pDevHandle = htole16(MR_DEVHANDLE_INVALID); if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) || (sc->mrsas_gen3_ctrl && raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) @@ -971,7 +974,7 @@ } } - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; + *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (sc->is_ventura || sc->is_aero) { ((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; @@ -1152,14 +1155,14 @@ else regSize += stripSize; } - pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; + pRAID_Context->timeoutValue = htole16(map->raidMap.fpPdIoTimeoutSec); if (sc->mrsas_gen3_ctrl) pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; else if (sc->device_id == MRSAS_TBOLT) pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->VirtualDiskTgtId = raid->targetId; - pRAID_Context->regLockRowLBA = regStart; - pRAID_Context->regLockLength = regSize; + pRAID_Context->regLockRowLBA = htole64(regStart); + pRAID_Context->regLockLength = htole32(regSize); pRAID_Context->configSeqNum = raid->seqNum; /* @@ -1232,20 +1235,20 @@ raid = MR_LdRaidGet(ld, map); for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (span = 0; span < raid->spanDepth; span++) { - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements < element + 1) + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) < element + 1) continue; /* TO-DO */ span_set = &(ldSpanInfo[ld].span_set[element]); quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[element]; - span_set->diff = quad->diff; + span_set->diff = le32toh(quad->diff); for (count = 0, span_row_width = 0; count < raid->spanDepth; count++) { - if (map->raidMap.ldSpanMap[ld].spanBlock[count]. - block_span_info.noElements >= element + 1) { + if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[count]. 
+ block_span_info.noElements) >= element + 1) { span_set->strip_offset[count] = span_row_width; span_row_width += MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize; @@ -1257,8 +1260,9 @@ } span_set->span_row_data_width = span_row_width; - span_row = mega_div64_32(((quad->logEnd - - quad->logStart) + quad->diff), quad->diff); + span_row = mega_div64_32(((le64toh(quad->logEnd) - + le64toh(quad->logStart)) + le32toh(quad->diff)), + le32toh(quad->diff)); if (element == 0) { span_set->log_start_lba = 0; @@ -1272,7 +1276,8 @@ span_set->data_strip_end = (span_row * span_row_width) - 1; span_set->data_row_start = 0; - span_set->data_row_end = (span_row * quad->diff) - 1; + span_set->data_row_end = + (span_row * le32toh(quad->diff)) - 1; } else { span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]); span_set->log_start_lba = span_set_prev->log_end_lba + 1; @@ -1290,7 +1295,7 @@ span_set->data_row_start = span_set_prev->data_row_end + 1; span_set->data_row_end = span_set->data_row_start + - (span_row * quad->diff) - 1; + (span_row * le32toh(quad->diff)) - 1; } break; } @@ -1331,6 +1336,7 @@ continue; } raid = MR_LdRaidGet(ld, drv_map); + le32_to_cpus(&raid->capability); if ((raid->level != 1) || (raid->ldState != MR_LD_STATE_OPTIMAL)) { lbInfo[ldCount].loadBalanceFlag = 0; @@ -1390,9 +1396,9 @@ cdb[19] = (u_int8_t)(start_blk & 0xff); /* Logical block reference tag */ - io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag); - io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; - io_request->IoFlags = 32; /* Specify 32-byte cdb */ + io_request->CDB.EEDP32.PrimaryReferenceTag = htobe32(ref_tag); + io_request->CDB.EEDP32.PrimaryApplicationTagMask = htobe16(0xffff); + io_request->IoFlags = htole16(32); /* Specify 32-byte cdb */ /* Transfer length */ cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff); @@ -1402,19 +1408,19 @@ /* set SCSI IO EEDP Flags */ if (ccb_h->flags == CAM_DIR_OUT) { - io_request->EEDPFlags = + io_request->EEDPFlags = htole16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); } else { - io_request->EEDPFlags = + io_request->EEDPFlags = htole16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); } - io_request->Control |= (0x4 << 26); - io_request->EEDPBlockSize = ld_block_size; + io_request->Control |= htole32(0x4 << 26); + io_request->EEDPBlockSize = htole32(ld_block_size); } else { /* Some drives don't support 16/12 byte CDB's, convert to 10 */ if (((cdb_len == 12) || (cdb_len == 16)) && @@ -1442,7 +1448,7 @@ cdb[8] = (u_int8_t)(num_blocks & 0xff); cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff); - io_request->IoFlags = 10; /* Specify 10-byte cdb */ + io_request->IoFlags = htole16(10); /* Specify 10-byte cdb */ cdb_len = 10; } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { /* Convert to 16 byte CDB for large LBA's */ @@ -1478,7 +1484,7 @@ cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff); cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff); - io_request->IoFlags = 16; /* Specify 16-byte cdb */ + io_request->IoFlags = htole16(16); /* Specify 16-byte cdb */ cdb_len = 16; } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) { /* convert to 10 byte CDB */ @@ -1700,7 +1706,7 @@ io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); } } else { - *pDevHandle = MR_DEVHANDLE_INVALID; /* set dev handle as invalid. 
*/ + *pDevHandle = htole16(MR_DEVHANDLE_INVALID); /* set dev handle as invalid. */ if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) || (sc->mrsas_gen3_ctrl && raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) @@ -1716,7 +1722,7 @@ } } - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; + *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (sc->is_ventura || sc->is_aero) { ((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; Index: sys/powerpc/conf/GENERIC64 =================================================================== --- sys/powerpc/conf/GENERIC64 +++ sys/powerpc/conf/GENERIC64 @@ -142,6 +142,7 @@ options AHC_ALLOW_MEMIO # Attempt to use memory mapped I/O device isp # Qlogic family device ispfw # Firmware module for Qlogic host adapters +device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device sym # NCR/Symbios/LSI Logic 53C8XX/53C1010/53C1510D
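
Illustrative sketch (not part of the diff above, FreeBSD-specific): the change leans on two patterns throughout -- declaring bit-fields in reverse order under #if _BYTE_ORDER == _LITTLE_ENDIAN so that each field lands on the same bits of the little-endian word the firmware defines, and byte-swapping a whole bit-field word in place (the new le32_to_cpus()/le16_to_cpus() macros) before any individual field is read. The structure and identifiers below (demo_flags, demo_le32_to_cpus) are invented for illustration only; le32toh()/htole32() and _BYTE_ORDER come from <sys/endian.h>/<machine/endian.h> on FreeBSD.

/*
 * Illustrative only -- not part of the mrsas change.  A 32-bit word shared
 * with little-endian firmware, accessed through bit-fields on either a
 * little- or big-endian host.
 */
#include <sys/endian.h>

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same shape as the le32_to_cpus() macro the patch adds to mrsas.h. */
#define	demo_le32_to_cpus(x)	do {					\
	*((uint32_t *)(x)) = le32toh(*((uint32_t *)(x)));		\
} while (0)

struct demo_flags {
#if _BYTE_ORDER == _LITTLE_ENDIAN
	uint32_t	enable:1;	/* bit 0 of the firmware's LE word */
	uint32_t	mode:3;		/* bits 1..3 */
	uint32_t	reserved:28;
#else
	uint32_t	reserved:28;	/* big-endian hosts allocate from the MSB, */
	uint32_t	mode:3;		/* so the order is reversed to keep */
	uint32_t	enable:1;	/* 'enable' on bit 0 after the swap */
#endif
};

int
main(void)
{
	/* Bytes as the firmware would produce them: 0x0b 0x00 0x00 0x00. */
	uint32_t wire = htole32(0x0000000bU);	/* enable = 1, mode = 5 */
	struct demo_flags f;

	memcpy(&f, &wire, sizeof(f));

	/* Swap the whole word to host order before reading any field. */
	demo_le32_to_cpus(&f);

	printf("enable=%u mode=%u\n", (unsigned)f.enable, (unsigned)f.mode);
	return (0);
}

Plain 16-/32-/64-bit scalars exchanged with the firmware are handled the other way in the diff: htole16()/htole32()/htole64() on the way out and le16toh()/le32toh()/le64toh() on the way in, while words built from bit-fields are swapped once, in place, and then read through the endian-matched declarations.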