diff --git a/examples/pcapscan.cc b/examples/pcapscan.cc
index 5c2c7a9d..92db5cdf 100644
--- a/examples/pcapscan.cc
+++ b/examples/pcapscan.cc
@@ -106,8 +106,7 @@ struct FiveTuple {
         dstAddr = iphdr->ip_dst.s_addr;
 
         // UDP/TCP ports
-        const struct udphdr *uh =
-            (const struct udphdr *)(((const char *)iphdr) + (iphdr->ip_hl * 4));
+        const struct udphdr *uh = reinterpret_cast<const struct udphdr *>(reinterpret_cast<const char *>(iphdr) + (iphdr->ip_hl * 4));
         srcPort = uh->uh_sport;
         dstPort = uh->uh_dport;
     }
@@ -136,7 +135,7 @@ static int onMatch(unsigned int id, unsigned long long from,
                    unsigned long long to, unsigned int flags, void *ctx) {
     // Our context points to a size_t storing the match count
-    size_t *matches = (size_t *)ctx;
+    size_t *matches = static_cast<size_t *>(ctx);
     (*matches)++;
     return 0; // continue matching
 }
@@ -232,9 +231,8 @@ public:
         }
 
         // Valid TCP or UDP packet
-        const struct ip *iphdr = (const struct ip *)(pktData
-            + sizeof(struct ether_header));
-        const char *payload = (const char *)pktData + offset;
+        const struct ip *iphdr = reinterpret_cast<const struct ip *>(pktData + sizeof(struct ether_header));
+        const char *payload = reinterpret_cast<const char *>(pktData) + offset;
 
         size_t id = stream_map.insert(std::make_pair(FiveTuple(iphdr),
                                       stream_map.size())).first->second;
@@ -574,7 +572,7 @@ int main(int argc, char **argv) {
  */
 static bool payloadOffset(const unsigned char *pkt_data, unsigned int *offset,
                           unsigned int *length) {
-    const ip *iph = (const ip *)(pkt_data + sizeof(ether_header));
+    const ip *iph = reinterpret_cast<const ip *>(pkt_data + sizeof(ether_header));
     const tcphdr *th = nullptr;
 
     // Ignore packets that aren't IPv4
@@ -593,7 +591,7 @@ static bool payloadOffset(const unsigned char *pkt_data, unsigned int *offset,
 
     switch (iph->ip_p) {
     case IPPROTO_TCP:
-        th = (const tcphdr *)((const char *)iph + ihlen);
+        th = reinterpret_cast<const tcphdr *>(reinterpret_cast<const char *>(iph) + ihlen);
         thlen = th->th_off * 4;
         break;
     case IPPROTO_UDP:
diff --git a/src/compiler/compiler.cpp b/src/compiler/compiler.cpp
index c4d74738..aa8de4ba 100644
--- a/src/compiler/compiler.cpp
+++ b/src/compiler/compiler.cpp
@@ -478,7 +478,7 @@ hs_database_t *dbCreate(const char *in_bytecode, size_t len, u64a platform) {
     DEBUG_PRINTF("db size %zu\n", db_len);
     DEBUG_PRINTF("db platform %llx\n", platform);
 
-    struct hs_database *db = (struct hs_database *)hs_database_alloc(db_len);
+    struct hs_database *db = static_cast<struct hs_database *>(hs_database_alloc(db_len));
     if (hs_check_alloc(db) != HS_SUCCESS) {
         hs_database_free(db);
         return nullptr;
@@ -492,7 +492,7 @@ hs_database_t *dbCreate(const char *in_bytecode, size_t len, u64a platform) {
     DEBUG_PRINTF("shift is %zu\n", shift);
 
     db->bytecode = offsetof(struct hs_database, bytes) - shift;
-    char *bytecode = (char *)db + db->bytecode;
+    char *bytecode = reinterpret_cast<char *>(db) + db->bytecode;
     assert(ISALIGNED_CL(bytecode));
 
     db->magic = HS_DB_MAGIC;
@@ -525,7 +525,7 @@ struct hs_database *build(NG &ng, unsigned int *length, u8 pureFlag) {
         throw CompileError("Internal error.");
     }
 
-    const char *bytecode = (const char *)(rose.get());
+    const char *bytecode = reinterpret_cast<const char *>(rose.get());
     const platform_t p = target_to_platform(ng.cc.target_info);
     struct hs_database *db = dbCreate(bytecode, *length, p);
     if (!db) {
diff --git a/src/compiler/error.cpp b/src/compiler/error.cpp
index 07db9819..c4252f7c 100644
--- a/src/compiler/error.cpp
+++ b/src/compiler/error.cpp
@@ -57,15 +57,14 @@ extern const hs_compile_error_t hs_badalloc = {
 namespace ue2 {
 
 hs_compile_error_t *generateCompileError(const string &err, int expression) {
-    hs_compile_error_t *ret =
-        (struct hs_compile_error *)hs_misc_alloc(sizeof(hs_compile_error_t));
+    hs_compile_error_t *ret = static_cast<hs_compile_error_t *>(hs_misc_alloc(sizeof(hs_compile_error_t)));
     if (ret) {
         hs_error_t e = hs_check_alloc(ret);
         if (e != HS_SUCCESS) {
             hs_misc_free(ret);
             return const_cast<hs_compile_error_t *>(&hs_badalloc);
         }
-        char *msg = (char *)hs_misc_alloc(err.size() + 1);
+        char *msg = static_cast<char *>(hs_misc_alloc(err.size() + 1));
         if (msg) {
             e = hs_check_alloc(msg);
             if (e != HS_SUCCESS) {
diff --git a/src/fdr/fdr_compile.cpp b/src/fdr/fdr_compile.cpp
index d15e4537..8127740a 100644
--- a/src/fdr/fdr_compile.cpp
+++ b/src/fdr/fdr_compile.cpp
@@ -127,7 +127,7 @@ void andMask(u8 *dest, const u8 *a, const u8 *b, u32 num_bytes) {
 }
 
 void FDRCompiler::createInitialState(FDR *fdr) {
-    u8 *start = (u8 *)&fdr->start;
+    u8 *start = reinterpret_cast<u8 *>(&fdr->start);
 
     /* initial state should to be 1 in each slot in the bucket up to bucket
      * minlen - 1, and 0 thereafter */
@@ -175,7 +175,7 @@ bytecode_ptr<FDR> FDRCompiler::setupFDR() {
     auto fdr = make_zeroed_bytecode_ptr<FDR>(size, 64);
     assert(fdr); // otherwise would have thrown std::bad_alloc
 
-    u8 *fdr_base = (u8 *)fdr.get();
+    u8 *fdr_base = reinterpret_cast<u8 *>(fdr.get());
 
     // Write header.
     fdr->size = size;
diff --git a/src/fdr/fdr_confirm_compile.cpp b/src/fdr/fdr_confirm_compile.cpp
index 625d1b14..cfbd23fe 100644
--- a/src/fdr/fdr_confirm_compile.cpp
+++ b/src/fdr/fdr_confirm_compile.cpp
@@ -58,7 +58,7 @@ u64a make_u64a_mask(const vector<u8> &v) {
     u64a mask = 0;
     size_t vlen = v.size();
     size_t len = std::min(vlen, sizeof(mask));
-    unsigned char *m = (unsigned char *)&mask;
+    u8 *m = reinterpret_cast<u8 *>(&mask);
     memcpy(m + sizeof(mask) - len, &v[vlen - len], len);
     return mask;
 }
@@ -245,10 +245,10 @@ bytecode_ptr<FDRConfirm> getFDRConfirm(const vector<hwlmLiteral> &lits,
     fdrc->groups = gm;
 
     // After the FDRConfirm, we have the lit index array.
-    u8 *fdrc_base = (u8 *)fdrc.get();
+    u8 *fdrc_base = reinterpret_cast<u8 *>(fdrc.get());
     u8 *ptr = fdrc_base + sizeof(*fdrc);
     ptr = ROUNDUP_PTR(ptr, alignof(u32));
-    u32 *bitsToLitIndex = (u32 *)ptr;
+    u32 *bitsToLitIndex = reinterpret_cast<u32 *>(ptr);
     ptr += bitsToLitIndexSize;
 
     // After the lit index array, we have the LitInfo structures themselves,
@@ -265,7 +265,7 @@ bytecode_ptr<FDRConfirm> getFDRConfirm(const vector<hwlmLiteral> &lits,
         LiteralIndex litIdx = *i;
 
         // Write LitInfo header.
-        LitInfo &finalLI = *(LitInfo *)ptr;
+        LitInfo &finalLI = *(reinterpret_cast<LitInfo *>(ptr));
         finalLI = tmpLitInfo[litIdx];
 
         ptr += sizeof(LitInfo); // String starts directly after LitInfo.
@@ -317,7 +317,7 @@ setupFullConfs(const vector<hwlmLiteral> &lits,
     auto buf = make_zeroed_bytecode_ptr<u8>(totalSize, 64);
     assert(buf); // otherwise would have thrown std::bad_alloc
 
-    u32 *confBase = (u32 *)buf.get();
+    u32 *confBase = reinterpret_cast<u32 *>(buf.get());
     u8 *ptr = buf.get() + totalConfSwitchSize;
     assert(ISALIGNED_CL(ptr));
 
diff --git a/src/fdr/flood_compile.cpp b/src/fdr/flood_compile.cpp
index ff805ca3..6811fc95 100644
--- a/src/fdr/flood_compile.cpp
+++ b/src/fdr/flood_compile.cpp
@@ -208,8 +208,8 @@ bytecode_ptr<u8> setupFDRFloodControl(const vector<hwlmLiteral> &lits,
     auto buf = make_zeroed_bytecode_ptr<u8>(totalSize, 16);
     assert(buf); // otherwise would have thrown std::bad_alloc
 
-    u32 *floodHeader = (u32 *)buf.get();
-    FDRFlood *layoutFlood = (FDRFlood *)(buf.get() + floodHeaderSize);
+    u32 *floodHeader = reinterpret_cast<u32 *>(buf.get());
+    FDRFlood *layoutFlood = reinterpret_cast<FDRFlood *>(buf.get() + floodHeaderSize);
 
     u32 currentFloodIndex = 0;
     for (const auto &m : flood2chars) {
diff --git a/src/fdr/teddy_compile.cpp b/src/fdr/teddy_compile.cpp
index 23b70bb7..821a69e2 100644
--- a/src/fdr/teddy_compile.cpp
+++ b/src/fdr/teddy_compile.cpp
@@ -328,7 +328,7 @@ bool pack(const vector<hwlmLiteral> &lits,
 
 static
 void initReinforcedTable(u8 *rmsk) {
-    u64a *mask = (u64a *)rmsk;
+    u64a *mask = reinterpret_cast<u64a *>(rmsk);
     fill_n(mask, N_CHARS, 0x00ffffffffffffffULL);
 }
 
@@ -576,8 +576,8 @@ bytecode_ptr<FDR> TeddyCompiler::build() {
     auto fdr = make_zeroed_bytecode_ptr<FDR>(size, 64);
     assert(fdr); // otherwise would have thrown std::bad_alloc
 
-    Teddy *teddy = (Teddy *)fdr.get(); // ugly
-    u8 *teddy_base = (u8 *)teddy;
+    Teddy *teddy = reinterpret_cast<Teddy *>(fdr.get()); // ugly
+    u8 *teddy_base = reinterpret_cast<u8 *>(teddy);
 
     // Write header.
     teddy->size = size;
diff --git a/src/hs.cpp b/src/hs.cpp
index 61e46148..22a9043b 100644
--- a/src/hs.cpp
+++ b/src/hs.cpp
@@ -589,7 +589,7 @@ hs_error_t hs_expression_info_int(const char *expression, unsigned int flags,
         return HS_COMPILER_ERROR;
     }
 
-    hs_expr_info *rv = (hs_expr_info *)hs_misc_alloc(sizeof(*rv));
+    hs_expr_info *rv = static_cast<hs_expr_info *>(hs_misc_alloc(sizeof(*rv)));
     if (!rv) {
         *error = const_cast<hs_compile_error_t *>(&hs_enomem);
         return HS_COMPILER_ERROR;
diff --git a/src/hwlm/hwlm_build.cpp b/src/hwlm/hwlm_build.cpp
index 73f05921..bb83849b 100644
--- a/src/hwlm/hwlm_build.cpp
+++ b/src/hwlm/hwlm_build.cpp
@@ -155,6 +155,7 @@ bytecode_ptr<HWLM> hwlmBuild(const HWLMProto &proto, const CompileContext &cc,
     auto h = make_zeroed_bytecode_ptr<HWLM>(hwlm_len, 64);
 
     h->type = proto.engType;
+    // cppcheck-suppress cstyleCast
     memcpy(HWLM_DATA(h.get()), eng.get(), engSize);
 
     return h;
@@ -218,10 +219,12 @@ size_t hwlmSize(const HWLM *h) {
     switch (h->type) {
     case HWLM_ENGINE_NOOD:
-        engSize = noodSize((const noodTable *)HWLM_C_DATA(h));
+        // cppcheck-suppress cstyleCast
+        engSize = noodSize(reinterpret_cast<const noodTable *>(HWLM_C_DATA(h)));
         break;
     case HWLM_ENGINE_FDR:
-        engSize = fdrSize((const FDR *)HWLM_C_DATA(h));
+        // cppcheck-suppress cstyleCast
+        engSize = fdrSize(reinterpret_cast<const FDR *>(HWLM_C_DATA(h)));
         break;
     }
 
diff --git a/src/hwlm/noodle_build.cpp b/src/hwlm/noodle_build.cpp
index a0128d0a..74dfbd2c 100644
--- a/src/hwlm/noodle_build.cpp
+++ b/src/hwlm/noodle_build.cpp
@@ -56,7 +56,7 @@ u64a make_u64a_mask(const vector<u8> &v) {
 
     u64a mask = 0;
     size_t len = v.size();
-    unsigned char *m = (unsigned char *)&mask;
+    u8 *m = reinterpret_cast<u8 *>(&mask);
     DEBUG_PRINTF("making mask len %zu\n", len);
     memcpy(m, &v[0], len);
     return mask;
diff --git a/src/nfa/accel_dfa_build_strat.cpp b/src/nfa/accel_dfa_build_strat.cpp
index 7139d5be..249d39c9 100644
--- a/src/nfa/accel_dfa_build_strat.cpp
+++ b/src/nfa/accel_dfa_build_strat.cpp
@@ -426,7 +426,7 @@ void
 accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
                                   const AccelScheme &info, void *accel_out) {
-    AccelAux *accel = (AccelAux *)accel_out;
+    AccelAux *accel = reinterpret_cast<AccelAux *>(accel_out);
 
     DEBUG_PRINTF("accelerations scheme has offset s%u/d%u\n", info.offset,
                  info.double_offset);
 
@@ -473,7 +473,8 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
         u8 c1 = info.double_byte.begin()->first & m1;
         u8 c2 = info.double_byte.begin()->second & m2;
 #ifdef HAVE_SVE2
-        if (vermicelliDoubleMasked16Build(c1, c2, m1, m2, (u8 *)&accel->mdverm16.mask)) {
+        if (vermicelliDoubleMasked16Build(c1, c2, m1, m2,
+                                          reinterpret_cast<u8 *>(&accel->mdverm16.mask))) {
             accel->accel_type = ACCEL_DVERM16_MASKED;
             accel->mdverm16.offset = verify_u8(info.double_offset);
             accel->mdverm16.c1 = c1;
@@ -482,8 +483,9 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
                          c1, c2);
             return;
         } else if (info.double_byte.size() <= 8 &&
-                   vermicelliDouble16Build(info.double_byte, (u8 *)&accel->dverm16.mask,
-                                           (u8 *)&accel->dverm16.firsts)) {
+                   vermicelliDouble16Build(info.double_byte,
+                                           reinterpret_cast<u8 *>(&accel->dverm16.mask),
+                                           reinterpret_cast<u8 *>(&accel->dverm16.firsts))) {
             accel->accel_type = ACCEL_DVERM16;
             accel->dverm16.offset = verify_u8(info.double_offset);
             DEBUG_PRINTF("building double16-vermicelli\n");
@@ -503,8 +505,9 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
         }
 #ifdef HAVE_SVE2
         if (info.double_byte.size() <= 8 &&
-            vermicelliDouble16Build(info.double_byte, (u8 *)&accel->dverm16.mask,
-                                    (u8 *)&accel->dverm16.firsts)) {
+            vermicelliDouble16Build(info.double_byte,
+                                    reinterpret_cast<u8 *>(&accel->dverm16.mask),
+                                    reinterpret_cast<u8 *>(&accel->dverm16.firsts))) {
             accel->accel_type = ACCEL_DVERM16;
             accel->dverm16.offset = verify_u8(info.double_offset);
             DEBUG_PRINTF("building double16-vermicelli\n");
@@ -515,9 +518,11 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
 
     if (double_byte_ok(info) &&
         shuftiBuildDoubleMasks(
-            info.double_cr, info.double_byte, (u8 *)&accel->dshufti.lo1,
-            (u8 *)&accel->dshufti.hi1, (u8 *)&accel->dshufti.lo2,
-            (u8 *)&accel->dshufti.hi2)) {
+            info.double_cr, info.double_byte,
+            reinterpret_cast<u8 *>(&accel->dshufti.lo1),
+            reinterpret_cast<u8 *>(&accel->dshufti.hi1),
+            reinterpret_cast<u8 *>(&accel->dshufti.lo2),
+            reinterpret_cast<u8 *>(&accel->dshufti.hi2))) {
         accel->accel_type = ACCEL_DSHUFTI;
         accel->dshufti.offset = verify_u8(info.double_offset);
         DEBUG_PRINTF("state %hu is double shufti\n", this_idx);
@@ -549,7 +554,7 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
 #ifdef HAVE_SVE2
     if (info.cr.count() <= 16) {
         accel->accel_type = ACCEL_VERM16;
-        vermicelli16Build(info.cr, (u8 *)&accel->verm16.mask);
+        vermicelli16Build(info.cr, reinterpret_cast<u8 *>(&accel->verm16.mask));
         DEBUG_PRINTF("state %hu is vermicelli16\n", this_idx);
         return;
     }
@@ -562,16 +567,18 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
     }
 
     accel->accel_type = ACCEL_SHUFTI;
-    if (-1 != shuftiBuildMasks(info.cr, (u8 *)&accel->shufti.lo,
-                               (u8 *)&accel->shufti.hi)) {
+    if (-1 != shuftiBuildMasks(info.cr,
+                               reinterpret_cast<u8 *>(&accel->shufti.lo),
+                               reinterpret_cast<u8 *>(&accel->shufti.hi))) {
         DEBUG_PRINTF("state %hu is shufti\n", this_idx);
         return;
     }
 
     assert(!info.cr.none());
     accel->accel_type = ACCEL_TRUFFLE;
-    truffleBuildMasks(info.cr, (u8 *)&accel->truffle.mask1,
-                      (u8 *)&accel->truffle.mask2);
+    truffleBuildMasks(info.cr,
+                      reinterpret_cast<u8 *>(&accel->truffle.mask1),
+                      reinterpret_cast<u8 *>(&accel->truffle.mask2));
     DEBUG_PRINTF("state %hu is truffle\n", this_idx);
 }
diff --git a/src/nfa/accelcompile.cpp b/src/nfa/accelcompile.cpp
index e0be910d..5da0df82 100644
--- a/src/nfa/accelcompile.cpp
+++ b/src/nfa/accelcompile.cpp
@@ -84,8 +84,9 @@ void buildAccelSingle(const AccelInfo &info, AccelAux *aux) {
 #endif
 
     DEBUG_PRINTF("attempting shufti for %zu chars\n", outs);
-    if (-1 != shuftiBuildMasks(info.single_stops, (u8 *)&aux->shufti.lo,
-                               (u8 *)&aux->shufti.hi)) {
+    if (-1 != shuftiBuildMasks(info.single_stops,
+                               reinterpret_cast<u8 *>(&aux->shufti.lo),
+                               reinterpret_cast<u8 *>(&aux->shufti.hi))) {
         aux->accel_type = ACCEL_SHUFTI;
         aux->shufti.offset = offset;
         DEBUG_PRINTF("shufti built OK\n");
@@ -98,8 +99,9 @@ void buildAccelSingle(const AccelInfo &info, AccelAux *aux) {
     DEBUG_PRINTF("building Truffle for %zu chars\n", outs);
     aux->accel_type = ACCEL_TRUFFLE;
     aux->truffle.offset = offset;
-    truffleBuildMasks(info.single_stops, (u8 *)&aux->truffle.mask1,
-                      (u8 *)&aux->truffle.mask2);
+    truffleBuildMasks(info.single_stops,
+                      reinterpret_cast<u8 *>(&aux->truffle.mask1),
+                      reinterpret_cast<u8 *>(&aux->truffle.mask2));
     return;
 }
 
@@ -219,8 +221,9 @@ void buildAccelDouble(const AccelInfo &info, AccelAux *aux) {
                      c1, c2);
         return;
     } else if (outs2 <= 8 &&
-               vermicelliDouble16Build(info.double_stop2, (u8 *)&aux->dverm16.mask,
-                                       (u8 *)&aux->dverm16.firsts)) {
+               vermicelliDouble16Build(info.double_stop2,
+                                       reinterpret_cast<u8 *>(&aux->dverm16.mask),
+                                       reinterpret_cast<u8 *>(&aux->dverm16.firsts))) {
         aux->accel_type = ACCEL_DVERM16;
         aux->dverm16.offset = offset;
         DEBUG_PRINTF("building double16-vermicelli\n");
@@ -254,9 +257,11 @@ void buildAccelDouble(const AccelInfo &info, AccelAux *aux) {
     aux->accel_type = ACCEL_DSHUFTI;
     aux->dshufti.offset = offset;
     if (shuftiBuildDoubleMasks(
-            info.double_stop1, info.double_stop2, (u8 *)&aux->dshufti.lo1,
-            (u8 *)&aux->dshufti.hi1, (u8 *)&aux->dshufti.lo2,
-            (u8 *)&aux->dshufti.hi2)) {
+            info.double_stop1, info.double_stop2,
+            reinterpret_cast<u8 *>(&aux->dshufti.lo1),
+            reinterpret_cast<u8 *>(&aux->dshufti.hi1),
+            reinterpret_cast<u8 *>(&aux->dshufti.lo2),
+            reinterpret_cast<u8 *>(&aux->dshufti.hi2))) {
         return;
     }
 }
diff --git a/src/nfa/castlecompile.cpp b/src/nfa/castlecompile.cpp
index 9667413c..28f1aed9 100644
--- a/src/nfa/castlecompile.cpp
+++ b/src/nfa/castlecompile.cpp
@@ -106,25 +106,27 @@ void writeCastleScanEngine(const CharReach &cr, Castle *c) {
 #ifdef HAVE_SVE2
     if (cr.count() <= 16) {
         c->type = CASTLE_NVERM16;
-        vermicelli16Build(cr, (u8 *)&c->u.verm16.mask);
+        vermicelli16Build(cr, reinterpret_cast<u8 *>(&c->u.verm16.mask));
         return;
     }
 
     if (negated.count() <= 16) {
         c->type = CASTLE_VERM16;
-        vermicelli16Build(negated, (u8 *)&c->u.verm16.mask);
+        vermicelli16Build(negated, reinterpret_cast<u8 *>(&c->u.verm16.mask));
         return;
     }
 #endif // HAVE_SVE2
 
-    if (shuftiBuildMasks(negated, (u8 *)&c->u.shuf.mask_lo,
-                         (u8 *)&c->u.shuf.mask_hi) != -1) {
+    if (shuftiBuildMasks(negated,
+                         reinterpret_cast<u8 *>(&c->u.shuf.mask_lo),
+                         reinterpret_cast<u8 *>(&c->u.shuf.mask_hi)) != -1) {
         c->type = CASTLE_SHUFTI;
         return;
     }
 
     c->type = CASTLE_TRUFFLE;
-    truffleBuildMasks(negated, (u8 *)(u8 *)&c->u.truffle.mask1,
-                      (u8 *)&c->u.truffle.mask2);
+    truffleBuildMasks(negated,
+                      reinterpret_cast<u8 *>(&c->u.truffle.mask1),
+                      reinterpret_cast<u8 *>(&c->u.truffle.mask2));
 }
 
 static
@@ -602,9 +604,9 @@ buildCastle(const CastleProto &proto,
     nfa->minWidth = verify_u32(minWidth);
     nfa->maxWidth = maxWidth.is_finite() ? verify_u32(maxWidth) : 0;
 
-    char * const base_ptr = (char *)nfa.get() + sizeof(NFA);
+    char * const base_ptr = reinterpret_cast<char *>(nfa.get()) + sizeof(NFA);
     char *ptr = base_ptr;
-    Castle *c = (Castle *)ptr;
+    Castle *c = reinterpret_cast<Castle *>(ptr);
     c->numRepeats = verify_u32(subs.size());
     c->numGroups = exclusiveInfo.numGroups;
     c->exclusive = verify_s8(exclusive);
@@ -615,7 +617,7 @@ buildCastle(const CastleProto &proto,
     writeCastleScanEngine(cr, c);
 
     ptr += sizeof(Castle);
-    SubCastle *subCastles = ((SubCastle *)(ROUNDUP_PTR(ptr, alignof(u32))));
+    SubCastle *subCastles = reinterpret_cast<SubCastle *>(ROUNDUP_PTR(ptr, alignof(u32)));
     copy(subs.begin(), subs.end(), subCastles);
 
     u32 length = 0;
@@ -625,16 +627,16 @@ buildCastle(const CastleProto &proto,
         SubCastle *sub = &subCastles[i];
         sub->repeatInfoOffset = offset;
 
-        ptr = (char *)sub + offset;
+        ptr = reinterpret_cast<char *>(sub) + offset;
         memcpy(ptr, &infos[i], sizeof(RepeatInfo));
 
         if (patchSize[i]) {
-            RepeatInfo *info = (RepeatInfo *)ptr;
-            u64a *table = ((u64a *)(ROUNDUP_PTR(((char *)(info) +
-                                   sizeof(*info)), alignof(u64a))));
+            RepeatInfo *info = reinterpret_cast<RepeatInfo *>(ptr);
+            u64a *table = reinterpret_cast<u64a *>(ROUNDUP_PTR(
+                reinterpret_cast<char *>(info) + sizeof(*info), alignof(u64a)));
             copy(tables.begin() + tableIdx, tables.begin() + tableIdx +
                  patchSize[i], table);
-            u32 diff = (char *)table - (char *)info +
+            u32 diff = reinterpret_cast<char *>(table) - reinterpret_cast<char *>(info) +
                        sizeof(u64a) * patchSize[i];
             info->length = diff;
             length += diff;
@@ -657,8 +659,6 @@ buildCastle(const CastleProto &proto,
     if (!stale_iter.empty()) {
         c->staleIterOffset = verify_u32(ptr - base_ptr);
         copy_bytes(ptr, stale_iter);
-        // Removed unused increment operation
-        // ptr += byte_length(stale_iter);
     }
 
     return nfa;
diff --git a/src/nfa/goughcompile.cpp b/src/nfa/goughcompile.cpp
index e481e15c..6366c76f 100644
--- a/src/nfa/goughcompile.cpp
+++ b/src/nfa/goughcompile.cpp
@@ -1077,8 +1077,9 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
         return bytecode_ptr<NFA>(nullptr);
     }
 
-    u8 alphaShift
-        = ((const mcclellan *)getImplNfa(basic_dfa.get()))->alphaShift;
+    // cppcheck-suppress cstyleCast
+    const auto *nfa = static_cast<const mcclellan *>(getImplNfa(basic_dfa.get()));
+    u8 alphaShift = nfa->alphaShift;
     u32 edge_count = (1U << alphaShift) * raw.states.size();
 
     u32 curr_offset = ROUNDUP_N(basic_dfa->length, 4);
@@ -1119,8 +1120,8 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
     u32 gough_size = ROUNDUP_N(curr_offset, 16);
     auto gough_dfa = make_zeroed_bytecode_ptr<NFA>(gough_size);
 
-    memcpy(gough_dfa.get(), basic_dfa.get(), basic_dfa->length);
-    memcpy((char *)gough_dfa.get() + haig_offset, &gi, sizeof(gi));
+    memcpy(reinterpret_cast<char *>(gough_dfa.get()), basic_dfa.get(), basic_dfa->length);
+    memcpy(reinterpret_cast<char *>(gough_dfa.get()) + haig_offset, &gi, sizeof(gi));
     if (gough_dfa->type == MCCLELLAN_NFA_16) {
         gough_dfa->type = GOUGH_NFA_16;
     } else {
@@ -1133,18 +1134,19 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
     gough_dfa->streamStateSize = base_state_size + slot_count * somPrecision;
     gough_dfa->scratchStateSize = (u32)(16 + scratch_slot_count * sizeof(u64a));
 
-    mcclellan *m = (mcclellan *)getMutableImplNfa(gough_dfa.get());
+    // cppcheck-suppress cstyleCast
+    auto *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(gough_dfa.get()));
     m->haig_offset = haig_offset;
 
     /* update nfa length, haig_info offset (leave mcclellan length alone) */
     gough_dfa->length = gough_size;
 
     /* copy in blocks */
-    copy_bytes((u8 *)gough_dfa.get() + edge_prog_offset, edge_blocks);
+    copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + edge_prog_offset, edge_blocks);
     if (top_prog_offset) {
-        copy_bytes((u8 *)gough_dfa.get() + top_prog_offset, top_blocks);
+        copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + top_prog_offset, top_blocks);
     }
-    copy_bytes((u8 *)gough_dfa.get() + prog_base_offset, temp_blocks);
+    copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + prog_base_offset, temp_blocks);
 
     return gough_dfa;
 }
@@ -1177,7 +1179,7 @@ AccelScheme gough_build_strat::find_escape_strings(dstate_id_t this_idx) const {
 void gough_build_strat::buildAccel(dstate_id_t this_idx, const AccelScheme &info,
                                    void *accel_out) {
     assert(mcclellan_build_strat::accelSize() == sizeof(AccelAux));
-    gough_accel *accel = (gough_accel *)accel_out;
+    gough_accel *accel = reinterpret_cast<gough_accel *>(accel_out);
     /* build a plain accelaux so we can work out where we can get to */
     mcclellan_build_strat::buildAccel(this_idx, info, &accel->accel);
     DEBUG_PRINTF("state %hu is accel with type %hhu\n", this_idx,
@@ -1315,7 +1317,8 @@ void raw_gough_report_info_impl::fillReportLists(NFA *n, size_t base_offset,
     for (const raw_gough_report_list &r : rl) {
         ro.emplace_back(base_offset);
 
-        gough_report_list *p = (gough_report_list *)((char *)n + base_offset);
+        u8 *n_ptr = reinterpret_cast<u8 *>(n);
+        gough_report_list *p = reinterpret_cast<gough_report_list *>(n_ptr + base_offset);
         u32 i = 0;
 
         for (const som_report &sr : r.reports) {
diff --git a/src/nfa/goughcompile_reg.cpp b/src/nfa/goughcompile_reg.cpp
index 4b0c90f1..83d0c4cc 100644
--- a/src/nfa/goughcompile_reg.cpp
+++ b/src/nfa/goughcompile_reg.cpp
@@ -194,7 +194,7 @@ void handle_pending_vars(GoughSSAVar *def, const GoughGraph &g,
     if (contains(aux.containing_v, var)) {
         /* def is used by join vertex, value only needs to be live on some
          * incoming edges */
-        const GoughSSAVarJoin *vj = (GoughSSAVarJoin *)var;
+        const GoughSSAVarJoin *vj = reinterpret_cast<const GoughSSAVarJoin *>(var);
         const flat_set<GoughEdge> &live_edges = vj->get_edges_for_input(def);
 
         for (const auto &e : live_edges) {
diff --git a/src/nfa/mcsheng_compile.cpp b/src/nfa/mcsheng_compile.cpp
index 9bf97c41..0ca31c99 100644
--- a/src/nfa/mcsheng_compile.cpp
+++ b/src/nfa/mcsheng_compile.cpp
@@ -144,11 +144,11 @@ u8 dfa_info::getAlphaShift() const {
 
 static
 mstate_aux *getAux(NFA *n, dstate_id_t i) {
-    const mcsheng *m = (mcsheng *)getMutableImplNfa(n);
-    mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
+    const mcsheng *m = reinterpret_cast<const mcsheng *>(getMutableImplNfa(n));
+    mstate_aux *aux_base = reinterpret_cast<mstate_aux *>(reinterpret_cast<char *>(n) + m->aux_offset);
 
     mstate_aux *aux = aux_base + i;
-    assert((const char *)aux < (const char *)n + m->length);
+    assert(reinterpret_cast<const char *>(aux) < reinterpret_cast<const char *>(n) + m->length);
     return aux;
 }
 
@@ -192,8 +192,8 @@ void createShuffleMasks(mcsheng *m, const dfa_info &info,
     }
     for (u32 i = 0; i < N_CHARS; i++) {
         assert(info.alpha_remap[i] != info.alpha_remap[TOP]);
-        memcpy((u8 *)&m->sheng_masks[i],
-               (u8 *)masks[info.alpha_remap[i]].data(), sizeof(m128));
+        memcpy(reinterpret_cast<u8 *>(&m->sheng_masks[i]),
+               reinterpret_cast<const u8 *>(masks[info.alpha_remap[i]].data()), sizeof(m128));
     }
     m->sheng_end = sheng_end;
     m->sheng_accel_limit = sheng_end - 1;
@@ -223,7 +223,7 @@ void populateBasicInfo(size_t state_size, const dfa_info &info,
         nfa->type = MCSHENG_NFA_16;
     }
 
-    mcsheng *m = (mcsheng *)getMutableImplNfa(nfa);
+    mcsheng *m = reinterpret_cast<mcsheng *>(getMutableImplNfa(nfa));
     for (u32 i = 0; i < 256; i++) {
         m->remap[i] = verify_u8(info.alpha_remap[i]);
     }
@@ -244,11 +244,11 @@ void populateBasicInfo(size_t state_size, const dfa_info &info,
 
 static
 mstate_aux *getAux64(NFA *n, dstate_id_t i) {
-    const mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(n);
-    mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
+    const mcsheng64 *m = reinterpret_cast<const mcsheng64 *>(getMutableImplNfa(n));
+    mstate_aux *aux_base = reinterpret_cast<mstate_aux *>(reinterpret_cast<char *>(n) + m->aux_offset);
 
     mstate_aux *aux = aux_base + i;
-    assert((const char *)aux < (const char *)n + m->length);
+    assert(reinterpret_cast<const char *>(aux) < reinterpret_cast<const char *>(n) + m->length);
     return aux;
 }
 
@@ -292,8 +292,8 @@ void createShuffleMasks64(mcsheng64 *m, const dfa_info &info,
     }
     for (u32 i = 0; i < N_CHARS; i++) {
         assert(info.alpha_remap[i] != info.alpha_remap[TOP]);
-        memcpy((u8 *)&m->sheng_succ_masks[i],
-               (u8 *)masks[info.alpha_remap[i]].data(), sizeof(m512));
+        memcpy(reinterpret_cast<u8 *>(&m->sheng_succ_masks[i]),
+               reinterpret_cast<const u8 *>(masks[info.alpha_remap[i]].data()), sizeof(m512));
     }
     m->sheng_end = sheng_end;
     m->sheng_accel_limit = sheng_end - 1;
@@ -323,7 +323,7 @@ void populateBasicInfo64(size_t state_size, const dfa_info &info,
         nfa->type = MCSHENG_64_NFA_16;
     }
 
-    mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+    mcsheng64 *m = reinterpret_cast<mcsheng64 *>(getMutableImplNfa(nfa));
     for (u32 i = 0; i < 256; i++) {
         m->remap[i] = verify_u8(info.alpha_remap[i]);
     }
@@ -650,7 +650,7 @@ void fill_in_aux_info(NFA *nfa, const dfa_info &info,
                       const vector<u32> &reports_eod,
                       u32 report_base_offset,
                       const raw_report_info &ri) {
-    mcsheng *m = (mcsheng *)getMutableImplNfa(nfa);
+    mcsheng *m = reinterpret_cast<mcsheng *>(getMutableImplNfa(nfa));
 
     vector<u32> reportOffsets;
 
@@ -667,7 +667,7 @@ void fill_in_aux_info(NFA *nfa, const dfa_info &info,
             assert(accel_offset <= accel_end_offset);
            assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
             info.strat.buildAccel(i, accel_escape_info.at(i),
-                                  (void *)((char *)m + this_aux->accel_offset));
+                                  reinterpret_cast<void *>(reinterpret_cast<char *>(m) + this_aux->accel_offset));
         }
     }
 }
@@ -692,7 +692,7 @@ static
 void fill_in_succ_table_16(NFA *nfa, const dfa_info &info,
                            dstate_id_t sheng_end,
                            UNUSED dstate_id_t sherman_base) {
-    u16 *succ_table = (u16 *)((char *)nfa + sizeof(NFA) + sizeof(mcsheng));
+    u16 *succ_table = reinterpret_cast<u16 *>(reinterpret_cast<char *>(nfa) + sizeof(NFA) + sizeof(mcsheng));
 
     u8 alphaShift = info.getAlphaShift();
     assert(alphaShift <= 8);
@@ -724,7 +724,7 @@ void fill_in_aux_info64(NFA *nfa, const dfa_info &info,
                         const vector<u32> &reports_eod,
                         u32 report_base_offset,
                         const raw_report_info &ri) {
-    mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+    mcsheng64 *m = reinterpret_cast<mcsheng64 *>(getMutableImplNfa(nfa));
 
     vector<u32> reportOffsets;
 
@@ -741,7 +741,7 @@ void fill_in_aux_info64(NFA *nfa, const dfa_info &info,
             assert(accel_offset <= accel_end_offset);
             assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
             info.strat.buildAccel(i, accel_escape_info.at(i),
-                                  (void *)((char *)m + this_aux->accel_offset));
+                                  reinterpret_cast<void *>(reinterpret_cast<char *>(m) + this_aux->accel_offset));
         }
     }
 }
@@ -766,7 +766,7 @@ static
 void fill_in_succ_table_64_16(NFA *nfa, const dfa_info &info,
                               dstate_id_t sheng_end,
                               UNUSED dstate_id_t sherman_base) {
-    u16 *succ_table = (u16 *)((char *)nfa + sizeof(NFA) + sizeof(mcsheng64));
+    u16 *succ_table = reinterpret_cast<u16 *>(reinterpret_cast<char *>(nfa) + sizeof(NFA) + sizeof(mcsheng64));
 
     u8 alphaShift = info.getAlphaShift();
     assert(alphaShift <= 8);
@@ -956,8 +956,8 @@ bool is_cyclic_near(const raw_dfa &raw, dstate_id_t root) {
 
 static
 void fill_in_sherman(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit) {
-    char *nfa_base = (char *)nfa;
-    mcsheng *m = (mcsheng *)getMutableImplNfa(nfa);
+    char *nfa_base = reinterpret_cast<char *>(nfa);
+    mcsheng *m = reinterpret_cast<mcsheng *>(getMutableImplNfa(nfa));
     char *sherman_table = nfa_base + m->sherman_offset;
 
     assert(ISALIGNED_16(sherman_table));
@@ -978,10 +978,10 @@ void fill_in_sherman(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit) {
         assert(len <= 9);
         dstate_id_t d = info.states[i].daddy;
 
-        *(u8 *)(curr_sherman_entry + SHERMAN_TYPE_OFFSET) = SHERMAN_STATE;
-        *(u8 *)(curr_sherman_entry + SHERMAN_LEN_OFFSET) = len;
-        *(u16 *)(curr_sherman_entry + SHERMAN_DADDY_OFFSET) = info.implId(d);
-        u8 *chars = (u8 *)(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
+        *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_TYPE_OFFSET)) = SHERMAN_STATE;
+        *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_LEN_OFFSET)) = len;
+        *(reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_DADDY_OFFSET)) = info.implId(d);
+        u8 *chars = reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
 
         for (u16 s = 0; s < info.impl_alpha_size; s++) {
             if (info.states[i].next[s] != info.states[d].next[s]) {
@@ -989,7 +989,7 @@ void fill_in_sherman(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit) {
             }
         }
 
-        u16 *states = (u16 *)(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
+        u16 *states = reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
         for (u16 s = 0; s < info.impl_alpha_size; s++) {
             if (info.states[i].next[s] != info.states[d].next[s]) {
                 DEBUG_PRINTF("s overrider %hu dad %hu char next %hu\n", fs,
@@ -997,7 +997,7 @@ void fill_in_sherman(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit) {
                              info.implId(info.states[i].next[s]));
                 u16 entry_val = info.implId(info.states[i].next[s]);
                 entry_val |= get_edge_flags(nfa, entry_val);
-                unaligned_store_u16((u8 *)states++, entry_val);
+                unaligned_store_u16(reinterpret_cast<u8 *>(states++), entry_val);
             }
         }
     }
@@ -1063,7 +1063,7 @@ bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
     assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
 
     auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
-    mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
+    mcsheng *m = reinterpret_cast<mcsheng *>(getMutableImplNfa(nfa.get()));
 
     populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
                       accel_escape_info.size(), arb, single, nfa.get());
@@ -1091,7 +1091,7 @@ bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
 static
 void fill_in_succ_table_8(NFA *nfa, const dfa_info &info,
                           dstate_id_t sheng_end) {
-    u8 *succ_table = (u8 *)nfa + sizeof(NFA) + sizeof(mcsheng);
+    u8 *succ_table = reinterpret_cast<u8 *>(nfa) + sizeof(NFA) + sizeof(mcsheng);
 
     u8 alphaShift = info.getAlphaShift();
     assert(alphaShift <= 8);
@@ -1114,8 +1114,8 @@ void fill_in_succ_table_8(NFA *nfa, const dfa_info &info,
 
 static
 void fill_in_sherman64(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit) {
-    char *nfa_base = (char *)nfa;
-    mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+    char *nfa_base = reinterpret_cast<char *>(nfa);
+    mcsheng64 *m = reinterpret_cast<mcsheng64 *>(getMutableImplNfa(nfa));
     char *sherman_table = nfa_base + m->sherman_offset;
 
     assert(ISALIGNED_16(sherman_table));
@@ -1136,10 +1136,10 @@ void fill_in_sherman64(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit)
         assert(len <= 9);
         dstate_id_t d = info.states[i].daddy;
 
-        *(u8 *)(curr_sherman_entry + SHERMAN_TYPE_OFFSET) = SHERMAN_STATE;
-        *(u8 *)(curr_sherman_entry + SHERMAN_LEN_OFFSET) = len;
-        *(u16 *)(curr_sherman_entry + SHERMAN_DADDY_OFFSET) = info.implId(d);
-        u8 *chars = (u8 *)(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
+        *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_TYPE_OFFSET)) = SHERMAN_STATE;
+        *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_LEN_OFFSET)) = len;
+        *(reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_DADDY_OFFSET)) = info.implId(d);
+        u8 *chars = reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
 
         for (u16 s = 0; s < info.impl_alpha_size; s++) {
             if (info.states[i].next[s] != info.states[d].next[s]) {
@@ -1147,7 +1147,7 @@ void fill_in_sherman64(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit)
             }
         }
 
-        u16 *states = (u16 *)(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
+        u16 *states = reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
         for (u16 s = 0; s < info.impl_alpha_size; s++) {
             if (info.states[i].next[s] != info.states[d].next[s]) {
                 DEBUG_PRINTF("s overrider %hu dad %hu char next %hu\n", fs,
@@ -1155,7 +1155,7 @@ void fill_in_sherman64(NFA *nfa, const dfa_info &info, UNUSED u16 sherman_limit)
                             info.implId(info.states[i].next[s]));
                 u16 entry_val = info.implId(info.states[i].next[s]);
                 entry_val |= get_edge_flags64(nfa, entry_val);
-                unaligned_store_u16((u8 *)states++, entry_val);
+                unaligned_store_u16(reinterpret_cast<u8 *>(states++), entry_val);
             }
         }
     }
@@ -1221,7 +1221,7 @@ bytecode_ptr<NFA> mcsheng64Compile16(dfa_info &info, dstate_id_t sheng_end,
     assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
 
     auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
-    mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
+    mcsheng64 *m = reinterpret_cast<mcsheng64 *>(getMutableImplNfa(nfa.get()));
 
     populateBasicInfo64(sizeof(u16), info, total_size, aux_offset, accel_offset,
                         accel_escape_info.size(), arb, single, nfa.get());
@@ -1249,7 +1249,7 @@ bytecode_ptr<NFA> mcsheng64Compile16(dfa_info &info, dstate_id_t sheng_end,
 static
 void fill_in_succ_table_64_8(NFA *nfa, const dfa_info &info,
                              dstate_id_t sheng_end) {
-    u8 *succ_table = (u8 *)nfa + sizeof(NFA) + sizeof(mcsheng64);
+    u8 *succ_table = reinterpret_cast<u8 *>(nfa) + sizeof(NFA) + sizeof(mcsheng64);
 
     u8 alphaShift = info.getAlphaShift();
     assert(alphaShift <= 8);
@@ -1347,7 +1347,7 @@ bytecode_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
     assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
 
     auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
-    mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
+    mcsheng *m = reinterpret_cast<mcsheng *>(getMutableImplNfa(nfa.get()));
 
     allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
                     &m->accept_limit_8);
@@ -1400,7 +1400,7 @@ bytecode_ptr<NFA> mcsheng64Compile8(dfa_info &info, dstate_id_t sheng_end,
     assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
 
     auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
-    mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
+    mcsheng64 *m = reinterpret_cast<mcsheng64 *>(getMutableImplNfa(nfa.get()));
 
     allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
                     &m->accept_limit_8);
diff --git a/src/nfa/nfa_internal.h b/src/nfa/nfa_internal.h
index 8cc701b6..544867bc 100644
--- a/src/nfa/nfa_internal.h
+++ b/src/nfa/nfa_internal.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2024, VectorCamp PC
  * Copyright (c) 2021, Arm Limited
  *
  * Redistribution and use in source and binary forms, with or without
@@ -133,6 +134,7 @@ struct ALIGN_CL_DIRECTIVE NFA {
     /* Note: implementation (e.g. a LimEx) directly follows struct in memory */
 } ;
 
+#ifndef __cplusplus
 // Accessor macro for the implementation NFA: we do things this way to avoid
 // type-punning warnings.
 #define getImplNfa(nfa) \
@@ -140,6 +142,13 @@ struct ALIGN_CL_DIRECTIVE NFA {
 
 // Non-const version of the above, used at compile time.
 #define getMutableImplNfa(nfa) ((char *)(nfa) + sizeof(struct NFA))
+#else
+// Same versions without C casts to avoid Cppcheck warnings
+#define getImplNfa(nfa) \
+    (reinterpret_cast<const void *>(reinterpret_cast<const char *>(nfa) + sizeof(struct NFA)))
+
+#define getMutableImplNfa(nfa) (reinterpret_cast<char *>(nfa) + sizeof(struct NFA))
+#endif
 
 static really_inline u32 nfaAcceptsEod(const struct NFA *nfa) {
     return nfa->flags & NFA_ACCEPTS_EOD;
diff --git a/src/nfa/shufti_simd.hpp b/src/nfa/shufti_simd.hpp
index feeb54ab..bdb0ff9f 100644
--- a/src/nfa/shufti_simd.hpp
+++ b/src/nfa/shufti_simd.hpp
@@ -264,7 +264,7 @@ const u8 *shuftiDoubleExecReal(m128 mask1_lo, m128 mask1_hi, m128 mask2_lo, m128
 const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
                      const u8 *buf_end) {
     if (buf_end - buf < VECTORSIZE) {
-        return shuftiFwdSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, buf, buf_end);
+        return shuftiFwdSlow(reinterpret_cast<const u8 *>(&mask_lo), reinterpret_cast<const u8 *>(&mask_hi), buf, buf_end);
     }
     return shuftiExecReal(mask_lo, mask_hi, buf, buf_end);
 }
@@ -272,7 +272,7 @@ const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
 const u8 *rshuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
                       const u8 *buf_end) {
     if (buf_end - buf < VECTORSIZE) {
-        return shuftiRevSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, buf, buf_end);
+        return shuftiRevSlow(reinterpret_cast<const u8 *>(&mask_lo), reinterpret_cast<const u8 *>(&mask_hi), buf, buf_end);
     }
     return rshuftiExecReal(mask_lo, mask_hi, buf, buf_end);
 }
diff --git a/src/parser/Parser.rl b/src/parser/Parser.rl
index e5cbfe2b..ae419ec8 100644
--- a/src/parser/Parser.rl
+++ b/src/parser/Parser.rl
@@ -224,7 +224,7 @@ u8 decodeCtrl(char raw) {
 
 static
 unichar readUtf8CodePoint2c(const char *s) {
-    auto *ts = (const u8 *)s;
+    auto *ts = reinterpret_cast<const u8 *>(s);
     assert(ts[0] >= 0xc0 && ts[0] < 0xe0);
     assert(ts[1] >= 0x80 && ts[1] < 0xc0);
     unichar val = ts[0] & 0x1f;
diff --git a/src/util/alloc.cpp b/src/util/alloc.cpp
index fb20f3d3..a6d30b33 100644
--- a/src/util/alloc.cpp
+++ b/src/util/alloc.cpp
@@ -68,6 +68,7 @@ namespace ue2 {
 #endif
 
 void *aligned_malloc_internal(size_t size, size_t align) {
+    // cppcheck-suppress cstyleCast
     void *mem = nullptr;
     int rv = posix_memalign(&mem, align, size);
     if (rv != 0) {
@@ -104,17 +105,17 @@ void *aligned_zmalloc(size_t size) {
 
     const size_t alloc_size = size + HACK_OFFSET;
 
-    void *mem = aligned_malloc_internal(alloc_size, 64);
+    char *mem = static_cast<char *>(aligned_malloc_internal(alloc_size, 64));
     if (!mem) {
         DEBUG_PRINTF("unable to allocate %zu bytes\n", alloc_size);
         throw std::bad_alloc();
     }
 
-    DEBUG_PRINTF("alloced %p reporting %p\n", mem, (char *)mem + HACK_OFFSET);
+    DEBUG_PRINTF("alloced %p reporting %p\n", mem, mem + HACK_OFFSET);
     assert(ISALIGNED_N(mem, 64));
 
     memset(mem, 0, alloc_size);
-    return (void *)((char *)mem + HACK_OFFSET);
+    return static_cast<void *>(mem + HACK_OFFSET);
 }
 
 /** \brief Free a pointer allocated with \ref aligned_zmalloc. */
@@ -123,7 +124,8 @@ void aligned_free(void *ptr) {
         return;
     }
 
-    void *addr = (void *)((char *)ptr - HACK_OFFSET);
+    char *addr_c = static_cast<char *>(ptr);
+    void *addr = static_cast<void *>(addr_c - HACK_OFFSET);
     DEBUG_PRINTF("asked to free %p freeing %p\n", ptr, addr);
     assert(ISALIGNED_N(addr, 64));
 
diff --git a/src/util/supervector/arch/arm/impl.cpp b/src/util/supervector/arch/arm/impl.cpp
index 845ccea0..17859d61 100644
--- a/src/util/supervector/arch/arm/impl.cpp
+++ b/src/util/supervector/arch/arm/impl.cpp
@@ -511,7 +511,7 @@ really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N)
 template <>
 really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr)
 {
-    return {SuperVector<16>(vld1q_s32((const int32_t *)ptr))};
+    return {SuperVector<16>(vld1q_s32(reinterpret_cast<const int32_t *>(ptr)))};
 }
 
 template <>
@@ -519,7 +519,7 @@ really_inline SuperVector<16> SuperVector<16>::load(void const *ptr)
 {
     assert(ISALIGNED_N(ptr, alignof(SuperVector::size)));
     ptr = vectorscan_assume_aligned(ptr, SuperVector::size);
-    return {SuperVector<16>(vld1q_s32((const int32_t *)ptr))};
+    return {SuperVector<16>(vld1q_s32(reinterpret_cast<const int32_t *>(ptr)))};
 }
 
 template <>
diff --git a/src/util/supervector/arch/x86/impl.cpp b/src/util/supervector/arch/x86/impl.cpp
index 6a7dfa3d..0323d5e5 100644
--- a/src/util/supervector/arch/x86/impl.cpp
+++ b/src/util/supervector/arch/x86/impl.cpp
@@ -508,7 +508,7 @@ really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N)
 template <>
 really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr)
 {
-    return SuperVector<16>(_mm_loadu_si128((const m128 *)ptr));
+    return SuperVector<16>(_mm_loadu_si128(reinterpret_cast<const m128 *>(ptr)));
 }
 
 template <>
@@ -516,14 +516,14 @@ really_inline SuperVector<16> SuperVector<16>::load(void const *ptr)
 {
     assert(ISALIGNED_N(ptr, alignof(SuperVector::size)));
     ptr = vectorscan_assume_aligned(ptr, SuperVector::size);
-    return SuperVector<16>(_mm_load_si128((const m128 *)ptr));
+    return SuperVector<16>(_mm_load_si128(reinterpret_cast<const m128 *>(ptr)));
 }
 
 template <>
 really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len)
 {
     SuperVector mask = Ones_vshr(16 -len);
-    SuperVector v = SuperVector<16>(_mm_loadu_si128((const m128 *)ptr));
+    SuperVector v = SuperVector<16>(_mm_loadu_si128(reinterpret_cast<const m128 *>(ptr)));
     return mask & v;
 }
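Note on the cast pattern this patch standardizes on: when an offset is measured in bytes (aux_offset, haig_offset, HACK_OFFSET, and so on), the arithmetic must be performed on a char */u8 * pointer first and only the resulting address re-typed with reinterpret_cast; casting to the target type first and then adding would scale the offset by sizeof(target). Conversions from void * (allocator results, opaque callback contexts) need only static_cast, with reinterpret_cast reserved for genuinely re-typing raw bytes. A minimal, self-contained sketch of both rules, not taken from the tree — Header, BODY_OFFSET, and the malloc-based allocator are hypothetical stand-ins:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    struct Header { std::uint32_t magic; };
    constexpr std::size_t BODY_OFFSET = 16; // byte offset (hypothetical)

    int main() {
        // void * -> T *: static_cast is sufficient; no reinterpret_cast needed.
        char *base = static_cast<char *>(std::malloc(64));
        if (!base) { return 1; }

        // Byte arithmetic first, on a char * base; only then re-type the address.
        Header *h = reinterpret_cast<Header *>(base + BODY_OFFSET);

        // Wrong: reinterpret_cast<Header *>(base) + BODY_OFFSET would advance
        // BODY_OFFSET * sizeof(Header) bytes, not BODY_OFFSET bytes.
        h->magic = 0xdeadbeef;

        std::free(base);
        return 0;
    }

This is also why the macros in nfa_internal.h go through const char * before producing const void *: the C++ versions keep the byte-offset semantics of the original C casts while satisfying Cppcheck's cstyleCast checker.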