Fix C-style casts

Konstantinos Margaritis
2024-05-15 23:22:39 +03:00
parent afd03a3d85
commit e39db866ce
20 changed files with 108 additions and 92 deletions
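The pattern repeated across these files is the same: an opaque void * or raw byte buffer is converted to a typed pointer, and the C-style cast is replaced by a named C++ cast that states which conversion is intended. A minimal sketch of the distinction, using hypothetical types rather than anything from the tree:

#include <cstdint>

struct Header { std::uint32_t len; };    // hypothetical type

void example(const void *in, void *out) {
    // A C-style cast such as (Header *)in would also compile here, but it
    // would silently drop the const qualifier. The named casts are explicit,
    // and the compiler rejects the ones that do not fit.
    const Header *h = static_cast<const Header *>(in);            // void* -> typed pointer
    std::uint8_t *bytes = reinterpret_cast<std::uint8_t *>(out);  // byte-level view
    bytes[0] = static_cast<std::uint8_t>(h->len & 0xff);
}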

View File

@@ -426,7 +426,7 @@ void
accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
const AccelScheme &info,
void *accel_out) {
AccelAux *accel = (AccelAux *)accel_out;
AccelAux *accel = reinterpret_cast<AccelAux *>(accel_out);
DEBUG_PRINTF("accelerations scheme has offset s%u/d%u\n", info.offset,
info.double_offset);
@@ -473,7 +473,8 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
u8 c1 = info.double_byte.begin()->first & m1;
u8 c2 = info.double_byte.begin()->second & m2;
#ifdef HAVE_SVE2
if (vermicelliDoubleMasked16Build(c1, c2, m1, m2, (u8 *)&accel->mdverm16.mask)) {
if (vermicelliDoubleMasked16Build(c1, c2, m1, m2,
reinterpret_cast<u8 *>(&accel->mdverm16.mask))) {
accel->accel_type = ACCEL_DVERM16_MASKED;
accel->mdverm16.offset = verify_u8(info.double_offset);
accel->mdverm16.c1 = c1;
@@ -482,8 +483,9 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
c1, c2);
return;
} else if (info.double_byte.size() <= 8 &&
vermicelliDouble16Build(info.double_byte, (u8 *)&accel->dverm16.mask,
(u8 *)&accel->dverm16.firsts)) {
vermicelliDouble16Build(info.double_byte,
reinterpret_cast<u8 *>(&accel->dverm16.mask),
reinterpret_cast<u8 *>(&accel->dverm16.firsts))) {
accel->accel_type = ACCEL_DVERM16;
accel->dverm16.offset = verify_u8(info.double_offset);
DEBUG_PRINTF("building double16-vermicelli\n");
@@ -503,8 +505,9 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
}
#ifdef HAVE_SVE2
if (info.double_byte.size() <= 8 &&
vermicelliDouble16Build(info.double_byte, (u8 *)&accel->dverm16.mask,
(u8 *)&accel->dverm16.firsts)) {
vermicelliDouble16Build(info.double_byte,
reinterpret_cast<u8 *>(&accel->dverm16.mask),
reinterpret_cast<u8 *>(&accel->dverm16.firsts))) {
accel->accel_type = ACCEL_DVERM16;
accel->dverm16.offset = verify_u8(info.double_offset);
DEBUG_PRINTF("building double16-vermicelli\n");
@@ -515,9 +518,11 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
if (double_byte_ok(info) &&
shuftiBuildDoubleMasks(
info.double_cr, info.double_byte, (u8 *)&accel->dshufti.lo1,
(u8 *)&accel->dshufti.hi1, (u8 *)&accel->dshufti.lo2,
(u8 *)&accel->dshufti.hi2)) {
info.double_cr, info.double_byte,
reinterpret_cast<u8 *>(&accel->dshufti.lo1),
reinterpret_cast<u8 *>(&accel->dshufti.hi1),
reinterpret_cast<u8 *>(&accel->dshufti.lo2),
reinterpret_cast<u8 *>(&accel->dshufti.hi2))) {
accel->accel_type = ACCEL_DSHUFTI;
accel->dshufti.offset = verify_u8(info.double_offset);
DEBUG_PRINTF("state %hu is double shufti\n", this_idx);
@@ -549,7 +554,7 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
#ifdef HAVE_SVE2
if (info.cr.count() <= 16) {
accel->accel_type = ACCEL_VERM16;
vermicelli16Build(info.cr, (u8 *)&accel->verm16.mask);
vermicelli16Build(info.cr, reinterpret_cast<u8 *>(&accel->verm16.mask));
DEBUG_PRINTF("state %hu is vermicelli16\n", this_idx);
return;
}
@@ -562,16 +567,18 @@ accel_dfa_build_strat::buildAccel(UNUSED dstate_id_t this_idx,
}
accel->accel_type = ACCEL_SHUFTI;
if (-1 != shuftiBuildMasks(info.cr, (u8 *)&accel->shufti.lo,
(u8 *)&accel->shufti.hi)) {
if (-1 != shuftiBuildMasks(info.cr,
reinterpret_cast<u8 *>(&accel->shufti.lo),
reinterpret_cast<u8 *>(&accel->shufti.hi))) {
DEBUG_PRINTF("state %hu is shufti\n", this_idx);
return;
}
assert(!info.cr.none());
accel->accel_type = ACCEL_TRUFFLE;
truffleBuildMasks(info.cr, (u8 *)&accel->truffle.mask1,
(u8 *)&accel->truffle.mask2);
truffleBuildMasks(info.cr,
reinterpret_cast<u8 *>(&accel->truffle.mask1),
reinterpret_cast<u8 *>(&accel->truffle.mask2));
DEBUG_PRINTF("state %hu is truffle\n", this_idx);
}
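buildAccel receives its output as an untyped void *accel_out, reinterprets it as an AccelAux, and then hands individual mask fields to the builder helpers as u8 * byte views. A reduced sketch of that out-parameter shape, with hypothetical simplified types standing in for AccelAux and the real builders:

#include <cstring>
#include <cstdint>
using u8 = std::uint8_t;

struct Mask128 { u8 b[16]; };                 // stand-in for an m128 field
struct Aux { u8 accel_type; Mask128 mask; };  // stand-in for AccelAux

// Hypothetical builder: fills 16 mask bytes through a byte pointer.
static bool buildMask(u8 *mask_out) {
    std::memset(mask_out, 0, sizeof(Mask128));
    return true;
}

void buildAccelSketch(void *accel_out) {
    Aux *aux = reinterpret_cast<Aux *>(accel_out);        // typed view of the output
    if (buildMask(reinterpret_cast<u8 *>(&aux->mask))) {  // byte view of the mask field
        aux->accel_type = 1;
    }
}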

View File

@@ -84,8 +84,9 @@ void buildAccelSingle(const AccelInfo &info, AccelAux *aux) {
#endif
DEBUG_PRINTF("attempting shufti for %zu chars\n", outs);
if (-1 != shuftiBuildMasks(info.single_stops, (u8 *)&aux->shufti.lo,
(u8 *)&aux->shufti.hi)) {
if (-1 != shuftiBuildMasks(info.single_stops,
reinterpret_cast<u8 *>(&aux->shufti.lo),
reinterpret_cast<u8 *>(&aux->shufti.hi))) {
aux->accel_type = ACCEL_SHUFTI;
aux->shufti.offset = offset;
DEBUG_PRINTF("shufti built OK\n");
@@ -98,8 +99,9 @@ void buildAccelSingle(const AccelInfo &info, AccelAux *aux) {
DEBUG_PRINTF("building Truffle for %zu chars\n", outs);
aux->accel_type = ACCEL_TRUFFLE;
aux->truffle.offset = offset;
truffleBuildMasks(info.single_stops, (u8 *)&aux->truffle.mask1,
(u8 *)&aux->truffle.mask2);
truffleBuildMasks(info.single_stops,
reinterpret_cast<u8 *>(&aux->truffle.mask1),
reinterpret_cast<u8 *>(&aux->truffle.mask2));
return;
}
@@ -219,8 +221,9 @@ void buildAccelDouble(const AccelInfo &info, AccelAux *aux) {
c1, c2);
return;
} else if (outs2 <= 8 &&
vermicelliDouble16Build(info.double_stop2, (u8 *)&aux->dverm16.mask,
(u8 *)&aux->dverm16.firsts)) {
vermicelliDouble16Build(info.double_stop2,
reinterpret_cast<u8 *>(&aux->dverm16.mask),
reinterpret_cast<u8 *>(&aux->dverm16.firsts))) {
aux->accel_type = ACCEL_DVERM16;
aux->dverm16.offset = offset;
DEBUG_PRINTF("building double16-vermicelli\n");
@@ -254,9 +257,11 @@ void buildAccelDouble(const AccelInfo &info, AccelAux *aux) {
aux->accel_type = ACCEL_DSHUFTI;
aux->dshufti.offset = offset;
if (shuftiBuildDoubleMasks(
info.double_stop1, info.double_stop2, (u8 *)&aux->dshufti.lo1,
(u8 *)&aux->dshufti.hi1, (u8 *)&aux->dshufti.lo2,
(u8 *)&aux->dshufti.hi2)) {
info.double_stop1, info.double_stop2,
reinterpret_cast<u8 *>(&aux->dshufti.lo1),
reinterpret_cast<u8 *>(&aux->dshufti.hi1),
reinterpret_cast<u8 *>(&aux->dshufti.lo2),
reinterpret_cast<u8 *>(&aux->dshufti.hi2))) {
return;
}
}
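Both files keep the same fallback order: try the shufti masks first and, when the character class cannot be encoded (the builder returns -1), fall back to truffle, which can represent any class. A self-contained sketch of that control flow, with hypothetical builders and labels in place of the real ones:

#include <cstdint>
using u8 = std::uint8_t;

// Hypothetical stand-ins: the nibble-mask builder can fail for wide classes,
// the full-mask builder always succeeds.
static int buildNibbleMasks(u8 *lo, u8 *hi) { lo[0] = hi[0] = 0; return -1; }
static void buildFullMasks(u8 *m1, u8 *m2) { m1[0] = m2[0] = 0xff; }

enum AccelKind { kShufti, kTruffle };         // hypothetical labels

AccelKind chooseAccel(u8 *lo, u8 *hi, u8 *m1, u8 *m2) {
    if (-1 != buildNibbleMasks(lo, hi)) {
        return kShufti;      // preferred: cheaper class test at scan time
    }
    buildFullMasks(m1, m2);  // unconditional fallback
    return kTruffle;
}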

View File

@@ -106,25 +106,27 @@ void writeCastleScanEngine(const CharReach &cr, Castle *c) {
#ifdef HAVE_SVE2
if (cr.count() <= 16) {
c->type = CASTLE_NVERM16;
vermicelli16Build(cr, (u8 *)&c->u.verm16.mask);
vermicelli16Build(cr, reinterpret_cast<u8 *>(&c->u.verm16.mask));
return;
}
if (negated.count() <= 16) {
c->type = CASTLE_VERM16;
vermicelli16Build(negated, (u8 *)&c->u.verm16.mask);
vermicelli16Build(negated, reinterpret_cast<u8 *>(&c->u.verm16.mask));
return;
}
#endif // HAVE_SVE2
if (shuftiBuildMasks(negated, (u8 *)&c->u.shuf.mask_lo,
(u8 *)&c->u.shuf.mask_hi) != -1) {
if (shuftiBuildMasks(negated,
reinterpret_cast<u8 *>(&c->u.shuf.mask_lo),
reinterpret_cast<u8 *>(&c->u.shuf.mask_hi)) != -1) {
c->type = CASTLE_SHUFTI;
return;
}
c->type = CASTLE_TRUFFLE;
truffleBuildMasks(negated, (u8 *)(u8 *)&c->u.truffle.mask1,
(u8 *)&c->u.truffle.mask2);
truffleBuildMasks(negated,
reinterpret_cast<u8 *>(&c->u.truffle.mask1),
reinterpret_cast<u8 *>(&c->u.truffle.mask2));
}
static
@@ -602,9 +604,9 @@ buildCastle(const CastleProto &proto,
nfa->minWidth = verify_u32(minWidth);
nfa->maxWidth = maxWidth.is_finite() ? verify_u32(maxWidth) : 0;
char * const base_ptr = (char *)nfa.get() + sizeof(NFA);
char * const base_ptr = reinterpret_cast<char *>(nfa.get()) + sizeof(NFA);
char *ptr = base_ptr;
Castle *c = (Castle *)ptr;
Castle *c = reinterpret_cast<Castle *>(ptr);
c->numRepeats = verify_u32(subs.size());
c->numGroups = exclusiveInfo.numGroups;
c->exclusive = verify_s8(exclusive);
@@ -615,7 +617,7 @@ buildCastle(const CastleProto &proto,
writeCastleScanEngine(cr, c);
ptr += sizeof(Castle);
SubCastle *subCastles = ((SubCastle *)(ROUNDUP_PTR(ptr, alignof(u32))));
SubCastle *subCastles = reinterpret_cast<SubCastle *>(ROUNDUP_PTR(ptr, alignof(u32)));
copy(subs.begin(), subs.end(), subCastles);
u32 length = 0;
@@ -625,16 +627,16 @@ buildCastle(const CastleProto &proto,
SubCastle *sub = &subCastles[i];
sub->repeatInfoOffset = offset;
ptr = (char *)sub + offset;
ptr = reinterpret_cast<char *>(sub) + offset;
memcpy(ptr, &infos[i], sizeof(RepeatInfo));
if (patchSize[i]) {
RepeatInfo *info = (RepeatInfo *)ptr;
u64a *table = ((u64a *)(ROUNDUP_PTR(((char *)(info) +
sizeof(*info)), alignof(u64a))));
RepeatInfo *info = reinterpret_cast<RepeatInfo *>(ptr);
u64a *table = reinterpret_cast<u64a *>(ROUNDUP_PTR(reinterpret_cast<char *>(info) +
sizeof(*info), alignof(u64a)));
copy(tables.begin() + tableIdx,
tables.begin() + tableIdx + patchSize[i], table);
u32 diff = (char *)table - (char *)info +
u32 diff = reinterpret_cast<char *>(table) - reinterpret_cast<char *>(info) +
sizeof(u64a) * patchSize[i];
info->length = diff;
length += diff;
@@ -657,8 +659,6 @@ buildCastle(const CastleProto &proto,
if (!stale_iter.empty()) {
c->staleIterOffset = verify_u32(ptr - base_ptr);
copy_bytes(ptr, stale_iter);
// Removed unused increment operation
// ptr += byte_length(stale_iter);
}
return nfa;
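The castle bytecode is laid out by hand at byte offsets, so every offset has to be applied to a char pointer: adding sizeof(*info) to a RepeatInfo * would advance by whole RepeatInfo objects, not bytes, which is why the table pointer above is computed from reinterpret_cast<char *>(info). A small illustration with hypothetical types and a simplified round-up helper:

#include <cstdint>
#include <cstddef>

struct Info { std::uint32_t length; std::uint32_t encoding; };  // hypothetical, 8 bytes

// Simplified stand-in for ROUNDUP_PTR: round a pointer up to 'align' bytes.
static char *roundup_ptr(char *p, std::size_t align) {
    auto v = reinterpret_cast<std::uintptr_t>(p);
    return reinterpret_cast<char *>((v + align - 1) & ~(std::uintptr_t(align) - 1));
}

void layout(char *buf) {   // buf assumed suitably aligned and large enough
    Info *info = reinterpret_cast<Info *>(buf);
    // Correct: byte arithmetic, the table starts right after the Info header.
    char *after = reinterpret_cast<char *>(info) + sizeof(*info);
    std::uint64_t *table =
        reinterpret_cast<std::uint64_t *>(roundup_ptr(after, alignof(std::uint64_t)));
    // Wrong: 'info + sizeof(*info)' would skip sizeof(Info) * sizeof(Info) bytes.
    (void)table;
}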

View File

@@ -1077,8 +1077,9 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
return bytecode_ptr<NFA>(nullptr);
}
u8 alphaShift
= ((const mcclellan *)getImplNfa(basic_dfa.get()))->alphaShift;
// cppcheck-suppress cstyleCast
const auto nfa = static_cast<const mcclellan *>(getImplNfa(basic_dfa.get()));
u8 alphaShift = nfa->alphaShift;
u32 edge_count = (1U << alphaShift) * raw.states.size();
u32 curr_offset = ROUNDUP_N(basic_dfa->length, 4);
@@ -1119,8 +1120,8 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
u32 gough_size = ROUNDUP_N(curr_offset, 16);
auto gough_dfa = make_zeroed_bytecode_ptr<NFA>(gough_size);
memcpy(gough_dfa.get(), basic_dfa.get(), basic_dfa->length);
memcpy((char *)gough_dfa.get() + haig_offset, &gi, sizeof(gi));
memcpy(reinterpret_cast<char *>(gough_dfa.get()), basic_dfa.get(), basic_dfa->length);
memcpy(reinterpret_cast<char *>(gough_dfa.get()) + haig_offset, &gi, sizeof(gi));
if (gough_dfa->type == MCCLELLAN_NFA_16) {
gough_dfa->type = GOUGH_NFA_16;
} else {
@@ -1133,18 +1134,19 @@ bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
gough_dfa->streamStateSize = base_state_size + slot_count * somPrecision;
gough_dfa->scratchStateSize = (u32)(16 + scratch_slot_count * sizeof(u64a));
mcclellan *m = (mcclellan *)getMutableImplNfa(gough_dfa.get());
// cppcheck-suppress cstyleCast
auto *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(gough_dfa.get()));
m->haig_offset = haig_offset;
/* update nfa length, haig_info offset (leave mcclellan length alone) */
gough_dfa->length = gough_size;
/* copy in blocks */
copy_bytes((u8 *)gough_dfa.get() + edge_prog_offset, edge_blocks);
copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + edge_prog_offset, edge_blocks);
if (top_prog_offset) {
copy_bytes((u8 *)gough_dfa.get() + top_prog_offset, top_blocks);
copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + top_prog_offset, top_blocks);
}
copy_bytes((u8 *)gough_dfa.get() + prog_base_offset, temp_blocks);
copy_bytes(reinterpret_cast<u8 *>(gough_dfa.get()) + prog_base_offset, temp_blocks);
return gough_dfa;
}
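goughCompile assembles the final engine as one flat allocation: the base mcclellan bytecode is copied in first, and the gough-specific blocks are then placed at precomputed byte offsets from the start of the NFA header, which is why each copy goes through a char */u8 * view of the same allocation. A reduced sketch of that placement step, with a hypothetical helper rather than the real copy_bytes:

#include <cstring>
#include <cstdint>
#include <vector>
using u8 = std::uint8_t;

// Place a block of bytes at 'offset' bytes from the start of the engine.
static void place_block(void *engine, std::size_t offset, const std::vector<u8> &blob) {
    u8 *base = reinterpret_cast<u8 *>(engine);  // byte view of the header
    std::memcpy(base + offset, blob.data(), blob.size());
}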
@@ -1177,7 +1179,7 @@ AccelScheme gough_build_strat::find_escape_strings(dstate_id_t this_idx) const {
void gough_build_strat::buildAccel(dstate_id_t this_idx, const AccelScheme &info,
void *accel_out) {
assert(mcclellan_build_strat::accelSize() == sizeof(AccelAux));
gough_accel *accel = (gough_accel *)accel_out;
gough_accel *accel = reinterpret_cast<gough_accel *>(accel_out);
/* build a plain accelaux so we can work out where we can get to */
mcclellan_build_strat::buildAccel(this_idx, info, &accel->accel);
DEBUG_PRINTF("state %hu is accel with type %hhu\n", this_idx,
@@ -1315,7 +1317,8 @@ void raw_gough_report_info_impl::fillReportLists(NFA *n, size_t base_offset,
for (const raw_gough_report_list &r : rl) {
ro.emplace_back(base_offset);
gough_report_list *p = (gough_report_list *)((char *)n + base_offset);
u8 * n_ptr = reinterpret_cast<u8 *>(n);
gough_report_list *p = reinterpret_cast<gough_report_list *>(n_ptr + base_offset);
u32 i = 0;
for (const som_report &sr : r.reports) {

View File

@@ -194,7 +194,7 @@ void handle_pending_vars(GoughSSAVar *def, const GoughGraph &g,
if (contains(aux.containing_v, var)) {
/* def is used by join vertex, value only needs to be live on some
* incoming edges */
const GoughSSAVarJoin *vj = (GoughSSAVarJoin *)var;
const GoughSSAVarJoin *vj = reinterpret_cast<const GoughSSAVarJoin *>(var);
const flat_set<GoughEdge> &live_edges
= vj->get_edges_for_input(def);
for (const auto &e : live_edges) {
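Here the pointer is known (via aux.containing_v) to refer to a join vertex, so the conversion is a downcast within the GoughSSAVar hierarchy. A static_cast expresses the same intent and, unlike reinterpret_cast, applies any base-to-derived pointer adjustment the layout requires; a sketch with hypothetical classes:

struct VarBase { virtual ~VarBase() = default; };
struct VarJoin : VarBase { int inputs = 0; };

int readJoin(const VarBase *var_known_to_be_join) {
    // Downcast with a known dynamic type: static_cast performs the
    // base-to-derived adjustment when one is needed.
    const VarJoin *vj = static_cast<const VarJoin *>(var_known_to_be_join);
    return vj->inputs;
}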

View File

@@ -264,7 +264,7 @@ const u8 *shuftiDoubleExecReal(m128 mask1_lo, m128 mask1_hi, m128 mask2_lo, m128
const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
const u8 *buf_end) {
if (buf_end - buf < VECTORSIZE) {
return shuftiFwdSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, buf, buf_end);
return shuftiFwdSlow(reinterpret_cast<const u8 *>(&mask_lo), reinterpret_cast<const u8 *>(&mask_hi), buf, buf_end);
}
return shuftiExecReal<VECTORSIZE>(mask_lo, mask_hi, buf, buf_end);
}
@@ -272,7 +272,7 @@ const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
const u8 *rshuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
const u8 *buf_end) {
if (buf_end - buf < VECTORSIZE) {
return shuftiRevSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, buf, buf_end);
return shuftiRevSlow(reinterpret_cast<const u8 *>(&mask_lo), reinterpret_cast<const u8 *>(&mask_hi), buf, buf_end);
}
return rshuftiExecReal<VECTORSIZE>(mask_lo, mask_hi, buf, buf_end);
}
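Both wrappers share the same guard: when fewer than VECTORSIZE bytes remain, a full vector load is not safe, so the SIMD masks are handed to a scalar loop as plain byte views. A self-contained sketch of that shape, with a stand-in for m128 and a hypothetical scalar routine in place of shuftiFwdSlow:

#include <cstdint>
#include <cstddef>
using u8 = std::uint8_t;

struct m128_like { alignas(16) u8 b[16]; };   // stand-in for m128

// Hypothetical scalar fallback: nibble-indexed class test, byte by byte.
static const u8 *scanSlow(const u8 *lo, const u8 *hi,
                          const u8 *buf, const u8 *buf_end) {
    for (; buf < buf_end; ++buf) {
        if (lo[*buf & 0xf] & hi[*buf >> 4]) {
            return buf;
        }
    }
    return buf_end;
}

const u8 *scan(m128_like mask_lo, m128_like mask_hi,
               const u8 *buf, const u8 *buf_end) {
    constexpr std::ptrdiff_t VECTORSIZE = 16;
    if (buf_end - buf < VECTORSIZE) {
        // Too short for a vector load: reuse the mask bytes in the scalar loop.
        return scanSlow(reinterpret_cast<const u8 *>(&mask_lo),
                        reinterpret_cast<const u8 *>(&mask_hi), buf, buf_end);
    }
    // ... vector path elided ...
    return buf_end;
}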