[ARM]: Implement CPU plugin just-in-time emitter for Erf operation #27499 #28176

Open · wants to merge 10 commits into master

@@ -2438,6 +2438,95 @@ std::set<std::vector<element::Type>> jit_sigmoid_emitter::get_supported_precisions
return {{element::f32}};
}

/// ERF ///
jit_erf_emitter::jit_erf_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& node)
: jit_emitter(host, host_isa, node, get_arithmetic_binary_exec_precision(node)) {
prepare_table();
exp_emitter = std::make_unique<jit_exp_emitter>(h, host_isa, node);
}

jit_erf_emitter::jit_erf_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc)
: jit_emitter(host, host_isa, exec_prc) {
prepare_table();
exp_emitter = std::make_unique<jit_exp_emitter>(h, host_isa, exec_prc);
}

size_t jit_erf_emitter::get_inputs_count() const {
return 1;
}

size_t jit_erf_emitter::get_aux_vecs_count() const {
return exp_emitter->get_aux_vecs_count() + 4;
}

size_t jit_erf_emitter::get_aux_gprs_count() const {
return exp_emitter->get_aux_gprs_count() + 1;
}

void jit_erf_emitter::emit_impl(const std::vector<size_t>& in_vec_idxs,
const std::vector<size_t>& out_vec_idxs) const {
if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) {
emit_isa<dnnl::impl::cpu::aarch64::asimd>(in_vec_idxs, out_vec_idxs);
} else {
OPENVINO_THROW("Can't create jit eltwise kernel");
}
}

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void jit_erf_emitter::emit_isa(const std::vector<size_t>& in_vec_idxs,
const std::vector<size_t>& out_vec_idxs) const {
if (exec_prc_ != ov::element::f32) {
OPENVINO_THROW("unsupported precision: " + exec_prc_.to_string());
}

using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits<isa>::TReg;
const TReg src(in_vec_idxs[0]);
const TReg dst(out_vec_idxs[0]);
const TReg vmm_aux0(aux_vec_idxs[0]);
const TReg vmm_aux1(aux_vec_idxs[1]);
const TReg vmm_aux2(aux_vec_idxs[2]);
const TReg vmm_aux3(aux_vec_idxs[3]);

// vmm_aux0 = x * x
h->fmul(vmm_aux0.s, src.s, src.s);

// vmm_aux1 = exp(-x * x)
h->fneg(vmm_aux0.s, vmm_aux0.s);
exp_emitter->emit_code({vmm_aux0.getIdx()}, {vmm_aux1.getIdx()}, aux_vec_idxs, aux_gpr_idxs);

// t = 1 / (p * x + 1)
h->ld1r(vmm_aux2.s, table_val2("erf_p"));
h->fmul(vmm_aux2.s, vmm_aux2.s, src.s);
h->ld1r(vmm_aux3.s, table_val2("one"));
h->fadd(vmm_aux2.s, vmm_aux2.s, vmm_aux3.s);
h->fdiv(vmm_aux2.s, vmm_aux3.s, vmm_aux2.s);

// erf(x) ≈ sign(x) * (1 - t * exp(-x * x))
h->fmul(vmm_aux1.s, vmm_aux1.s, vmm_aux2.s);
h->fsub(vmm_aux1.s, vmm_aux3.s, vmm_aux1.s);
h->fcmge(vmm_aux3.s, src.s, 0.0);
h->bsl(vmm_aux3.b16, vmm_aux1.b16, vmm_aux0.b16);
h->mov(dst.b16, vmm_aux3.b16);
}

void jit_erf_emitter::emit_data() const {
Contributor commented:
If a derived class's implementation of a method is the same as the base class's and the method is virtual in the base class, there is no need to override it.

However, you use exp_emitter in the implementation of the erf emitter, so you need to emit data for exp_emitter in this method too:

void jit_erf_emitter::emit_data() const {
    jit_emitter::emit_data();
    exp_emitter->emit_data();
}

Otherwise, exp_emitter cannot find the constant values in the table.

I believe that adding exp_emitter->emit_data(); will fix the failed GHA jobs that show the following error messages:

[ RUN      ] smoke_Activation_Basic/ActivationLayerTest.Inference/IS=([])_TS={(1.50)}_TS=()_Erf_constants_value=()_netPRC=f32_trgDev=CPU
bad err=4 in Xbyak::Error

MEM_USAGE=1972392KB
src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp:97: Failure
Exception from src/inference/src/cpp/core.cpp:109:
Exception from src/inference/src/dev/plugin.cpp:53:
label is not found


[  FAILED  ] smoke_Activation_Basic/ActivationLayerTest.Inference/IS=([])_TS={(1.50)}_TS=()_Erf_constants_value=()_netPRC=f32_trgDev=CPU, where GetParam() = ((26, {}), f32, ({ ({}, { { 1, 50 } }) }, {}), "CPU") (46 ms)

jit_emitter::emit_data();
}

std::set<std::vector<element::Type>> jit_erf_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {{element::f32}};
}

void jit_erf_emitter::register_table_entries() {
push_arg_entry_of("one", 0x3f800000, true);    // 1.0f
push_arg_entry_of("erf_p", 0x3ea7ba05, true);  // p ≈ 0.3275911 (erf approximation coefficient)
}
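
For readers following the math, below is a scalar sketch of the approximation suggested by the constants and the instruction sequence above: a single-term rational factor t = 1 / (1 + p * x) combined with exp(-x * x), with the sign restored at the end (the JIT code handles the sign selection with the fcmge/bsl pair). The helper name erf_scalar_reference and the use of std::fabs/std::exp are illustrative assumptions, not part of this PR.

#include <cmath>

// Illustrative scalar model only (assumed reference, not the emitted code):
// for x >= 0: erf(x) ≈ 1 - t * exp(-x * x), where t = 1 / (1 + p * x),
// extended to negative inputs via the odd symmetry erf(-x) = -erf(x).
static float erf_scalar_reference(float x) {
    const float p = 0.3275911f;              // table entry "erf_p" (0x3ea7ba05)
    const float ax = std::fabs(x);
    const float t = 1.0f / (p * ax + 1.0f);  // table entry "one" (0x3f800000)
    const float r = 1.0f - t * std::exp(-ax * ax);
    return (x >= 0.0f) ? r : -r;
}

On the non-negative half-axis this corresponds to the fmul/fneg plus exp, ld1r/fmul/fadd/fdiv, and fmul/fsub steps in emit_isa above.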



/// SOFT_SIGN ///
jit_soft_sign_emitter::jit_soft_sign_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,

@@ -978,6 +978,40 @@ class jit_sigmoid_emitter : public jit_emitter {
void emit_isa(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const;
};

class jit_erf_emitter : public jit_emitter {
public:
jit_erf_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc = ov::element::f32);

jit_erf_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& node);

size_t get_inputs_count() const override;

size_t get_aux_vecs_count() const override;

size_t get_aux_gprs_count() const override;

void register_table_entries() override;


void emit_data() const override;

static std::set<std::vector<element::Type>> get_supported_precisions(
const std::shared_ptr<ov::Node>& node = nullptr);

private:
std::unique_ptr<jit_exp_emitter> exp_emitter;

void emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const override;

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void emit_isa(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const;

};

class jit_soft_sign_emitter : public jit_emitter {
public:
jit_soft_sign_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,

@@ -24,6 +24,7 @@ bool JitEltwiseExecutor::isSupported(const Algorithm& algorithm,
Algorithm::EltwiseDivide,
Algorithm::EltwiseElu,
Algorithm::EltwiseEqual,
Algorithm::EltwiseErf,
Algorithm::EltwiseExp,
Algorithm::EltwiseFloor,
Algorithm::EltwiseFloorMod,

@@ -660,6 +660,7 @@ std::shared_ptr<jit_emitter> jit_uni_eltwise_generic<isa>::create_eltwise_emitter(
OV_CASE(Algorithm::EltwiseDivide, ov::intel_cpu::aarch64::jit_divide_emitter),
OV_CASE(Algorithm::EltwiseElu, ov::intel_cpu::aarch64::jit_elu_emitter),
OV_CASE(Algorithm::EltwiseEqual, ov::intel_cpu::aarch64::jit_equal_emitter),
OV_CASE(Algorithm::EltwiseErf, ov::intel_cpu::aarch64::jit_erf_emitter),
OV_CASE(Algorithm::EltwiseExp, ov::intel_cpu::aarch64::jit_exp_emitter),
OV_CASE(Algorithm::EltwiseFloor, ov::intel_cpu::aarch64::jit_floor_emitter),
OV_CASE(Algorithm::EltwiseFloorMod, ov::intel_cpu::aarch64::jit_floor_mod_emitter),

@@ -847,6 +848,7 @@ std::set<std::vector<element::Type>> eltwise_precision_helper::get_supported_precisions(
OV_CASE(Algorithm::EltwiseDivide, jit_divide_emitter),
OV_CASE(Algorithm::EltwiseElu, jit_elu_emitter),
OV_CASE(Algorithm::EltwiseEqual, jit_equal_emitter),
OV_CASE(Algorithm::EltwiseErf, jit_erf_emitter),
OV_CASE(Algorithm::EltwiseExp, jit_exp_emitter),
OV_CASE(Algorithm::EltwiseFloor, jit_floor_emitter),
OV_CASE(Algorithm::EltwiseFloorMod, jit_floor_mod_emitter),