/*******************************************************************************
* Copyright 2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_MATMUL_REF_MATMUL_INT8_HPP
#define CPU_MATMUL_REF_MATMUL_INT8_HPP

#include <assert.h>

#include "common/c_types_map.hpp"
#include "common/primitive.hpp"
#include "common/type_helpers.hpp"
#include "common/utils.hpp"

#include "cpu/platform.hpp"
#include "cpu/primitive_attr_postops.hpp"

#include "cpu/matmul/cpu_matmul_pd.hpp"

namespace dnnl {
namespace impl {
namespace cpu {
namespace matmul {

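// Reference (plain C++) int8 matmul primitive. It accepts s8/u8 sources with
// s8 weights and is registered under the implementation name "ref_int8:any",
// serving as a generic fallback for cases the optimized kernels do not cover.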
struct ref_matmul_int8_t : public primitive_t {
    struct pd_t : public cpu_matmul_pd_t {
        using cpu_matmul_pd_t::cpu_matmul_pd_t;

        DECLARE_COMMON_PD_T("ref_int8:any", ref_matmul_int8_t);

        status_t init(engine_t *engine) {
            using namespace data_type;
            using smask_t = primitive_attr_t::skip_mask_t;
            const auto src_type = src_md(0)->data_type;
            const auto wei_type = weights_md(0)->data_type;
            const auto bia_type = weights_md(1)->data_type;
            const auto dst_type = dst_md(0)->data_type;

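            // Validate the supported data types, the attributes (runtime
            // output scales, runtime zero points, post-ops, sum data type),
            // and the memory formats before accepting the problem.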
            bool ok = utils::one_of(src_type, s8, u8) && wei_type == s8
                    && IMPLICATION(with_bias(),
                            utils::one_of(bia_type, f32, bf16, s32, s8, u8))
                    && utils::one_of(dst_type, f32, bf16, s32, s8, u8)
                    && attr()->has_default_values(smask_t::oscale_runtime
                                    | smask_t::zero_points_runtime
                                    | smask_t::post_ops | smask_t::sum_dt,
                            dst_type)
                    && attr_.post_ops_.check_sum_consistent_dt(dst_type)
                    && attr_oscale_ok() && attr_zero_points_ok()
                    && set_default_formats()
                    && attr_.set_default_formats(dst_md(0)) == status::success;
            return ok ? status::success : status::unimplemented;
        }

    private:
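        // Output scales are restricted to a single common scale (mask == 0)
        // or per-channel scales along the N dimension of the destination,
        // i.e. a mask of 1 << (batched() + 1).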
        bool attr_oscale_ok() const {
            const auto &oscale = attr()->output_scales_;
            return oscale.mask_ == 0 || oscale.mask_ == (1 << (batched() + 1));
        }

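        // Zero points: src and dst accept either a common value (mask == 0)
        // or values along dimension 1 (mask == 1 << 1); weights zero points
        // must be common.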
        bool attr_zero_points_ok() const {
            int mask_src = 0, mask_wei = 0, mask_dst = 0;
            attr()->zero_points_.get(DNNL_ARG_SRC, nullptr, &mask_src, nullptr);
            attr()->zero_points_.get(
                    DNNL_ARG_WEIGHTS, nullptr, &mask_wei, nullptr);
            attr()->zero_points_.get(DNNL_ARG_DST, nullptr, &mask_dst, nullptr);

            return (mask_src == 0 || mask_src == 1 << 1) && (mask_wei == 0)
                    && (mask_dst == 0 || mask_dst == 1 << 1);
        }
    };

    ref_matmul_int8_t(const pd_t *apd) : primitive_t(apd) {}

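    // Create the post-ops handler from the primitive attributes; it is used
    // when executing the reference kernel.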
    status_t init(engine_t *engine) override {
        ref_post_ops
                = utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
        if (!ref_post_ops) return status::out_of_memory;
        return status::success;
    }

    status_t execute(const exec_ctx_t &ctx) const override {
        return execute_ref(ctx);
    }

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    status_t execute_ref(const exec_ctx_t &ctx) const;
    std::unique_ptr<ref_post_ops_t> ref_post_ops;
};

} // namespace matmul
} // namespace cpu
} // namespace impl
} // namespace dnnl

#endif