/dports/misc/py-gluonnlp/gluon-nlp-0.10.0/tests/unittest/

test_attention_cell.py
      9  num_heads=None): argument
     40  read_value_npy = read_value_npy.reshape((batch_size, query_length, num_heads,
     44  for j in range(num_heads):
     84  for query_units, key_units, value_units, num_heads in [(4, 4, 8, 2), (3, 3, 9, 3),
     94  num_heads=num_heads)
     96  q_channel=query_units // num_heads,
     97  k_channel=key_units // num_heads,
     98  v_channel=value_units // num_heads,
    101  num_heads=num_heads)
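The reshape at line 40 and the per-head loop at line 44 suggest the test checks each
attention head's output slice independently. A minimal numpy sketch of that
verification pattern (array names and sizes here are illustrative, not taken from
the file):

    import numpy as np

    batch_size, query_length, num_heads, v_channel = 2, 5, 4, 8

    # Flat multi-head read values, as an attention cell emits them:
    # all heads concatenated along the last axis.
    read_value_npy = np.random.rand(batch_size, query_length, num_heads * v_channel)

    # Split the last axis so each head's slice can be inspected on its own,
    # mirroring the reshape in the test.
    read_value_npy = read_value_npy.reshape(
        (batch_size, query_length, num_heads, v_channel))

    for j in range(num_heads):
        head_out = read_value_npy[:, :, j, :]   # (batch, query_length, v_channel)
        assert head_out.shape == (batch_size, query_length, v_channel)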
/dports/misc/py-gluonnlp/gluon-nlp-0.10.0/src/gluonnlp/model/

transformer.py
    212  num_heads=num_heads, scaled=scaled,
    322  .format(units, num_heads)
    341  units=units, hidden_size=hidden_size, num_heads=num_heads,
    497  self._num_heads = num_heads
    507  num_heads=num_heads,
    512  num_heads=num_heads,
    595  self._num_states = num_heads
    614  num_heads=num_heads, attention_cell=attention_cell,
    913  num_layers=num_layers, num_heads=num_heads, max_length=max_src_length, units=units,
    918  num_layers=num_layers, num_heads=num_heads, max_length=max_tgt_length, units=units,
    [all …]
bert.py
     84  def __init__(self, units, num_heads, dropout=0.0, use_bias=True, argument
     88  self._num_heads = num_heads
    201  def __init__(self, units=128, hidden_size=512, num_heads=4, argument
    213  self.attention_cell = DotProductSelfAttentionCell(units, num_heads,
    311  max_length=50, num_heads=4, dropout=0.0, argument
    316  assert units % num_heads == 0,\
    319  .format(units, num_heads)
    336  units=units, hidden_size=hidden_size, num_heads=num_heads,
   1305  num_heads=predefined_args['num_heads'],
   1520  num_heads=predefined_args['num_heads'],
    [all …]
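Both transformer.py and bert.py assert units % num_heads == 0 before splitting the
model width across heads, since each head operates on units // num_heads channels.
A minimal sketch of that invariant (the helper name check_head_config is ours, not
GluonNLP's):

    def check_head_config(units, num_heads):
        # The model width must split evenly across heads; otherwise the
        # per-head projections cannot be concatenated back to `units`.
        assert units % num_heads == 0, \
            'units ({}) must be divisible by num_heads ({})'.format(units, num_heads)
        return units // num_heads

    head_dim = check_head_config(units=128, num_heads=4)   # 32 channels per head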
/dports/misc/py-gluonnlp/gluon-nlp-0.10.0/scripts/language_model/transformer/

transformer.py
    112  self._num_heads = num_heads
    121  assert units % num_heads == 0
    173  units=128, hidden_size=2048, num_heads=4, scaled=True, dropout=0.0, argument
    182  'num_heads={}'.format(units, num_heads)
    188  self._num_heads = num_heads
    230  d_head=units // num_heads, num_heads=num_heads, scaled=scaled,
    234  hidden_size=hidden_size, num_heads=num_heads,
    539  'num_heads={}'.format(units, num_heads)
    544  self._num_heads = num_heads
    560  d_head=units // num_heads, num_heads=num_heads, scaled=scaled,
    [all …]
attention_cell.py
     73  def __init__(self, d_head: int, num_heads: int, dropout: float, scaled: bool,
     78  self._num_heads = num_heads
     87  mx.gluon.nn.Dense(units=d_head * num_heads, use_bias=False, flatten=False,
     90  self.query_key_bias = self.params.get('query_key_bias', shape=(num_heads, d_head),
     92  self.query_emb_bias = self.params.get('query_emb_bias', shape=(num_heads, d_head),
    219  def __init__(self, d_head: int, num_heads: int, dropout: float, scaled: bool,
    224  self._num_heads = num_heads
    233  mx.gluon.nn.Dense(units=d_head * num_heads, use_bias=False, flatten=False,
    236  self.query_key_bias = self.params.get('query_key_bias', shape=(num_heads, d_head),
    238  self.query_emb_bias = self.params.get('query_emb_bias', shape=(num_heads, d_head),
    [all …]
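The query_key_bias and query_emb_bias parameters of shape (num_heads, d_head) match
the per-head content and position biases of Transformer-XL style relative attention.
A numpy sketch of how such a bias enters the content score, assuming that
interpretation (tensor layout and sizes are illustrative):

    import numpy as np

    num_heads, d_head, qlen, klen = 4, 16, 6, 6
    query = np.random.rand(num_heads, qlen, d_head)
    key = np.random.rand(num_heads, klen, d_head)

    # Learned per-head bias of shape (num_heads, d_head), randomly
    # initialized here purely for illustration.
    query_key_bias = np.random.rand(num_heads, d_head)

    # Content score: the bias is broadcast over all query positions.
    scores = np.einsum('hqd,hkd->hqk', query + query_key_bias[:, None, :], key)
    scores /= np.sqrt(d_head)   # the 'scaled' flag in the constructors above
    assert scores.shape == (num_heads, qlen, klen)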
/dports/misc/ncnn/ncnn-20211208/tools/pnnx/tests/

test_nn_MultiheadAttention.py
     23  self.attention_0_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4)
     24  …self.attention_0_1 = nn.MultiheadAttention(embed_dim=64, num_heads=8, bias=False, add_bias_kv=Fals…
     25  …self.attention_0_2 = nn.MultiheadAttention(embed_dim=64, num_heads=16, bias=True, add_bias_kv=True…
     28  self.attention_1_0 = nn.MultiheadAttention(embed_dim=40, num_heads=4, batch_first=True)
     29  …self.attention_1_1 = nn.MultiheadAttention(embed_dim=40, num_heads=8, bias=False, add_bias_kv=Fals…
     30  …self.attention_1_2 = nn.MultiheadAttention(embed_dim=40, num_heads=10, bias=True, add_bias_kv=True…
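These tests sweep torch.nn.MultiheadAttention over head counts and layouts; in every
configuration embed_dim divides evenly by num_heads (64/4, 64/8, 64/16, 40/4, 40/8,
40/10). A small self-attention usage sketch (input sizes are illustrative):

    import torch
    import torch.nn as nn

    # embed_dim must be divisible by num_heads (64 / 4 = 16 channels per head).
    attn = nn.MultiheadAttention(embed_dim=64, num_heads=4)

    # Default layout is (seq_len, batch, embed_dim); the attention_1_* modules
    # pass batch_first=True to use (batch, seq_len, embed_dim) instead.
    x = torch.rand(7, 1, 64)
    out, weights = attn(x, x, x)   # self-attention: query = key = value
    print(out.shape)               # torch.Size([7, 1, 64])
    print(weights.shape)           # torch.Size([1, 7, 7]), averaged over heads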
/dports/math/py-pytorchvideo/pytorchvideo-0.1.3/pytorchvideo/layers/

attention.py
    175  num_heads: int = 8,
    217  self.num_heads = num_heads
    218  head_dim = dim // num_heads
    321  .reshape(batch_size, q_size, self.num_heads, chan_size // self.num_heads)
    326  .reshape(batch_size, k_size, self.num_heads, chan_size // self.num_heads)
    331  .reshape(batch_size, v_size, self.num_heads, chan_size // self.num_heads)
    406  x = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
    462  num_heads: int,
    514  num_heads=num_heads,
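The reshape(...).permute(0, 2, 1, 3) at line 406 is the standard split of the channel
axis into heads, moving the head axis next to the batch axis so attention runs per
head. A sketch of that layout transform and its inverse (sizes are illustrative):

    import torch

    B, N, C, num_heads = 2, 10, 96, 8
    x = torch.rand(B, N, C)

    # Split channels into heads: (B, N, C) -> (B, num_heads, N, C // num_heads).
    head_dim = C // num_heads
    x_heads = x.reshape(B, N, num_heads, head_dim).permute(0, 2, 1, 3)
    assert x_heads.shape == (B, num_heads, N, head_dim)

    # Merging back is the inverse permute followed by a reshape.
    x_merged = x_heads.permute(0, 2, 1, 3).reshape(B, N, C)
    assert torch.equal(x, x_merged)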
/dports/misc/mxnet/incubator-mxnet-1.9.0/3rdparty/mkldnn/tests/benchdnn/inputs/matmul/

shapes_bert
      2  # mb = 1, num_heads = 12, hidden_size = 1024, t_x = t_y = 128
      6  # mb = 128, num_heads = 12, hidden_size = 768, t_x = t_y = 128,
     10  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 128,
shapes_bert_large
      2  # mb = 1, num_heads = 12, hidden_size = 768, t_x = t_y = 384
      6  # mb = 128, num_heads = 12, hidden_size = 768, t_x = t_y = 384
     10  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 384
shapes_transformer
      2  # mb = 1, num_heads = 16, hidden_size = 1024, t_x = t_y = 40
      5  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 40
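For the Q x K^T score computation these parameters translate into one small matmul
per head per batch element; that per-head decomposition is the usual attention
layout and is assumed here rather than read from the shape lines themselves. A quick
arithmetic sketch:

    # Sizes follow the last shapes_transformer comment above.
    mb, num_heads, hidden_size, t_x, t_y = 128, 16, 1024, 40, 40

    head_dim = hidden_size // num_heads      # 64 channels per head
    batch = mb * num_heads                   # 2048 independent matmuls
    flops = 2 * batch * t_x * head_dim * t_y
    print(batch, t_x, head_dim, t_y, flops)  # 2048 40 64 40 419430400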
/dports/math/onednn/oneDNN-2.5.1/tests/benchdnn/inputs/matmul/

shapes_bert
      2  # mb = 1, num_heads = 12, hidden_size = 1024, t_x = t_y = 128
      6  # mb = 128, num_heads = 12, hidden_size = 768, t_x = t_y = 128,
     10  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 128,
shapes_bert_large
      2  # mb = 1, num_heads = 12, hidden_size = 768, t_x = t_y = 384
      6  # mb = 128, num_heads = 12, hidden_size = 768, t_x = t_y = 384
     10  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 384
shapes_transformer
      2  # mb = 1, num_heads = 16, hidden_size = 1024, t_x = t_y = 40
      5  # mb = 128, num_heads = 16, hidden_size = 1024, t_x = t_y = 40
/dports/graphics/vapoursynth-waifu2x-ncnn-vulkan/vapoursynth-waifu2x-ncnn-vulkan-r4/deps/ncnn/tests/

test_multiheadattention.cpp
     18  static int test_multiheadattention(const ncnn::Mat& a, int num_heads) in test_multiheadattention() argument
     24  pd.set(1, num_heads); in test_multiheadattention()
     51  static int test_multiheadattention_sameqkv(const ncnn::Mat& a, int num_heads) in test_multiheadattention_sameqkv() argument
     57  pd.set(1, num_heads); in test_multiheadattention_sameqkv()
/dports/graphics/waifu2x-ncnn-vulkan/waifu2x-ncnn-vulkan-20210521/src/ncnn/tests/

test_multiheadattention.cpp
     18  static int test_multiheadattention(const ncnn::Mat& a, int num_heads) in test_multiheadattention() argument
     24  pd.set(1, num_heads); in test_multiheadattention()
     51  static int test_multiheadattention_sameqkv(const ncnn::Mat& a, int num_heads) in test_multiheadattention_sameqkv() argument
     57  pd.set(1, num_heads); in test_multiheadattention_sameqkv()
/dports/benchmarks/vkpeak/vkpeak-20210430/ncnn/tests/

test_multiheadattention.cpp
     18  static int test_multiheadattention(const ncnn::Mat& a, int num_heads) in test_multiheadattention() argument
     24  pd.set(1, num_heads); in test_multiheadattention()
     51  static int test_multiheadattention_sameqkv(const ncnn::Mat& a, int num_heads) in test_multiheadattention_sameqkv() argument
     57  pd.set(1, num_heads); in test_multiheadattention_sameqkv()
/dports/misc/ncnn/ncnn-20211208/tests/

test_multiheadattention.cpp
     18  static int test_multiheadattention(const ncnn::Mat& a, int num_heads) in test_multiheadattention() argument
     24  pd.set(1, num_heads); in test_multiheadattention()
     51  static int test_multiheadattention_sameqkv(const ncnn::Mat& a, int num_heads) in test_multiheadattention_sameqkv() argument
     57  pd.set(1, num_heads); in test_multiheadattention_sameqkv()
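test_multiheadattention_sameqkv feeds one matrix as query, key, and value. A numpy
reference of what such a layer computes in that case, assuming identity projections
purely for illustration (the real ncnn layer has learned weights; the snippets only
show pd.set(1, num_heads) setting the head-count parameter):

    import numpy as np

    def multihead_self_attention_ref(a, num_heads):
        # Split the embedding into heads: (seqlen, embed) -> (heads, seqlen, head_dim).
        seqlen, embed_dim = a.shape
        head_dim = embed_dim // num_heads
        heads = a.reshape(seqlen, num_heads, head_dim).transpose(1, 0, 2)

        # Scaled dot-product scores, then a row-wise softmax per head.
        scores = heads @ heads.transpose(0, 2, 1) / np.sqrt(head_dim)
        scores = np.exp(scores - scores.max(axis=-1, keepdims=True))
        weights = scores / scores.sum(axis=-1, keepdims=True)

        out = weights @ heads                    # (heads, seqlen, head_dim)
        return out.transpose(1, 0, 2).reshape(seqlen, embed_dim)

    print(multihead_self_attention_ref(np.random.rand(5, 8), num_heads=2).shape)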
/dports/math/py-pytorchvideo/pytorchvideo-0.1.3/pytorchvideo/models/

vision_transformers.py
    114  num_heads: int = 1,
    313  num_heads = round_width(num_heads, head_mul[i], min_width=1, divisor=1)
    314  patch_embed_dim = round_width(patch_embed_dim, dim_mul[i], divisor=num_heads)
    318  divisor=round_width(num_heads, head_mul[i + 1]),
    325  num_heads=num_heads,
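round_width here rescales the head count and embedding width per MViT stage while
keeping the width divisible by the head count. A sketch of a round_width-style
helper following the common make-divisible rule (pytorchvideo's exact rounding and
tie-breaking may differ):

    def round_width(width, multiplier, min_width=1, divisor=1):
        # Scale a channel/head count and round to a multiple of `divisor`,
        # never dropping below `min_width`.
        if not multiplier:
            return width
        width *= multiplier
        return max(min_width, int(width + divisor / 2) // divisor * divisor)

    # Stage transition: double the heads, then widen the embedding so it
    # stays divisible by the new head count.
    num_heads = round_width(1, 2.0, min_width=1, divisor=1)     # -> 2
    patch_embed_dim = round_width(96, 2.0, divisor=num_heads)   # -> 192
    print(num_heads, patch_embed_dim)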
masked_multistream.py
    104  def __init__(self, feature_dim: int, num_heads: int = 1):
    112  embed_dim=feature_dim, num_heads=num_heads
    283  num_heads: int = 1,
    294  nn.TransformerEncoderLayer(dim_in, num_heads), num_layers
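Line 294 stacks identical nn.TransformerEncoderLayer blocks; d_model (dim_in here)
must again be divisible by num_heads. A minimal usage sketch with illustrative sizes:

    import torch
    import torch.nn as nn

    feature_dim, num_heads = 64, 4

    # Stacked encoder built the same way as in masked_multistream.py:
    # one layer spec, replicated num_layers times.
    encoder = nn.TransformerEncoder(
        nn.TransformerEncoderLayer(feature_dim, num_heads), num_layers=2)

    x = torch.rand(10, 3, feature_dim)   # (seq_len, batch, feature_dim)
    print(encoder(x).shape)              # torch.Size([10, 3, 64])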
/dports/x11/workrave/workrave-1.10.44/frontend/gtkmm/src/

GUI.cc
    135  num_heads(-1), in GUI()
    591  if (new_num_heads != num_heads || num_heads <= 0) in init_multihead_mem()
    602  int max_heads = new_num_heads > num_heads ? new_num_heads : num_heads; in init_multihead_mem()
    609  if (i < num_heads) in init_multihead_mem()
    651  num_heads = new_num_heads; in init_multihead_mem()
    664  for (int i = 0; i < num_heads; i++) in init_multihead_desktop()
    762  num_heads = count; in init_gtk_multihead()
   1033  for (int i = 0; i < num_heads; i++) in create_prelude_window()
   1038  active_prelude_count = num_heads; in create_prelude_window()
   1095  active_break_count = num_heads; in create_break_window()
    [all …]
/dports/math/py-flax/flax-0.3.3/flax/linen/

attention.py
    141  num_heads: int
    183  assert qkv_features % self.num_heads == 0, (
    185  head_dim = qkv_features // self.num_heads
    189  features=(self.num_heads, head_dim),
    212  *batch_dims, max_length, num_heads, depth_per_head = (
    215  expected_shape = tuple(batch_dims) + (1, num_heads, depth_per_head)
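Flax projects straight to a per-head layout: DenseGeneral with a tuple of features
maps (..., in_dim) to (..., num_heads, head_dim) in one step, which is what the
features=(self.num_heads, head_dim) line above does. A small runnable sketch
(sizes are illustrative):

    import jax
    import jax.numpy as jnp
    import flax.linen as nn

    num_heads, qkv_features = 4, 32
    head_dim = qkv_features // num_heads   # the assert above guards this division

    # One projection producing a separate axis per head.
    proj = nn.DenseGeneral(features=(num_heads, head_dim))

    x = jnp.ones((2, 7, 16))               # (batch, length, in_dim)
    params = proj.init(jax.random.PRNGKey(0), x)
    print(proj.apply(params, x).shape)     # (2, 7, 4, 8)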
/dports/misc/py-gluonnlp/gluon-nlp-0.10.0/scripts/text_generation/model/

gpt.py
     70  def __init__(self, units, num_heads, dropout=0.0, argument
     75  self._num_heads = num_heads
     76  assert units % num_heads == 0
    215  def __init__(self, units, vocab_size, max_length, num_layers, num_heads, dropout=0.0, argument
    221  self._num_heads = num_heads
    236  units=units, num_heads=num_heads, dropout=dropout,
/dports/misc/ncnn/ncnn-20211208/tools/pnnx/tests/ncnn/

test_nn_MultiheadAttention.py
     23  self.attention_0_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4)
     26  self.attention_1_0 = nn.MultiheadAttention(embed_dim=40, num_heads=4, batch_first=True)
/dports/games/libretro-mame2003_plus/mame2003-plus-libretro-17e9889/src/machine/

idectrl.c
    141  UINT8 num_heads; member
    286  ide->num_heads = hdinfo->heads; in ide_controller_init_custom()
    339  state_save_register_UINT8 ("ide", which, "num_heads", &ide->num_heads, 1); in ide_controller_init_custom()
    507  if (ide->cur_head >= ide->num_heads) in next_sector()
    539  int total_sectors = ide->num_cylinders * ide->num_heads * ide->num_sectors; in ide_build_features()
    540  int sectors_per_track = ide->num_heads * ide->num_sectors; in ide_build_features()
    551  ide->features[ 3*2+0] = ide->num_heads & 0xff; /* 3: logical heads */ in ide_build_features()
    552  ide->features[ 3*2+1] = ide->num_heads >> 8; in ide_build_features()
    593  ide->features[55*2+0] = ide->num_heads & 0xff; /* 55: number of current logical heads */ in ide_build_features()
    594  ide->features[55*2+1] = ide->num_heads >> 8; in ide_build_features()
    [all …]
/dports/games/libretro-mame2003/mame2003-libretro-4358db4/src/machine/

idectrl.c
    148  UINT8 num_heads; member
    293  ide->num_heads = hdinfo->heads; in ide_controller_init_custom()
    298  printf("CHS: %d %d %d\n", ide->num_cylinders, ide->num_heads, ide->num_sectors); in ide_controller_init_custom()
    514  if (ide->cur_head >= ide->num_heads) in next_sector()
    546  int total_sectors = ide->num_cylinders * ide->num_heads * ide->num_sectors; in ide_build_features()
    547  int sectors_per_track = ide->num_heads * ide->num_sectors; in ide_build_features()
    558  ide->features[ 3*2+0] = ide->num_heads & 0xff; /* 3: logical heads */ in ide_build_features()
    559  ide->features[ 3*2+1] = ide->num_heads >> 8; in ide_build_features()
    600  ide->features[55*2+0] = ide->num_heads & 0xff; /* 55: number of current logical heads */ in ide_build_features()
    601  ide->features[55*2+1] = ide->num_heads >> 8; in ide_build_features()
    [all …]
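Unlike the attention uses above, num_heads here is disk geometry: total capacity is
cylinders x heads x sectors, and IDENTIFY DEVICE stores 16-bit words as little-endian
byte pairs, hence the & 0xff / >> 8 split in ide_build_features. The arithmetic
redone in Python with a typical small-drive geometry (values are illustrative, not
from the MAME source):

    num_cylinders, num_heads, num_sectors = 1024, 16, 63

    total_sectors = num_cylinders * num_heads * num_sectors   # 1032192 sectors
    sectors_per_track = num_heads * num_sectors               # 1008

    # Word 3 of the 512-byte IDENTIFY block: logical heads, low byte then high.
    features = bytearray(512)
    features[3*2 + 0] = num_heads & 0xff
    features[3*2 + 1] = num_heads >> 8
    print(total_sectors, sectors_per_track, features[6], features[7])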