//------------------------------------------------------------------------------
// GB_subassign_05f: C(:,:)<C,struct> = scalar ; no S, C anything, M structural
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 05f: C(:,:)<C,struct> = scalar ; no S
// compare with Method 05e

// M: present and aliased with C
// Mask_comp: false
// Mask_struct: true
// C_replace: false or effectively false
// accum: NULL
// A: scalar
// S: none

// C is aliased to M, and can have any sparsity on input, so M is not provided
// here.  All values in C are replaced by the scalar.  C can have any sparsity
// format (hyper/sparse/bitmap/full).  If C is bitmap, only assignments where
// (Cb [pC] == 1) are needed, but it's faster to just assign the scalar to all
// entries in Cx.

// TODO::: when uniform-valued matrices are supported, this method will take
// O(1) time.

// TODO: this method can be merged with 05d
#include "GB_subassign_methods.h"

// this method does no allocation, so there is nothing to free on error
#undef  GB_FREE_ALL
#define GB_FREE_ALL
36
GB_subassign_05f(GrB_Matrix C,const void * scalar,const GrB_Type atype,GB_Context Context)37 GrB_Info GB_subassign_05f
38 (
39 GrB_Matrix C,
40 // input:
41 const void *scalar,
42 const GrB_Type atype,
43 GB_Context Context
44 )
45 {
46
47 //--------------------------------------------------------------------------
48 // get inputs
49 //--------------------------------------------------------------------------
50
51 GrB_Info info ;
52 ASSERT_MATRIX_OK (C, "C for subassign method_05f", GB0) ;
53
54 // C can be jumbled, in which case it remains so C on output
55 ASSERT (!GB_ZOMBIES (C)) ;
56 ASSERT (GB_JUMBLED_OK (C)) ;
57 ASSERT (!GB_PENDING (C)) ;
58
59 const GB_Type_code ccode = C->type->code ;
60 const size_t csize = C->type->size ;
61 GB_GET_SCALAR ;
62
63 int64_t cnz = GB_NNZ_HELD (C) ;
64
65 //--------------------------------------------------------------------------
66 // Method 05f: C(:,:)<C,s> = x ; C anything, x is a scalar, structural mask
67 //--------------------------------------------------------------------------
68
69 // Time: Optimal: the method must iterate over all entries in C, and the
70 // time is O(nnz(C)). When uniform-valued matrices are supported, this
71 // method will take O(1) time.
72
73 //--------------------------------------------------------------------------
74 // determine the number of threads to use
75 //--------------------------------------------------------------------------
76
77 GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
78 int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
79
80 int64_t pC ;
81
82 //--------------------------------------------------------------------------
83 // define the worker for the switch factory
84 //--------------------------------------------------------------------------
85
86 // worker for built-in types
87 #define GB_WORKER(ctype) \
88 { \
89 ctype *restrict Cx = (ctype *) C->x ; \
90 ctype x = (*(ctype *) cwork) ; \
91 GB_PRAGMA (omp parallel for num_threads(nthreads) schedule(static)) \
92 for (pC = 0 ; pC < cnz ; pC++) \
93 { \
94 Cx [pC] = x ; \
95 } \
96 } \
97 break ;
98
99 //--------------------------------------------------------------------------
100 // launch the switch factory
101 //--------------------------------------------------------------------------
102
103 // TODO: use fewer cases (1, 2, 4, 8, 16 bytes, and uint's)
104
105 switch (C->type->code)
106 {
107 case GB_BOOL_code : GB_WORKER (bool) ;
108 case GB_INT8_code : GB_WORKER (int8_t) ;
109 case GB_INT16_code : GB_WORKER (int16_t) ;
110 case GB_INT32_code : GB_WORKER (int32_t) ;
111 case GB_INT64_code : GB_WORKER (int64_t) ;
112 case GB_UINT8_code : GB_WORKER (uint8_t) ;
113 case GB_UINT16_code : GB_WORKER (uint16_t) ;
114 case GB_UINT32_code : GB_WORKER (uint32_t) ;
115 case GB_UINT64_code : GB_WORKER (uint64_t) ;
116 case GB_FP32_code : GB_WORKER (float) ;
117 case GB_FP64_code : GB_WORKER (double) ;
118 case GB_FC32_code : GB_WORKER (GxB_FC32_t) ;
119 case GB_FC64_code : GB_WORKER (GxB_FC64_t) ;
120 default:
121 {
122 // worker for all user-defined types
123 GB_BURBLE_N (cnz, "(generic C(:,:)<C,struct>=x assign) ") ;
124 GB_void *restrict Cx = (GB_void *) C->x ;
125 #pragma omp parallel for num_threads(nthreads) schedule(static)
126 for (pC = 0 ; pC < cnz ; pC++)
127 {
128 memcpy (Cx +((pC)*csize), cwork, csize) ;
129 }
130 }
131 break ;
132 }
133
134 //--------------------------------------------------------------------------
135 // return result
136 //--------------------------------------------------------------------------
137
138 ASSERT_MATRIX_OK (C, "C output for subassign method_05f", GB0) ;
139 ASSERT (GB_JUMBLED_OK (C)) ;
140 return (GrB_SUCCESS) ;
141 }