//------------------------------------------------------------------------------
// GB_assign_prep: check and prepare inputs for GB_assign and GB_subassign
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// GB_assign_prep checks the inputs for all assignment methods:
// GrB_Row_assign, GrB_Col_assign, GrB_assign, and GxB_subassign.

#include "GB_subassign.h"
#include "GB_bitmap_assign.h"
#include "GB_assign_zombie.h"
#include "GB_subassign_methods.h"
#include "GB_transpose.h"
#include "GB_subref.h"

#undef  GB_FREE_ALL
#define GB_FREE_ALL                 \
{                                   \
    GB_Matrix_free (&C2) ;          \
    GB_Matrix_free (&A2) ;          \
    GB_Matrix_free (&AT) ;          \
    GB_Matrix_free (&M2) ;          \
    GB_Matrix_free (&MT) ;          \
    GB_FREE_WERK (&I2, I2_size) ;   \
    GB_FREE_WERK (&J2, J2_size) ;   \
    GB_FREE_WERK (&I2k, I2k_size) ; \
    GB_FREE_WERK (&J2k, J2k_size) ; \
}
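
// Note: GB_OK (used below) is assumed to invoke GB_FREE_ALL and return the
// error code if the method it wraps fails, so each error path releases C2,
// A2, AT, M2, MT, and the I2/J2/I2k/J2k workspace allocated here.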

GrB_Info GB_assign_prep
(
    // output:
    GrB_Matrix *Chandle,            // C_in, or C2 if C is aliased to M or A
    GrB_Matrix *Mhandle,            // M_in, or a modified version M2
    GrB_Matrix *Ahandle,            // A_in, or a modified version A2

    // modified versions of the matrices C, M, and A:
    GrB_Matrix *C2_handle,          // NULL, or a copy of C
    GrB_Matrix *M2_handle,          // NULL, or a temporary matrix
    GrB_Matrix *A2_handle,          // NULL, or a temporary matrix

    // static headers for C2, M2, A2, MT and AT
    GrB_Matrix C2_header_handle,
    GrB_Matrix M2_header_handle,
    GrB_Matrix A2_header_handle,
    GrB_Matrix MT_header_handle,
    GrB_Matrix AT_header_handle,

    // modified versions of the Rows/Cols lists, and their analysis:
    GrB_Index **I_handle,           // Rows, Cols, or a modified copy I2
    GrB_Index **I2_handle,          // NULL, or sorted/pruned Rows or Cols
    size_t *I2_size_handle,
    int64_t *ni_handle,
    int64_t *nI_handle,
    int *Ikind_handle,
    int64_t Icolon [3],

    GrB_Index **J_handle,           // Rows, Cols, or a modified copy J2
    GrB_Index **J2_handle,          // NULL, or sorted/pruned Rows or Cols
    size_t *J2_size_handle,
    int64_t *nj_handle,
    int64_t *nJ_handle,
    int *Jkind_handle,
    int64_t Jcolon [3],

    bool *done,                     // true if the prep has finished all work
    GrB_Type *atype_handle,         // type of A or the scalar

    // input/output
    GrB_Matrix C_in,                // input/output matrix for results
    bool *C_replace,                // descriptor for C
    int *assign_kind,               // row/col assign, assign, or subassign

    // input
    const GrB_Matrix M_in,          // optional mask for C
    const bool Mask_comp,           // true if mask is complemented
    const bool Mask_struct,         // if true, use only the structure of M
    bool M_transpose,               // true if the mask should be transposed
    const GrB_BinaryOp accum,       // optional accum for accum(C,T)
    const GrB_Matrix A_in,          // input matrix
    bool A_transpose,               // true if A is transposed
    const GrB_Index *Rows,          // row indices
    const GrB_Index nRows_in,       // number of row indices
    const GrB_Index *Cols,          // column indices
    const GrB_Index nCols_in,       // number of column indices
    const bool scalar_expansion,    // if true, expand scalar to A
    const void *scalar,             // scalar to be expanded
    const GB_Type_code scalar_code, // type code of scalar to expand
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GB_RETURN_IF_FAULTY_OR_POSITIONAL (accum) ;
    GB_RETURN_IF_NULL (Rows) ;
    GB_RETURN_IF_NULL (Cols) ;

    GrB_Matrix C = C_in ;
    GrB_Matrix M = M_in ;
    GrB_Matrix A = A_in ;

    ASSERT_MATRIX_OK (C, "C input for GB_assign/subassign", GB0) ;
    ASSERT (!GB_is_shallow (C)) ;
    ASSERT_MATRIX_OK_OR_NULL (M, "M for GB_assign/subassign", GB0) ;
    ASSERT_BINARYOP_OK_OR_NULL (accum, "accum for GB_assign/subassign", GB0) ;
    ASSERT (scalar_code <= GB_UDT_code) ;

    GrB_Matrix C2 = NULL ;
    GrB_Matrix M2 = NULL ;
    GrB_Matrix A2 = NULL ;
    GrB_Matrix MT = NULL ;
    GrB_Matrix AT = NULL ;

    GrB_Index *I2  = NULL ; size_t I2_size = 0 ;
    GrB_Index *J2  = NULL ; size_t J2_size = 0 ;
    GrB_Index *I2k = NULL ; size_t I2k_size = 0 ;
    GrB_Index *J2k = NULL ; size_t J2k_size = 0 ;
    (*done) = false ;
    (*atype_handle) = NULL ;

    (*Chandle) = NULL ;
    (*Mhandle) = NULL ;
    (*Ahandle) = NULL ;

    (*C2_handle) = NULL ;
    (*A2_handle) = NULL ;
    (*M2_handle) = NULL ;

    (*I_handle) = NULL ;
    (*I2_handle) = NULL ;
    (*I2_size_handle) = 0 ;
    (*ni_handle) = 0 ;
    (*nI_handle) = 0 ;
    (*Ikind_handle) = 0 ;

    (*J_handle) = NULL ;
    (*J2_handle) = NULL ;
    (*J2_size_handle) = 0 ;
    (*nj_handle) = 0 ;
    (*nJ_handle) = 0 ;
    (*Jkind_handle) = 0 ;

    //--------------------------------------------------------------------------
    // determine the type of A or the scalar
    //--------------------------------------------------------------------------

    GrB_Type atype ;
    if (scalar_expansion)
    {
        // for scalar expansion, the NULL pointer case has already been
        // checked for user-defined types, and scalar can't be NULL for
        // built-in types.
        ASSERT (scalar != NULL) ;
        ASSERT (A == NULL) ;
        ASSERT ((*assign_kind) == GB_ASSIGN || (*assign_kind) == GB_SUBASSIGN) ;
        atype = GB_code_type (scalar_code, C->type) ;
    }
    else
    {
        // GrB_*assign, not scalar:  The user's input matrix has been checked.
        // The pointer to the scalar is NULL.
        ASSERT (scalar == NULL) ;
        ASSERT_MATRIX_OK (A, "A for GB_assign/GB_subassign", GB0) ;
        atype = A->type ;
    }

    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------

    // zombies and pending tuples in C are OK, but not in M or A
    GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (M) ;
    GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ;

    // some kernels allow for M and A to be jumbled
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;

    // C can have any kind of pending work
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (GB_PENDING_OK (C)) ;

    //--------------------------------------------------------------------------
    // check domains of C, M, A, and accum
    //--------------------------------------------------------------------------

    // GB_compatible is not used, since most of its checks differ slightly here
    if (accum != NULL)
    {
        // C<M>(Rows,Cols) = accum (C(Rows,Cols),A), or
        // C(Rows,Cols)<M> = accum (C(Rows,Cols),A)
        GB_OK (GB_BinaryOp_compatible (accum, C->type, C->type,
            (scalar_expansion) ? NULL : A->type,
            (scalar_expansion) ? scalar_code : GB_ignore_code, Context)) ;
    }

    // C<M>(Rows,Cols) = T, so C and T must be compatible.
    // also C<M>(Rows,Cols) = accum(C,T) for entries in T but not C
    if (scalar_expansion)
    {
        if (!GB_code_compatible (C->type->code, scalar_code))
        {
            GB_ERROR (GrB_DOMAIN_MISMATCH, "Input scalar of type [%s]\n"
                "cannot be typecast to output of type [%s]",
                GB_code_string (scalar_code), C->type->name) ;
        }
    }
    else
    {
        if (!GB_Type_compatible (C->type, A->type))
        {
            GB_ERROR (GrB_DOMAIN_MISMATCH, "Input of type [%s]\n"
                "cannot be typecast to output of type [%s]",
                A->type->name, C->type->name) ;
        }
    }

    if (M != NULL && !Mask_struct)
    {
        // M is typecast to boolean
        if (!GB_Type_compatible (M->type, GrB_BOOL))
        {
            GB_ERROR (GrB_DOMAIN_MISMATCH,
                "M of type [%s] cannot be typecast to boolean", M->type->name) ;
        }
    }

    //--------------------------------------------------------------------------
    // determine the properties of the Rows and Cols index lists
    //--------------------------------------------------------------------------

    int64_t nRows, nCols, RowColon [3], ColColon [3] ;
    int RowsKind, ColsKind ;
    GB_ijlength (Rows, nRows_in, GB_NROWS (C), &nRows, &RowsKind, RowColon) ;
    GB_ijlength (Cols, nCols_in, GB_NCOLS (C), &nCols, &ColsKind, ColColon) ;

    //--------------------------------------------------------------------------
    // check the dimensions of M
    //--------------------------------------------------------------------------

    if (M != NULL)
    {
        // check the mask: size depends on the method

        switch (*assign_kind)
        {
            case GB_ROW_ASSIGN :
            {
                // GrB_Row_assign:
                // M is a column vector the same size as one row of C
                ASSERT (nRows == 1) ;
                ASSERT (!scalar_expansion) ;
                ASSERT (GB_VECTOR_OK (M)) ;
                if (GB_NROWS (M) != GB_NCOLS (C))
                {
                    GB_ERROR (GrB_DIMENSION_MISMATCH, "Mask vector m length"
                        " is " GBd "; must match the number of columns of C ("
                        GBd ")", GB_NROWS (M), GB_NCOLS (C)) ;
                }
            }
            break ;

            case GB_COL_ASSIGN :
            {
                // GrB_Col_assign:
                // M is a column vector the same size as one column of C
                ASSERT (nCols == 1) ;
                ASSERT (!scalar_expansion) ;
                ASSERT (GB_VECTOR_OK (M)) ;
                if (GB_NROWS (M) != GB_NROWS (C))
                {
                    GB_ERROR (GrB_DIMENSION_MISMATCH, "Mask vector m length"
                        " is " GBd "; must match the number of rows of C ("
                        GBd ")", GB_NROWS (M), GB_NROWS (C)) ;
                }
            }
            break ;

            case GB_ASSIGN :
            {
                // GrB_Matrix_assign, GrB_Vector_assign, and scalar variants: M
                // is a matrix the same size as C for entire matrix (or vector)
                // assignment, where A is either a matrix or a scalar
                if (GB_NROWS (M) != GB_NROWS (C) ||
                    GB_NCOLS (M) != GB_NCOLS (C))
                {
                    GB_ERROR (GrB_DIMENSION_MISMATCH, "Mask M is " GBd "-by-"
                        GBd "; " "must match result C (" GBd "-by-" GBd ")",
                        GB_NROWS (M), GB_NCOLS (M),
                        GB_NROWS (C), GB_NCOLS (C)) ;
                }
            }
            break ;

            case GB_SUBASSIGN :
            {
                // GxB_subassign: M is a matrix the same size as C(Rows,Cols)
                int64_t mnrows = M_transpose ? GB_NCOLS (M) : GB_NROWS (M) ;
                int64_t mncols = M_transpose ? GB_NROWS (M) : GB_NCOLS (M) ;
                if (mnrows != nRows || mncols != nCols)
                {
                    GB_ERROR (GrB_DIMENSION_MISMATCH,
                        "M is " GBd "-by-" GBd "%s, "
                        "must match size of result C(I,J): " GBd "-by-" GBd "",
                        mnrows, mncols, M_transpose ? " (transposed)" : "",
                        nRows, nCols) ;
                }
            }
            break ;

            default:
                ASSERT (GB_DEAD_CODE) ;
        }
    }

    //--------------------------------------------------------------------------
    // check the dimensions of A
    //--------------------------------------------------------------------------

    if (!scalar_expansion)
    {
        int64_t anrows = (A_transpose) ? GB_NCOLS (A) : GB_NROWS (A) ;
        int64_t ancols = (A_transpose) ? GB_NROWS (A) : GB_NCOLS (A) ;
        if (nRows != anrows || nCols != ancols)
        {
            GB_ERROR (GrB_DIMENSION_MISMATCH,
                "Dimensions not compatible:\n"
                "C(Rows,Cols) is " GBd "-by-" GBd "\n"
                "input is " GBd "-by-" GBd "%s",
                nRows, nCols, anrows, ancols,
                A_transpose ? " (transposed)" : "") ;
        }
    }

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of C:
    //--------------------------------------------------------------------------

    // GrB_Row_assign, GxB_Row_subassign: A is always a vector in CSC format,
    // and A_transpose is always true.  If C is in CSC format then A_transpose
    // remains true, and the n-by-1 vector A is transposed below into a 1-by-n
    // hypersparse CSC matrix.  If C is in CSR format then A_transpose becomes
    // false, and the assignment does not need to transpose A.  It remains in
    // CSC format but has the correct vector length and dimension for the
    // CSR/CSC-agnostic assignment.

    // GrB_Col_assign, GxB_Col_subassign: A is always a vector in CSC format,
    // and A_transpose is always false.  If C is in CSC format then A_transpose
    // remains false, and the assignment does not need to transpose A.  If C is
    // in CSR format then A_transpose becomes true, and the n-by-1 vector A
    // is transposed below into a 1-by-n hypersparse CSC matrix.  The CSC
    // format is ignored by the CSR/CSC-agnostic assignment.

    // GrB_Vector_assign, GxB_Vector_subassign:  both A and C are always in CSC
    // format, and A_transpose is always false, and doesn't change below.

    // GrB_Matrix_assign, GxB_Matrix_subassign:  A and C can be in any format,
    // and A_transpose can be true or false, depending on the descriptor.  If
    // the CSR/CSC formats of A and C are the same, then A_transpose remains
    // as-is.  If they differ, then A_transpose is flipped.  Then the CSR-CSC
    // agnostic assignment proceeds.

    bool C_is_csc = C->is_csc ;
    if (!scalar_expansion && C_is_csc != A->is_csc)
    {
        // Flip the sense of A_transpose
        A_transpose = !A_transpose ;
    }
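
    // In summary, the four cases are (a sketch, derived from the rules above):
    //
    //      C->is_csc    A->is_csc    A_transpose (in)    A_transpose (out)
    //      ---------    ---------    ----------------    -----------------
    //      true         true         t                   t  (unchanged)
    //      false        false        t                   t  (unchanged)
    //      true         false        t                   !t (flipped)
    //      false        true         t                   !t (flipped)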

    // get the I and J index lists
    int Ikind, Jkind ;
    const GrB_Index *I, *J ;
    int64_t ni, nj, nI, nJ ;

    if (C_is_csc)
    {
        // C is in CSC format
        I      = Rows     ;     J      = Cols     ;
        ni     = nRows_in ;     nj     = nCols_in ;
        Ikind  = RowsKind ;     Jkind  = ColsKind ;
        nI     = nRows    ;     nJ     = nCols    ;
        memcpy (Icolon, RowColon, 3 * sizeof (int64_t)) ;
        memcpy (Jcolon, ColColon, 3 * sizeof (int64_t)) ;
    }
    else
    {
        // C is in CSR format
        I       = Cols     ;    J       = Rows     ;
        ni      = nCols_in ;    nj      = nRows_in ;
        Ikind   = ColsKind ;    Jkind   = RowsKind ;
        nI      = nCols    ;    nJ      = nRows    ;
        memcpy (Icolon, ColColon, 3 * sizeof (int64_t)) ;
        memcpy (Jcolon, RowColon, 3 * sizeof (int64_t)) ;
        // flip the sense of row/col assign
        if ((*assign_kind) == GB_ROW_ASSIGN)
        {
            // assignment to vector j = J [0], which is Rows [0]
            (*assign_kind) = GB_COL_ASSIGN ;
        }
        else if ((*assign_kind) == GB_COL_ASSIGN)
        {
            // assignment to index i = I [0], which is Cols [0]
            (*assign_kind) = GB_ROW_ASSIGN ;
        }
    }

    // J is now a list of vectors in the range 0:C->vdim-1
    // I is now a list of indices in the range 0:C->vlen-1
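
    // For example, if C is m-by-n and held in CSR format, then C->vdim == m
    // and C->vlen == n, and the user-level C(Rows,Cols) is handled internally
    // as the CSC-style assignment C(I,J) with I = Cols and J = Rows.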

    bool whole_C_matrix = (Ikind == GB_ALL && Jkind == GB_ALL) ;

    //--------------------------------------------------------------------------
    // quick return if an empty mask is complemented
    //--------------------------------------------------------------------------

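    // use_bitmap_assign is true if C is already bitmap, or if C is full,
    // C_replace is true, and the sparsity control of C permits it to become
    // bitmap (deleting entries from a full C would turn it into a bitmap).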
    bool C_is_bitmap = GB_IS_BITMAP (C) ;
    int C_sparsity = GB_sparsity_control (C->sparsity, C->vdim) ;
    bool C_may_be_bitmap = (C_sparsity & GxB_BITMAP) ;
    bool use_bitmap_assign = (C_is_bitmap ||
        ((*C_replace) && GB_IS_FULL (C) && C_may_be_bitmap)) ;

    // an empty mask occurs when M is not present, but complemented

    if (M == NULL && Mask_comp)
    {

        //----------------------------------------------------------------------
        // C<!,replace or !replace>(I,J) = anything
        //----------------------------------------------------------------------

        // The mask M is empty, and complemented, and thus M(i,j)=0 for all i
        // and j.  The result does not depend on A or accum.  The output C is
        // either untouched (if C_replace is false) or cleared (if C_replace is
        // true).  However, GrB_Row_assign and GrB_Col_assign only clear
        // their specific row or column of C, respectively.  GB_subassign only
        // clears C(I,J).  GrB_assign clears all of C.

        // M is NULL so C and M cannot be the same, and A is ignored so
        // it doesn't matter whether or not C == A.  Thus C is not aliased
        // to the inputs.

        // This condition is like GB_RETURN_IF_QUICK_MASK(...), except that
        // the action taken by C_replace is different for row/col assign
        // and subassign.

        if (*C_replace)
        {

            //------------------------------------------------------------------
            // C<!,replace>(I,J) = anything
            //------------------------------------------------------------------

            ASSERT_MATRIX_OK (C, "C for quick mask", GB0) ;

            // to clear the whole C matrix: assign and subassign are the same

            switch (whole_C_matrix ? GB_ASSIGN : (*assign_kind))
            {

                //--------------------------------------------------------------
                // row assign: delete all entries in C(i,:)
                //--------------------------------------------------------------

                case GB_ROW_ASSIGN :
                {
                    // delete all entries in each vector with index i
                    GB_MATRIX_WAIT_IF_PENDING (C) ;
                    if (use_bitmap_assign)
                    {
                        // neither A nor the scalar are used, so convert this
                        // to a scalar assignment (the scalar is not used)
                        GBURBLE ("bitmap C(i,:)=zombie ") ;
                        int scalar_unused = 0 ;
                        GB_OK (GB_bitmap_assign (C, /* C_replace: */ true,
                            I,    1, GB_LIST, NULL, // I
                            NULL, 0, GB_ALL,  NULL, // J
                            /* no M: */ NULL,
                            /* Mask_comp: */ true,
                            /* Mask_struct: ignored */ false,
                            /* no accum: */ NULL,
                            /* no A: */ NULL,
                            /* scalar: */ &scalar_unused, GrB_INT32,
                            GB_ROW_ASSIGN, Context)) ;
                    }
                    else
                    {
                        GB_MATRIX_WAIT_IF_JUMBLED (C) ;
                        GB_ENSURE_SPARSE (C) ;
                        GBURBLE ("C(i,:)=zombie ") ;
                        GB_assign_zombie2 (C, I [0], Context) ;
                    }
                }
                break ;

                //--------------------------------------------------------------
                // col assign: delete all entries in C(:,j)
                //--------------------------------------------------------------

                case GB_COL_ASSIGN :
                {
                    GB_MATRIX_WAIT_IF_PENDING (C) ;
                    if (use_bitmap_assign)
                    {
                        // neither A nor the scalar are used, so convert this
                        // to a scalar assignment (the scalar is not used)
                        GBURBLE ("bitmap C(:,j)=zombie ") ;
                        int scalar_unused = 0 ;
                        GB_OK (GB_bitmap_assign (C, /* C_replace: */ true,
                            NULL, 0, GB_ALL,  NULL, // I
                            J,    1, GB_LIST, NULL, // J
                            /* no M: */ NULL,
                            /* Mask_comp: */ true,
                            /* Mask_struct: ignored */ false,
                            /* no accum: */ NULL,
                            /* no A: */ NULL,
                            /* scalar: */ &scalar_unused, GrB_INT32,
                            GB_COL_ASSIGN, Context)) ;
                    }
                    else
                    {
                        GB_ENSURE_SPARSE (C) ;
                        GBURBLE ("C(:,j)=zombie ") ;
                        GB_assign_zombie1 (C, J [0], Context) ;
                    }
                }
                break ;

                //--------------------------------------------------------------
                // assign: delete all entries in C
                //--------------------------------------------------------------

                case GB_ASSIGN :
                {
                    // C<!>=anything since result does not depend on computing
                    // Z.  Since C_replace is true, all of C is cleared.  This
                    // is the same as the GB_RETURN_IF_QUICK_MASK macro.
                    // GB_clear either converts C to an empty sparse/hyper
                    // matrix, or to a bitmap matrix with no entries, depending
                    // on its sparsity control setting.
                    GBURBLE ("clear C ") ;
                    GB_OK (GB_clear (C, Context)) ;
                }
                break ;

                //--------------------------------------------------------------
                // subassign: delete all entries in C(I,J)
                //--------------------------------------------------------------

                case GB_SUBASSIGN :
                {
                    GB_MATRIX_WAIT_IF_PENDING (C) ;
                    if (use_bitmap_assign)
                    {
                        // neither A nor the scalar are used, so convert this
                        // to a scalar assignment (the scalar is not used)
                        GBURBLE ("bitmap C(I,J)=zombie ") ;
                        int scalar_unused = 0 ;
                        GB_OK (GB_bitmap_assign (C, /* C_replace: */ true,
                            I, nI, Ikind, Icolon,
                            J, nJ, Jkind, Jcolon,
                            /* no M: */ NULL,
                            /* Mask_comp: */ true,
                            /* Mask_struct: ignored */ false,
                            /* no accum: */ NULL,
                            /* no A: */ NULL,
                            /* scalar: */ &scalar_unused, GrB_INT32,
                            GB_SUBASSIGN, Context)) ;
                    }
                    else
                    {
                        // Method 00: C(I,J) = empty, using S
                        GBURBLE ("C(I,J)=zombie ") ;
                        GB_ENSURE_SPARSE (C) ;
                        GB_OK (GB_subassign_zombie (C,
                            I, ni, nI, Ikind, Icolon,
                            J, nj, nJ, Jkind, Jcolon, Context)) ;
                    }
                }
                break ;

                default: ;
            }
        }

        //----------------------------------------------------------------------
        // finalize C if blocking mode is enabled, and return result
        //----------------------------------------------------------------------

        ASSERT_MATRIX_OK (C, "Final C for assign, quick mask", GB0) ;
        (*done) = true ;
        GB_FREE_ALL ;
        ASSERT (C == C_in) ;
        (*Chandle) = C ;
        return (GB_block (C, Context)) ;
    }

    //--------------------------------------------------------------------------
    // disable C_replace if no mask present
    //--------------------------------------------------------------------------

    bool no_mask = (M == NULL && !Mask_comp) ;
    if (no_mask)
    {
        // no_mask:  mask is not present, and not complemented
        if (*C_replace)
        {
            // The mask is not present and not complemented.  In this case,
            // C_replace is effectively false for subassign.  Disable it, since
            // it can force pending tuples to be assembled.
            GBURBLE ("(no mask: C_replace effectively false) ") ;
            (*C_replace) = false ;
        }
    }

    //--------------------------------------------------------------------------
    // delete pending tuples for C(:,:) = x and C(:,:) = A
    //--------------------------------------------------------------------------

    if (whole_C_matrix)
    {
        // If the assignment is C<M>(:,:) = ... then convert the assignment
        // into a subassign.
        (*assign_kind) = GB_SUBASSIGN ;
    }

    if (whole_C_matrix && no_mask && accum == NULL)
    {

        //----------------------------------------------------------------------
        // C(:,:) = x or A:  whole matrix assignment with no mask
        //----------------------------------------------------------------------

        // C_replace is already effectively false (see no_mask condition above)
        ASSERT ((*C_replace) == false) ;

        if (GB_aliased (C, A) && !A_transpose && !scalar_expansion)
        {
            // C = C, with C and A aliased, no transpose, no mask, no accum
            // operator, both I and J are ":", Mask_comp false.  C is not
            // modified at all, and there's no work to do except to check for
            // blocking mode.
            GBURBLE ("(no-op) ") ;
            (*done) = true ;
            GB_FREE_ALL ;
            ASSERT (C == C_in) ;
            (*Chandle) = C ;
            return (GB_block (C, Context)) ;
        }

        // free pending tuples early but do not clear C.  If it is
        // already dense then its pattern can be reused.
        GB_Pending_free (&(C->Pending)) ;
    }

    //--------------------------------------------------------------------------
    // transpose A if requested
    //--------------------------------------------------------------------------

    // GrB_Row_assign and GrB_Col_assign pass A as a typecasted vector,
    // which is then quickly transposed to a hypersparse matrix.

    ASSERT_MATRIX_OK (C, "C here in GB_assign", GB0) ;

    if (!scalar_expansion && A_transpose)
    {
        // AT = A', with no typecasting
        // transpose: no typecast, no op, not in-place
        // TODO: if accum is present and it does not depend on the values of
        // A,  only construct the pattern of AT, not the values.
        GBURBLE ("(A transpose) ") ;
        AT = GB_clear_static_header (AT_header_handle) ;
        GB_OK (GB_transpose (&AT, NULL, C_is_csc, A,    // static header
            NULL, NULL, NULL, false, Context)) ;
        GB_MATRIX_WAIT (AT) ;       // A cannot be jumbled
        A = AT ;
    }

    //--------------------------------------------------------------------------
    // transpose the mask if requested
    //--------------------------------------------------------------------------

    // the mask for G*B_Col_*assign and G*B_Row_*assign is a GrB_Vector in CSC
    // form, which is quickly transposed to a hypersparse matrix, if needed.
    // G*B_Vector_*assign always has a CSC mask and CSC C matrix, since both
    // are GrB_Vectors.

    if (M != NULL)
    {
        if (M->is_csc != C_is_csc)
        {
            // either G*B_Row_*assign or G*B_Col_*assign when the matrix C is
            // in CSR format, or G*B_Matrix_assign when the formats of the
            // matrices C and M differ.
            M_transpose = !M_transpose ;
        }
        if (M_transpose)
        {
            // MT = M' to conform M to the same CSR/CSC format as C,
            // and typecast to boolean.
            // transpose: typecast, no op, not in-place
            // TODO: if Mask_struct, only construct the pattern of MT
            GBURBLE ("(M transpose) ") ;
            MT = GB_clear_static_header (MT_header_handle) ;
            GB_OK (GB_transpose (&MT, GrB_BOOL, C_is_csc, M, // static header
                NULL, NULL, NULL, false, Context)) ;
            GB_MATRIX_WAIT (MT) ;       // M cannot be jumbled
            M = MT ;
        }
    }

    //--------------------------------------------------------------------------
    // determine the properties of I and J
    //--------------------------------------------------------------------------

    // If the descriptor says that A must be transposed, it has already been
    // transposed in the caller.  Thus C(I,J), A, and M (if present) all
    // have the same size: length(I)-by-length(J)

    bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
    int64_t imin, imax, jmin, jmax ;
    GB_OK (GB_ijproperties (I, ni, nI, C->vlen, &Ikind, Icolon,
                &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context)) ;
    GB_OK (GB_ijproperties (J, nj, nJ, C->vdim, &Jkind, Jcolon,
                &J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context)) ;

    //--------------------------------------------------------------------------
    // sort I and J and remove duplicates, if needed
    //--------------------------------------------------------------------------

    // If I or J are explicit lists that are unsorted, or sorted but with
    // duplicate entries, then both I and J are sorted and their duplicates
    // are removed.  A and M are adjusted accordingly.  Removing duplicates
    // decreases the length of I and J.

    bool I_unsorted_or_has_dupl = (I_unsorted || I_has_dupl) ;
    bool J_unsorted_or_has_dupl = (J_unsorted || J_has_dupl) ;
    bool presort = I_unsorted_or_has_dupl || J_unsorted_or_has_dupl ;

    // This pre-sort of I and J is required for the parallel assignment.
    // Otherwise, multiple threads may attempt to modify the same part of C.
    // This could cause a race condition, if one thread flags a zombie at the
    // same time another thread is using that index in a binary search.  If the
    // 2nd thread finds either zombie/not-zombie, this is fine, but the
    // modification would have to be atomic.  Atomic read/write is slow, so to
    // avoid the use of atomics, the index lists I and J are sorted and all
    // duplicates are removed.

    // A side benefit of this pre-sort is that it ensures that the results of
    // GrB_assign and GxB_subassign are completely defined if I and J have
    // duplicates.  The definition of this pre-sort is given in MATLAB below.

    /*
        function C = subassign (C, I, J, A)
        % submatrix assignment with pre-sort of I and J; and remove duplicates

        % delete duplicates from I, keeping the last one seen
        [I2 I2k] = sort (I) ;
        Idupl = [(I2 (1:end-1) == I2 (2:end)), false] ;
        I2  = I2  (~Idupl) ;
        I2k = I2k (~Idupl) ;
        assert (isequal (I2, unique (I)))

        % delete duplicates from J, keeping the last one seen
        [J2 J2k] = sort (J) ;
        Jdupl = [(J2 (1:end-1) == J2 (2:end)), false] ;
        J2  = J2  (~Jdupl) ;
        J2k = J2k (~Jdupl) ;
        assert (isequal (J2, unique (J)))

        % do the submatrix assignment, with no duplicates in I2 or J2
        C (I2,J2) = A (I2k,J2k) ;
    */
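
    // For example (a small case traced through the script above): I = [3 1 3]
    // gives I2 = [1 3] and I2k = [2 3], so C (I2,J2) = A (I2k,J2k) assigns
    // A (2,:) to C (1,:) and A (3,:) to C (3,:).  This matches C(I,J)=A with
    // duplicates, where the last duplicate assignment, A (3,:), wins.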

    // With this subassign script, the result returned by GB_subassigner
    // matches the behavior in MATLAB, so the following holds:

    /*
        C4 = C ;
        C4 (I,J) = A ;
        C3 = subassign (C, I, J, A) ;
        assert (isequal (C4, C3)) ;
    */

    // That is, the pre-sort of I, J, and A has no effect on the final C, in
    // MATLAB.

    // The pre-sort itself takes additional work and memory space, but it may
    // actually improve the performance since it makes the data access of C
    // more regular, even in the sequential case.

    if (presort)
    {

        ASSERT (Ikind == GB_LIST || Jkind == GB_LIST) ;
        ASSERT (!whole_C_matrix) ;

        if (I_unsorted_or_has_dupl)
        {
            // I2 = sort I and remove duplicates
            ASSERT (Ikind == GB_LIST) ;
            GB_OK (GB_ijsort (I, &ni, &I2, &I2_size, &I2k, &I2k_size, Context));
            // Recheck the length and properties of the new I2.  This may
            // convert I2 to GB_ALL or GB_RANGE, after I2 has been sorted.
            GB_ijlength (I2, ni, C->vlen, &nI, &Ikind, Icolon) ;
            GB_OK (GB_ijproperties (I2, ni, nI, C->vlen, &Ikind, Icolon,
                &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context)) ;
            ASSERT (! (I_unsorted || I_has_dupl)) ;
            I = I2 ;
        }

        if (J_unsorted_or_has_dupl)
        {
            // J2 = sort J and remove duplicates
            ASSERT (Jkind == GB_LIST) ;
            GB_OK (GB_ijsort (J, &nj, &J2, &J2_size, &J2k, &J2k_size, Context));
            // Recheck the length and properties of the new J2.  This may
            // convert J2 to GB_ALL or GB_RANGE, after J2 has been sorted.
            GB_ijlength (J2, nj, C->vdim, &nJ, &Jkind, Jcolon) ;
            GB_OK (GB_ijproperties (J2, nj, nJ, C->vdim, &Jkind, Jcolon,
                &J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context)) ;
            ASSERT (! (J_unsorted || J_has_dupl)) ;
            J = J2 ;
        }

        // inverse index lists to create the A2 and M2 submatrices:
        const GrB_Index *Iinv = I_unsorted_or_has_dupl ? I2k : GrB_ALL ;
        const GrB_Index *Jinv = J_unsorted_or_has_dupl ? J2k : GrB_ALL ;

        if (!scalar_expansion)
        {
            // A2 = A (Iinv, Jinv)
            A2 = GB_clear_static_header (A2_header_handle) ;
            GB_OK (GB_subref (A2, A->is_csc, A, Iinv, ni, Jinv, nj, false,
                Context)) ;
            // GB_subref can return a jumbled result
            ASSERT (GB_JUMBLED_OK (A2)) ;
            if (A == AT)
            {
                GB_Matrix_free (&AT) ;
                AT = NULL ;
            }
            A = A2 ;
        }

        if (M != NULL && (*assign_kind) == GB_SUBASSIGN)
        {
            // M2 = M (Iinv, Jinv)
            M2 = GB_clear_static_header (M2_header_handle) ;
            GB_OK (GB_subref (M2, M->is_csc, M, Iinv, ni, Jinv, nj, false,
                Context)) ;
            // GB_subref can return a jumbled result
            ASSERT (GB_JUMBLED_OK (M2)) ;
            if (M == MT)
            {
                GB_Matrix_free (&MT) ;
                MT = NULL ;
            }
            M = M2 ;
        }

        GB_FREE_WERK (&I2k, I2k_size) ;
        GB_FREE_WERK (&J2k, J2k_size) ;
    }

    // I and J are now sorted, with no duplicate entries.  They are either
    // GB_ALL, GB_RANGE, or GB_STRIDE, which are intrinsically sorted with no
    // duplicates, or they are explicit GB_LISTs with sorted entries and no
    // duplicates.

    ASSERT (!I_unsorted) ; ASSERT (!I_has_dupl) ;
    ASSERT (!J_unsorted) ; ASSERT (!J_has_dupl) ;

    //--------------------------------------------------------------------------
    // check for early C_replace
    //--------------------------------------------------------------------------

    // C(:,:)<any mask, replace> = A or x

    // C_replace_may_be_done_early is true if the C_replace action can take
    // place now.  If true, the final C does not depend on the contents of
    // C on input.  If a bitmap assignment might be done, delay the clearing of
    // C since it would be faster to set its bitmap to all zero later on,
    // instead of freeing it and reallocating it.

    bool C_replace_may_be_done_early = (whole_C_matrix && (*C_replace)
        && accum == NULL && !use_bitmap_assign) ;

    // If the entire C(:,:) is being assigned to, and if no accum operator is
    // present, then the matrix can be cleared of all entries now, and then
    // C_replace can be set false.  Clearing C now speeds up the assignment
    // since the wait on C can be skipped, below.  It also simplifies the
    // kernels.  If S is constructed, it is just an empty matrix.

    // By clearing C now and setting C_replace to false, the following methods
    // are used: 09 becomes 05, 10 becomes 06n or 06s, 17 becomes 13, and 18
    // becomes 14.  The S matrix for methods 06s, 13, and 14 is still created,
    // but it is very fast to construct and traverse since C is empty.  Method
    // 00 can be skipped since C is already empty (see "quick" case below).

        // prior time             new  time           action
        // ----- ----             ---  ----           ------

        // 00:  O(S)              nothing, O(1)       C already cleared

        // 09:  O(M+S)            05:  O(M)           C<M> = x, no S

        // 10:  O((A+S)*log(m))   06n: O(M*log(a))    C<M> = A, no S
        //                        06s: O(A*log(m))    C<M> = A, with S

        // 17:  O(m*n)            13:  O(m*n)         C<!M> = x, with S

        // 18:  O(A*log(m))       14:  O(A*log(m))    C<!M> = A, with S

        //  =====================       ==============
        //  M   cmp rpl acc A   S       method: action
        //  =====================       ==============

        //  M   -   -   -   -   -       05:  C(I,J)<M> = x, no S
        //  M   -   -   -   A   -       06n: C(I,J)<M> = A, no S
        //  M   -   -   -   A   S       06s: C(I,J)<M> = A, with S

        //  M   -   r   -   -   S       09:  C(I,J)<M,repl> = x, with S
        //  M   -   r   -   A   S       10:  C(I,J)<M,repl> = A, with S

        //  M   c   -   -   -   S       13:  C(I,J)<!M> = x, with S
        //  M   c   -   -   A   S       14:  C(I,J)<!M> = A, with S

        //  M   c   r   -   -   S       17:  C(I,J)<!M,repl> = x, with S
        //  M   c   r   -   A   S       18:  C(I,J)<!M,repl> = A, with S

        // Methods 09, 10, 17, and 18 are now used only if C(I,J) is a
        // submatrix of C, and not for the whole_C_matrix case.

    //--------------------------------------------------------------------------
    // make a copy Z = C if C is aliased to A or M
    //--------------------------------------------------------------------------

    // TODO: bitmap assign can handle C==M and C==A aliasing in some cases

    // If C is aliased to A and/or M, a copy of C typically must be made.
    bool C_aliased = GB_aliased (C, A) || GB_aliased (C, M) ;

    // However, if C == M is aliased, M is structural and not complemented, I
    // and J are both ":", and scalar assignment is being done, then the alias
    // of C and M can be exploited.  The assignment is C<C,s>=scalar or
    // C<C,s>+=scalar, but for now, only C<C,s>=scalar is exploited.  C_replace
    // is effectively false.
    bool C_exploit_alias_with_M =
        ((C == M)               // C is exactly aliased with M
        && Mask_struct          // mask is structural
        && !Mask_comp           // and not complemented
        && whole_C_matrix       // C(:,:) is being assigned to
        && (accum == NULL)      // no accum (accum can be handled in the future)
        && scalar_expansion) ;  // C<C,s> = scalar assignment
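
    // For example, C<C,s> = scalar with a structural, non-complemented mask
    // writes the scalar to exactly the positions where C already has entries,
    // so every entry of C is overwritten and the pattern of C is unchanged.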

    // GB_assign cannot tolerate any alias with the input mask,
    // if the C_replace phase will be performed.
    if ((*C_replace) && ((*assign_kind) != GB_SUBASSIGN))
    {
        // the C_replace phase requires C and M_in not to be aliased
        C_aliased = C_aliased || GB_aliased (C, M_in) ;
    }

    if (C_exploit_alias_with_M)
    {
        // C<C,s>=scalar, and C_replace can be ignored.
        ASSERT (C_aliased) ;            // C is aliased with M, but this is OK
        ASSERT (!GB_aliased (C, A)) ;   // A is not present so C != A
        if (*C_replace)
        {
            GBURBLE ("(C_replace ignored) ") ;
            (*C_replace) = false ;
        }
    }
    else if (C_aliased)
    {
        // C is aliased with M or A: make a copy of C to assign into
        C2 = GB_clear_static_header (C2_header_handle) ;
        if (C_replace_may_be_done_early)
        {
            // Instead of duplicating C, create a new empty matrix C2.
            int sparsity = (C->h != NULL) ? GxB_HYPERSPARSE : GxB_SPARSE ;
            GB_OK (GB_new (&C2, true, // sparse or hyper, static header
                C->type, C->vlen, C->vdim, GB_Ap_calloc, C_is_csc,
                sparsity, C->hyper_switch, 1, Context)) ;
            GBURBLE ("(C alias cleared; C_replace early) ") ;
            (*C_replace) = false ;
        }
        else
        {
            // finish any computations in C, but leave it jumbled
            // TODO:: keep zombies in C
            GBURBLE ("(C alias: make duplicate) ") ;
            GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (C) ;
            ASSERT (!GB_ZOMBIES (C)) ;
            ASSERT (GB_JUMBLED_OK (C)) ;
            ASSERT (!GB_PENDING (C)) ;
            // C2 = duplicate of C, which must be freed when done
            GB_OK (GB_dup2 (&C2, C, true, NULL, Context)) ; // static header
        }
        // C2 must be transplanted back into C when done
        C = C2 ;
        ASSERT (C->static_header) ;
    }
    else
    {
        // C is not aliased, but check if it can be cleared early
        if (C_replace_may_be_done_early)
        {
            // Clear C early.
            GB_OK (GB_clear (C, Context)) ;
            GBURBLE ("(C(:,:)<any mask>: C_replace early) ") ;
            (*C_replace) = false ;
        }
        // the assignment operates on C in-place
    }

    //--------------------------------------------------------------------------
    // disable C_replace if C is empty
    //--------------------------------------------------------------------------

    bool C_is_empty = (GB_NNZ (C) == 0 && !GB_PENDING (C) && !GB_ZOMBIES (C)) ;
    if (C_is_empty)
    {
        // C is completely empty.  C_replace is irrelevant so set it to false.
        (*C_replace) = false ;
    }

    //--------------------------------------------------------------------------
    // check compatibility of prior pending tuples
    //--------------------------------------------------------------------------

    // The action: ( delete ) can only delete a live entry in the pattern.  It
    // cannot delete a pending tuple; pending tuples cannot become zombies.
    // Thus, if this assignment has the potential for creating zombies, all
    // prior pending tuples must be assembled now.  They thus become live
    // entries in the pattern of C, so that this GB_subassigner can
    // (potentially) turn them into zombies via action: ( delete ).

    // If accum is NULL, the operation is C(I,J) = A, or C(I,J)<M> = A.  If A
    // has any implicit zeros at all, or if M is present, then the
    // action: ( delete ) is possible.  This action is taken when an entry is
    // found in C but not A.  It is thus not possible to check A in advance if
    // an entry in C must be deleted.  If an entry does not appear in C but
    // appears as a pending tuple, deleting it would require a scan of all the
    // pending tuples in C.  This is costly, and simply assembling all pending
    // tuples first is faster.
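
    // For example, with no accum, if C(i,j) is present but A has no entry in
    // the corresponding position of C(I,J)=A, then C(i,j) must be deleted,
    // turning it into a zombie.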

    // The action: ( insert ) adds additional pending tuples.  All pending
    // tuples will be assembled sometime later on, using a single pending
    // operator, and thus the current accum operator must match the prior
    // pending operator.  If the operators do not match, then all prior pending
    // tuples must be assembled now, so that this GB_subassigner can
    // (potentially) insert new pending tuples whose pending operator is accum.

    // These tests are conservative because it is possible that this
    // GxB_subassign will not need to use action: ( insert ).

    // In the discussion below, let SECOND_Ctype denote the SECOND operator
    // z=f(x,y) whose ztype, xtype, and ytype match the type of C.
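    // (the SECOND operator returns its second input: z = f(x,y) = y)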

    bool wait = false ;

    if (C->Pending == NULL)
    {

        //----------------------------------------------------------------------
        // no pending tuples currently exist
        //----------------------------------------------------------------------

        // If any new pending tuples are added, their pending operator is
        // accum, or the implicit SECOND_Ctype operator if accum is NULL.
        // The type of any pending tuples will become C->type.
        // Prior zombies have no effect on this decision.

        wait = false ;

    }
    else
    {

        //----------------------------------------------------------------------
        // prior pending tuples exist: check if action: ( delete ) can occur
        //----------------------------------------------------------------------

        // action: ( delete ) can only operate on entries in the pattern by
        // turning them into zombies.  It cannot delete prior pending tuples.
        // Thus all prior pending tuples must be assembled first if
        // action: ( delete ) can occur.

        if (*C_replace)
        {
            // C_replace must use the action: ( delete )
            wait = true ;
        }
        else if (accum == NULL)
        {
            // This GxB_subassign can potentially use action: ( delete ), and
            // thus prior pending tuples must be assembled first.  However, if
            // A is completely dense, then C(I,J)=A cannot delete any entries
            // from C.

            if (scalar_expansion || GB_is_dense (A))
            {
                // A is a scalar or dense matrix, so entries cannot be deleted
                wait = false ;
            }
            else
            {
                // A is sparse.  action: ( delete ) might occur.
                wait = true ;
            }
        }

        //----------------------------------------------------------------------
        // check if pending operator is compatible
        //----------------------------------------------------------------------

        if (!wait)
        {

            // ( delete ) will not occur, but new pending tuples may be added
            // via the action: ( insert ).  Check if the accum operator is the
            // same as the prior pending operator and ensure the types are
            // the same.

            ASSERT (C->Pending != NULL) ;
            ASSERT (C->Pending->type != NULL) ;

            if (atype != C->Pending->type)
            {
                // entries in A are copied directly into the list of pending
                // tuples for C, with no typecasting.  The type of the prior
                // pending tuples must match the type of A.  Since the types
                // do not match, prior updates must be assembled first.
                wait = true ;
            }
            else if
            (
                // the types match, now check the pending operator
                ! (
                    // the operators are the same
                    (accum == C->Pending->op)
                    // or both operators are SECOND_Ctype, implicit or explicit
                    || (GB_op_is_second (accum, C->type) &&
                        GB_op_is_second (C->Pending->op, C->type))
                  )
            )
            {
                wait = true ;
            }
        }
    }

    if (wait)
    {
        // Prior computations are not compatible with this assignment, so all
        // prior work must be finished.  This is potentially costly.
        // delete any lingering zombies and assemble any pending tuples
        ASSERT_MATRIX_OK (C, "C before wait", GB0) ;
        GB_MATRIX_WAIT (C) ;
    }

    ASSERT_MATRIX_OK (C, "C before subassign", GB0) ;
    ASSERT_BINARYOP_OK_OR_NULL (accum, "accum for assign", GB0) ;

    //--------------------------------------------------------------------------
    // check again if C is empty
    //--------------------------------------------------------------------------

    // GB_clear or GB_Matrix_wait, above, may have deleted all the zombies in
    // C, so check again if C is empty.

    C_is_empty = (GB_NNZ (C) == 0 && !GB_PENDING (C) && !GB_ZOMBIES (C)) ;
    if (C_is_empty)
    {
        // C is completely empty.  C_replace is irrelevant so set it to false.
        GBURBLE ("(C empty) ") ;
        (*C_replace) = false ;
    }

    //--------------------------------------------------------------------------
    // keep track of the current accum operator
    //--------------------------------------------------------------------------

    // If accum is NULL and pending tuples are added, they will be assembled
    // sometime later (not here) using the implied SECOND_Ctype operator.  This
    // GB_subassigner operation corresponds to C(I,J)=A or C(I,J)<M>=A.
    // Subsequent calls to GrB_setElement, and subsequent calls to GrB_assign
    // or GxB_subassign with an explicit SECOND_Ctype operator, may create
    // additional pending tuples and add them to the list without requiring
    // that they be assembled first.

    // If accum is non-NULL, then all prior pending tuples have the same
    // pending operator as this accum.  If that prior operator was the implicit
    // SECOND_Ctype and those pending tuples still exist, then this accum
    // operator is the explicit SECOND_Ctype operator.  The implicit
    // SECOND_Ctype operator is replaced with the current accum, which is the
    // explicit SECOND_Ctype operator.

    if (C->Pending != NULL)
    {
        C->Pending->op = accum ;
    }

    //--------------------------------------------------------------------------
    // return results
    //--------------------------------------------------------------------------

    (*Chandle) = C ;            // C is C_in or C2
    (*Mhandle) = M ;            // M is M_in or M2
    (*Ahandle) = A ;            // A is A_in or A2

    (*C2_handle) = C2 ;
    (*M2_handle) = (MT != NULL) ? MT : M2 ;
    (*A2_handle) = (AT != NULL) ? AT : A2 ;

    (*atype_handle) = atype ;

    // modified versions of the Rows/Cols lists, and their analysis:
    (*I_handle) = (GrB_Index *) I ;     // either Rows, Cols, or I2
    (*I2_handle) = I2 ;         // temporary sorted copy of Rows or Cols list
    (*I2_size_handle) = I2_size ;
    (*ni_handle) = ni ;
    (*nI_handle) = nI ;
    (*Ikind_handle) = Ikind ;

    (*J_handle) = (GrB_Index *) J ;     // either Rows, Cols, or J2
    (*J2_handle) = J2 ;         // temporary sorted copy of Rows or Cols list
    (*J2_size_handle) = J2_size ;
    (*nj_handle) = nj ;
    (*nJ_handle) = nJ ;
    (*Jkind_handle) = Jkind ;

    return (GrB_SUCCESS) ;
}