//------------------------------------------------------------------------------
// GB_convert_sparse_to_hyper: convert a matrix from sparse to hypersparse
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// On input, the matrix may have shallow A->p content; it is safely removed.
// On successful output, a sparse input matrix is hypersparse, with new A->p
// and A->h that are not shallow.  If the input matrix is already hypersparse,
// nothing is changed (and in that case A->p and A->h remain shallow on output
// if shallow on input).  The A->x and A->i content is not changed; it remains
// in whatever shallow/non-shallow state that it had on input.

// If an out-of-memory condition occurs, the matrix is left unmodified and
// GrB_OUT_OF_MEMORY is returned.

// If the input matrix A is hypersparse, bitmap or full, it is unchanged.

#include "GB.h"

GB_convert_sparse_to_hyper(GrB_Matrix A,GB_Context Context)24 GrB_Info GB_convert_sparse_to_hyper // convert from sparse to hypersparse
25 (
26     GrB_Matrix A,           // matrix to convert to hypersparse
27     GB_Context Context
28 )
29 {
30 
31     //--------------------------------------------------------------------------
32     // check inputs
33     //--------------------------------------------------------------------------
34 
35     ASSERT_MATRIX_OK (A, "A converting to hypersparse", GB0) ;
36     int64_t anz = GB_NNZ (A) ;
37     ASSERT (GB_ZOMBIES_OK (A)) ;
38     ASSERT (GB_JUMBLED_OK (A)) ;
39     ASSERT (GB_PENDING_OK (A)) ;
40 
41     //--------------------------------------------------------------------------
42     // convert A from sparse to hypersparse
43     //--------------------------------------------------------------------------
44 
45     if (GB_IS_SPARSE (A))
46     {
47 
48         //----------------------------------------------------------------------
49         // determine the number of threads to use
50         //----------------------------------------------------------------------
51 
52         GBURBLE ("(sparse to hyper) ") ;
53         int64_t n = A->vdim ;
54         GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
55         int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
56         int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
57         ntasks = GB_IMIN (ntasks, n) ;
58         ntasks = GB_IMAX (ntasks, 1) ;
59 
60         //----------------------------------------------------------------------
61         // count the number of non-empty vectors in A in each slice
62         //----------------------------------------------------------------------
63 
64         ASSERT (A->nvec == A->plen && A->plen == n) ;
65 
66         const int64_t *restrict Ap_old = A->p ;
67         size_t Ap_old_size = A->p_size ;
68         bool Ap_old_shallow = A->p_shallow ;
69 
70         GB_WERK_DECLARE (Count, int64_t) ;
71         GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
72         if (Count == NULL)
73         {
74             // out of memory
75             return (GrB_OUT_OF_MEMORY) ;
76         }
77 
78         int tid ;
79         #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
80         for (tid = 0 ; tid < ntasks ; tid++)
81         {
82             int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
83             GB_PARTITION (jstart, jend, n, tid, ntasks) ;
84             for (int64_t j = jstart ; j < jend ; j++)
85             {
86                 if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
87             }
88             Count [tid] = my_nvec_nonempty ;
89         }
90 
91         //----------------------------------------------------------------------
92         // compute cumulative sum of Counts and nvec_nonempty
93         //----------------------------------------------------------------------
94 
95         GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
96         int64_t nvec_nonempty = Count [ntasks] ;
97         A->nvec_nonempty = nvec_nonempty ;
98 
99         //----------------------------------------------------------------------
100         // allocate the new A->p and A->h
101         //----------------------------------------------------------------------
102 
103         int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ;
104         int64_t *restrict Ah_new = NULL ; size_t Ah_new_size = 0 ;
105         Ap_new = GB_MALLOC (nvec_nonempty+1, int64_t, &Ap_new_size) ;
106         Ah_new = GB_MALLOC (nvec_nonempty  , int64_t, &Ah_new_size) ;
107         if (Ap_new == NULL || Ah_new == NULL)
108         {
109             // out of memory
110             GB_WERK_POP (Count, int64_t) ;
111             GB_FREE (&Ap_new, Ap_new_size) ;
112             GB_FREE (&Ah_new, Ah_new_size) ;
113             return (GrB_OUT_OF_MEMORY) ;
114         }
115 
116         //----------------------------------------------------------------------
117         // transplant the new A->p and A->h into the matrix
118         //----------------------------------------------------------------------
119 
120         A->plen = nvec_nonempty ;
121         A->nvec = nvec_nonempty ;
122         A->p = Ap_new ; A->p_size = Ap_new_size ;
123         A->h = Ah_new ; A->h_size = Ah_new_size ;
124         A->p_shallow = false ;
125         A->h_shallow = false ;
126 
127         //----------------------------------------------------------------------
128         // construct the new hyperlist in the new A->p and A->h
129         //----------------------------------------------------------------------
130 
131         #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
132         for (tid = 0 ; tid < ntasks ; tid++)
133         {
134             int64_t jstart, jend, k = Count [tid] ;
135             GB_PARTITION (jstart, jend, n, tid, ntasks) ;
136             for (int64_t j = jstart ; j < jend ; j++)
137             {
138                 if (Ap_old [j] < Ap_old [j+1])
139                 {
140                     // vector index j is the kth vector in the new Ah
141                     Ap_new [k] = Ap_old [j] ;
142                     Ah_new [k] = j ;
143                     k++ ;
144                 }
145             }
146             ASSERT (k == Count [tid+1]) ;
147         }
148 
149         Ap_new [nvec_nonempty] = anz ;
150         A->magic = GB_MAGIC ;
151 
152         //----------------------------------------------------------------------
153         // free workspace, and free the old A->p unless it's shallow
154         //----------------------------------------------------------------------
155 
156         GB_WERK_POP (Count, int64_t) ;
157         if (!Ap_old_shallow)
158         {
159             GB_FREE (&Ap_old, Ap_old_size) ;
160         }
161 
162         //----------------------------------------------------------------------
163         // A is now hypersparse
164         //----------------------------------------------------------------------
165 
166         ASSERT (GB_IS_HYPERSPARSE (A)) ;
167     }
168 
169     //--------------------------------------------------------------------------
170     // A is now in hypersparse form (or left as full or bitmap)
171     //--------------------------------------------------------------------------
172 
173     ASSERT (anz == GB_NNZ (A)) ;
174     ASSERT_MATRIX_OK (A, "A conv to hypersparse (or left full/bitmap)", GB0) ;
175     ASSERT (!GB_IS_SPARSE (A)) ;
176     ASSERT (GB_ZOMBIES_OK (A)) ;
177     ASSERT (GB_JUMBLED_OK (A)) ;
178     ASSERT (GB_PENDING_OK (A)) ;
179     return (GrB_SUCCESS) ;
180 }