1 //////////////////////////////////////////////////////////////////////// 2 // 3 // Copyright (C) 1998-2021 The Octave Project Developers 4 // 5 // See the file COPYRIGHT.md in the top-level directory of this 6 // distribution or <https://octave.org/copyright/>. 7 // 8 // This file is part of Octave. 9 // 10 // Octave is free software: you can redistribute it and/or modify it 11 // under the terms of the GNU General Public License as published by 12 // the Free Software Foundation, either version 3 of the License, or 13 // (at your option) any later version. 14 // 15 // Octave is distributed in the hope that it will be useful, but 16 // WITHOUT ANY WARRANTY; without even the implied warranty of 17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 // GNU General Public License for more details. 19 // 20 // You should have received a copy of the GNU General Public License 21 // along with Octave; see the file COPYING. If not, see 22 // <https://www.gnu.org/licenses/>. 23 // 24 //////////////////////////////////////////////////////////////////////// 25 26 #if ! defined (octave_Sparse_op_defs_h) 27 #define octave_Sparse_op_defs_h 1 28 29 #include "octave-config.h" 30 31 #include "Array-util.h" 32 #include "lo-array-errwarn.h" 33 #include "mx-inlines.cc" 34 #include "oct-locbuf.h" 35 36 // sparse matrix by scalar operations. 
// Define: R F (const M& m, const S& s) for an operator OP that does NOT
// map zero to zero (e.g. + and -).  Nearly every element of the result is
// (0 OP s), so R is expected to be a dense matrix type: the result is
// filled with (0.0 OP s) and only the positions of m's nonzeros are then
// overwritten column by column.
#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S)  \
  R  \
  F (const M& m, const S& s)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
  \
    R r (nr, nc, (0.0 OP s));  \
  \
    for (octave_idx_type j = 0; j < nc; j++)  \
      for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
        r.xelem (m.ridx (i), j) = m.data (i) OP s;  \
    return r;  \
  }

// Define: R F (const M& m, const S& s) for an operator OP that maps zero
// to zero (e.g. * and /).  The sparsity pattern of m is copied verbatim
// (data/ridx element-wise, cidx wholesale); maybe_compress (true) then
// strips any entries that became exactly zero.
#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S)  \
  R  \
  F (const M& m, const S& s)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    octave_idx_type nz = m.nnz ();  \
  \
    R r (nr, nc, nz);  \
  \
    for (octave_idx_type i = 0; i < nz; i++)  \
      {  \
        r.xdata (i) = m.data (i) OP s;  \
        r.xridx (i) = m.ridx (i);  \
      }  \
    for (octave_idx_type i = 0; i < nc + 1; i++)  \
      r.xcidx (i) = m.cidx (i);  \
  \
    r.maybe_compress (true);  \
    return r;  \
  }

// Instantiate all four arithmetic operators for sparse OP scalar.
// R1 receives the dense (+,-) results, R2 the sparse (*,/) results.
#define SPARSE_SMS_BIN_OPS(R1, R2, M, S)  \
  SPARSE_SMS_BIN_OP_1 (R1, operator +, +, M, S)  \
  SPARSE_SMS_BIN_OP_1 (R1, operator -, -, M, S)  \
  SPARSE_SMS_BIN_OP_2 (R2, operator *, *, M, S)  \
  SPARSE_SMS_BIN_OP_2 (R2, operator /, /, M, S)

// Define: SparseBoolMatrix F (const M& m, const S& s) for a comparison OP.
// If (zero OP s) holds, the result is mostly true: start from an all-true
// matrix and clear the positions where the comparison fails on m's stored
// entries.  Otherwise the result is at most as dense as m and is built
// column by column in compressed form.
#define SPARSE_SMS_CMP_OP(F, OP, M, S)  \
  SparseBoolMatrix  \
  F (const M& m, const S& s)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    M::element_type m_zero = M::element_type ();  \
  \
    if (m_zero OP s)  \
      {  \
        r = SparseBoolMatrix (nr, nc, true);  \
        for (octave_idx_type j = 0; j < nc; j++)  \
          for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
            if (! (m.data (i) OP s))  \
              r.data (m.ridx (i) + j * nr) = false;  \
        r.maybe_compress (true);  \
      }  \
    else  \
      {  \
        r = SparseBoolMatrix (nr, nc, m.nnz ());  \
        r.cidx (0) = static_cast<octave_idx_type> (0);  \
        octave_idx_type nel = 0;  \
        for (octave_idx_type j = 0; j < nc; j++)  \
          {  \
            for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
              if (m.data (i) OP s)  \
                {  \
                  r.ridx (nel) = m.ridx (i);  \
                  r.data (nel++) = true;  \
                }  \
            r.cidx (j + 1) = nel;  \
          }  \
        r.maybe_compress (false);  \
      }  \
    return r;  \
  }

// All six element-wise comparisons for sparse OP scalar.
#define SPARSE_SMS_CMP_OPS(M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_lt, <, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_le, <=, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_ge, >=, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_gt, >, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, S)

// Only == and != (for element types with no meaningful ordering).
#define SPARSE_SMS_EQNE_OPS(M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, S)  \
  SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, S)

// Define: SparseBoolMatrix mx_el_or (const M& m, const S& s).  A nonzero
// scalar makes every element true; a zero scalar reduces the operation to
// "m != 0", which keeps m's sparsity pattern.  An empty m yields the
// default (empty) result.
#define SPARSE_SMS_BOOL_OR_OP(M, S)  \
  SparseBoolMatrix  \
  mx_el_or (const M& m, const S& s)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    M::element_type lhs_zero = M::element_type ();  \
    S rhs_zero = S ();  \
  \
    if (nr > 0 && nc > 0)  \
      {  \
        if (s != rhs_zero)  \
          r = SparseBoolMatrix (nr, nc, true);  \
        else  \
          {  \
            r = SparseBoolMatrix (nr, nc, m.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < nc; j++)  \
              {  \
                for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
                  if (m.data (i) != lhs_zero)  \
                    {  \
                      r.ridx (nel) = m.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    return r;  \
  }

// Define: SparseBoolMatrix mx_el_and (const M& m, const S& s).  A zero
// scalar makes everything false (empty sparse result); a nonzero scalar
// reduces the operation to "m != 0".
#define SPARSE_SMS_BOOL_AND_OP(M, S)  \
  SparseBoolMatrix  \
  mx_el_and (const M& m, const S& s)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    M::element_type lhs_zero = M::element_type ();  \
    S rhs_zero = S ();  \
  \
    if (nr > 0 && nc > 0)  \
      {  \
        if (s != rhs_zero)  \
          {  \
            r = SparseBoolMatrix (nr, nc, m.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < nc; j++)  \
              {  \
                for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
                  if (m.data (i) != lhs_zero)  \
                    {  \
                      r.ridx (nel) = m.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
        else  \
          r = SparseBoolMatrix (nr, nc);  \
      }  \
    return r;  \
  }

#define SPARSE_SMS_BOOL_OPS(M, S)  \
  SPARSE_SMS_BOOL_AND_OP (M, S)  \
  SPARSE_SMS_BOOL_OR_OP (M, S)

// scalar by sparse matrix operations.
// The SPARSE_SSM_* macros below mirror the SPARSE_SMS_* family above with
// the scalar on the left-hand side.

// Scalar-left analogue of SPARSE_SMS_BIN_OP_1 (dense result; + and -).
#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M)  \
  R  \
  F (const S& s, const M& m)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
  \
    R r (nr, nc, (s OP 0.0));  \
  \
    for (octave_idx_type j = 0; j < nc; j++)  \
      for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
        r.xelem (m.ridx (i), j) = s OP m.data (i);  \
  \
    return r;  \
  }

// Scalar-left analogue of SPARSE_SMS_BIN_OP_2 (sparse result; * only in
// practice -- note s / sparse would NOT be zero-preserving).
#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M)  \
  R  \
  F (const S& s, const M& m)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    octave_idx_type nz = m.nnz ();  \
  \
    R r (nr, nc, nz);  \
  \
    for (octave_idx_type i = 0; i < nz; i++)  \
      {  \
        r.xdata (i) = s OP m.data (i);  \
        r.xridx (i) = m.ridx (i);  \
      }  \
    for (octave_idx_type i = 0; i < nc + 1; i++)  \
      r.xcidx (i) = m.cidx (i);  \
  \
    r.maybe_compress (true);  \
    return r;  \
  }

// Instantiate all four arithmetic operators for scalar OP sparse.
#define SPARSE_SSM_BIN_OPS(R1, R2, S, M)  \
  SPARSE_SSM_BIN_OP_1 (R1, operator +, +, S, M)  \
  SPARSE_SSM_BIN_OP_1 (R1, operator -, -, S, M)  \
  SPARSE_SSM_BIN_OP_2 (R2, operator *, *, S, M)  \
  SPARSE_SSM_BIN_OP_2 (R2, operator /, /, S, M)

// Scalar-left analogue of SPARSE_SMS_CMP_OP: mostly-true results start
// all-true and are punched out; otherwise built sparsely.
#define SPARSE_SSM_CMP_OP(F, OP, S, M)  \
  SparseBoolMatrix  \
  F (const S& s, const M& m)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    M::element_type m_zero = M::element_type ();  \
  \
    if (s OP m_zero)  \
      {  \
        r = SparseBoolMatrix (nr, nc, true);  \
        for (octave_idx_type j = 0; j < nc; j++)  \
          for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
            if (! (s OP m.data (i)))  \
              r.data (m.ridx (i) + j * nr) = false;  \
        r.maybe_compress (true);  \
      }  \
    else  \
      {  \
        r = SparseBoolMatrix (nr, nc, m.nnz ());  \
        r.cidx (0) = static_cast<octave_idx_type> (0);  \
        octave_idx_type nel = 0;  \
        for (octave_idx_type j = 0; j < nc; j++)  \
          {  \
            for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
              if (s OP m.data (i))  \
                {  \
                  r.ridx (nel) = m.ridx (i);  \
                  r.data (nel++) = true;  \
                }  \
            r.cidx (j + 1) = nel;  \
          }  \
        r.maybe_compress (false);  \
      }  \
    return r;  \
  }

// All six element-wise comparisons for scalar OP sparse.
#define SPARSE_SSM_CMP_OPS(S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_lt, <, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_le, <=, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_ge, >=, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_gt, >, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, M)

// Only == and != (for element types with no meaningful ordering).
#define SPARSE_SSM_EQNE_OPS(S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, M)  \
  SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, M)

// Scalar-left OR: nonzero scalar => all true; zero scalar => "m != 0".
#define SPARSE_SSM_BOOL_OR_OP(S, M)  \
  SparseBoolMatrix  \
  mx_el_or (const S& s, const M& m)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    S lhs_zero = S ();  \
    M::element_type rhs_zero = M::element_type ();  \
  \
    if (nr > 0 && nc > 0)  \
      {  \
        if (s != lhs_zero)  \
          r = SparseBoolMatrix (nr, nc, true);  \
        else  \
          {  \
            r = SparseBoolMatrix (nr, nc, m.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < nc; j++)  \
              {  \
                for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
                  if (m.data (i) != rhs_zero)  \
                    {  \
                      r.ridx (nel) = m.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    return r;  \
  }

// Scalar-left AND: zero scalar => all false; nonzero scalar => "m != 0".
#define SPARSE_SSM_BOOL_AND_OP(S, M)  \
  SparseBoolMatrix  \
  mx_el_and (const S& s, const M& m)  \
  {  \
    octave_idx_type nr = m.rows ();  \
    octave_idx_type nc = m.cols ();  \
    SparseBoolMatrix r;  \
  \
    S lhs_zero = S ();  \
    M::element_type rhs_zero = M::element_type ();  \
  \
    if (nr > 0 && nc > 0)  \
      {  \
        if (s != lhs_zero)  \
          {  \
            r = SparseBoolMatrix (nr, nc, m.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < nc; j++)  \
              {  \
                for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++)  \
                  if (m.data (i) != rhs_zero)  \
                    {  \
                      r.ridx (nel) = m.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
        else  \
          r = SparseBoolMatrix (nr, nc);  \
      }  \
    return r;  \
  }

#define SPARSE_SSM_BOOL_OPS(S, M)  \
  SPARSE_SSM_BOOL_AND_OP (S, M)  \
  SPARSE_SSM_BOOL_OR_OP (S, M)

// sparse matrix by sparse matrix operations.
// Define: R F (const M1& m1, const M2& m2) for sparse OP sparse where OP
// does not map a (zero, zero) pair to zero in general (+ and -).  Handles
// three cases: m1 is 1x1 (scalar broadcast), m2 is 1x1, and full
// element-wise combination via a two-pointer merge of the column patterns.
// Nonconformant dimensions raise err_nonconformant.
#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      {  \
        /* 0 OP m2 is just m2 (for +) or -m2 (for -): apply OP unarily.  */  \
        if (m1.elem (0,0) == 0.)  \
          r = OP R (m2);  \
        else  \
          {  \
            /* Fill with (s OP 0), then patch positions of m2's nonzeros;  */  \
            /* data is addressed linearly (column-major) because the       */  \
            /* fill-constructed result stores every element.               */  \
            r = R (m2_nr, m2_nc, m1.data (0) OP 0.);  \
  \
            for (octave_idx_type j = 0 ; j < m2_nc ; j++)  \
              {  \
                octave_quit ();  \
                octave_idx_type idxj = j * m2_nr;  \
                for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++)  \
                  {  \
                    octave_quit ();  \
                    r.data (idxj + m2.ridx (i)) = m1.data (0) OP m2.data (i);  \
                  }  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      {  \
        /* m1 OP 0 is m1 for both + and -.  */  \
        if (m2.elem (0,0) == 0.)  \
          r = R (m1);  \
        else  \
          {  \
            r = R (m1_nr, m1_nc, 0. OP m2.data (0));  \
  \
            for (octave_idx_type j = 0 ; j < m1_nc ; j++)  \
              {  \
                octave_quit ();  \
                octave_idx_type idxj = j * m1_nr;  \
                for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++)  \
                  {  \
                    octave_quit ();  \
                    r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.data (0);  \
                  }  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
        /* Union of the two patterns bounds the result size.  */  \
        r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ()));  \
  \
        octave_idx_type jx = 0;  \
        r.cidx (0) = 0;  \
        for (octave_idx_type i = 0 ; i < m1_nc ; i++)  \
          {  \
            octave_idx_type ja = m1.cidx (i);  \
            octave_idx_type ja_max = m1.cidx (i+1);  \
            bool ja_lt_max = ja < ja_max;  \
  \
            octave_idx_type jb = m2.cidx (i);  \
            octave_idx_type jb_max = m2.cidx (i+1);  \
            bool jb_lt_max = jb < jb_max;  \
  \
            /* Merge column i of both matrices in ascending row order.  */  \
            while (ja_lt_max || jb_lt_max)  \
              {  \
                octave_quit ();  \
                if ((! jb_lt_max)  \
                    || (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb))))  \
                  {  \
                    /* Entry only in m1: combine with implicit zero.  */  \
                    r.ridx (jx) = m1.ridx (ja);  \
                    r.data (jx) = m1.data (ja) OP 0.;  \
                    jx++;  \
                    ja++;  \
                    ja_lt_max = ja < ja_max;  \
                  }  \
                else if ((! ja_lt_max)  \
                         || (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja))))  \
                  {  \
                    /* Entry only in m2.  */  \
                    r.ridx (jx) = m2.ridx (jb);  \
                    r.data (jx) = 0. OP m2.data (jb);  \
                    jx++;  \
                    jb++;  \
                    jb_lt_max = jb < jb_max;  \
                  }  \
                else  \
                  {  \
                    /* Entry in both: store only if the result is nonzero.  */  \
                    if ((m1.data (ja) OP m2.data (jb)) != 0.)  \
                      {  \
                        r.data (jx) = m1.data (ja) OP m2.data (jb);  \
                        r.ridx (jx) = m1.ridx (ja);  \
                        jx++;  \
                      }  \
                    ja++;  \
                    ja_lt_max = ja < ja_max;  \
                    jb++;  \
                    jb_lt_max = jb < jb_max;  \
                  }  \
              }  \
            r.cidx (i+1) = jx;  \
          }  \
  \
        r.maybe_compress ();  \
      }  \
  \
    return r;  \
  }

// Define: R F (const M1& m1, const M2& m2) for a zero-preserving OP
// (element-wise product): only positions present in BOTH patterns can be
// nonzero, so the merge keeps just the intersection.
#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      {  \
        /* 0 .* m2 is all zeros; otherwise scale m2's stored data.  */  \
        if (m1.elem (0,0) == 0.)  \
          r = R (m2_nr, m2_nc);  \
        else  \
          {  \
            r = R (m2);  \
            octave_idx_type m2_nnz = m2.nnz ();  \
  \
            for (octave_idx_type i = 0 ; i < m2_nnz ; i++)  \
              {  \
                octave_quit ();  \
                r.data (i) = m1.data (0) OP r.data (i);  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      {  \
        if (m2.elem (0,0) == 0.)  \
          r = R (m1_nr, m1_nc);  \
        else  \
          {  \
            r = R (m1);  \
            octave_idx_type m1_nnz = m1.nnz ();  \
  \
            for (octave_idx_type i = 0 ; i < m1_nnz ; i++)  \
              {  \
                octave_quit ();  \
                r.data (i) = r.data (i) OP m2.data (0);  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
        /* The intersection can't exceed the larger pattern.  */  \
        r = R (m1_nr, m1_nc, (m1.nnz () > m2.nnz () ? m1.nnz () : m2.nnz ()));  \
  \
        octave_idx_type jx = 0;  \
        r.cidx (0) = 0;  \
        for (octave_idx_type i = 0 ; i < m1_nc ; i++)  \
          {  \
            octave_idx_type ja = m1.cidx (i);  \
            octave_idx_type ja_max = m1.cidx (i+1);  \
            bool ja_lt_max = ja < ja_max;  \
  \
            octave_idx_type jb = m2.cidx (i);  \
            octave_idx_type jb_max = m2.cidx (i+1);  \
            bool jb_lt_max = jb < jb_max;  \
  \
            while (ja_lt_max || jb_lt_max)  \
              {  \
                octave_quit ();  \
                if ((! jb_lt_max)  \
                    || (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb))))  \
                  {  \
                    /* Only in m1: result is zero, skip.  */  \
                    ja++; ja_lt_max = ja < ja_max;  \
                  }  \
                else if ((! ja_lt_max)  \
                         || (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja))))  \
                  {  \
                    /* Only in m2: result is zero, skip.  */  \
                    jb++; jb_lt_max = jb < jb_max;  \
                  }  \
                else  \
                  {  \
                    if ((m1.data (ja) OP m2.data (jb)) != 0.)  \
                      {  \
                        r.data (jx) = m1.data (ja) OP m2.data (jb);  \
                        r.ridx (jx) = m1.ridx (ja);  \
                        jx++;  \
                      }  \
                    ja++; ja_lt_max = ja < ja_max;  \
                    jb++; jb_lt_max = jb < jb_max;  \
                  }  \
              }  \
            r.cidx (i+1) = jx;  \
          }  \
  \
        r.maybe_compress ();  \
      }  \
  \
    return r;  \
  }

// Define: R F (const M1& m1, const M2& m2) for element-wise quotient.
// Division by the implicit zeros produces Inf/NaN, so the general result
// is dense; the instantiations are double/Complex only, hence the
// Complex () "kludge" used as the zero value throughout.
#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      {  \
        /* (s OP 0) == 0 means the implicit zeros stay zero, so the  */  \
        /* sparsity pattern of m2 survives.                          */  \
        if ((m1.elem (0,0) OP Complex ()) == Complex ())  \
          {  \
            octave_idx_type m2_nnz = m2.nnz ();  \
            r = R (m2);  \
            for (octave_idx_type i = 0 ; i < m2_nnz ; i++)  \
              r.data (i) = m1.elem (0,0) OP r.data (i);  \
            r.maybe_compress ();  \
          }  \
        else  \
          {  \
            r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ());  \
            for (octave_idx_type j = 0 ; j < m2_nc ; j++)  \
              {  \
                octave_quit ();  \
                octave_idx_type idxj = j * m2_nr;  \
                for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++)  \
                  {  \
                    octave_quit ();  \
                    r.data (idxj + m2.ridx (i)) = m1.elem (0,0) OP m2.data (i);  \
                  }  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      {  \
        /* BUG FIX: the scalar in this branch is m2(0,0), not m1(0,0).  */  \
        /* Testing m1 here chose the sparse path when m2's scalar was   */  \
        /* zero, silently dropping the 0/0 -> NaN entries.              */  \
        if ((Complex () OP m2.elem (0,0)) == Complex ())  \
          {  \
            octave_idx_type m1_nnz = m1.nnz ();  \
            r = R (m1);  \
            for (octave_idx_type i = 0 ; i < m1_nnz ; i++)  \
              r.data (i) = r.data (i) OP m2.elem (0,0);  \
            r.maybe_compress ();  \
          }  \
        else  \
          {  \
            r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0));  \
            for (octave_idx_type j = 0 ; j < m1_nc ; j++)  \
              {  \
                octave_quit ();  \
                octave_idx_type idxj = j * m1_nr;  \
                for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++)  \
                  {  \
                    octave_quit ();  \
                    r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.elem (0,0);  \
                  }  \
              }  \
            r.maybe_compress ();  \
          }  \
      }  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
  \
        /* FIXME: Kludge... Always double/Complex, so Complex () */  \
        r = R (m1_nr, m1_nc, (Complex () OP Complex ()));  \
  \
        for (octave_idx_type i = 0 ; i < m1_nc ; i++)  \
          {  \
            octave_idx_type ja = m1.cidx (i);  \
            octave_idx_type ja_max = m1.cidx (i+1);  \
            bool ja_lt_max = ja < ja_max;  \
  \
            octave_idx_type jb = m2.cidx (i);  \
            octave_idx_type jb_max = m2.cidx (i+1);  \
            bool jb_lt_max = jb < jb_max;  \
  \
            while (ja_lt_max || jb_lt_max)  \
              {  \
                octave_quit ();  \
                if ((! jb_lt_max)  \
                    || (ja_lt_max && (m1.ridx (ja) < m2.ridx (jb))))  \
                  {  \
                    /* keep those kludges coming */  \
                    r.elem (m1.ridx (ja),i) = m1.data (ja) OP Complex ();  \
                    ja++;  \
                    ja_lt_max = ja < ja_max;  \
                  }  \
                else if ((! ja_lt_max)  \
                         || (jb_lt_max && (m2.ridx (jb) < m1.ridx (ja))))  \
                  {  \
                    /* keep those kludges coming */  \
                    r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb);  \
                    jb++;  \
                    jb_lt_max = jb < jb_max;  \
                  }  \
                else  \
                  {  \
                    r.elem (m1.ridx (ja),i) = m1.data (ja) OP m2.data (jb);  \
                    ja++;  \
                    ja_lt_max = ja < ja_max;  \
                    jb++;  \
                    jb_lt_max = jb < jb_max;  \
                  }  \
              }  \
          }  \
        r.maybe_compress (true);  \
      }  \
  \
    return r;  \
  }

// Note that SM ./ SM needs to take into account the NaN and Inf values
// implied by the division by zero.
// FIXME: Are the NaNs double(NaN) or Complex(NaN,Nan) in the complex case?
// Instantiate sparse OP sparse arithmetic.  + and - use the dense-capable
// union merge; product keeps the pattern intersection; quotient densifies
// (division by implicit zeros).
#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2)  \
  SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2)  \
  SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2)  \
  SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2)  \
  SPARSE_SMSM_BIN_OP_3 (R2, quotient, /, M1, M2)

// FIXME: this macro duplicates the bodies of the template functions
// defined in the SPARSE_SSM_CMP_OP and SPARSE_SMS_CMP_OP macros.

// Define: SparseBoolMatrix F (const M1& m1, const M2& m2) for a
// comparison OP.  1x1 operands are broadcast; conforming operands are
// merged column by column with a two-pointer walk; if (zero OP zero)
// holds, the result starts all-true and failing positions are cleared,
// otherwise it is built sparsely.
#define SPARSE_SMSM_CMP_OP(F, OP, M1, M2)  \
  SparseBoolMatrix  \
  F (const M1& m1, const M2& m2)  \
  {  \
    SparseBoolMatrix r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    M1::element_type Z1 = M1::element_type ();  \
    M2::element_type Z2 = M2::element_type ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      {  \
        if (m1.elem (0,0) OP Z2)  \
          {  \
            r = SparseBoolMatrix (m2_nr, m2_nc, true);  \
            for (octave_idx_type j = 0; j < m2_nc; j++)  \
              for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++)  \
                if (! (m1.elem (0,0) OP m2.data (i)))  \
                  r.data (m2.ridx (i) + j * m2_nr) = false;  \
            r.maybe_compress (true);  \
          }  \
        else  \
          {  \
            r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m2_nc; j++)  \
              {  \
                for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++)  \
                  if (m1.elem (0,0) OP m2.data (i))  \
                    {  \
                      r.ridx (nel) = m2.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      {  \
        if (Z1 OP m2.elem (0,0))  \
          {  \
            r = SparseBoolMatrix (m1_nr, m1_nc, true);  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++)  \
                if (! (m1.data (i) OP m2.elem (0,0)))  \
                  r.data (m1.ridx (i) + j * m1_nr) = false;  \
            r.maybe_compress (true);  \
          }  \
        else  \
          {  \
            r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              {  \
                for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++)  \
                  if (m1.data (i) OP m2.elem (0,0))  \
                    {  \
                      r.ridx (nel) = m1.ridx (i);  \
                      r.data (nel++) = true;  \
                    }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    else if (m1_nr == m2_nr && m1_nc == m2_nc)  \
      {  \
        if (m1_nr != 0 || m1_nc != 0)  \
          {  \
            if (Z1 OP Z2)  \
              {  \
                /* Implicit zeros compare true: start all-true, clear  */  \
                /* the stored positions where the comparison fails.    */  \
                r = SparseBoolMatrix (m1_nr, m1_nc, true);  \
                for (octave_idx_type j = 0; j < m1_nc; j++)  \
                  {  \
                    octave_idx_type i1 = m1.cidx (j);  \
                    octave_idx_type e1 = m1.cidx (j+1);  \
                    octave_idx_type i2 = m2.cidx (j);  \
                    octave_idx_type e2 = m2.cidx (j+1);  \
                    while (i1 < e1 || i2 < e2)  \
                      {  \
                        if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2)))  \
                          {  \
                            if (! (Z1 OP m2.data (i2)))  \
                              r.data (m2.ridx (i2) + j * m1_nr) = false;  \
                            i2++;  \
                          }  \
                        else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2))  \
                          {  \
                            if (! (m1.data (i1) OP Z2))  \
                              r.data (m1.ridx (i1) + j * m1_nr) = false;  \
                            i1++;  \
                          }  \
                        else  \
                          {  \
                            if (! (m1.data (i1) OP m2.data (i2)))  \
                              r.data (m1.ridx (i1) + j * m1_nr) = false;  \
                            i1++;  \
                            i2++;  \
                          }  \
                      }  \
                  }  \
                r.maybe_compress (true);  \
              }  \
            else  \
              {  \
                /* Implicit zeros compare false: only stored entries can  */  \
                /* produce true, so build sparsely over the pattern union. */  \
                r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ());  \
                r.cidx (0) = static_cast<octave_idx_type> (0);  \
                octave_idx_type nel = 0;  \
                for (octave_idx_type j = 0; j < m1_nc; j++)  \
                  {  \
                    octave_idx_type i1 = m1.cidx (j);  \
                    octave_idx_type e1 = m1.cidx (j+1);  \
                    octave_idx_type i2 = m2.cidx (j);  \
                    octave_idx_type e2 = m2.cidx (j+1);  \
                    while (i1 < e1 || i2 < e2)  \
                      {  \
                        if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2)))  \
                          {  \
                            if (Z1 OP m2.data (i2))  \
                              {  \
                                r.ridx (nel) = m2.ridx (i2);  \
                                r.data (nel++) = true;  \
                              }  \
                            i2++;  \
                          }  \
                        else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2))  \
                          {  \
                            if (m1.data (i1) OP Z2)  \
                              {  \
                                r.ridx (nel) = m1.ridx (i1);  \
                                r.data (nel++) = true;  \
                              }  \
                            i1++;  \
                          }  \
                        else  \
                          {  \
                            if (m1.data (i1) OP m2.data (i2))  \
                              {  \
                                r.ridx (nel) = m1.ridx (i1);  \
                                r.data (nel++) = true;  \
                              }  \
                            i1++;  \
                            i2++;  \
                          }  \
                      }  \
                    r.cidx (j + 1) = nel;  \
                  }  \
                r.maybe_compress (false);  \
              }  \
          }  \
      }  \
    else  \
      {  \
        /* Nonconformant unless one operand is empty (result stays empty).  */  \
        if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0))  \
          octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
      }  \
    return r;  \
  }

// All six element-wise comparisons for sparse OP sparse.
#define SPARSE_SMSM_CMP_OPS(M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, M2)

// Only == and != (for element types with no meaningful ordering).
#define SPARSE_SMSM_EQNE_OPS(M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, M2)  \
  SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, M2)

// Define: SparseBoolMatrix mx_el_and (sparse, sparse).  The two extern
// declarations name the scalar overloads the 1x1 shortcuts dispatch to.
// Only positions stored in BOTH patterns can be true, so the merge keeps
// the intersection.
#define SPARSE_SMSM_BOOL_AND_OP(M1, M2)  \
  extern SparseBoolMatrix mx_el_and (const M1&, const M2::element_type&);  \
  extern SparseBoolMatrix mx_el_and (const M1::element_type&, const M2&);  \
  SparseBoolMatrix  \
  mx_el_and (const M1& m1, const M2& m2)  \
  {  \
    SparseBoolMatrix r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    M1::element_type lhs_zero = M1::element_type ();  \
    M2::element_type rhs_zero = M2::element_type ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      return mx_el_and (m1.elem (0,0), m2);  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      return mx_el_and (m1, m2.elem (0,0));  \
    else if (m1_nr == m2_nr && m1_nc == m2_nc)  \
      {  \
        if (m1_nr != 0 || m1_nc != 0)  \
          {  \
            r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              {  \
                octave_idx_type i1 = m1.cidx (j);  \
                octave_idx_type e1 = m1.cidx (j+1);  \
                octave_idx_type i2 = m2.cidx (j);  \
                octave_idx_type e2 = m2.cidx (j+1);  \
                while (i1 < e1 || i2 < e2)  \
                  {  \
                    if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2)))  \
                      i2++;  \
                    else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2))  \
                      i1++;  \
                    else  \
                      {  \
                        if (m1.data (i1) != lhs_zero && m2.data (i2) != rhs_zero)  \
                          {  \
                            r.ridx (nel) = m1.ridx (i1);  \
                            r.data (nel++) = true;  \
                          }  \
                        i1++;  \
                        i2++;  \
                      }  \
                  }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    else  \
      {  \
        if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0))  \
          /* BUG FIX: was "mx_el_and_" (stray trailing underscore);  */  \
          /* the OR twin reports "mx_el_or".                         */  \
          octave::err_nonconformant ("mx_el_and", m1_nr, m1_nc, m2_nr, m2_nc);  \
      }  \
    return r;  \
  }

// Define: SparseBoolMatrix mx_el_or (sparse, sparse).  Any stored nonzero
// in either operand makes the position true, so the merge keeps the
// pattern union.
#define SPARSE_SMSM_BOOL_OR_OP(M1, M2)  \
  extern SparseBoolMatrix mx_el_or (const M1&, const M2::element_type&);  \
  extern SparseBoolMatrix mx_el_or (const M1::element_type&, const M2&);  \
  SparseBoolMatrix  \
  mx_el_or (const M1& m1, const M2& m2)  \
  {  \
    SparseBoolMatrix r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    M1::element_type lhs_zero = M1::element_type ();  \
    M2::element_type rhs_zero = M2::element_type ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      return mx_el_or (m1.elem (0,0), m2);  \
    else if (m2_nr == 1 && m2_nc == 1)  \
      return mx_el_or (m1, m2.elem (0,0));  \
    else if (m1_nr == m2_nr && m1_nc == m2_nc)  \
      {  \
        if (m1_nr != 0 || m1_nc != 0)  \
          {  \
            r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ());  \
            r.cidx (0) = static_cast<octave_idx_type> (0);  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              {  \
                octave_idx_type i1 = m1.cidx (j);  \
                octave_idx_type e1 = m1.cidx (j+1);  \
                octave_idx_type i2 = m2.cidx (j);  \
                octave_idx_type e2 = m2.cidx (j+1);  \
                while (i1 < e1 || i2 < e2)  \
                  {  \
                    if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2)))  \
                      {  \
                        if (m2.data (i2) != rhs_zero)  \
                          {  \
                            r.ridx (nel) = m2.ridx (i2);  \
                            r.data (nel++) = true;  \
                          }  \
                        i2++;  \
                      }  \
                    else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2))  \
                      {  \
                        if (m1.data (i1) != lhs_zero)  \
                          {  \
                            r.ridx (nel) = m1.ridx (i1);  \
                            r.data (nel++) = true;  \
                          }  \
                        i1++;  \
                      }  \
                    else  \
                      {  \
                        if (m1.data (i1) != lhs_zero || m2.data (i2) != rhs_zero)  \
                          {  \
                            r.ridx (nel) = m1.ridx (i1);  \
                            r.data (nel++) = true;  \
                          }  \
                        i1++;  \
                        i2++;  \
                      }  \
                  }  \
                r.cidx (j + 1) = nel;  \
              }  \
            r.maybe_compress (false);  \
          }  \
      }  \
    else  \
      {  \
        if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0))  \
          octave::err_nonconformant ("mx_el_or", m1_nr, m1_nc, m2_nr, m2_nc);  \
      }  \
    return r;  \
  }

#define SPARSE_SMSM_BOOL_OPS(M1, M2)  \
  SPARSE_SMSM_BOOL_AND_OP (M1, M2)  \
  SPARSE_SMSM_BOOL_OR_OP (M1, M2)

// matrix by sparse matrix operations.

// Define: R F (const M1& m1, const M2& m2) for dense OP sparse where the
// result is dense anyway: broadcast a 1x1 m2, otherwise delegate to the
// dense version via m2.matrix_value ().
#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m2_nr == 1 && m2_nc == 1)  \
      r = R (m1 OP m2.elem (0,0));  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
        r = R (F (m1, m2.matrix_value ()));  \
      }  \
    return r;  \
  }

// Define: R F (const M1& m1, const M2& m2) for dense OP sparse where the
// result can stay sparse (product): if every element of m1 is finite,
// m2's pattern is preserved and only its stored entries are visited;
// otherwise (Inf/NaN in m1 would leak into the zeros) fall back to the
// dense computation.
#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m2_nr == 1 && m2_nc == 1)  \
      r = R (m1 OP m2.elem (0,0));  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
        if (do_mx_check (m1, mx_inline_all_finite<M1::element_type>))  \
          {  \
            /* Sparsity pattern is preserved. */  \
            octave_idx_type m2_nz = m2.nnz ();  \
            r = R (m2_nr, m2_nc, m2_nz);  \
            for (octave_idx_type j = 0, k = 0; j < m2_nc; j++)  \
              {  \
                octave_quit ();  \
                for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++)  \
                  {  \
                    octave_idx_type mri = m2.ridx (i);  \
                    R::element_type x = m1(mri, j) OP m2.data (i);  \
                    if (x != 0.0)  \
                      {  \
                        r.xdata (k) = x;  \
                        r.xridx (k) = m2.ridx (i);  \
                        k++;  \
                      }  \
                  }  \
                r.xcidx (j+1) = k;  \
              }  \
            r.maybe_compress (false);  \
            return r;  \
          }  \
        else  \
          r = R (F (m1, m2.matrix_value ()));  \
      }  \
  \
    return r;  \
  }

// Instantiate dense OP sparse arithmetic; quotient uses the dense path
// (division by the implicit zeros densifies the result).
#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2)  \
  SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2)  \
  SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2)  \
  SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2)  \
  SPARSE_MSM_BIN_OP_1 (R2, quotient, /, M1, M2)

// Define: SparseBoolMatrix F (dense, sparse) for a comparison OP.  Two
// dense passes: first count the true elements, then fill the result.
#define SPARSE_MSM_CMP_OP(F, OP, M1, M2)  \
  SparseBoolMatrix  \
  F (const M1& m1, const M2& m2)  \
  {  \
    SparseBoolMatrix r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m2_nr == 1 && m2_nc == 1)  \
      r = SparseBoolMatrix (F (m1, m2.elem (0,0)));  \
    else if (m1_nr == m2_nr && m1_nc == m2_nc)  \
      {  \
        if (m1_nr != 0 || m1_nc != 0)  \
          {  \
            /* Count num of nonzero elements */  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              for (octave_idx_type i = 0; i < m1_nr; i++)  \
                if (m1.elem (i, j) OP m2.elem (i, j))  \
                  nel++;  \
  \
            r = SparseBoolMatrix (m1_nr, m1_nc, nel);  \
  \
            octave_idx_type ii = 0;  \
            r.cidx (0) = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              {  \
                for (octave_idx_type i = 0; i < m1_nr; i++)  \
                  {  \
                    bool el = m1.elem (i, j) OP m2.elem (i, j);  \
                    if (el)  \
                      {  \
                        r.data (ii) = el;  \
                        r.ridx (ii++) = i;  \
                      }  \
                  }  \
                r.cidx (j+1) = ii;  \
              }  \
          }  \
      }  \
    else  \
      {  \
        if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0))  \
          octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
      }  \
    return r;  \
  }

// All six element-wise comparisons for dense OP sparse.
#define SPARSE_MSM_CMP_OPS(M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, M2)

// Only == and != (for element types with no meaningful ordering).
#define SPARSE_MSM_EQNE_OPS(M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, M2)  \
  SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, M2)

// Define: SparseBoolMatrix F (dense, sparse) for logical OP (&& / ||),
// comparing both operands against their zero; same count-then-fill shape
// as SPARSE_MSM_CMP_OP.
#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2)  \
  SparseBoolMatrix  \
  F (const M1& m1, const M2& m2)  \
  {  \
    SparseBoolMatrix r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    M1::element_type lhs_zero = M1::element_type ();  \
    M2::element_type rhs_zero = M2::element_type ();  \
  \
    if (m2_nr == 1 && m2_nc == 1)  \
      r = SparseBoolMatrix (F (m1, m2.elem (0,0)));  \
    else if (m1_nr == m2_nr && m1_nc == m2_nc)  \
      {  \
        if (m1_nr != 0 || m1_nc != 0)  \
          {  \
            /* Count num of nonzero elements */  \
            octave_idx_type nel = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              for (octave_idx_type i = 0; i < m1_nr; i++)  \
                if ((m1.elem (i, j) != lhs_zero)  \
                    OP (m2.elem (i, j) != rhs_zero))  \
                  nel++;  \
  \
            r = SparseBoolMatrix (m1_nr, m1_nc, nel);  \
  \
            octave_idx_type ii = 0;  \
            r.cidx (0) = 0;  \
            for (octave_idx_type j = 0; j < m1_nc; j++)  \
              {  \
                for (octave_idx_type i = 0; i < m1_nr; i++)  \
                  {  \
                    bool el = (m1.elem (i, j) != lhs_zero)  \
                              OP (m2.elem (i, j) != rhs_zero);  \
                    if (el)  \
                      {  \
                        r.data (ii) = el;  \
                        r.ridx (ii++) = i;  \
                      }  \
                  }  \
                r.cidx (j+1) = ii;  \
              }  \
          }  \
      }  \
    else  \
      {  \
        if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0))  \
          octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
      }  \
    return r;  \
  }

#define SPARSE_MSM_BOOL_OPS(M1, M2)  \
  SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2)  \
  SPARSE_MSM_BOOL_OP (mx_el_or, ||, M1, M2)

// sparse matrix by matrix operations.

// Define: R F (sparse, dense) where the result is dense: broadcast a 1x1
// m1, otherwise delegate to the dense operator via m1.matrix_value ().
#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2)  \
  R  \
  F (const M1& m1, const M2& m2)  \
  {  \
    R r;  \
  \
    octave_idx_type m1_nr = m1.rows ();  \
    octave_idx_type m1_nc = m1.cols ();  \
  \
    octave_idx_type m2_nr = m2.rows ();  \
    octave_idx_type m2_nc = m2.cols ();  \
  \
    if (m1_nr == 1 && m1_nc == 1)  \
      r = R (m1.elem (0,0) OP m2);  \
    else if (m1_nr != m2_nr || m1_nc != m2_nc)  \
      octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc);  \
    else  \
      {  \
        r = R (m1.matrix_value () OP m2);  \
      }  \
    return r;  \
  }

// sm .* m preserves sparsity if m contains no Infs nor Nans.
#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET)  \
  do_mx_check (m2, mx_inline_all_finite<ET>)

// sm ./ m preserves sparsity if m contains no NaNs or zeros.
#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET)  \
  !
do_mx_check (m2, mx_inline_any_nan<ET>) && m2.nnz () == m2.numel () 1288 1289 #define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \ 1290 R \ 1291 F (const M1& m1, const M2& m2) \ 1292 { \ 1293 R r; \ 1294 \ 1295 octave_idx_type m1_nr = m1.rows (); \ 1296 octave_idx_type m1_nc = m1.cols (); \ 1297 \ 1298 octave_idx_type m2_nr = m2.rows (); \ 1299 octave_idx_type m2_nc = m2.cols (); \ 1300 \ 1301 if (m1_nr == 1 && m1_nc == 1) \ 1302 r = R (m1.elem (0,0) OP m2); \ 1303 else if (m1_nr != m2_nr || m1_nc != m2_nc) \ 1304 octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ 1305 else \ 1306 { \ 1307 if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \ 1308 { \ 1309 /* Sparsity pattern is preserved. */ \ 1310 octave_idx_type m1_nz = m1.nnz (); \ 1311 r = R (m1_nr, m1_nc, m1_nz); \ 1312 for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \ 1313 { \ 1314 octave_quit (); \ 1315 for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \ 1316 { \ 1317 octave_idx_type mri = m1.ridx (i); \ 1318 R::element_type x = m1.data (i) OP m2 (mri, j); \ 1319 if (x != 0.0) \ 1320 { \ 1321 r.xdata (k) = x; \ 1322 r.xridx (k) = m1.ridx (i); \ 1323 k++; \ 1324 } \ 1325 } \ 1326 r.xcidx (j+1) = k; \ 1327 } \ 1328 r.maybe_compress (false); \ 1329 return r; \ 1330 } \ 1331 else \ 1332 r = R (F (m1.matrix_value (), m2)); \ 1333 } \ 1334 \ 1335 return r; \ 1336 } 1337 1338 #define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \ 1339 SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \ 1340 SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \ 1341 SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \ 1342 SPARSE_SMM_BIN_OP_2 (R2, quotient, /, M1, M2) 1343 1344 #define SPARSE_SMM_CMP_OP(F, OP, M1, M2) \ 1345 SparseBoolMatrix \ 1346 F (const M1& m1, const M2& m2) \ 1347 { \ 1348 SparseBoolMatrix r; \ 1349 \ 1350 octave_idx_type m1_nr = m1.rows (); \ 1351 octave_idx_type m1_nc = m1.cols (); \ 1352 \ 1353 octave_idx_type m2_nr = m2.rows (); \ 1354 octave_idx_type m2_nc = m2.cols (); \ 1355 \ 1356 if (m1_nr == 1 && 
m1_nc == 1) \ 1357 r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ 1358 else if (m1_nr == m2_nr && m1_nc == m2_nc) \ 1359 { \ 1360 if (m1_nr != 0 || m1_nc != 0) \ 1361 { \ 1362 /* Count num of nonzero elements */ \ 1363 octave_idx_type nel = 0; \ 1364 for (octave_idx_type j = 0; j < m1_nc; j++) \ 1365 for (octave_idx_type i = 0; i < m1_nr; i++) \ 1366 if (m1.elem (i, j) OP m2.elem (i, j)) \ 1367 nel++; \ 1368 \ 1369 r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ 1370 \ 1371 octave_idx_type ii = 0; \ 1372 r.cidx (0) = 0; \ 1373 for (octave_idx_type j = 0; j < m1_nc; j++) \ 1374 { \ 1375 for (octave_idx_type i = 0; i < m1_nr; i++) \ 1376 { \ 1377 bool el = m1.elem (i, j) OP m2.elem (i, j); \ 1378 if (el) \ 1379 { \ 1380 r.data (ii) = el; \ 1381 r.ridx (ii++) = i; \ 1382 } \ 1383 } \ 1384 r.cidx (j+1) = ii; \ 1385 } \ 1386 } \ 1387 } \ 1388 else \ 1389 { \ 1390 if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ 1391 octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ 1392 } \ 1393 return r; \ 1394 } 1395 1396 #define SPARSE_SMM_CMP_OPS(M1, M2) \ 1397 SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, M2) \ 1398 SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, M2) \ 1399 SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, M2) \ 1400 SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, M2) \ 1401 SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, M2) \ 1402 SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, M2) 1403 1404 #define SPARSE_SMM_EQNE_OPS(M1, M2) \ 1405 SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, M2) \ 1406 SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, M2) 1407 1408 #define SPARSE_SMM_BOOL_OP(F, OP, M1, M2) \ 1409 SparseBoolMatrix \ 1410 F (const M1& m1, const M2& m2) \ 1411 { \ 1412 SparseBoolMatrix r; \ 1413 \ 1414 octave_idx_type m1_nr = m1.rows (); \ 1415 octave_idx_type m1_nc = m1.cols (); \ 1416 \ 1417 octave_idx_type m2_nr = m2.rows (); \ 1418 octave_idx_type m2_nc = m2.cols (); \ 1419 \ 1420 M1::element_type lhs_zero = M1::element_type (); \ 1421 M2::element_type rhs_zero = M2::element_type (); \ 1422 \ 1423 if (m1_nr == 1 && m1_nc 
== 1) \ 1424 r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \ 1425 else if (m1_nr == m2_nr && m1_nc == m2_nc) \ 1426 { \ 1427 if (m1_nr != 0 || m1_nc != 0) \ 1428 { \ 1429 /* Count num of nonzero elements */ \ 1430 octave_idx_type nel = 0; \ 1431 for (octave_idx_type j = 0; j < m1_nc; j++) \ 1432 for (octave_idx_type i = 0; i < m1_nr; i++) \ 1433 if ((m1.elem (i, j) != lhs_zero) \ 1434 OP (m2.elem (i, j) != rhs_zero)) \ 1435 nel++; \ 1436 \ 1437 r = SparseBoolMatrix (m1_nr, m1_nc, nel); \ 1438 \ 1439 octave_idx_type ii = 0; \ 1440 r.cidx (0) = 0; \ 1441 for (octave_idx_type j = 0; j < m1_nc; j++) \ 1442 { \ 1443 for (octave_idx_type i = 0; i < m1_nr; i++) \ 1444 { \ 1445 bool el = (m1.elem (i, j) != lhs_zero) \ 1446 OP (m2.elem (i, j) != rhs_zero); \ 1447 if (el) \ 1448 { \ 1449 r.data (ii) = el; \ 1450 r.ridx (ii++) = i; \ 1451 } \ 1452 } \ 1453 r.cidx (j+1) = ii; \ 1454 } \ 1455 } \ 1456 } \ 1457 else \ 1458 { \ 1459 if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \ 1460 octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \ 1461 } \ 1462 return r; \ 1463 } 1464 1465 #define SPARSE_SMM_BOOL_OPS(M1, M2) \ 1466 SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2) \ 1467 SPARSE_SMM_BOOL_OP (mx_el_or, ||, M1, M2) 1468 1469 // Avoid some code duplication. Maybe we should use templates. 1470 1471 #define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \ 1472 \ 1473 octave_idx_type nr = rows (); \ 1474 octave_idx_type nc = cols (); \ 1475 \ 1476 RET_TYPE retval; \ 1477 \ 1478 if (nr > 0 && nc > 0) \ 1479 { \ 1480 if ((nr == 1 && dim == -1) || dim == 1) \ 1481 /* Ugly!! Is there a better way? */ \ 1482 retval = transpose (). 
FCN (0) .transpose (); \ 1483 else \ 1484 { \ 1485 octave_idx_type nel = 0; \ 1486 for (octave_idx_type i = 0; i < nc; i++) \ 1487 { \ 1488 ELT_TYPE t = ELT_TYPE (); \ 1489 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ 1490 { \ 1491 t += data (j); \ 1492 if (t != ELT_TYPE ()) \ 1493 { \ 1494 if (j == cidx (i+1) - 1) \ 1495 nel += nr - ridx (j); \ 1496 else \ 1497 nel += ridx (j+1) - ridx (j); \ 1498 } \ 1499 } \ 1500 } \ 1501 retval = RET_TYPE (nr, nc, nel); \ 1502 retval.cidx (0) = 0; \ 1503 octave_idx_type ii = 0; \ 1504 for (octave_idx_type i = 0; i < nc; i++) \ 1505 { \ 1506 ELT_TYPE t = ELT_TYPE (); \ 1507 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ 1508 { \ 1509 t += data (j); \ 1510 if (t != ELT_TYPE ()) \ 1511 { \ 1512 if (j == cidx (i+1) - 1) \ 1513 { \ 1514 for (octave_idx_type k = ridx (j); k < nr; k++) \ 1515 { \ 1516 retval.data (ii) = t; \ 1517 retval.ridx (ii++) = k; \ 1518 } \ 1519 } \ 1520 else \ 1521 { \ 1522 for (octave_idx_type k = ridx (j); k < ridx (j+1); k++) \ 1523 { \ 1524 retval.data (ii) = t; \ 1525 retval.ridx (ii++) = k; \ 1526 } \ 1527 } \ 1528 } \ 1529 } \ 1530 retval.cidx (i+1) = ii; \ 1531 } \ 1532 } \ 1533 } \ 1534 else \ 1535 retval = RET_TYPE (nr,nc); \ 1536 \ 1537 return retval 1538 1539 #define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \ 1540 \ 1541 octave_idx_type nr = rows (); \ 1542 octave_idx_type nc = cols (); \ 1543 \ 1544 RET_TYPE retval; \ 1545 \ 1546 if (nr > 0 && nc > 0) \ 1547 { \ 1548 if ((nr == 1 && dim == -1) || dim == 1) \ 1549 /* Ugly!! Is there a better way? */ \ 1550 retval = transpose (). 
FCN (0) .transpose (); \ 1551 else \ 1552 { \ 1553 octave_idx_type nel = 0; \ 1554 for (octave_idx_type i = 0; i < nc; i++) \ 1555 { \ 1556 octave_idx_type jj = 0; \ 1557 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ 1558 { \ 1559 if (jj == ridx (j)) \ 1560 { \ 1561 nel++; \ 1562 jj++; \ 1563 } \ 1564 else \ 1565 break; \ 1566 } \ 1567 } \ 1568 retval = RET_TYPE (nr, nc, nel); \ 1569 retval.cidx (0) = 0; \ 1570 octave_idx_type ii = 0; \ 1571 for (octave_idx_type i = 0; i < nc; i++) \ 1572 { \ 1573 ELT_TYPE t = ELT_TYPE (1.); \ 1574 octave_idx_type jj = 0; \ 1575 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \ 1576 { \ 1577 if (jj == ridx (j)) \ 1578 { \ 1579 t *= data (j); \ 1580 retval.data (ii) = t; \ 1581 retval.ridx (ii++) = jj++; \ 1582 } \ 1583 else \ 1584 break; \ 1585 } \ 1586 retval.cidx (i+1) = ii; \ 1587 } \ 1588 } \ 1589 } \ 1590 else \ 1591 retval = RET_TYPE (nr,nc); \ 1592 \ 1593 return retval 1594 1595 #define SPARSE_BASE_REDUCTION_OP(RET_TYPE, EL_TYPE, ROW_EXPR, COL_EXPR, \ 1596 INIT_VAL, MT_RESULT) \ 1597 \ 1598 octave_idx_type nr = rows (); \ 1599 octave_idx_type nc = cols (); \ 1600 \ 1601 RET_TYPE retval; \ 1602 \ 1603 if (nr > 0 && nc > 0) \ 1604 { \ 1605 if ((nr == 1 && dim == -1) || dim == 1) \ 1606 { \ 1607 /* Define j here to allow fancy definition for prod method */ \ 1608 octave_idx_type j = 0; \ 1609 OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \ 1610 \ 1611 for (octave_idx_type i = 0; i < nr; i++) \ 1612 tmp[i] = INIT_VAL; \ 1613 for (j = 0; j < nc; j++) \ 1614 { \ 1615 for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \ 1616 { \ 1617 ROW_EXPR; \ 1618 } \ 1619 } \ 1620 octave_idx_type nel = 0; \ 1621 for (octave_idx_type i = 0; i < nr; i++) \ 1622 if (tmp[i] != EL_TYPE ()) \ 1623 nel++; \ 1624 retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nel); \ 1625 retval.cidx (0) = 0; \ 1626 retval.cidx (1) = nel; \ 1627 nel = 0; \ 1628 for (octave_idx_type i = 0; i < nr; i++) \ 1629 if (tmp[i] != EL_TYPE ()) \ 
1630 { \ 1631 retval.data (nel) = tmp[i]; \ 1632 retval.ridx (nel++) = i; \ 1633 } \ 1634 } \ 1635 else \ 1636 { \ 1637 OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \ 1638 \ 1639 for (octave_idx_type j = 0; j < nc; j++) \ 1640 { \ 1641 tmp[j] = INIT_VAL; \ 1642 for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \ 1643 { \ 1644 COL_EXPR; \ 1645 } \ 1646 } \ 1647 octave_idx_type nel = 0; \ 1648 for (octave_idx_type i = 0; i < nc; i++) \ 1649 if (tmp[i] != EL_TYPE ()) \ 1650 nel++; \ 1651 retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nel); \ 1652 retval.cidx (0) = 0; \ 1653 nel = 0; \ 1654 for (octave_idx_type i = 0; i < nc; i++) \ 1655 if (tmp[i] != EL_TYPE ()) \ 1656 { \ 1657 retval.data (nel) = tmp[i]; \ 1658 retval.ridx (nel++) = 0; \ 1659 retval.cidx (i+1) = retval.cidx (i) + 1; \ 1660 } \ 1661 else \ 1662 retval.cidx (i+1) = retval.cidx (i); \ 1663 } \ 1664 } \ 1665 else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \ 1666 { \ 1667 if (MT_RESULT) \ 1668 { \ 1669 retval = RET_TYPE (static_cast<octave_idx_type> (1), \ 1670 static_cast<octave_idx_type> (1), \ 1671 static_cast<octave_idx_type> (1)); \ 1672 retval.cidx (0) = 0; \ 1673 retval.cidx (1) = 1; \ 1674 retval.ridx (0) = 0; \ 1675 retval.data (0) = MT_RESULT; \ 1676 } \ 1677 else \ 1678 retval = RET_TYPE (static_cast<octave_idx_type> (1), \ 1679 static_cast<octave_idx_type> (1), \ 1680 static_cast<octave_idx_type> (0)); \ 1681 } \ 1682 else if (nr == 0 && (dim == 0 || dim == -1)) \ 1683 { \ 1684 if (MT_RESULT) \ 1685 { \ 1686 retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nc); \ 1687 retval.cidx (0) = 0; \ 1688 for (octave_idx_type i = 0; i < nc ; i++) \ 1689 { \ 1690 retval.ridx (i) = 0; \ 1691 retval.cidx (i+1) = i+1; \ 1692 retval.data (i) = MT_RESULT; \ 1693 } \ 1694 } \ 1695 else \ 1696 retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, \ 1697 static_cast<octave_idx_type> (0)); \ 1698 } \ 1699 else if (nc == 0 && dim == 1) \ 1700 { \ 1701 if (MT_RESULT) \ 1702 { \ 1703 
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nr); \ 1704 retval.cidx (0) = 0; \ 1705 retval.cidx (1) = nr; \ 1706 for (octave_idx_type i = 0; i < nr; i++) \ 1707 { \ 1708 retval.ridx (i) = i; \ 1709 retval.data (i) = MT_RESULT; \ 1710 } \ 1711 } \ 1712 else \ 1713 retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), \ 1714 static_cast<octave_idx_type> (0)); \ 1715 } \ 1716 else \ 1717 retval.resize (nr > 0, nc > 0); \ 1718 \ 1719 return retval 1720 1721 #define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \ 1722 tmp[ridx (i)] OP data (i) 1723 1724 #define SPARSE_REDUCTION_OP_COL_EXPR(OP) \ 1725 tmp[j] OP data (i) 1726 1727 #define SPARSE_REDUCTION_OP(RET_TYPE, EL_TYPE, OP, INIT_VAL, MT_RESULT) \ 1728 SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \ 1729 SPARSE_REDUCTION_OP_ROW_EXPR (OP), \ 1730 SPARSE_REDUCTION_OP_COL_EXPR (OP), \ 1731 INIT_VAL, MT_RESULT) 1732 1733 // Don't break from this loop if the test succeeds because 1734 // we are looping over the rows and not the columns in the inner loop. 1735 #define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \ 1736 if (data (i) TEST_OP 0.0) \ 1737 tmp[ridx (i)] = TEST_TRUE_VAL; 1738 1739 #define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \ 1740 if (data (i) TEST_OP 0.0) \ 1741 { \ 1742 tmp[j] = TEST_TRUE_VAL; \ 1743 break; \ 1744 } 1745 1746 #define SPARSE_ANY_ALL_OP(DIM, INIT_VAL, MT_RESULT, TEST_OP, TEST_TRUE_VAL) \ 1747 SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \ 1748 SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \ 1749 SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \ 1750 INIT_VAL, MT_RESULT) 1751 1752 #define SPARSE_ALL_OP(DIM) \ 1753 if ((rows () == 1 && dim == -1) || dim == 1) \ 1754 return transpose (). all (0). transpose (); \ 1755 else \ 1756 { \ 1757 SPARSE_ANY_ALL_OP (DIM, (cidx (j+1) - cidx (j) < nr ? 
false : true), \ 1758 true, ==, false); \ 1759 } 1760 1761 #define SPARSE_ANY_OP(DIM) SPARSE_ANY_ALL_OP (DIM, false, false, !=, true) 1762 1763 #define SPARSE_SPARSE_MUL(RET_TYPE, RET_EL_TYPE, EL_TYPE) \ 1764 octave_idx_type nr = m.rows (); \ 1765 octave_idx_type nc = m.cols (); \ 1766 \ 1767 octave_idx_type a_nr = a.rows (); \ 1768 octave_idx_type a_nc = a.cols (); \ 1769 \ 1770 if (nr == 1 && nc == 1) \ 1771 { \ 1772 RET_EL_TYPE s = m.elem (0,0); \ 1773 octave_idx_type nz = a.nnz (); \ 1774 RET_TYPE r (a_nr, a_nc, nz); \ 1775 \ 1776 for (octave_idx_type i = 0; i < nz; i++) \ 1777 { \ 1778 octave_quit (); \ 1779 r.data (i) = s * a.data (i); \ 1780 r.ridx (i) = a.ridx (i); \ 1781 } \ 1782 for (octave_idx_type i = 0; i < a_nc + 1; i++) \ 1783 { \ 1784 octave_quit (); \ 1785 r.cidx (i) = a.cidx (i); \ 1786 } \ 1787 \ 1788 r.maybe_compress (true); \ 1789 return r; \ 1790 } \ 1791 else if (a_nr == 1 && a_nc == 1) \ 1792 { \ 1793 RET_EL_TYPE s = a.elem (0,0); \ 1794 octave_idx_type nz = m.nnz (); \ 1795 RET_TYPE r (nr, nc, nz); \ 1796 \ 1797 for (octave_idx_type i = 0; i < nz; i++) \ 1798 { \ 1799 octave_quit (); \ 1800 r.data (i) = m.data (i) * s; \ 1801 r.ridx (i) = m.ridx (i); \ 1802 } \ 1803 for (octave_idx_type i = 0; i < nc + 1; i++) \ 1804 { \ 1805 octave_quit (); \ 1806 r.cidx (i) = m.cidx (i); \ 1807 } \ 1808 \ 1809 r.maybe_compress (true); \ 1810 return r; \ 1811 } \ 1812 else if (nc != a_nr) \ 1813 octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ 1814 else \ 1815 { \ 1816 OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \ 1817 RET_TYPE retval (nr, a_nc, static_cast<octave_idx_type> (0)); \ 1818 for (octave_idx_type i = 0; i < nr; i++) \ 1819 w[i] = 0; \ 1820 retval.xcidx (0) = 0; \ 1821 \ 1822 octave_idx_type nel = 0; \ 1823 \ 1824 for (octave_idx_type i = 0; i < a_nc; i++) \ 1825 { \ 1826 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ 1827 { \ 1828 octave_idx_type col = a.ridx (j); \ 1829 for (octave_idx_type k = m.cidx (col) ; k < 
m.cidx (col+1); k++) \ 1830 { \ 1831 if (w[m.ridx (k)] < i + 1) \ 1832 { \ 1833 w[m.ridx (k)] = i + 1; \ 1834 nel++; \ 1835 } \ 1836 octave_quit (); \ 1837 } \ 1838 } \ 1839 retval.xcidx (i+1) = nel; \ 1840 } \ 1841 \ 1842 if (nel == 0) \ 1843 return RET_TYPE (nr, a_nc); \ 1844 else \ 1845 { \ 1846 for (octave_idx_type i = 0; i < nr; i++) \ 1847 w[i] = 0; \ 1848 \ 1849 OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \ 1850 \ 1851 retval.change_capacity (nel); \ 1852 /* The optimal break-point as estimated from simulations */ \ 1853 /* Note that Mergesort is O(nz log(nz)) while searching all */ \ 1854 /* values is O(nr), where nz here is nonzero per row of */ \ 1855 /* length nr. The test itself was then derived from the */ \ 1856 /* simulation with random square matrices and the observation */ \ 1857 /* of the number of nonzero elements in the output matrix */ \ 1858 /* it was found that the breakpoints were */ \ 1859 /* nr: 500 1000 2000 5000 10000 */ \ 1860 /* nz: 6 25 97 585 2202 */ \ 1861 /* The below is a simplication of the 'polyfit'-ed parameters */ \ 1862 /* to these breakpoints */ \ 1863 octave_idx_type n_per_col = (a_nc > 43000 ? 
43000 : \ 1864 (a_nc * a_nc) / 43000); \ 1865 octave_idx_type ii = 0; \ 1866 octave_idx_type *ri = retval.xridx (); \ 1867 octave_sort<octave_idx_type> sort; \ 1868 \ 1869 for (octave_idx_type i = 0; i < a_nc ; i++) \ 1870 { \ 1871 if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \ 1872 { \ 1873 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ 1874 { \ 1875 octave_idx_type col = a.ridx (j); \ 1876 EL_TYPE tmpval = a.data (j); \ 1877 for (octave_idx_type k = m.cidx (col) ; \ 1878 k < m.cidx (col+1); k++) \ 1879 { \ 1880 octave_quit (); \ 1881 octave_idx_type row = m.ridx (k); \ 1882 if (w[row] < i + 1) \ 1883 { \ 1884 w[row] = i + 1; \ 1885 Xcol[row] = tmpval * m.data (k); \ 1886 } \ 1887 else \ 1888 Xcol[row] += tmpval * m.data (k); \ 1889 } \ 1890 } \ 1891 for (octave_idx_type k = 0; k < nr; k++) \ 1892 if (w[k] == i + 1) \ 1893 { \ 1894 retval.xdata (ii) = Xcol[k]; \ 1895 retval.xridx (ii++) = k; \ 1896 } \ 1897 } \ 1898 else \ 1899 { \ 1900 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ 1901 { \ 1902 octave_idx_type col = a.ridx (j); \ 1903 EL_TYPE tmpval = a.data (j); \ 1904 for (octave_idx_type k = m.cidx (col) ; \ 1905 k < m.cidx (col+1); k++) \ 1906 { \ 1907 octave_quit (); \ 1908 octave_idx_type row = m.ridx (k); \ 1909 if (w[row] < i + 1) \ 1910 { \ 1911 w[row] = i + 1; \ 1912 retval.xridx (ii++) = row; \ 1913 Xcol[row] = tmpval * m.data (k); \ 1914 } \ 1915 else \ 1916 Xcol[row] += tmpval * m.data (k); \ 1917 } \ 1918 } \ 1919 sort.sort (ri + retval.xcidx (i), ii - retval.xcidx (i)); \ 1920 for (octave_idx_type k = retval.xcidx (i); k < ii; k++) \ 1921 retval.xdata (k) = Xcol[retval.xridx (k)]; \ 1922 } \ 1923 } \ 1924 retval.maybe_compress (true); \ 1925 return retval; \ 1926 } \ 1927 } 1928 1929 #define SPARSE_FULL_MUL(RET_TYPE, EL_TYPE) \ 1930 octave_idx_type nr = m.rows (); \ 1931 octave_idx_type nc = m.cols (); \ 1932 \ 1933 octave_idx_type a_nr = a.rows (); \ 1934 octave_idx_type a_nc = a.cols (); \ 1935 \ 1936 if 
(nr == 1 && nc == 1) \ 1937 { \ 1938 RET_TYPE retval = m.elem (0,0) * a; \ 1939 return retval; \ 1940 } \ 1941 else if (nc != a_nr) \ 1942 octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ 1943 else \ 1944 { \ 1945 RET_TYPE::element_type zero = RET_TYPE::element_type (); \ 1946 \ 1947 RET_TYPE retval (nr, a_nc, zero); \ 1948 \ 1949 for (octave_idx_type i = 0; i < a_nc ; i++) \ 1950 { \ 1951 for (octave_idx_type j = 0; j < a_nr; j++) \ 1952 { \ 1953 octave_quit (); \ 1954 \ 1955 EL_TYPE tmpval = a.elem (j,i); \ 1956 for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \ 1957 retval.elem (m.ridx (k),i) += tmpval * m.data (k); \ 1958 } \ 1959 } \ 1960 return retval; \ 1961 } 1962 1963 #define SPARSE_FULL_TRANS_MUL(RET_TYPE, EL_TYPE, CONJ_OP) \ 1964 octave_idx_type nr = m.rows (); \ 1965 octave_idx_type nc = m.cols (); \ 1966 \ 1967 octave_idx_type a_nr = a.rows (); \ 1968 octave_idx_type a_nc = a.cols (); \ 1969 \ 1970 if (nr == 1 && nc == 1) \ 1971 { \ 1972 RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \ 1973 return retval; \ 1974 } \ 1975 else if (nr != a_nr) \ 1976 octave::err_nonconformant ("operator *", nc, nr, a_nr, a_nc); \ 1977 else \ 1978 { \ 1979 RET_TYPE retval (nc, a_nc); \ 1980 \ 1981 for (octave_idx_type i = 0; i < a_nc ; i++) \ 1982 { \ 1983 for (octave_idx_type j = 0; j < nc; j++) \ 1984 { \ 1985 octave_quit (); \ 1986 \ 1987 EL_TYPE acc = EL_TYPE (); \ 1988 for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \ 1989 acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \ 1990 retval.xelem (j,i) = acc; \ 1991 } \ 1992 } \ 1993 return retval; \ 1994 } 1995 1996 #define FULL_SPARSE_MUL(RET_TYPE, EL_TYPE) \ 1997 octave_idx_type nr = m.rows (); \ 1998 octave_idx_type nc = m.cols (); \ 1999 \ 2000 octave_idx_type a_nr = a.rows (); \ 2001 octave_idx_type a_nc = a.cols (); \ 2002 \ 2003 if (a_nr == 1 && a_nc == 1) \ 2004 { \ 2005 RET_TYPE retval = m * a.elem (0,0); \ 2006 return retval; \ 2007 } \ 2008 else if (nc != a_nr) \ 2009 
octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \ 2010 else \ 2011 { \ 2012 RET_TYPE::element_type zero = RET_TYPE::element_type (); \ 2013 \ 2014 RET_TYPE retval (nr, a_nc, zero); \ 2015 \ 2016 for (octave_idx_type i = 0; i < a_nc ; i++) \ 2017 { \ 2018 octave_quit (); \ 2019 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ 2020 { \ 2021 octave_idx_type col = a.ridx (j); \ 2022 EL_TYPE tmpval = a.data (j); \ 2023 \ 2024 for (octave_idx_type k = 0 ; k < nr; k++) \ 2025 retval.xelem (k,i) += tmpval * m.elem (k,col); \ 2026 } \ 2027 } \ 2028 return retval; \ 2029 } 2030 2031 #define FULL_SPARSE_MUL_TRANS(RET_TYPE, EL_TYPE, CONJ_OP) \ 2032 octave_idx_type nr = m.rows (); \ 2033 octave_idx_type nc = m.cols (); \ 2034 \ 2035 octave_idx_type a_nr = a.rows (); \ 2036 octave_idx_type a_nc = a.cols (); \ 2037 \ 2038 if (a_nr == 1 && a_nc == 1) \ 2039 { \ 2040 RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \ 2041 return retval; \ 2042 } \ 2043 else if (nc != a_nc) \ 2044 octave::err_nonconformant ("operator *", nr, nc, a_nc, a_nr); \ 2045 else \ 2046 { \ 2047 RET_TYPE::element_type zero = RET_TYPE::element_type (); \ 2048 \ 2049 RET_TYPE retval (nr, a_nr, zero); \ 2050 \ 2051 for (octave_idx_type i = 0; i < a_nc ; i++) \ 2052 { \ 2053 octave_quit (); \ 2054 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \ 2055 { \ 2056 octave_idx_type col = a.ridx (j); \ 2057 EL_TYPE tmpval = CONJ_OP (a.data (j)); \ 2058 for (octave_idx_type k = 0 ; k < nr; k++) \ 2059 retval.xelem (k,col) += tmpval * m.elem (k,i); \ 2060 } \ 2061 } \ 2062 return retval; \ 2063 } 2064 2065 #endif 2066