/* -*- mode:C++ -*- */
/*
 * Copyright (C) 2000,2014 B. Parisse, Institut Fourier, 38402 St Martin d'Heres
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _GIAC_POLY_H_
#define _GIAC_POLY_H_
#include "first.h"
//#include <fstream>
#include "vector.h"
#include <string>
#include "fraction.h"
#include "index.h"
#include "monomial.h"
#include "threaded.h"
#include <algorithm>

#ifndef NO_NAMESPACE_GIAC
namespace giac {
#endif // ndef NO_NAMESPACE_GIAC

  // the tensor class to represent polynomials
  template <class T> class tensor{
  public:
    // members
    int dim; // number of indices, this is the size of monomial.index
    std::vector< monomial<T> > coord; // sorted list of monomials
    // T zero;
    // function object: sorting function for the monomial ordering
    bool (* is_strictly_greater)( const index_m &, const index_m &);
    std::pointer_to_binary_function < const monomial<T> &, const monomial<T> &, bool> m_is_strictly_greater ;
    // constructors
    tensor(const tensor<T> & t) : dim(t.dim), coord(t.coord), is_strictly_greater(t.is_strictly_greater), m_is_strictly_greater(t.m_is_strictly_greater) { }
    tensor(const tensor<T> & t, const std::vector< monomial<T> > & v) : dim(t.dim), coord(v), is_strictly_greater(t.is_strictly_greater), m_is_strictly_greater(t.m_is_strictly_greater) { }
    // warning: this constructor prohibits construction of tensor from a value
    // of type T if this value is an int, except by using tensor<T>(T(int))
    tensor() : dim(0), is_strictly_greater(i_lex_is_strictly_greater), m_is_strictly_greater(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>)) { }
    explicit tensor(int d) : dim(d), is_strictly_greater(i_lex_is_strictly_greater), m_is_strictly_greater(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>)) { }
    explicit tensor(int d,const tensor<T> & t) : dim(d),is_strictly_greater(t.is_strictly_greater), m_is_strictly_greater(t.m_is_strictly_greater) { }
    tensor(const monomial<T> & v) : dim(int(v.index.size())), is_strictly_greater(i_lex_is_strictly_greater), m_is_strictly_greater(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>)) {
      coord.push_back(v);
    }
    tensor(const T & v, int d) : dim(d), is_strictly_greater(i_lex_is_strictly_greater), m_is_strictly_greater(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>)) {
      if (!is_zero(v))
        coord.push_back(monomial<T>(v,0,d));
    }
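    // Example (added for illustration, assuming giac's gen coefficient type):
    //   tensor<gen> p(gen(3),2); // the constant polynomial 3 in 2 variables
    //   tensor<gen> q(2);        // the zero polynomial in 2 variables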
    tensor(int d,const std::vector< monomial<T> > & c) : dim(d), coord(c),
      is_strictly_greater(i_lex_is_strictly_greater), m_is_strictly_greater(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>)) { }
    ~tensor() { coord.clear(); }
    // member functions
    // ordering monomials in the tensor
    void tsort(){
#if 1 // def NSPIRE
      sort_helper<T> M(m_is_strictly_greater);
      sort(coord.begin(),coord.end(),M);
#else
      sort(coord.begin(),coord.end(),m_is_strictly_greater);
#endif
    }
    int lexsorted_degree() const{
      if (!dim)
        return 0;
      if (coord.empty())
        return 0;
      else
        return coord.front().index.front();
    }
    int degree(int n) const ;
    int valuation(int n) const ;
    index_t degree() const ;
    int total_degree() const ;
    int partial_degree(int nvars) const ; // total degree w.r.t. vars 0..nvars-1
    void reverse() ; // reverse variable ordering
    void append(const tensor<T> &);
    void Tcoeffs(std::vector< tensor<T> > & v) const;
    std::vector< tensor<T> > Tcoeffs() const;
    tensor<T> coeff(int deg) const;
    tensor<T> multiplydegrees(int d) const ;
    tensor<T> dividedegrees(int d) const ;
    tensor<T> dividealldegrees(int d) const ;
    tensor<T> homogeneize() const;
    void reorder(const std::vector<int> & permutation) ;
    // shift and multiply, shift and divide, shift only
    tensor<T> shift (const index_m & ishift,const T & fois) const ;
    tensor<T> shift (const T & fois,const index_m & ishift) const ;
    tensor<T> shift (const index_m & ishift) const ;
    // tensor<T> operator + (const T & other) const ;
    void TAdd(const tensor<T> &other,tensor<T> & result) const;
    // tensor<T> operator - (const tensor<T> & other) const ;
    void TSub(const tensor<T> &other,tensor<T> & result) const;
    // tensor<T> operator * (const tensor<T> & other) const ;
    // tensor<T> operator * (const T & fact) const ;
    tensor<T> & operator *= (const T & fact) ;
    // tensor<T> operator - () const ;
    // tensor<T> operator / (const tensor<T> & other) const ;
    // tensor<T> operator / (const T & fact) const ;
    tensor<T> & operator /= (const T & fact) ;
    // tensor<T> operator % (const tensor<T> & other) const ;
    bool TDivRem (const tensor<T> & other, tensor<T> & quo, tensor<T> & rem, bool allowrational = true ) const ; // this=quo*other+rem
    bool TDivRemHash(const tensor<T> & b,tensor<T> & quo,tensor<T> & r,bool allowrational=false,int exactquo=0,double qmax=0.0) const ; // same as TDivRem but allowrational=false *and* poly with 1 main variable
    bool TDivRem1(const tensor<T> & b,tensor<T> & quo,tensor<T> & r,bool allowrational=false,int exactquo=0) const ;
    bool Texactquotient (const tensor<T> & other, tensor<T> & quo,bool allowrational=false ) const ; // this=quo*other+rem, rem must be 0
    bool TPseudoDivRem (const tensor<T> & other, tensor<T> & quo, tensor<T> & rem, tensor<T> & a) const ; // a*this=quo*other+rem
    // bool TDivRem (const T & x0, tensor<T> & quo, tensor<T> & rem) const ;
    tensor<T> operator () (const T & x0 ) const;
    // T operator () (const std::vector<T> & x0) const;
    T constant_term() const ;
    T norm() const;
    tensor<T> derivative() const ;
    tensor<T> integrate() const ;
    // insertion of a monomial and reordering
    void insert_monomial(const monomial<T> & c);
    // position corresponding to an index, return -1 if not found
    int position(const index_m & v) const;
    index_m vector_int(int position) const;
    T & operator () ( const index_m & v) ;
    const T & operator () ( const index_m & v) const ;
    tensor<T> untrunc1 (int j=0) const {
      std::vector< monomial<T> > v;
      Untrunc1(this->coord,j,v);
      return tensor<T>(dim+1,v);
    }
    void untruncn (int j=0) {
      Untruncn(this->coord,j);
      ++dim;
    }
    tensor<T> untrunc (int j,int dim) const {
      std::vector< monomial<T> > v;
      Untrunc(this->coord,j,dim,v);
      return tensor<T>(dim,v);
    }
    tensor<T> trunc1 () const {
      assert(dim);
      std::vector< monomial<T> > v;
      Trunc1(this->coord,v);
      return tensor<T>(dim-1,v);
    }
    int gcddeg(int k) const;
    index_t gcddeg() const;
    // printing
    std::string print() const {
      if (coord.empty())
        return "";
      std::string s;
      typename std::vector< monomial<T> > :: const_iterator it=coord.begin(),itend=coord.end();
      for (;;){
        s += it->print();
        ++it;
        if (it==itend)
          return s;
        s += '+' ;
      }
    }
    const char * dbgprint() const {
#if 0
      static std::string s;
      s=print();
      return s.c_str();
#else
      static std::string * sptr=0;
      if (!sptr) sptr=new std::string;
      *sptr=print();
      return sptr->c_str();
#endif
    }
    void high_order_degree_truncate(int n){
      // suppress terms of order >= n
      typename std::vector< monomial<T> >::iterator it=coord.begin(),itend=coord.end();
      for (;it!=itend;++it){
        if (it->index.front()<n)
          break;
      }
      if (it!=coord.begin() && it!=itend)
        coord.erase(coord.begin(),it);
    }
    tensor<T> total_degree_truncate(int n) const {
      tensor<T> res(dim);
      // suppress terms of total degree > n
      typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
      for (;it!=itend;++it){
        if (sum_degree(it->index)<=n)
          res.coord.push_back(*it);
      }
      return res;
    }
  };
  template <class T> class Tref_tensor{
  public:
    ref_count_t ref_count;
    tensor<T> t;
    Tref_tensor<T>(const tensor<T> & P): ref_count(1),t(P) {}
    Tref_tensor<T>(int dim): ref_count(1),t(dim) {}
  };

  // convert p to monomials represented by unsigned integers
  // using the rule [a1 a2 .. an] [deg1 ... degn] ->
  // (...(a1*deg2+a2)*deg3+...)*degn+an
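  // Worked example (added; the numbers are illustrative): with deg=[4 3],
  // the monomial of index [2 1] (i.e. x1^2*x2) is packed as u=(0*4+2)*3+1=7;
  // unpacking reads u%3=1 for x2, then (u/3)%4=2 for x1.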
  template<class T,class U>
  void convert(const tensor<T> & p,const index_t & deg,std::vector< T_unsigned<T,U> > & v){
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin(),itend=p.coord.end(),itstop;
    v.clear();
    v.reserve(itend-it);
    T_unsigned<T,U> gu;
    U u;
    int nterms;
    index_t::const_iterator itit,itcur,idxcur,idxend,ditbeg=deg.begin(),ditend=deg.end(),dit;
    for (;it!=itend;++it){
      u=0;
      itcur=itit=it->index.begin();
      for (dit=ditbeg;dit!=ditend;++itit,++dit)
        u=u*unsigned(*dit)+unsigned(*itit);
      gu.u=u;
      gu.g=it->value;
      v.push_back(gu);
      // dense poly check
      --itit;
      nterms=*itit;
      if (nterms<2 || nterms>=itend-it)
        continue;
      itstop=it+nterms;
      if (itstop->index.back())
        continue;
      for (idxcur=itstop->index.begin(),idxend=itstop->index.end()-1;idxcur!=idxend;++idxcur,++itcur){
        if (*idxcur!=*itcur)
          break;
      }
      if (idxcur!=idxend)
        continue;
      // this part is dense
      for (;it!=itstop;){
        ++it;
        gu.g=it->value;
        --gu.u;
        v.push_back(gu);
      }
    }
  }

  template<class T,class U>
  void convert(const std::vector< T_unsigned<T,U> > & v,const index_t & deg,tensor<T> & p){
    typename std::vector< T_unsigned<T,U> >::const_iterator it=v.begin(),itend=v.end();
    index_t::const_reverse_iterator ditbeg=deg.rbegin(),ditend=deg.rend(),dit;
    p.dim=ditend-ditbeg;
    p.coord.clear();
    p.coord.reserve(itend-it);
    U u;
    index_t i(p.dim);
    int k;
    for (;it!=itend;++it){
      u=it->u;
      for (k=p.dim-1,dit=ditbeg;dit!=ditend;++dit,--k){
        i[k]=u % unsigned(*dit);
        u = u/unsigned(*dit);
      }
      p.coord.push_back(monomial<T>(it->g,i));
    }
  }
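  // Usage sketch (added; illustration only, assuming each entry of deg
  // strictly exceeds the corresponding degree of p so every exponent fits):
  //   std::vector< T_unsigned<int,unsigned> > packed;
  //   convert(p,deg,packed);  // pack exponents into machine integers
  //   tensor<int> q(p.dim);
  //   convert(packed,deg,q);  // unpack: q holds the same monomials as p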
  template <class T>
  bool operator == (const tensor<T> & p,const tensor<T> & q){
    return p.dim==q.dim && p.coord.size()==q.coord.size() && p.coord==q.coord;
  }

  template <class T> bool tensor_is_strictly_greater(const tensor<T> & p,const tensor<T> & q){
    if (q.coord.empty())
      return true;
    if (p.coord.empty())
      return false;
    return p.m_is_strictly_greater(p.coord.front(),q.coord.front());
  }

  template <class T>
  void tensor<T>::insert_monomial(const monomial<T> & c){
    coord.push_back(c);
    this->tsort(); // sort(coord.begin(),coord.end(),m_is_strictly_greater);
  }

  template <class T>
  int tensor<T>::degree(int n) const {
    typename std::vector< monomial<T> >::const_iterator it=this->coord.begin();
    typename std::vector< monomial<T> >::const_iterator it_end=this->coord.end();
    int res=0;
    for (;it!=it_end;++it){
      int temp=(it->index)[n];
      if (res<temp)
        res=temp;
    }
    return res;
  }

  template <class T>
  int tensor<T>::total_degree() const {
    typename std::vector< monomial<T> >::const_iterator it=this->coord.begin();
    typename std::vector< monomial<T> >::const_iterator it_end=this->coord.end();
    int res=0;
    for (;it!=it_end;++it){
      int temp=sum_degree(it->index);
      if (res<temp)
        res=temp;
    }
    return res;
  }

  template <class T>
  tensor<T> tensor<T>::homogeneize() const {
    int d=total_degree();
    tensor<T> res(*this);
    typename std::vector< monomial<T> >::iterator it=res.coord.begin(),itend=res.coord.end();
    for (;it!=itend;++it){
      index_t i=it->index.iref();
      i.push_back(d-sum_degree(i));
      it->index=i;
    }
    return res;
  }
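  // Worked example (added for illustration): if p = x^2+y+1 (dim 2, total
  // degree 2), homogeneize() appends a last variable z and returns
  // x^2+y*z+z^2, so that every monomial has total degree 2.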
  template <class T>
  int tensor<T>::partial_degree(int vars) const {
    typename std::vector< monomial<T> >::const_iterator it=this->coord.begin();
    typename std::vector< monomial<T> >::const_iterator it_end=this->coord.end();
    int res=0;
    for (;it!=it_end;++it){
      int temp=0;
      index_t::const_iterator jt=it->index.begin(),jtend=jt+vars;
      for (;jt!=jtend;++jt){
        temp += *jt;
      }
      if (res<temp)
        res=temp;
    }
    return res;
  }

  template <class T>
  int tensor<T>::valuation(int n) const {
    typename std::vector< monomial<T> >::const_iterator it=this->coord.begin();
    typename std::vector< monomial<T> >::const_iterator it_end=this->coord.end();
    if (it==it_end)
      return 0;
    int res=(it->index)[n];
    for (;it!=it_end;++it){
      int temp=(it->index)[n];
      if (res>temp)
        res=temp;
    }
    return res;
  }

  template <class T>
  index_t tensor<T>::degree() const {
    typename std::vector< monomial<T> >::const_iterator it=this->coord.begin(),it2;
    typename std::vector< monomial<T> >::const_iterator it_end=this->coord.end();
    index_t res(dim);
    if (!dim) return res;
    index_t::iterator itresbeg=res.begin(),itresend=res.end(),itres;
    index_t::const_iterator ittemp,ittemp2,ittempend;
    if (// false &&
        is_strictly_greater==i_lex_is_strictly_greater){
      for (;it!=it_end;++it){
        ittemp=it->index.begin();
        for (itres=itresbeg;itres!=itresend;++itres,++ittemp){
          if (*itres<*ittemp)
            *itres=*ittemp;
        }
        // check if the polynomial is dense -> skip ?
        --ittemp; // point to the xn power
        if (*ittemp<3 || *ittemp>=it_end-it)
          continue;
        it2=it+(*ittemp); // if dense, point to the last monomial with the same x1..xn-1
        if (it2->index.back()) // the last power in xn must be 0
          continue;
        ittemp=it->index.begin();
        ittemp2=it2->index.begin();
        ittempend=ittemp+dim-1; // check all other powers
        for (;ittemp!=ittempend;++ittemp2,++ittemp){
          if (*ittemp!=*ittemp2)
            break;
        }
        if (ittemp!=ittempend)
          continue;
        it=it2;
      }
    }
    else {
      for (;it!=it_end;++it){
        ittemp=it->index.begin();
        for (itres=itresbeg;itres!=itresend;++itres,++ittemp){
          if (*itres<*ittemp)
            *itres=*ittemp;
        }
      }
    }
    return res;
  }

  template <class T>
  void tensor<T>::append(const tensor<T> & p){
    if (p.coord.empty())
      return;
    if (this->coord.empty()){
      this->dim=p.dim;
      this->coord=p.coord;
      return;
    }
    if (is_strictly_greater(this->coord.back().index , p.coord.front().index)){
      this->coord.reserve(this->coord.size()+p.coord.size());
      typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();
      typename std::vector< monomial<T> >::const_iterator it_end=p.coord.end();
      for (;it!=it_end;++it)
        this->coord.push_back(*it);
    }
    else
      TAdd(p,*this); // *this=*this+p;
  }

  template <class T>
  tensor<T> tensor<T>::multiplydegrees(int d) const {
    tensor<T> res(dim);
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),it_end=coord.end();
    for (;it!=it_end;++it){
      index_t i=it->index.iref();
      i.front() *= d;
      res.coord.push_back(monomial<T>(it->value,i));
    }
    return res;
  }

  template <class T>
  tensor<T> tensor<T>::dividedegrees(int d) const {
    tensor<T> res(dim);
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),it_end=coord.end();
    for (;it!=it_end;++it){
      index_t i=it->index.iref();
      i.front() /= d;
      res.coord.push_back(monomial<T>(it->value,i));
    }
    return res;
  }

  template <class T>
  tensor<T> tensor<T>::dividealldegrees(int d) const {
    tensor<T> res(dim);
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),it_end=coord.end();
    for (;it!=it_end;++it){
      index_t i=it->index.iref();
      i = i/d;
      res.coord.push_back(monomial<T>(it->value,i));
    }
    return res;
  }

  template <class T>
  int tensor<T>::position(const index_m & v) const {
    int smax=int(coord.size())-1;
    int smin=0;
    int s;
    for (;smin<smax;){
      s=(smax+smin)/2; // smin <= s < smax
      index_m vs=coord[s].index;
      if (v==vs)
        break;
      if (is_strictly_greater(v,vs)) // v > coord[s]: v must be located before s
        smax=s-1;
      else
        smin=s+1; // keeps smin <= smax
    }
    s=(smax+smin)/2; // if the loop broke s is correct, else smin==smax
    index_m vs=coord[s].index;
    if (v==vs)
      return(s);
    else
      return(-1);
  }

  template <class T>
  index_m tensor<T>::vector_int(int position) const {
    return(coord[position].index);
  }

  template <class T>
  T & tensor<T>::operator () ( const index_m & v) {
    int p=position(v);
    if (p!=-1)
      return coord[p].value;
    coord.push_back(monomial<T>(T(0),v));
    p=position(v);
    return coord[p].value;
  }
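  // Note (added): the non-const accessor above inserts a zero coefficient
  // when the index is absent, while the const accessor below returns a
  // reference to a local static zero, so neither fails on a missing monomial.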
  template<class T>
  const T & tensor<T>::operator () ( const index_m & v) const{
    static T const myzero(0);
    int p=position(v);
    if (p==-1) {
      return myzero;
    }
    else
      return (coord[p].value);
  }

  template <class T>
  std::ostream & operator << (std::ostream & os, const tensor<T> & t)
  {
    return os << t.print();
  }

  template <class T>
  void lexsort(std::vector < monomial<T> > & v){
#if 1 // def NSPIRE
    sort_helper<T> M(std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>));
    sort(v.begin(),v.end(),M);
#else
    sort(v.begin(),v.end(),std::ptr_fun<const monomial<T> &, const monomial<T> &, bool>(m_lex_is_strictly_greater<T>));
#endif
  }

  template <class T>
  int tensor<T>::gcddeg(int k) const {
    assert(k<dim);
    int res=0;
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
    for (;it!=itend;++it){
      res=mygcd((it->index)[k],res);
      if (res==1)
        break;
    }
    return res;
  }

  template <class T>
  index_t tensor<T>::gcddeg() const {
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
    assert(itend!=it);
    index_t res(it->index.iref());
    index_t zero(res.size());
    for (;it!=itend;++it){
      res=index_gcd(it->index.iref(),res);
      if (res==zero)
        break;
    }
    return res;
  }

  // univariate Horner evaluation with remainder; rem is the evaluation result
  // as an (n-1)-dimensional tensor and quo the quotient as an n-dim tensor
  /*
  template <class T>
  bool tensor<T>::TDivRem (const T & x0, tensor<T> & quo, tensor<T> & rem) const
  {
    if (coord.empty()){
      rem=*this;
      quo=*this;
      return true;
    }
    std::vector< monomial<T> > horner_coord(coord);
    tensor<T> add_rem(dim-1), add_quo(dim-1);
    lexsort(horner_coord);
    tensor<T> quotient(*this,new_seq);
    rem.coord.clear();
    quo.coord.clear();
    typename std::vector< monomial<T> >::const_iterator it = horner_coord.begin();
    index_m pui=it->index;
    for (;it!=horner_coord.end();++it){
      if (pui.front()==it->index.front()) {
        // same external power, add
        add_rem.coord.push_back(it->trunc1());
        add_quo.coord.push_back(*it);
      }
      else { // different power, do a Horner multiplication
        rem=(rem+add_rem)*pow(x0,pui.front()-it->index.front());
        add_rem.coord.clear();
        add_rem.coord.push_back(it->trunc1());
        for (;pui.front()> it->index.front();){
          pui[0]--;
          add_quo.divided_by_x();
          add_quo=add_quo*x0;
          quo=quo+add_quo;
        }
        add_quo.coord.clear();
        pui=it->index;
      }
    }
    rem=(rem+add_rem)*pow(x0,pui.front());
    for (;pui.front();){
      pui[0]--;
      add_quo.divided_by_x();
      add_quo=add_quo*x0;
      quo=quo+add_quo;
    }
    // sort remainder and quotient
    rem.tsort();
    quo.tsort();
    return true;
  }
  */

  // univariate Horner evaluation with remainder; rem is the evaluation result
  // as an (n-1)-dimensional tensor, the quotient is not computed
  template <class T>
  tensor<T> tensor<T>::operator () (const T & x0 ) const
  {
    if (coord.empty())
      return *this;
    if (!dim){
      return *this;
    }
    std::vector< monomial<T> > horner_coord(coord);
    tensor<T> rem(dim-1),add_rem(dim-1) ;
    lexsort(horner_coord);
    typename std::vector< monomial<T> >::const_iterator it = horner_coord.begin();
    index_m pui=(*it).index;
    for (;it!=horner_coord.end();++it){
      if (pui.front()==it->index.front()) {
        // same external power, add
        add_rem.coord.push_back(it->trunc1());
      }
      else { // different power, do a Horner multiplication
#if !defined NSPIRE && !defined(FXCG) // GIAC_VECTOR
        rem.TAdd(add_rem,rem); rem *= pow(x0,pui.front()-it->index.front());
#else
        rem =(add_rem+rem)*pow(x0,pui.front()-it->index.front());
#endif
        add_rem.coord.clear();
        add_rem.coord.push_back(it->trunc1());
        pui=it->index;
      }
    }
    rem.TAdd(add_rem,rem); // rem=(add_rem+rem);
    if (pui.front()){
#if !defined (NSPIRE) && !defined(FXCG) // GIAC_VECTOR
      rem *= pow(x0,pui.front());
#else
      rem = rem*pow(x0,pui.front());
#endif
    }
    rem.tsort();
    return rem;
  }
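  // Example (added for illustration): for the univariate p = x^3+2x+1 (dim 1),
  // p(T(2)) returns the 0-dimensional tensor holding 8+4+1 = 13; for dim n>1,
  // the main variable is substituted and an (n-1)-variable tensor remains.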
  template <class T>
  void tensor<T>::TAdd (const tensor<T> & other,tensor<T> & result) const {
    typename std::vector< monomial<T> >::const_iterator a=coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end=coord.end();
    if (a == a_end) {
      result=other;
      return ;
    }
    typename std::vector< monomial<T> >::const_iterator b=other.coord.begin();
    typename std::vector< monomial<T> >::const_iterator b_end=other.coord.end();
    if (b==b_end){
      result= *this;
      return ;
    }
    Add(a,a_end,b,b_end,result.coord,is_strictly_greater);
  }

  /*
  template <class T>
  tensor<T> tensor<T>::operator + (const tensor<T> & other) const {
    // Tensor addition
    typename std::vector< monomial<T> >::const_iterator a=coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end=coord.end();
    if (a == a_end) {
      return other;
    }
    typename std::vector< monomial<T> >::const_iterator b=other.coord.begin();
    typename std::vector< monomial<T> >::const_iterator b_end=other.coord.end();
    if (b==b_end){
      return *this;
    }
    std::vector< monomial<T> > new_coord;
    Add(a,a_end,b,b_end,new_coord,is_strictly_greater);
    return tensor<T>(*this,new_coord);
  }
  */

  template <class T>
  void tensor<T>::TSub (const tensor<T> & other,tensor<T> & result) const {
    typename std::vector< monomial<T> >::const_iterator a=coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end=coord.end();
    typename std::vector< monomial<T> >::const_iterator b=other.coord.begin();
    typename std::vector< monomial<T> >::const_iterator b_end=other.coord.end();
    if (b == b_end) {
      result= *this;
      return;
    }
    Sub(a,a_end,b,b_end,result.coord,is_strictly_greater);
  }
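  // Note (added): result may alias *this or other; TAdd/TSub are used
  // in place elsewhere in this file, e.g. rem.TAdd(add_rem,rem) and
  // r.TSub(q*b,r).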
  /*
  template <class T>
  tensor<T> tensor<T>::operator - (const tensor<T> & other) const {
    // Tensor subtraction
    typename std::vector< monomial<T> >::const_iterator a=coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end=coord.end();
    typename std::vector< monomial<T> >::const_iterator b=other.coord.begin();
    typename std::vector< monomial<T> >::const_iterator b_end=other.coord.end();
    if (b == b_end) {
      return *this;
    }
    std::vector< monomial<T> > new_coord;
    Sub(a,a_end,b,b_end,new_coord,is_strictly_greater);
    return tensor<T>(*this,new_coord);
  }

  template <class T>
  tensor<T> tensor<T>::operator - () const {
    // Tensor negation
    std::vector< monomial<T> > new_coord;
    typename std::vector< monomial<T> >::const_iterator a = coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end = coord.end();
    new_coord.reserve(((int) a_end - (int) a )/(sizeof(monomial<T>)));
    for (;a!=a_end;++a){
      new_coord.push_back(monomial<T>(-(*a).value,(*a).index));
    }
    return tensor<T>(*this,new_coord);
  }

  template <class T>
  tensor<T> tensor<T>::operator * (const tensor<T> & other) const {
    // Multiplication
    typename std::vector< monomial<T> >::const_iterator ita = coord.begin();
    typename std::vector< monomial<T> >::const_iterator ita_end = coord.end();
    typename std::vector< monomial<T> >::const_iterator itb = other.coord.begin();
    typename std::vector< monomial<T> >::const_iterator itb_end = other.coord.end();
    // COUT << coord.size() << " " << (int) ita_end - (int) ita << " " << sizeof(monomial<T>) << '\n' ;
    // first some trivial cases
    if (ita==ita_end)
      return(*this);
    if (itb==itb_end)
      return(other);
    if (is_one(*this))
      return other;
    if (is_one(other))
      return *this;
    // Now look if length a=1 or length b=1; this happens frequently,
    // think of x^3*y^2*z translated to internal form
    int c1=coord.size();
    if (c1==1)
      return(other.shift(coord.front().index,coord.front().value));
    int c2=other.coord.size();
    if (c2==1)
      return(this->shift(other.coord.front().index,other.coord.front().value));
    std::vector< monomial<T> > new_coord;
    new_coord.reserve(c1+c2); // assumes dense poly (would be c1+c2-1)
    Mul(ita,ita_end,itb,itb_end,new_coord,m_is_strictly_greater);
    return tensor<T>(*this,new_coord);
  }
  */

  template <class T>
  tensor<T> Tpow(const tensor<T> & x,int n){
#ifndef NO_STDEXCEPT
    if (n<0)
      setsizeerr("poly.h/Tpow n<0");
#endif
    if (!n)
      return tensor<T>(T(1),x.dim);
    if (n==1)
      return x;
    if (n==2)
      return x*x;
    if (x.coord.size()==1)
      return tensor<T>(monomial<T>(pow(x.coord.front().value,n),x.coord.front().index*n));
    tensor<T> res(x);
    for (int j=1;j<n;j++)
      res=res*x;
    return res;
    /*
      Note: contrary to univariate polynomials or integers,
      the "fast" powering algorithm is *slower* than the above
      loop for multivariate polynomials (with the current implementation of Mul).
      Indeed a dense poly of degree aa in d variables may have
      binomial(aa+d,d) monomials,
      hence the last multiplication in the "fast" powering algorithm
      is O(binomial(n/2*deg+d,d)^2)=O(n^(2d)),
      whereas the n multiplications of the above loop are
      O(n*binomial(n*deg+d,d)) = O(n^(d+1)).
    tensor<T> temp=Tpow(x,n/2);
    if (n%2)
      return tensor<T>(temp*temp*x);
    else
      return tensor<T>(temp*temp);
    */
  }
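  // Usage sketch (added; illustration only): Tpow(p,5) computes p^5 by 4
  // successive multiplications, which the note above argues beats binary
  // powering for dense multivariate polynomials.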
  /*
  template <class T>
  tensor<T> tensor<T>::operator / (const tensor<T> & other) const {
    tensor<T> rem(*this),quo(*this);
    assert( (*this).TDivRem(other,quo,rem) );
    return(quo);
  }

  template <class T>
  tensor<T> tensor<T>::operator % (const tensor<T> & other) const {
    tensor<T> rem(*this),quo(*this);
    assert( (*this).TDivRem(other,quo,rem) );
    return(rem);
  }
  */

  /*
  template <class T>
  tensor<T> tensor<T>::operator * (const T & fact ) const {
    // Tensor constant multiplication
    if (is_one(fact))
      return *this;
    std::vector< monomial<T> > new_coord;
    if (is_zero(fact))
      return tensor<T>(*this,new_coord);
    typename std::vector< monomial<T> >::const_iterator a = coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end = coord.end();
    Mul(a,a_end,fact,new_coord);
    return tensor<T>(*this,new_coord);
  }

  inline template <class T>
  tensor<T> operator * (const T & fact ,const tensor<T> & p){
    return p*fact;
  }
  */

  template <class T>
  tensor<T> & tensor<T>::operator *= (const T & fact ) {
    // Tensor constant multiplication
    if (is_one(fact))
      return *this;
    if (is_zero(fact)){
      coord.clear();
      return *this;
    }
    typename std::vector< monomial<T> >::const_iterator a = coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end = coord.end();
    Mul<T>(a,a_end,fact,coord);
    return *this;
  }

  /*
  template <class T>
  tensor<T> tensor<T>::operator / (const T & fact ) const {
    if (is_one(fact))
      return *this;
    std::vector< monomial<T> > new_coord;
    typename std::vector< monomial<T> >::const_iterator a = coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end = coord.end();
    Div(a,a_end,fact,new_coord);
    return tensor<T>(*this,new_coord);
  }
  */

  template <class T>
  tensor<T> & tensor<T>::operator /= (const T & fact ) {
    if (is_one(fact))
      return *this;
    typename std::vector< monomial<T> >::const_iterator a = coord.begin();
    typename std::vector< monomial<T> >::const_iterator a_end = coord.end();
    Div<T>(a,a_end,fact,coord);
    return *this;
  }

  template<class T>
  tensor<T> Tnextcoeff(typename std::vector< monomial<T> >::const_iterator & it,const typename std::vector< monomial<T> >::const_iterator & itend){
    if (it==itend)
      return tensor<T>(0);
    int n=it->index.front();
    int d=int(it->index.size());
    tensor<T> res(d-1);
    for (;(it!=itend) && (it->index.front()==n);++it)
      res.coord.push_back(it->trunc1());
    return res;
  }
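  // Added comment: Tlastcoeff below mirrors Tnextcoeff but collects the block
  // of terms of *lowest* degree in the main variable from [itbeg,itend),
  // returned truncated of that variable.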
  template<class T>
  tensor<T> Tlastcoeff(const typename std::vector< monomial<T> >::const_iterator & itbeg,const typename std::vector< monomial<T> >::const_iterator & itend){
    assert(itbeg!=itend);
    typename std::vector< monomial<T> >::const_iterator it=itend;
    --it;
    int n=it->index.front();
    int d=int(it->index.size());
    tensor<T> res(d-1);
    for (;;){
      if (it==itbeg)
        break;
      --it;
      if (it->index.front()!=n){
        ++it;
        break;
      }
    }
    for (;it!=itend;++it)
      res.coord.push_back(it->trunc1());
    return res;
  }

  template <class T>
  bool tensor<T>::TDivRem1(const tensor<T> & b,tensor<T> & quo,tensor<T> & r,bool allowrational,int exactquo) const {
    const tensor<T> & a=*this;
    quo.coord.clear();
    quo.dim=a.dim;
    r.dim=a.dim;
    r.coord.clear();
    if ( b.dim<=1 || b.coord.size()==1 || a.coord.empty() ){
      return a.TDivRem(b,quo,r,allowrational) && (exactquo?r.coord.empty():true) ;
    }
    /* alternative code
    std::vector< tensor<T> > R=Tcoeffs();
    std::vector< tensor<T> > B=b.Tcoeffs();
    tensor<T> Q;
    int rs=R.size(),bs=B.size(),qs=rs-bs;
    if (rs<bs){
      r=a;
      return true;
    }
    for (int i=0;i<=qs;++i){
      if (!R[i].Texactquotient(B[0],Q,allowrational))
        return false;
      for (int j=i+1;j<i+bs;++j){
        if (!B[j-i].coord.empty())
          R[j]=R[j]-Q*B[j-i];
      }
      Q=Q.untrunc1(qs-i);
      quo.append(Q);
    }
    // convert R[qs+1..rs] to r
    for (int i=qs+1;i<rs;++i){
      R[i]=R[i].untrunc1(rs-i-1);
      r.append(R[i]);
    }
    return true;
    */
    // old code
    typename std::vector< monomial<T> >::const_iterator it=b.coord.begin(),itend=b.coord.end();
    r=a;
    if (b.coord.empty()){
      if (exactquo)
        return r.coord.empty();
      return true;
    }
    int bdeg=it->index.front(),rdeg=r.lexsorted_degree(),qdeg=rdeg-bdeg; // int bval=(itend-1)->index.front();
    tensor<T> b0(Tnextcoeff<T>(it,b.coord.end()));
    tensor<T> q(b0.dim);
    if (it==b.coord.end()){ // bn==b0, b=b0*main var to a power
      it=r.coord.begin();
      itend=r.coord.end();
      while ( it!=itend){
        rdeg=it->index.front();
        if (rdeg<bdeg)
          break;
        tensor<T> a0(Tnextcoeff<T>(it,itend)),tmp(a0.dim);
        if (!a0.Texactquotient(b0,q,allowrational))
          return false;
        q=q.untrunc1(rdeg-bdeg);
        quo.append(q);
      }
      r.coord=std::vector< monomial<T> >(it,itend);
      if (exactquo)
        return it==itend;
      return true;
    }
    tensor<T> q1,b1(b0.dim); // subprincipal coeff
    if (it->index.front()==bdeg-1)
      b1=Tnextcoeff<T>(it,b.coord.end());
    // here it might be improved by checking that the last coeff divides
#if 1
    if (exactquo){
      tensor<T> bn=Tlastcoeff<T>(b.coord.begin(),b.coord.end());
      tensor<T> an(Tlastcoeff<T>(a.coord.begin(),a.coord.end())),qn;
      if (!an.Texactquotient(bn,qn,allowrational))
        return false;
    }
#endif
    for ( ;(rdeg=r.lexsorted_degree()) >=bdeg;){
      it=r.coord.begin();
      itend=r.coord.end();
      // compute 2 terms of the quotient in one iteration,
      // this will save one long subtraction
      tensor<T> a0(Tnextcoeff<T>(it,itend)),a1(a0.dim);
      if (exactquo && it==itend)
        return false;
      if (!a0.Texactquotient(b0,q,allowrational))
        return false;
      qdeg=rdeg-bdeg;
      if (qdeg){
        if (it!=itend && it->index.front()==rdeg-1)
          a1=Tnextcoeff<T>(it,itend);
#if defined GIAC_VECTOR || defined NSPIRE // fix for * on arm compiler
        tensor<T> tmp(q.dim);
        tmp.coord=q.coord*b1.coord;
        a1.TSub(tmp,a1);
#else
        a1.TSub(q*b1,a1); // a1=a1-q*b1;
#endif
        q=q.untrunc1(qdeg);
        if (!a1.Texactquotient(b0,q1,allowrational))
          return false;
        q.TAdd(q1.untrunc1(qdeg-1),q); // q=q+q1.untrunc1(qdeg-1);
      }
      else
        q=q.untrunc1(qdeg);
      /*
      if (exactquo){
        tensor<T> an(Tlastcoeff<T>(it,itend));
        int rval=(itend-1)->index.front();
        if (rval-bval<rdeg-bdeg){
          if (rval<bval+count) // we must gain one valuation per loop
            return false;
          if (!an.Texactquotient(bn,qn,allowrational))
            return false;
          q=q+qn.untrunc1(rval-bval);
          ++count;
        }
      }
      */
      quo.TAdd(q,quo); // quo=quo+q;
#if defined GIAC_VECTOR || defined NSPIRE
      tensor<T> tmp(q.dim);
      tmp.coord=q.coord*b.coord;
      r.TSub(tmp,r); // r=r-q*b;
#else
      r.TSub(q*b,r); // r=r-q*b;
#endif
      if (r.coord.empty())
        return true;
    }
    if (exactquo) return r.coord.empty();
    return true;
  }
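  // Usage sketch (added; illustration only), for a,b of the same dimension:
  //   tensor<T> q(a.dim), r(a.dim);
  //   if (a.TDivRem1(b,q,r)) { /* a = q*b + r w.r.t. the main variable */ }
  // With exactquo!=0 the call additionally requires the remainder to vanish.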
  // hashing seems slower than DivRem1 if the computation can be done with int
  template <class T>
  bool tensor<T>::TDivRemHash(const tensor<T> & b,tensor<T> & quo,tensor<T> & r,bool allowrational,int exactquo,double qmax) const {
    const tensor<T> & a=*this;
    quo.coord.clear();
    quo.dim=a.dim;
    r.dim=a.dim;
    r.coord.clear();
    int bs=b.coord.size();
    if ( b.dim<=1 || bs==1 || a.coord.empty() ){
      return a.TDivRem(b,quo,r,allowrational) && (exactquo?r.coord.empty():true) ;
    }
    int bdeg=b.coord.front().index.front(),rdeg=lexsorted_degree(),ddeg=rdeg-bdeg;
    if (ddeg>2 && bs>10){
      index_t d1=degree(),d2=b.degree(),d3=b.coord.front().index.iref(),d(dim);
      // the i-th degrees of this / other in the quotient and remainder
      // are <= i-th degree of this + ddeg*(i-th degree of other - i-th degree of lcoeff of other)
      double ans=1;
      for (int i=0;i<dim;++i){
        d[i]=d1[i]+(ddeg+1)*(d2[i]-d3[i])+1;
        int j=1;
        // round to the next power of 2
        for (;;j++){
          if (!(d[i] >>= 1))
            break;
        }
        d[i] = 1 << j;
        ans = ans*unsigned(d[i]);
        if (ans/RAND_MAX>RAND_MAX)
          break;
      }
      if (ans<RAND_MAX){
        std::vector< T_unsigned<T,unsigned> > p1,p2,quot,remain;
        std::vector<unsigned> vars(dim);
        vars[dim-1]=1;
        for (int i=dim-2;i>=0;--i){
          vars[i]=d[i+1]*vars[i+1];
        }
        convert(*this,d,p1);
        convert(b,d,p2);
        if (hashdivrem<T,unsigned>(p1,p2,quot,remain,vars,/* reduce */0,qmax,allowrational)){
          convert(quot,d,quo);
          convert(remain,d,r);
          return true;
        }
        else
          ans=1e200; // don't make another unsuccessful division!
      }
      if (ans/RAND_MAX<RAND_MAX){
        std::vector< T_unsigned<T,ulonglong> > p1,p2,quot,remain;
        std::vector<ulonglong> vars(dim);
        vars[dim-1]=1;
        for (int i=dim-2;i>=0;--i){
          vars[i]=d[i+1]*vars[i+1];
        }
        convert(*this,d,p1);
        convert(b,d,p2);
        if (hashdivrem<T,ulonglong>(p1,p2,quot,remain,vars,/* reduce */0,qmax,allowrational)){
          convert(quot,d,quo);
          convert(remain,d,r);
          return true;
        }
      }
    }
    return TDivRem1(b,quo,r,allowrational,exactquo);
  }
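  // Worked note (added; the numbers are illustrative): with d1[i]=5, ddeg=2
  // and d2[i]-d3[i]=1, the bound is d[i]=5+3*1+1=9, rounded up to the power
  // of two 16, so packed exponents cannot overflow during hashdivrem.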
  template<class T> tensor<T> tensor<T>::coeff(int deg) const {
    tensor<T> res(dim-1);
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
    for (;it!=itend;++it){
      int d=it->index.front();
      if (d>deg)
        continue;
      if (d==deg)
        return Tnextcoeff<T>(it,itend);
      else
        return res;
    }
    return res;
  }

  template<class T>
  void tensor<T>::Tcoeffs(std::vector< tensor<T> > & v) const{
    int current_deg=lexsorted_degree();
    std::vector< tensor<T> > w(current_deg+1,tensor<T>(dim-1));
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
    for (;it!=itend;++it){
      w[current_deg-it->index.front()].coord.push_back(it->trunc1());
    }
    w.swap(v);
  }

  template<class T>
  std::vector< tensor<T> > tensor<T>::Tcoeffs() const{
    int current_deg=lexsorted_degree();
    std::vector< tensor<T> > v;
    v.reserve(current_deg+1);
    typename std::vector< monomial<T> >::const_iterator it=coord.begin(),itend=coord.end();
    for (;it!=itend;--current_deg){
      if (it->index.front()==current_deg){
        v.push_back(Tnextcoeff<T>(it,itend));
      }
      else
        v.push_back(tensor<T>(dim-1));
    }
    for (;current_deg>=0;--current_deg) v.push_back(tensor<T>(dim-1));
    return v;
  }
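  // Note (added): Tcoeffs lists coefficients by decreasing power of the main
  // variable: v[0] is the coefficient of x^d (d = lexsorted_degree()) and
  // v[d] the coefficient of x^0, each a tensor in dim-1 variables.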
  template <class T>
  bool tensor<T>::Texactquotient(const tensor<T> & b,tensor<T> & quo,bool allowrational) const {
    if (coord.empty()){
      quo.dim=dim; quo.coord.clear();
      return true;
    }
    if (*this==b){
      quo=tensor<T>(T(1),dim);
      return true;
    }
    if (dim>1 && !allowrational && lexsorted_degree()==b.lexsorted_degree()){
      if (!Tfirstcoeff(*this).trunc1().Texactquotient(Tfirstcoeff(b).trunc1(),quo,allowrational))
        return false;
      quo=quo.untrunc1();
      if (is_one(quo))
        return false; // already tested above
      return *this==quo*b;
    }
    tensor<T> r(b.dim);
    return this->TDivRem1(b,quo,r,allowrational,1);
  }

  template <class T>
  bool tensor<T>::TDivRem (const tensor<T> & other, tensor<T> & quo, tensor<T> & rem, bool allowrational ) const {
    int asize=int((*this).coord.size());
    if (!asize){
      quo=*this;
      rem=*this;
      return true;
    }
    int bsize=int(other.coord.size());
    if (!bsize){
      quo.dim=dim; quo.coord.clear();
      rem=*this;
      return true;
    }
    index_m a_max = (*this).coord.front().index;
    index_m b_max = other.coord.front().index;
    quo.coord.clear();
    quo.dim=this->dim;
    rem.dim=this->dim;
    if (bsize==1){
      rem.coord.clear();
      T b=other.coord.front().value;
      if (b_max==b_max*0){
        if (is_one(b))
          quo = *this ;
        else {
          typename std::vector< monomial<T> >::const_iterator itend=coord.end();
          for (typename std::vector< monomial<T> >::const_iterator it=coord.begin();it!=itend;++it){
            T q=rdiv(it->value,b);
            if (!allowrational && has_denominator(q))
              return false;
            quo.coord.push_back(monomial<T>(q,it->index));
          }
        }
        return true;
      }
      typename std::vector< monomial<T> >::const_iterator itend=coord.end();
      typename std::vector< monomial<T> >::const_iterator it=coord.begin();
      for (;it!=itend;++it){
        if (!(it->index>=b_max))
          break;
        T q=rdiv(it->value,b);
        if (!allowrational && has_denominator(q))
          return false;
        quo.coord.push_back(monomial<T>(q,it->index-b_max));
      }
      rem.coord=std::vector< monomial<T> >(it,itend);
      return true;
    }
    rem=*this;
    if ( ! (a_max>=b_max) ){
      // test that the first power of a_max is less than that of b_max
      return (a_max.front()<b_max.front());
    }
    T b=other.coord.front().value;
    while (a_max >= b_max){
      // errors should be trapped here and false returned if an error occurred
      T q=rdiv(rem.coord.front().value,b);
      if (!allowrational){
        if ( has_denominator(q) ||
             (!is_zero(q*b - rem.coord.front().value)) )
          return false;
      }
      // end error trapping
      quo.coord.push_back(monomial<T>(q,a_max-b_max));
      const tensor<T> & temp=other.shift(a_max-b_max,q);
      rem.TSub(temp,rem); // rem = rem-temp;
      if (rem.coord.size())
        a_max=rem.coord.front().index;
      else
        break;
    }
    return(true);
  }
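  // Added comment: TPseudoDivRem below returns a = lc(other)^(m-n+1), where
  // m,n are the main-variable degrees of *this and other and lc the leading
  // coefficient, so that a*this = quo*other + rem without introducing fractions.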
  template <class T>
  bool tensor<T>::TPseudoDivRem (const tensor<T> & other, tensor<T> & quo, tensor<T> & rem, tensor<T> & a) const {
    int m=this->lexsorted_degree();
    int n=other.lexsorted_degree();
    a.coord.clear();
    a.coord.push_back(monomial<T>(T(1),a.dim));
    rem=*this;
    quo.coord.clear();
    if (m<n)
      return true;
    // a=Tpow(Tfirstcoeff(other),m-n+1);
    // return (a*(*this)).TDivRem1(other,quo,rem);
    index_m ishift(dim);
    tensor<T> b0(Tfirstcoeff(other));
    for (int i=m;i>=n;--i){
#if defined NSPIRE || defined(FXCG)
      a=a*b0;
      quo=quo*b0;
#else
#ifdef GIAC_VECTOR
      a.coord *= b0.coord; // a.coord = a.coord*b0.coord; // a=a*b0;
      quo.coord *= b0.coord; // quo.coord = quo.coord*b0.coord; // quo=quo*b0;
#else
      a *= b0; // a=a*b0
      quo *= b0; // quo=quo*b0
#endif // GIAC_VECTOR
#endif // NSPIRE
      typename std::vector< monomial<T> >::const_iterator it=rem.coord.begin(),itend=rem.coord.end();
      if (it==itend || it->index.front()!=i){
#if defined NSPIRE || defined(FXCG)
        rem=rem*b0;
#else
#ifdef GIAC_VECTOR
        rem.coord *= b0.coord; // rem.coord = rem.coord*b0.coord; // rem=rem*b0;
#else
        rem *= b0; // rem=rem*b0;
#endif
#endif // NSPIRE
        continue;
      }
      ishift.front()=i-n;
      const tensor<T> & rem0 = Tfirstcoeff(rem).shift(ishift);
      quo.append(rem0);
#if defined GIAC_VECTOR || defined NSPIRE
      rem.coord = rem.coord*b0.coord;
      tensor<T> tmp(rem0.dim);
      tmp.coord=rem0.coord*other.coord;
      rem.TSub(tmp,rem); // rem=rem*b0-rem0*other;
#else
      rem=rem*b0-rem0*other;
#endif
    }
    return true;
  }

  template <class T>
  T tensor<T>::constant_term () const {
    if (!((*this).coord.size()))
      return T(0);
#if 1
    if (sum_degree((*this).coord.back().index)==0)
      return (*this).coord.back().value;
    return T(0);
#else
    index_m i=(*this).coord.front().index*0;
    return (*this)(i);
#endif
  }

  template <class T>
  tensor<T> tensor<T>::shift (const index_m & i,const T & fois) const {
    tensor<T> res(dim,*this);
    res.coord.reserve(coord.size());
    Shift(this->coord,i,fois,res.coord);
    return res;
  }

  template <class T>
  tensor<T> tensor<T>::shift (const T & fois,const index_m & i) const {
    tensor<T> res(dim,*this);
    res.coord.reserve(coord.size());
    Shift(this->coord,fois,i,res.coord);
    return res;
  }

  template <class T>
  tensor<T> tensor<T>::shift (const index_m & i) const {
    tensor<T> res(dim);
    res.coord.reserve(coord.size());
    Shift(this->coord,i,res.coord);
    return res;
  }

  template <class T>
  void tensor<T>::reverse() {
    typename std::vector< monomial<T> >::const_iterator itend=coord.end();
    for (typename std::vector< monomial<T> >::iterator it=coord.begin();it!=itend;++it)
      (*it).reverse();
    this->tsort();
  }

  template <class T>
  void tensor<T>::reorder(const std::vector<int> & permutation) {
    typename std::vector< monomial<T> >::const_iterator itend=coord.end();
    for (typename std::vector< monomial<T> >::iterator it=coord.begin();it!=itend;++it)
      it->reorder(permutation);
    this->tsort();
  }

  template <class T>
  T tensor<T>::norm() const{
    T res( 0);
    typename std::vector< monomial<T> >::const_iterator itend=coord.end();
    for (typename std::vector< monomial<T> >::const_iterator it=coord.begin();it!=itend;++it)
      res=max(res,it->norm());
    return res;
  }

  template <class T>
  T Tcontent(const tensor<T> & p){
    return Content(p.coord);
  }

  template<class T>
  T Tppz(tensor<T> & p,bool divide=true){
    T n=Tcontent(p);
    if (divide)
      p /= n;
    return T(n);
  }
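  // Usage sketch (added; illustration only): T c=Tppz(p); divides p in place
  // by its content c (the gcd of its coefficients) and returns c; pass
  // divide=false to compute the content without modifying p.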
  template<class T>
  bool Tis_one(const tensor<T> &p){
    if (p.coord.size()!=1)
      return false;
    if (!is_one(p.coord.front().value))
      return false;
    const index_m & i = p.coord.front().index;
    index_t::const_iterator it=i.begin(),itend=i.end();
    for (;it!=itend ;++it){
      if ((*it)!=0)
        return false;
    }
    return true;
  }

  template <class T>
  bool Tis_constant(const tensor<T> & p){
    if (p.coord.size()!=1)
      return false;
    const index_m & i = p.coord.front().index;
    index_t::const_iterator it=i.begin(),itend=i.end();
    for (;it!=itend ;++it){
      if ((*it)!=0)
        return false;
    }
    return true;
  }

  template<class T>
  bool Tlistmax(const tensor<T> &p,T & n){
    n=T( 1);
    for (typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();it!=p.coord.end() ;++it){
      if (!(it->value.is_cinteger()))
        return false;
      n=max(n,linfnorm(it->value));
    }
    return true;
  }

  template<class T>
  void Tapply(const tensor<T> & p,T (*f)(const T &),tensor<T> & pgcd){
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin(),itend=p.coord.end();
    Apply(it,itend,f,pgcd.coord);
  }

  template<class T>
  tensor<T> Tapply(const tensor<T> &p,T (*f)(const T &)){
    tensor<T> res(p.dim);
    Tapply(p,f,res);
    return res;
  }

  template<class T>
  void Tlgcd(const tensor<T> & p,tensor<T> & pgcd){
    if (!p.dim){
      pgcd=p;
      return ;
    }
    if (Tis_one(pgcd))
      return;
    pgcd=pgcd.trunc1();
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();
    typename std::vector< monomial<T> >::const_iterator itend=p.coord.end();
    // typename std::vector< monomial<T> >::const_iterator itbegin=it;
    for (;it!=itend;){
      if (Tis_one(pgcd))
        break;
      pgcd=gcd(pgcd,Tnextcoeff<T>(it,itend));
    }
    if (pgcd.coord.empty()){
      index_m i;
      for (int j=0;j<p.dim;j++)
        i.push_back(0);
      pgcd.coord.push_back(monomial<T>(T(1),i));
      ++pgcd.dim;
    }
    else
      pgcd=pgcd.untrunc1();
  }

  template<class T>
  tensor<T> Tlgcd(const tensor<T> & p){
    if (p.dim==1){
      const T & c=Tcontent(p);
      return tensor<T>(c,1);
    }
    tensor<T> pgcd(p.dim); // pgcd=0
    Tlgcd(p,pgcd);
    return pgcd;
  }

  template<class T>
  void Tcommonlgcd(const tensor<T> & p,const tensor<T> & q,tensor<T> & pgcd){
    if (!p.dim){
      pgcd=p;
      return ;
    }
    pgcd=pgcd.trunc1();
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();
    typename std::vector< monomial<T> >::const_iterator itend=p.coord.end();
    typename std::vector< monomial<T> >::const_iterator jt=q.coord.begin();
    typename std::vector< monomial<T> >::const_iterator jtend=q.coord.end();
    for (;(it!=itend) && (jt!=jtend);){
      if (Tis_one(pgcd))
        break;
      pgcd=gcd(pgcd,Tnextcoeff<T>(it,itend));
      pgcd=gcd(pgcd,Tnextcoeff<T>(jt,jtend));
    }
    for (;(it!=itend);){
      if (Tis_one(pgcd))
        break;
      pgcd=gcd(pgcd,Tnextcoeff<T>(it,itend));
    }
    for (;(jt!=jtend);){
      if (Tis_one(pgcd))
        break;
      pgcd=gcd(pgcd,Tnextcoeff<T>(jt,jtend));
    }
    if (pgcd.coord.empty()){
      index_m i;
      for (int j=0;j<p.dim;j++)
        i.push_back(0);
      pgcd.coord.push_back(monomial<T>(T(1),i));
    }
    else
      pgcd=pgcd.untrunc1();
  }

  // collect all terms with the same first power (in the main variable)
  // as the leading term of p
  template<class T>
  tensor<T> Tcarcomp(const tensor<T> & p){
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();
    int n=it->index.front();
    tensor<T> res(p.dim);
    for (;n==it->index.front();++it)
      res.coord.push_back(monomial<T>(it->value,it->index));
    return res;
  }

  template<class T>
  tensor<T> Tfirstcoeff(const tensor<T> & p){
    typename std::vector< monomial<T> >::const_iterator it=p.coord.begin();
    typename std::vector< monomial<T> >::const_iterator itend=p.coord.end();
    if (it==itend)
      return p;
    int n=it->index.front();
    tensor<T> res(p.dim);
    for (;(it!=itend) && (n==it->index.front());++it)
      res.coord.push_back(monomial<T>(it->value,it->index.set_first_zero()));
    return res;
  }
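  // Added comment: Tfirstcoeff above returns the leading coefficient of p
  // w.r.t. the main variable, still embedded in dim variables (the main
  // exponent of each kept term is reset to 0 by set_first_zero()).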
  template<class T>
  void Tswap(tensor<T> * & a, tensor<T> * & b){
    tensor<T> * c = a;
    a = b ;
    b = c ;
  }

  // polynomial subresultant: compute the content and primitive part of the GCD
  // with respect to the other vars
  // gcddeg!=0 if we know the probable degree of the gcd
  template<class T>
  void Tcontentgcd(const tensor<T> &p, const tensor<T> & q, tensor<T> & cont,tensor<T> & prim,int gcddeg){
    if (p.coord.empty()){
      cont=Tlgcd(q);
      prim=q/Tlgcd(q);
      return ;
    }
    if (q.coord.empty()){
      cont=Tlgcd(p);
      prim=p/Tlgcd(p);
      return;
    }
    assert(p.dim==q.dim);
    // set auxiliary polynomials g and h to 1
    tensor<T> g(T(1),p.dim);
    tensor<T> h(g);
    // dp and dq are the "content" of p and q w.r.t. the other variables
    tensor<T> dp(Tlgcd(p));
    tensor<T> dq(Tlgcd(q));
    cont=gcd(dp.trunc1(),dq.trunc1()).untrunc1();
    if (!p.dim){
      prim=tensor<T>(T(1),0);
      return ;
    }
    // COUT << "Cont" << cont << '\n';
    tensor<T> a(p.dim),b(p.dim),quo(p.dim),r(p.dim),tmp(p.dim);
    // a and b are the primitive parts of p and q
    p.TDivRem1(dp,a,r,true);
    q.TDivRem1(dq,b,r,true);
    while (
           !a.coord.empty()
           && !ctrl_c && !interrupted
           ){
      int n=b.lexsorted_degree();
      int m=a.lexsorted_degree();
      if (!n) {// if b is constant (then b!=0), gcd=original Tlgcd
        prim=tensor<T>(T(1),p.dim);
        return ;
      }
      if (n==gcddeg){
        b.TDivRem1(Tlgcd(b),prim,r,true);
        if (a.Texactquotient(prim,r,true))
          return;
      }
      int ddeg=m-n;
      if (ddeg<0){
#if defined RTOS_THREADX || defined BESTA_OS || defined USTL
        tensor<T> t(a); a=b; b=t;
#else
        swap(a,b); // the exchange a<->b may occur only at the beginning
#endif
      }
      else {
        tensor<T> b0(Tfirstcoeff(b));
        a.TPseudoDivRem(b,quo,r,tmp);
        // (a*Tpow(b0,ddeg+1)).TDivRem1(b,quo,r); // division always works
        if (r.coord.empty())
          break;
        // remainder is nonzero, the loop continues: a <- b
        a=b;
        tensor<T> temp(Tpow(h,ddeg));
        // now divide r by g*h^(m-n), the result is the new b
        r.TDivRem1(g*temp,b,quo); // quo is the remainder here, not used
        // new g=b0 and new h=b0^(m-n)*h/temp
        if (ddeg==1) // the normal case, remainder deg. decreases by 1 each time
          h=b0;
        else // not sure if it's better to keep temp or divide by h^(m-n+1)
          (Tpow(b0,ddeg)*h).TDivRem1(temp,h,quo);
        g=b0;
      }
    }
    // COUT << "Prim" << b << '\n';
    b.TDivRem1(Tlgcd(b),prim,r,true);
  }
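  // Note (added): on return gcd(p,q) = cont*prim up to units (see Tgcdpsr
  // below, which returns prim*cont), cont collecting the gcd of the contents
  // w.r.t. the main variable.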
  template<class T>
  void Tsturm_seq(const giac::tensor<T> &p, giac::tensor<T> & cont,std::vector< tensor<T> > & sturm_seq){
    sturm_seq=std::vector< tensor<T> > (1,p);
    if (p.coord.empty()){
      cont=p;
      return ;
    }
    // set auxiliary polynomials g and h to 1
    tensor<T> g(T(1),p.dim);
    tensor<T> h(g);
    // cont is the "content" of p w.r.t. the other variables
    cont=Tlgcd(p);
    if (!p.dim)
      return ;
    // COUT << "Cont" << cont << '\n';
    tensor<T> a(p.dim),b(p.dim),quo(p.dim),r(p.dim),tmp(p.dim);
    tensor<T> b0(g);
    std::vector< tensor<T> > sign_error(2,g);
    // a is the primitive part of p
    p.TDivRem1(cont,a,r);
    b=p.derivative();
    sturm_seq.push_back(b);
    for (int loop_counter=0;!a.coord.empty();++loop_counter){
      int m=a.lexsorted_degree();
      int n=b.lexsorted_degree();
      int ddeg=m-n; // should be 1 generically
      if (!n) {// if b is constant (then b!=0), gcd=original Tlgcd
        return ;
      }
      b0=Tfirstcoeff(b);
      a.TPseudoDivRem(b,quo,r,tmp);
      // (a*Tpow(b0,ddeg+1)).TDivRem1(b,quo,r); // division always works
      if (r.coord.empty())
        break;
      // remainder is nonzero, the loop continues: a <- b
      a=b;
      tensor<T> temp(Tpow(h,ddeg));
      // now divide r by g*h^(m-n) and change the sign, the result is the new b
      r.TDivRem1(-g*temp,b,quo); // quo is the remainder here, not used
      // Now save b in the Sturm sequence *with sign adjustment*
      // Since we have -g*h^ddeg*r=b0^(ddeg+1)*a % previous b
      // instead of -r=a % previous b for the correct sign,
      // the sign error on b does not change the sign error on r
      // if ddeg is odd, the sign error on r is the sign error on a*sign(g*h)
      // if ddeg is even, the sign error on r is the sign error on a*sign(b0*g)
      // Note that if ddeg is odd and the previous ddeg was generic = 1
      // we do not change the sign error at all
      // Note that the sign errors propagate modulo 2 on the indices
      // of the Sturm sequence, hence the vector of sign errors of length 2
      if ( ( (ddeg %2)!=0 ) && (h!=g) )
        sign_error[loop_counter %2] = sign_error[loop_counter %2] * h * g ;
      if ( ( (ddeg %2)==0) && (b0!=g) )
        sign_error[loop_counter %2] = sign_error[loop_counter % 2] * b0 * g;
      sturm_seq.push_back(b*sign_error[loop_counter %2]);
      // new g=b0 and new h=b0^(m-n)*h/temp
      if (ddeg==1) // the normal case, remainder deg. decreases by 1 each time
        h=b0;
      else // not sure if it's better to keep temp or divide by h^(m-n+1)
        (Tpow(b0,ddeg)*h).TDivRem1(temp,h,quo);
      g=b0;
    } // end while loop
  }
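  // Usage sketch (added; illustration only):
  //   std::vector< tensor<T> > seq; tensor<T> cont(p.dim);
  //   Tsturm_seq(p,cont,seq); // seq[0]=p, seq[1]=p', then the sign-adjusted
  //                           // pseudo-remainders down to a constant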
  // polynomial sub-resultant: return the gcd only
  template<class T>
  tensor<T> Tgcdpsr(const tensor<T> & p, const tensor<T> & q,int gcddeg=0){
    tensor<T> prim(p.dim),cont(p.dim);
    Tcontentgcd(p,q,prim,cont,gcddeg);
    // COUT << "Prim" << prim << "Cont" << cont << '\n';
    return prim*cont;
  }

  template<class T>
  tensor<T> Tresultant(const tensor<T> &p, const tensor<T> & q){
    assert(p.dim==q.dim);
    if (p.coord.empty())
      return p;
    if (q.coord.empty())
      return q;
    int m=p.lexsorted_degree();
    int n=q.lexsorted_degree();
    int sign=1;
    tensor<T> ptmp(p),qtmp(q);
    if (n > m) {
      if ( (n*m) % 2)
        sign=-1;
      int tmpint=n; n=m; m=tmpint;
#if defined RTOS_THREADX || defined BESTA_OS || defined USTL
      ptmp=q; qtmp=p; // swap(ptmp,qtmp);
#else
      swap(ptmp,qtmp);
#endif
    }
    // degree(qtmp)=n <= degree(ptmp)=m
    if (!n) // q is cst
      return pow(qtmp,m)*T(sign);
    int ddeg;
    T cp(ppz(ptmp)),cq(ppz(qtmp));
    T res(pow(cq,m)*pow(cp,n));
    tensor<T> g(T(1),p.dim), h(T(1),p.dim);
    while (n){
      if (m*n %2)
        sign=-sign;
      ddeg=m-n;
      if (debug_infolevel)
        CERR << CLOCK() << "Tresultant n,m,ddeg: " << n << " ," << m << " ," << ddeg << '\n';
      tensor<T> tmp1(Tfirstcoeff(qtmp)),tmp2(p.dim),tmp3(pow(h,ddeg)),rem(p.dim),a(p.dim);
      ptmp.TPseudoDivRem(qtmp,tmp2,rem,a); // (ptmp*pow(tmp1,ddeg+1)).TDivRem1(qtmp,tmp2,rem,false);
      rem.high_order_degree_truncate(n);
      ptmp=qtmp;
      m=n;
      qtmp=(rem/g)/tmp3; // qtmp=rem/(g*tmp3);
      n=qtmp.lexsorted_degree();
      if (ddeg==1)
        h=tmp1;
      else
        h=(h*pow(tmp1,ddeg))/tmp3;
      g=tmp1;
    }
    // q is cst or zero
    if (qtmp.coord.empty())
      return tensor<T>(T(0),p.dim);
    m=ptmp.lexsorted_degree();
    return (pow(qtmp,m)/pow(h,m-1))*(res*T(sign));
  }
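  // Note (added): Tresultant eliminates the main variable (the result has
  // degree 0 in it); the (-1)^(m*n) sign bookkeeping compensates for swapping
  // p and q when deg(p) < deg(q).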
  // Bézout identity
  // given p and q, find u and v s.t. u*p+v*q=d where d=gcd(p,q), using the PSR algorithm
  // Iterative algorithm to find u and d, then v=(d-u*p)/q
  template<class T>
  void Tegcdpsr(const tensor<T> & p1,const tensor<T> & p2,tensor<T> & u,tensor<T> & v,tensor<T> & d){
    assert(p1.dim==p2.dim);
    // set auxiliary polynomials g and h to 1
    tensor<T> g(T(1),p1.dim);
    tensor<T> h(g);
    tensor<T> a(p1.dim),b(p1.dim),q(p1.dim),r(p1.dim);
    const tensor<T> & cp1=Tlgcd(p1);
    const tensor<T> & cp2=Tlgcd(p2);
    bool Tswapped=false;
    if (p1.lexsorted_degree()<p2.lexsorted_degree())
      Tswapped=true;
    // initialize a and b to p1, p2
    const tensor<T> & pp1=Tis_one(cp1)?p1:p1/cp1;
    const tensor<T> & pp2=Tis_one(cp2)?p2:p2/cp2;
    if (Tswapped){
      a=pp2;
      b=pp1;
    }
    else {
      a=pp1;
      b=pp2;
    }
    // initialize ua to 1 and ub to 0, the coefficient of u in ua*a+va*b=a
    tensor<T> ua(T(1),p1.dim),ub(p1.dim),ur(p1.dim);
    tensor<T> b0pow(p1.dim);
    // loop: ddeg <- deg(a)-deg(b),
    // TDivRem: b0^(ddeg+1)*a = b*q+r
    // hence ur <- ua*b0^(ddeg+1)-q*ub verifies
    // ur*a+vr*b=r
    // a <- b, b <- r/(g*h^ddeg), ua <- ub and ub <- ur/(g*h^ddeg)
    // g <- b0, h <- b0^(m-n) * h / h^ddeg
    for (;;){
      int n=b.lexsorted_degree();
      int m=a.lexsorted_degree();
      if (!n){ // b is a nonzero constant, hence the gcd; ub is valid
        break;
      }
      int ddeg=m-n;
      const tensor<T> & b0=Tfirstcoeff(b);
      // b0pow=Tpow(b0,ddeg+1);
      // (a*b0pow).TDivRem1(b,q,r); // division always works
      a.TPseudoDivRem(b,q,r,b0pow);
      // if r is 0 then b is the gcd and ub the coefficient
      if (r.coord.empty())
        break;
      // COUT << ua*b0pow << '\n' << q*ub << '\n' ;
      (ua*b0pow).TSub(q*ub,ur); // ur=ua*b0pow-q*ub;
      // COUT << ur << '\n';
#if defined RTOS_THREADX || defined BESTA_OS || defined USTL
      a=b;
#else
      swap(a,b); // a=b
#endif
      const tensor<T> & temp=Tpow(h,ddeg);
      // now divide r by g*h^(m-n), the result is the new b
      r.TDivRem1(g*temp,b,q); // q is not used anymore
#if defined RTOS_THREADX || defined BESTA_OS || defined USTL
      ua=ub;
#else
      swap(ua,ub); // ua=ub
#endif
      ur.TDivRem1(g*temp,ub,q);
      // COUT << (b-ub*p1) << "/" << p2 << '\n';
      // new g=b0 and new h=b0^(m-n)*h/temp
      if (ddeg==1) // the normal case, remainder deg. decreases by 1 each time
        h=b0;
      else // not sure if it's better to keep temp or divide by h^(m-n+1)
        (Tpow(b0,ddeg)*h).TDivRem1(temp,h,q);
      g=b0;
    }
    // ub is valid and b is the gcd, vb=(b-ub*p1)/p2 if not Tswapped
    // vb is stored in ua
    // COUT << ub << '\n';
    if (Tswapped){
      (b-ub*pp2).TDivRem1(pp1,ua,r);
      ua *= cp2; // ua=ua*cp2;
      ub *= cp1; // ub=ub*cp1;
      b *= cp1; b *= cp2; // b=b*cp1*cp2;
    }
    else {
      (b-ub*pp1).TDivRem1(pp2,ua,r);
      ua *= cp1; // ua=ua*cp1;
      ub *= cp2; // ub=ub*cp2;
      b *= cp1; b *= cp2; // b=b*cp1*cp2;
    }
    // final simplifications
    q.coord.clear();
    Tlgcd(b,q); // q=Tlgcd(b);
    Tlgcd(ua,q);
    Tlgcd(ub,q);
    b.TDivRem1(q,d,r,true); // d=b/Tlgcd
    if (Tswapped){
      ub.TDivRem1(q,v,r,true); // v=ub/Tlgcd
      ua.TDivRem1(q,u,r,true); // u=ua/Tlgcd
    }
    else {
      ub.TDivRem1(q,u,r,true); // u=ub/Tlgcd
      ua.TDivRem1(q,v,r,true); // v=ua/Tlgcd
    }
  }
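  // Illustrative sketch (hypothetical helper, not part of giac): verify the
  // Bézout relation returned by Tegcdpsr. For p1=x and p2=x-1 over a field
  // one expects d=1 with u=1, v=-1, since 1*x+(-1)*(x-1)=1.
  template<class T>
  bool example_check_egcdpsr(const tensor<T> & p1,const tensor<T> & p2){
    tensor<T> u(p1.dim),v(p1.dim),d(p1.dim);
    Tegcdpsr(p1,p2,u,v,d);    // u*p1+v*p2=d
    tensor<T> s(u*p1+v*p2-d); // should vanish identically
    return s.coord.empty();   // empty coord <=> zero polynomial
  }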
  // seems interesting in dimension 1 only
  template<class T>
  void TegcdTlgcd(const tensor<T> & p1,const tensor<T> & p2,tensor<T> & u,tensor<T> & v,tensor<T> & d){
    assert(p1.dim==p2.dim);
    // p1=cp1*a, p2=cp2*b
    // a*u+b*v=d
    // hence, multiplying by cp1*cp2: p1*(u*cp2)+p2*(v*cp1)=d*cp1*cp2
    tensor<T> cp1(Tlgcd(p1)),cp2(Tlgcd(p2)),pp1(p1/cp1),pp2(p2/cp2);
    tensor<T> a(p1.dim),b(p1.dim),q(p1.dim),r(p1.dim);
    bool Tswapped=false;
    if (p1.lexsorted_degree()<p2.lexsorted_degree())
      Tswapped=true;
    // initialize a and b to p1, p2
    if (Tswapped){
      a=pp2;
      b=pp1;
    }
    else {
      a=pp1;
      b=pp2;
    }
    // initialize ua to 1 and ub to 0, the coefficient of u in ua*a+va*b=a
    tensor<T> ua(T(1),p1.dim),ub(p1.dim),ur(p1.dim);
    // loop: ddeg <- deg(a)-deg(b),
    // TDivRem: b0^(ddeg+1)*a = b*q+r
    // hence ur <- ua*b0^(ddeg+1)-q*ub verifies
    // ur*a+vr*b=r
    // divide r and ur by their common Tlgcd
    // a <- b, b <- r/Tlgcd, ua <- ub and ub <- ur/Tlgcd
    for (;;){
      int n=b.lexsorted_degree();
      // int m=a.lexsorted_degree();
      if (!n){ // b is a nonzero constant, hence the gcd; ub is valid
        break;
      }
      // int ddeg=m-n;
      tensor<T> b0(Tfirstcoeff(b)),b0pow(b0.dim);
      // b0pow=Tpow(b0,ddeg+1);
      // (a*b0pow).TDivRem1(b,q,r,true); // division always works
      a.TPseudoDivRem(b,q,r,b0pow);
      // if r is 0 then b is the gcd and ub the coefficient
      if (r.coord.empty())
        break;
      ur=ua*b0pow-q*ub;
      a=b;
      tensor<T> pgcd(Tlgcd(ur));
      Tlgcd(r,pgcd);
      // now divide r by pgcd, the result is the new b
      r.TDivRem1(pgcd,b,q,true); // q is not used anymore
      ua=ub;
      ur.TDivRem1(pgcd,ub,q,true);
    }
    // ub is valid and b is the gcd, vb=(b-ub*p1)/p2 if not Tswapped
    // vb is stored in ua
    // COUT << ub << '\n';
    if (Tswapped){
      q=b-ub*pp2;
      tensor<T> b0(Tfirstcoeff(pp1));
      int m=q.lexsorted_degree();
      int n=pp1.lexsorted_degree();
      tensor<T> b0pow(Tpow(b0,m-n+1));
      b=b*b0pow;
      ub=ub*b0pow;
      q=q*b0pow;
      q.TDivRem1(pp1,ua,r,true);
      ua=ua*cp2;
      ub=ub*cp1;
      b=b*cp1*cp2;
    }
    else {
      // first multiply b and ub by p1_max_deg^# before doing the division
      q=b-ub*pp1;
      tensor<T> b0(Tfirstcoeff(pp2));
      int m=q.lexsorted_degree();
      int n=pp2.lexsorted_degree();
      tensor<T> b0pow(Tpow(b0,m-n+1));
      b=b*b0pow;
      ub=ub*b0pow;
      q=q*b0pow;
      q.TDivRem1(pp2,ua,r,true);
      ua=ua*cp1;
      ub=ub*cp2;
      b=b*cp1*cp2;
    }
    // final simplifications
    q=Tlgcd(ua);
    Tlgcd(ub,q);
    Tlgcd(b,q);
    b.TDivRem1(q,d,r,true); // d=b/Tlgcd
    if (Tswapped){
      ua.TDivRem1(q,u,r,true); // u=ua/Tlgcd
      ub.TDivRem1(q,v,r,true); // v=ub/Tlgcd
    }
    else {
      ub.TDivRem1(q,u,r,true); // u=ub/Tlgcd
      ua.TDivRem1(q,v,r,true); // v=ua/Tlgcd
    }
  }

  // utility for Bézout identity solving
  template<class T>
  void Tegcdtoabcuv(const tensor<T> & a,const tensor<T> & b,const tensor<T> & c,tensor<T> & u,tensor<T> & v,tensor<T> & d,tensor<T> & C){
    tensor<T> d0(Tfirstcoeff(d));
    int m=c.lexsorted_degree();
    int n=d.lexsorted_degree();
    assert(m>=n); // the degree of c must be at least the degree of d
    // IMPROVE if n==0
    C=Tpow(d0,m-n+1);
    tensor<T> coverd(a.dim),temp(a.dim);
    (c*C).TDivRem1(d,coverd,temp);
    assert(temp.coord.empty()); // the division of c by d must be exact
    // now multiply a*u+b*v=d by coverd -> a*u*coverd+b*v*coverd=c*d0pow
    u *= coverd; // u=u*coverd;
    v *= coverd; // v=v*coverd;
    m=u.lexsorted_degree();
    n=b.lexsorted_degree();
    if (m<n)
      return;
    // then reduce the degree of u, a*u+b*v=c*C
    d0=Tpow(Tfirstcoeff(b),m-n+1);
    C *= d0; // C=C*d0;
    // now a*u*d0+b*v*d0=c*C
    (u*d0).TDivRem1(b,temp,u); // replace u*d0 -> temp*b+u
    // a*u + b*(a*temp+v*d0) = c*C
    v=a*temp+v*d0;
    return ;
  }

  // given a, b, c, find u, v and a constant C s.t. C*c=a*u+b*v and degree(u)<degree(b)
  // requires an egcd implementation, for example Tegcdpsr above
  template<class T>
  void Tabcuv(const tensor<T> & a,const tensor<T> & b,const tensor<T> & c,tensor<T> & u,tensor<T> & v,tensor<T> & C){
    tensor<T> d(a.dim);
    Tegcdpsr(a,b,u,v,d); // a*u+b*v=d
    Tegcdtoabcuv(a,b,c,u,v,d,C);
  }
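  // Illustrative sketch (hypothetical helper): Tabcuv solves C*c=a*u+b*v
  // with deg(u)<deg(b) whenever gcd(a,b) divides c. For a=x, b=x-1, c=1
  // one expects u=1, v=-1 and the constant C=1, since 1*x+(-1)*(x-1)=1.
  template<class T>
  bool example_check_abcuv(const tensor<T> & a,const tensor<T> & b,const tensor<T> & c){
    tensor<T> u(a.dim),v(a.dim),C(a.dim);
    Tabcuv(a,b,c,u,v,C);       // C*c=a*u+b*v
    tensor<T> s(a*u+b*v-C*c);  // should vanish identically
    return s.coord.empty() && u.lexsorted_degree()<b.lexsorted_degree();
  }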
  template<class T>
  tensor<T> tensor<T>::derivative() const{
    if (coord.empty())
      return *this;
    tensor<T> res(dim);
    if (dim==0)
      return res;
    res.coord.reserve(coord.size());
    typename std::vector< monomial<T> >::const_iterator itend=coord.end();
    T tmp;
    for (typename std::vector< monomial<T> >::const_iterator it=coord.begin();it!=itend;++it){
      index_t i=it->index.iref();
      T n(i.front());
      i[0]--;
      tmp=it->value*n;
      if (!is_zero(tmp))
        res.coord.push_back(monomial<T>(tmp,i));
    }
    return res;
  }

  template<class T>
  tensor<T> tensor<T>::integrate() const{
    if (coord.empty())
      return *this;
    tensor<T> res(dim);
    res.coord.reserve(coord.size());
    typename std::vector< monomial<T> >::const_iterator itend=coord.end();
    for (typename std::vector< monomial<T> >::const_iterator it=coord.begin();it!=itend;++it){
      index_t i=it->index.iref();
      T n(i.front()+1);
      i[0]++;
      if (!is_zero(n))
        res.coord.push_back(monomial<T>(rdiv(it->value,n),i));
    }
    return res;
  }
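  // Illustrative sketch (hypothetical helper): in characteristic 0,
  // differentiating the antiderivative restores the polynomial, e.g.
  // p=6x gives p.integrate()=3x^2 and (3x^2).derivative()=6x again.
  // (The converse round trip loses the constant term, since integrate()
  // adds no integration constant.)
  template<class T>
  bool example_check_integrate(const tensor<T> & p){
    tensor<T> s(p.integrate().derivative()-p);
    return s.coord.empty(); // true whenever T has characteristic 0
  }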
  template<class T>
  void Tfracadd(const tensor<T> & n1,const tensor<T> & d1,const tensor<T> & n2,const tensor<T> & d2,tensor<T> & num,tensor<T> & den){
    // COUT << n1 << "/" << d1 << "+" << n2 << "/" << d2 << "=";
    if (Tis_one(d1)){
      n2.TAdd(n1*d2,num); // num=n1*d2+n2;
      den=d2;
      // COUT << num << "/" << den << '\n';
      return;
    }
    if (Tis_one(d2)){
      n1.TAdd(n2*d1,num); // num=n2*d1+n1;
      den=d1;
      // COUT << num << "/" << den << '\n';
      return;
    }
    // n1/d1+n2/d2 with g=gcd(d1,d2), d1=d1g*g, d2=d2g*g is
    // (n1*d2g+n2*d1g)/g * 1/(d1g*d2g)
    tensor<T> d1g(d1),d2g(d2);
    den=simplify(d1g,d2g);
    (n1*d2g).TAdd(n2*d1g,num);
    simplify(num,den);
    den=den*d1g*d2g;

    /*
    (n1*d2).TAdd(n2*d1,num); // num=n1*d2+n2*d1;
    den=d1*d2;
    simplify(num,den);
    */
  }

  template<class T>
  void Tfracmul(const tensor<T> & n1,const tensor<T> & d1,const tensor<T> & n2,const tensor<T> & d2,tensor<T> & num,tensor<T> & den){
    // COUT << n1 << "/" << d1 << "*" << n2 << "/" << d2 << "=";
    if (Tis_one(d1)){
      num=n1;
      den=d2;
      simplify(num,den);
      num=num*n2;
      // COUT << num << "/" << den << '\n';
      return;
    }
    if (Tis_one(d2)){
      num=n2;
      den=d1;
      simplify(num,den);
      num=num*n1;
      // COUT << num << "/" << den << '\n';
      return;
    }
    num=n1;
    den=d2;
    simplify(num,den);
    tensor<T> ntemp(n2),dtemp(d1);
    simplify(ntemp,dtemp);
    num=num*ntemp;
    den=den*dtemp;
    // COUT << num << "/" << den << '\n';
  }

  /*
  template<class T>
  std::ostream & operator << (std::ostream & os,const std::vector< facteur<T> > & v){
    std::vector< facteur<T> >::const_iterator itend=v.end();
    for (std::vector< facteur<T> >::const_iterator it=v.begin();;){
      os << *it ;
      ++it;
      if (it==itend)
        break;
      else
        os << "*";
    }
    return os;
  }
  */

  template<class T>
  T Tproduct(const std::vector< facteur<T> > & v){
    assert(!v.empty());
    typename std::vector< facteur<T> >::const_iterator it=v.begin();
    typename std::vector< facteur<T> >::const_iterator itend=v.end();
    T prod(Tpow(it->fact,it->mult));
    ++it;
    for (;it!=itend;++it)
      prod *= it->mult==1?it->fact:Tpow(it->fact,it->mult); // prod=prod*Tpow(it->fact,it->mult);
    return prod;
  }

  template<class T>
  T Tproduct(typename std::vector< facteur<T> >::const_iterator it,
             typename std::vector< facteur<T> >::const_iterator itend){
    assert(it!=itend);
    T prod(Tpow(it->fact,it->mult));
    ++it;
    for (;it!=itend;++it)
      prod *= it->mult==1?it->fact:Tpow(it->fact,it->mult); // prod=prod*Tpow(it->fact,it->mult);
    return prod;
  }

  // square-free factorization of a polynomial
  // T must be a field of characteristic 0
  template<class T>
  std::vector< facteur< tensor<T> > > Tsqff_char0(const tensor<T> & p){
    tensor<T> y(p.derivative()),w(p);
    tensor<T> c(simplify(w,y));
    // p=p_1*p_2^2*...*p_n^n, c=gcd(p,p')=p_2*...*p_n^(n-1),
    // w=p/c=pi_i p_i, y=p'/c=sum_{i>=1} i*p_i' * pi_{j!=i} p_j
    y.TSub(w.derivative(),y); // y=y-w.derivative(); // y=sum_{i>=2} (i-1)*p_i' * pi_{j!=i} p_j
    std::vector< facteur< tensor<T> > > v;
    int k=1; // multiplicity counter
    while (!y.coord.empty()){
      // y=sum_{i>=k+1} (i-k) p_i' * pi_{j!=i, j>=k} p_j
      const tensor<T> & g=simplify(w,y);
      if (!Tis_one(g))
        v.push_back(facteur< tensor<T> >(g,k));
      // this pushes p_k; now w=pi_{i>=k+1} p_i and
      // y=sum_{i>=k+1} (i-k) p_i' * pi_{j!=i, j>=k+1} p_j
      y.TSub(w.derivative(),y); // y=y-w.derivative();
      // y=sum_{i>=k+1} (i-(k+1)) p_i' * pi_{j!=i, j>=k+1} p_j
      k++;
    }
    if (!Tis_one(w))
      v.push_back(facteur< tensor<T> >(w,k));
    return v;
  }
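  // Illustrative sketch (hypothetical helper): for p=(x-1)^2*(x+2) over a
  // field of characteristic 0, Tsqff_char0(p) returns the factors
  // {(x+2,1),(x-1,2)}, by increasing multiplicity; Tproduct then rebuilds
  // p, up to a constant when p is not primitive/monic.
  template<class T>
  tensor<T> example_sqff_roundtrip(const tensor<T> & p){
    std::vector< facteur< tensor<T> > > v(Tsqff_char0(p));
    return Tproduct(v); // equals p up to a constant factor
  }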
  // class for partial fraction decomposition
  template<class T>
  class pf{
  public:
    tensor<T> num;
    tensor<T> fact;
    tensor<T> den;
    int mult; // den=cste*fact^mult
    pf(): num(),fact(),den(),mult(0) {}
    pf(const pf & a) : num(a.num), fact(a.fact), den(a.den), mult(a.mult) {}
    pf(const tensor<T> & n,const tensor<T> & d,const tensor<T> & f,int m) : num(n), fact(f), den(d), mult(m) {}
  };

  template<class T>
  std::ostream & operator << (std::ostream & os,const pf<T> & v){
    os << v.num << "/" << v.den ;
    return os;
  }

#ifdef NSPIRE
  template<class T,class I>
  nio::ios_base<I> & operator << (nio::ios_base<I> & os,const std::vector< pf<T> > & v){
    if (v.empty()) // avoid dereferencing begin() of an empty vector
      return os;
    typename std::vector< pf<T> >::const_iterator itend=v.end();
    for (typename std::vector< pf<T> >::const_iterator it=v.begin();;){
      os << *it ;
      ++it;
      if (it==itend)
        break;
      else
        os << "+";
    }
    return os;
  }
#else
  template<class T>
  std::ostream & operator << (std::ostream & os,const std::vector< pf<T> > & v){
    if (v.empty()) // avoid dereferencing begin() of an empty vector
      return os;
    typename std::vector< pf<T> >::const_iterator itend=v.end();
    for (typename std::vector< pf<T> >::const_iterator it=v.begin();;){
      os << *it ;
      ++it;
      if (it==itend)
        break;
      else
        os << "+";
    }
    return os;
  }
#endif

  template<class T>
  tensor<T> TsimplifybyTlgcd(tensor<T> & a,tensor<T> & b){
    const tensor<T> & Tlgcdg=gcd(Tlgcd(a),Tlgcd(b));
    if (!Tis_one(Tlgcdg)){
      a=a/Tlgcdg;
      b=b/Tlgcdg;
    }
    return Tlgcdg;
  }

  // utility for Tpartfrac, see below
  template<class T>
  void Tpartfrac(const tensor<T> & num,const tensor<T> & den,/* const tensor<T> & dendiff ,*/ const std::vector< facteur< tensor<T> > > & w,int n,int m,std::vector< pf<T> > & pfdecomp){
    if (m==n)
      return;
    if (m-n==1){
      tensor<T> nums(num),dens(den);
      TsimplifybyTlgcd(nums,dens);
      pfdecomp.push_back(pf<T>(nums,dens,w[n].fact,w[n].mult));
      return ;
    }
    typename std::vector< facteur< tensor<T> > >::const_iterator it=w.begin()+n; // &w[n];
    typename std::vector< facteur< tensor<T> > >::const_iterator it_end=w.begin()+m; // &w[m];
    /*
    // check if all factors are of degree 1 and multiplicity 1
    for (;it!=it_end;++it){
      if (it->mult!=1 || it->fact.lexsorted_degree()!=1)
        break;
    }
    if (it==it_end){
      // add rem(num,it->fact)/rem(dendiff,it->fact) / it->fact for all factors
      it=w.begin()+n;
      for (;it!=it_end;++it){

      }
    }
    */
    // split w in 2 parts, then apply Tpartfrac recursively on each part
    it=w.begin()+n;
    int p=(m+n)/2;
    typename std::vector< facteur< tensor<T> > >::const_iterator it_milieu=w.begin()+p; // &w[p];
    const tensor<T> & fn=Tproduct< tensor<T> >(it,it_milieu);
    const tensor<T> & fm=Tproduct< tensor<T> >(it_milieu,it_end);
    // write C*num=u*fn+v*fm
    tensor<T> C(den.dim),u(den.dim),v(den.dim);
    Tabcuv(fn,fm,num,u,v,C);
    C=C*(den/(fn*fm));
    // num/den = (u*fn+v*fm)/(C*fn*fm) = u/(C*fm)+v/(C*fn)
    Tpartfrac(v,C*fn,w,n,p,pfdecomp);
    Tpartfrac(u,C*fm,w,p,m,pfdecomp);
  }

  // for num/den with den=cste*pi_i d_i^{mult_i},
  // Tpartfrac rewrites num/den as
  // ipnum/ipden + sum_i pfi.num/pfi.den with pfi.den=cste*pfi.fact^pfi.mult
  // num and den are assumed to be coprime
  template<class T>
  void Tpartfrac(const tensor<T> & num,const tensor<T> & den,const std::vector< facteur< tensor<T> > > & v,std::vector< pf<T> > & pfdecomp,tensor<T> & ipnum,tensor<T> & ipden){
    int n=int(v.size());
    pfdecomp.reserve(n);
    // compute the integral part and call Tpartfrac
    tensor<T> rem(num.dim);
    num.TPseudoDivRem(den,ipnum,rem,ipden);
    // ipden*num=den*ipnum+rem hence num/den=ipnum/ipden+rem/(ipden*den)
    const tensor<T> & temp=ipden*den;
    // simplify(rem,temp);
    if (n==1)
      pfdecomp.push_back(pf<T>(rem,temp,v.front().fact,v.front().mult));
    else
      Tpartfrac(rem,temp,/* temp.derivative() ,*/ v,0,n,pfdecomp);
  }
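  // Illustrative sketch (hypothetical helper): decompose num/den given the
  // square-free factorization v of den. For num=1, den=(x-1)*(x+1) with
  // v={(x-1,1),(x+1,1)}, Tabcuv yields 2*1=(x-1)*(-1)+(x+1)*1, hence
  // 1/(x^2-1)=1/(2*(x-1))-1/(2*(x+1)) and the integral part ipnum/ipden is 0.
  template<class T>
  std::vector< pf<T> > example_partfrac(const tensor<T> & num,const tensor<T> & den,const std::vector< facteur< tensor<T> > > & v){
    std::vector< pf<T> > pfdecomp;
    tensor<T> ipnum(num.dim),ipden(num.dim);
    Tpartfrac(num,den,v,pfdecomp,ipnum,ipden);
    // each term satisfies pfdecomp[i].den=cste*pfdecomp[i].fact^pfdecomp[i].mult
    return pfdecomp;
  }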
  // reduction of a fraction with multiple poles to single poles by
  // integration by parts, using the relation
  // ilaplace(P'/P^(k+1))=laplacevar/k*ilaplace(1/P^k)
  template<class T>
  pf<T> Tlaplace_reduce_pf(const pf<T> & p_cst,tensor<T> & laplacevar){
    pf<T> p(p_cst);
    assert(p.mult>0);
    if (p.mult==1)
      return p_cst;
    tensor<T> fprime=p.fact.derivative();
    tensor<T> d(fprime.dim),C(fprime.dim),u(fprime.dim),v(fprime.dim);
    Tegcdpsr(p.fact,fprime,u,v,d); // f*u+f'*v=d
    tensor<T> usave(u),vsave(v);
    // int initial_mult=p.mult-1;
    while (p.mult>1){
      Tegcdtoabcuv(p.fact,fprime,p.num,u,v,d,C);
      p.mult--;
      p.den=(p.den/p.fact)*C*T(p.mult);
      p.num=u*T(p.mult)+v.derivative()+v*laplacevar;
      if ( (p.mult % 5)==1) // simplify from time to time
        TsimplifybyTlgcd(p.num,p.den);
      if (p.mult==1)
        break;
      u=usave;
      v=vsave;
    }
    return pf<T>(p);
  }

  // reduction of a fraction with multiple poles to single poles by
  // integration by parts; the integrated part is added to intdecomp
  template<class T>
  pf<T> Tintreduce_pf(const pf<T> & p_cst,std::vector< pf<T> > & intdecomp){
    assert(p_cst.mult>0);
    if (p_cst.mult==1)
      return p_cst;
    pf<T> p(p_cst);
    tensor<T> fprime=p.fact.derivative();
    tensor<T> d(fprime.dim),u(fprime.dim),v(fprime.dim),C(fprime.dim);
    tensor<T> resnum(fprime.dim),resden(T(1),fprime.dim),numtemp(fprime.dim),dentemp(fprime.dim);
    Tegcdpsr(p.fact,fprime,u,v,d); // f*u+f'*v=d
    tensor<T> usave(u),vsave(v);
    int initial_mult=p.mult-1;
    while (p.mult>1){
      Tegcdtoabcuv(p.fact,fprime,p.num,u,v,d,C);
      p.mult--;
      p.den=(p.den/p.fact)*C*T(p.mult);
      p.num=u*T(p.mult)+v.derivative();
      u=-p.den;
      TsimplifybyTlgcd(u,v);
      Tfracadd<T>(resnum,resden,v,u,numtemp,dentemp);
      resnum=numtemp;
      resden=dentemp;
      if ( (p.mult % 5)==1) // simplify from time to time
        TsimplifybyTlgcd(p.num,p.den);
      if (p.mult==1)
        break;
      u=usave;
      v=vsave;
    }
    intdecomp.push_back(pf<T>(resnum,resden,p.fact,initial_mult));
    return pf<T>(p);
  }
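  // Worked example (illustrative only, constants up to the scaling C
  // returned by Tegcdtoabcuv): applying Tintreduce_pf to 1/(x^2+1)^2
  // performs one integration by parts:
  //   1/(x^2+1)^2 = d/dx( x/(2*(x^2+1)) ) + (1/2)*1/(x^2+1),
  // so the returned fraction is the simple-pole part (1/2)/(x^2+1) and
  // the integrated part x/(2*(x^2+1)) is appended to intdecomp with
  // multiplicity initial_mult=1.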
  template<class T>
  std::vector<T> makevector(const T & a,const T & b){
    std::vector<T> v;
    v.push_back(a);
    v.push_back(b);
    return v;
  }

  template<class T>
  std::vector<T> makevector(const T & a,const T & b,const T & c){
    std::vector<T> v;
    v.push_back(a);
    v.push_back(b);
    v.push_back(c);
    return v;
  }

  template<class T>
  std::vector<T> makevector(const T & a,const T & b,const T & c,const T & d){
    std::vector<T> v;
    v.push_back(a);
    v.push_back(b);
    v.push_back(c);
    v.push_back(d);
    return v;
  }

  template<class T>
  std::vector<T> merge(const std::vector<T> & a,const std::vector<T> & b){
    std::vector<T> v(a);
    int as=int(a.size());
    int bs=int(b.size());
    v.reserve(as+bs);
    typename std::vector<T>::const_iterator it=b.begin(),itend=b.end();
    for (;it!=itend;++it)
      v.push_back(*it);
    return v;
  }

#ifndef NO_NAMESPACE_GIAC
} // namespace giac
#endif // ndef NO_NAMESPACE_GIAC

#endif // ndef _GIAC_POLY_H_