/* -------------------------------------------------------------------------- *
 *                               Simbody(tm)                                  *
 * -------------------------------------------------------------------------- *
 * This is part of the SimTK biosimulation toolkit originating from           *
 * Simbios, the NIH National Center for Physics-Based Simulation of           *
 * Biological Structures at Stanford, funded under the NIH Roadmap for        *
 * Medical Research, grant U54 GM072970. See https://simtk.org/home/simbody.  *
 *                                                                            *
 * Portions copyright (c) 2005-12 Stanford University and the Authors.        *
 * Authors: Michael Sherman                                                   *
 * Contributors: Derived from IVM code written by Charles Schwieters          *
 *                                                                            *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may    *
 * not use this file except in compliance with the License. You may obtain a  *
 * copy of the License at http://www.apache.org/licenses/LICENSE-2.0.         *
 *                                                                            *
 * Unless required by applicable law or agreed to in writing, software        *
 * distributed under the License is distributed on an "AS IS" BASIS,          *
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   *
 * See the License for the specific language governing permissions and        *
 * limitations under the License.                                             *
 * -------------------------------------------------------------------------- */
23
namespace SimTK {
// Placeholder symbol so this translation unit is never empty (everything
// below is compiled out under #ifdef NOTDEF); some toolchains warn about
// object files that define no symbols.
void dummySymbolToAvoidWarningInLengthConstraints() {}
} // namespace SimTK
27
28 #ifdef NOTDEF
29
30
31 /**@file
32 *
33 * Module for bond-length constraints use a shake-like algorithm.
34 * Requires integrator to specify time derivatives to constrain.
35 *
36 * setup:
37 * 1) determine couplings
38 * 2) determine which time derivatives to be applied
39 *
40 * execution:
41 * 1) need all time derivs of theta passed in
42 * 2) iteratively apply constraints from lowest to highest time deriv
43 *
44 * Deal with loop bond-length constraints.
45 */
46
47 #include "simbody/internal/common.h"
48 using namespace SimTK;
49
50 #include "LengthConstraints.h"
51 #include "RigidBodyNode.h"
52 #include "SimbodyMatterSubsystemRep.h"
53
54 #include "newtonRaphson.h"
55
56 #include <iostream>
57 using std::ostream;
58 using std::cout;
59 using std::endl;
60 using std::cerr;
61 using std::setw;
62
63 #include <algorithm>
64
65 class LoopWNodes;
66 static int compareLevel(const LoopWNodes& l1, //forward declarations
67 const LoopWNodes& l2);
68
69 //static bool sameBranch(const RigidBodyNode* tip,
70 // const LoopWNodes& l );
71
72 class BadNodeDef {}; //exception
73
74
LoopWNodes(const SimbodyMatterSubsystemRep & t,const RBDistanceConstraint & dc)75 LoopWNodes::LoopWNodes(const SimbodyMatterSubsystemRep& t, const RBDistanceConstraint& dc)
76 : tree(&t), rbDistCons(&dc), flipStations(false), outmostCommonBody(0)
77 {
78 const RigidBodyNode* dcNode1 = &dc.getStation(1).getNode();
79 const RigidBodyNode* dcNode2 = &dc.getStation(2).getNode();
80
81 if (dcNode1==dcNode2) {
82 cout << "LoopWNodes::LoopWNodes: bad topology:\n\t"
83 << "loop stations " << dc.getStation(1)
84 << " and " << dc.getStation(2)
85 << " are now in the same node. Deleting loop.\n";
86 SimTK_THROW1(SimTK::Exception::LoopConstraintConstructionFailure, "bad topology");
87 }
88
89 // Ensure that tips(2) is on the body which is farther from Ground.
90 flipStations = (dcNode1->getLevel() > dcNode2->getLevel());
91
92 // OK to use tips() at this point.
93
94 // Collect up the node path from tips(2) down to the last node on its
95 // side of the loop which is at a higher level than tips(1) (may be none).
96 const RigidBodyNode* node1 = &tips(1).getNode();
97 const RigidBodyNode* node2 = &tips(2).getNode();
98 while ( node2->getLevel() > node1->getLevel() ) {
99 nodes[1].push_back(node2);
100 node2 = node2->getParent();
101 }
102
103 // We're at the same level on both sides of the loop. Run down both
104 // simultaneously until we hit the first common ancestor, collecting
105 // up the nodes along the two paths (but not the common ancestor).
106 while ( node1 != node2 ) {
107 if ( node1->isGroundNode() ) {
108 cerr << "LoopWNodes::LoopWNodes: could not find base node.\n\t"
109 << "loop between stations " << tips(1) << " and "
110 << tips(2) << "\n";
111 SimTK_THROW1(SimTK::Exception::LoopConstraintConstructionFailure,
112 "could not find base node");
113 }
114 nodes[0].push_back(node1);
115 nodes[1].push_back(node2);
116 node1 = node1->getParent();
117 node2 = node2->getParent();
118 }
119
120 outmostCommonBody = node1; // that's the common ancestor; might be Ground
121
122 // We want these in base-to-tip order.
123 std::reverse(nodes[0].begin(), nodes[0].end());
124 std::reverse(nodes[1].begin(), nodes[1].end());
125
126 // Make the list of ancestors, starting with the outmostCommonBody unless it
127 // is ground. Put in base-to-tip order.
128 const RigidBodyNode* next = outmostCommonBody;
129 while (next->getLevel() != 0) {
130 ancestors.push_back(next);
131 next = next->getParent();
132 }
133 std::reverse(ancestors.begin(), ancestors.end());
134 }
135
136
137 ostream&
operator <<(ostream & o,const LengthSet & s)138 operator<<(ostream& o, const LengthSet& s)
139 {
140 o << "LengthSet --------------->\n";
141 for (int i=0 ; i<(int)s.loops.size() ; i++) {
142 o << "Loop " << i << ":\n" << s.loops[i];
143 }
144 o << "\nNodemap: ";
145 for (int i=0; i<(int)s.nodeMap.size(); ++i)
146 o << " " << s.nodeMap[i]->getNodeNum()
147 << "[" << s.nodeMap[i]->getLevel() << "]";
148 o << "\n<-------- end of LengthSet\n";
149 return o;
150 }
151
152
153 ostream&
operator <<(ostream & o,const LoopWNodes & w)154 operator<<(ostream& o, const LoopWNodes& w)
155 {
156 o << "tip1=" << w.tips(1) << " tip2=" << w.tips(2)
157 << " distance=" << w.getDistance() << endl;
158 o << "nodes[0]:";
159 for (int i=0; i<(int)w.nodes[0].size(); ++i)
160 o << " " << w.nodes[0][i]->getNodeNum()
161 << "[" << w.nodes[0][i]->getLevel() << "]";
162 o << "\nnodes[1]:";
163 for (int i=0; i<(int)w.nodes[1].size(); ++i)
164 o << " " << w.nodes[1][i]->getNodeNum()
165 << "[" << w.nodes[1][i]->getLevel() << "]";
166 o << "\nOutmost common body: " << w.getOutmostCommonBody()->getNodeNum()
167 << "[" << w.getOutmostCommonBody()->getLevel() << "]";
168 o << "\nAncestors: ";
169 for (int i=0; i<(int)w.ancestors.size(); ++i)
170 o << " " << w.ancestors[i]->getNodeNum()
171 << "[" << w.ancestors[i]->getLevel() << "]";
172 o << endl;
173 return o;
174 }
175
LengthConstraints(const SimbodyMatterSubsystemRep & rbt,int vbose)176 LengthConstraints::LengthConstraints
177 (const SimbodyMatterSubsystemRep& rbt, int vbose)
178 : maxIters( 20 ), maxMin( 20 ),
179 rbTree(rbt), verbose(vbose), posMin("posMin", cout), velMin("velMin", cout)
180 {
181 posMin.maxIters = maxIters;
182 posMin.maxMin = maxMin;
183 velMin.maxIters = maxIters;
184 velMin.maxMin = maxMin;
185 }
186
187 //
188 // compare LoopWNodes by outmost common node level value
189 //
190 static int
compareLevel(const LoopWNodes & l1,const LoopWNodes & l2)191 compareLevel(const LoopWNodes& l1,
192 const LoopWNodes& l2)
193 {
194 if ( l1.getOutmostCommonBody()->getLevel() > l2.getOutmostCommonBody()->getLevel() )
195 return 1;
196 else if ( l1.getOutmostCommonBody()->getLevel() < l2.getOutmostCommonBody()->getLevel() )
197 return -1;
198 else
199 return 0;
200 }
201
202 // Define this operator so std::sort will work.
operator <(const LoopWNodes & l1,const LoopWNodes & l2)203 static inline bool operator<(const LoopWNodes& l1, const LoopWNodes& l2) {
204 return compareLevel(l1,l2) == -1;
205 }
206
207 //1) construct: given list of loops
208 // a) if appropriate issue warning and exit.
209 // b) sort by base node of each loop.
210 // c) find loops which intersect: combine loops and increment
211 // number of length constraints
212 //
213 // Sherm's constraint coupling hypothesis (060616):
214 //
215 // For a constraint i:
216 // Kinematic participants K[i]
217 // = the set of bodies whose mobilities can affect verr[i]
218 // Dynamic participants D[i]
219 // = the set of bodies whose effective inertias can change
220 // as a result of enforcement of constraint i
221 // TODO: A thought on dynamic coupling -- is this the same as computing
222 // effective joint forces? Then only mobilities that can feel a
223 // test constraint force can be considered dynamic participants of
224 // that constraint.
225 //
226 // Constraints i & j are directly kinematically coupled if
227 // K[i] intersect K[j] <> { }
228 // Constraints i & j are directly dynamically coupled if
229 // (a) they are directly kinematically coupled, or
230 // (b) D[i] intersect D[j] <> { }
231 // TODO: I *think* dynamic coupling is one-way, from outboard to inboard.
232 // That means it defines an evaluation *order*, where modified
233 // inertias from outboard loops become the articulated body
234 // inertias to use in the inboard loops, rather than requiring simultaneous
235 // solutions for the multipliers.
236 // Kinematic coupling, on the other hand, is symmetric and implies
237 // simultaneous solution.
238 //
239 // Compute transitive closure:
240 // Constraints i & j are kinematically coupled if
241 // (a) they are directly kinematically coupled, or
242 // (b) constraint i is kinematically coupled to any constraint
243 // k to which j is kinematically coupled
244 // That is, build kinematic clusters with completely disjoint constraints.
245 //
246 // For now, do the same thing with acceleration constraints but this
247 // is too strict. TODO
248 //
249 //
250 void
construct(const Array_<RBDistanceConstraint * > & iloops)251 LengthConstraints::construct(const Array_<RBDistanceConstraint*>& iloops)
252 {
253 //clean up
254 pvConstraints.resize(0);
255 accConstraints.resize(0);
256
257 // if ( !useLengthConstraint ) FIX:!!!
258 // //issue error message?
259 // return;
260
261 LoopList loops;
262 for (int i=0 ; i < (int)iloops.size() ; i++) {
263 try {
264 LoopWNodes loop(rbTree, *iloops[i] );
265 loops.push_back( loop );
266 }
267 catch ( BadNodeDef ) {}
268 }
269
270 //cout << "RAW LOOPS:\n";
271 //for (int i=0; i < (int)loops.size(); ++i)
272 // cout << "Loop " << i << ":\n " << loops[i] << endl;
273
274 // sort loops by outmostCommonBody->level
275 std::sort(loops.begin(), loops.end()); // uses "<" operator by default; see above
276 LoopList accLoops = loops; //version for acceleration
277
278 // Sherm 060616: transitive closure calculation. Remember that
279 // the constraints are sorted by the level of their outmost common body,
280 // starting at ground, so that more-outboard constraints cannot cause
281 // us to need to revisit an earlier cluster. However, constraints at
282 // the same level can bring together earlier ones at that level.
283 // TODO: currently just restarting completely when we add a
284 // constraint to a cluster.
285
286 for (int i=0 ; i<(int)loops.size() ; i++) {
287 pvConstraints.push_back(LengthSet(this));
288 pvConstraints.back().addKinematicConstraint(loops[i]);
289 bool addedAConstraint;
290 do {
291 addedAConstraint = false;
292 for (int j=i+1 ; j<(int)loops.size() ; j++)
293 if ( (loops[j].nodes[0].size()
294 && pvConstraints[i].contains(loops[j].nodes[0][0]))
295 || (loops[j].nodes[1].size()
296 && pvConstraints[i].contains(loops[j].nodes[1][0])))
297 {
298 //add constraint equation j to cluster i
299 pvConstraints[i].addKinematicConstraint(loops[j]);
300 loops.erase(loops.begin() + j); // STL for &loops[j]
301 addedAConstraint = true;
302 break;
303 }
304 } while (addedAConstraint);
305 }
306
307 for (int i=0 ; i<(int)accLoops.size() ; i++) {
308 accConstraints.push_back(LengthSet(this));
309 accConstraints.back().addDynamicConstraint(accLoops[i]);
310 bool addedAConstraint;
311 do {
312 addedAConstraint = false;
313 for (int j=i+1 ; j<(int)accLoops.size() ; j++)
314 if ( (accLoops[j].nodes[0].size()
315 && accConstraints[i].contains(accLoops[j].nodes[0][0]))
316 || (accLoops[j].nodes[1].size()
317 && accConstraints[i].contains(accLoops[j].nodes[1][0]))
318 || (accLoops[j].ancestors.size()
319 && accConstraints[i].contains(accLoops[j].ancestors[0])))
320 {
321 //add constraint equation j to cluster i
322 accConstraints[i].addDynamicConstraint(accLoops[j]);
323 accLoops.erase(accLoops.begin() + j); // STL for &accLoops[j]
324 addedAConstraint = true;
325 break;
326 }
327 } while (addedAConstraint);
328 }
329
330 if (false && pvConstraints.size()>0) {
331 cout << "LengthConstraints::construct: pos/vel length sets found:\n";
332 for (int i=0 ; i<(int)pvConstraints.size() ; i++)
333 cout << pvConstraints[i] << "\n";
334 cout << "LengthConstraints::construct: accel length sets found:\n";
335 for (int i=0 ; i<(int)accConstraints.size() ; i++)
336 cout << accConstraints[i] << "\n";
337 }
338 }
339
addKinematicConstraint(const LoopWNodes & loop)340 void LengthSet::addKinematicConstraint(const LoopWNodes& loop) {
341 loops.push_back( LoopWNodes(loop) );
342 for (int b=0 ; b<2 ; b++)
343 for (int i=0; i<(int)loop.nodes[b].size(); i++)
344 if (std::find(nodeMap.begin(),nodeMap.end(),loop.nodes[b][i])
345 ==nodeMap.end())
346 {
347 // not found
348 ndofThisSet += loop.nodes[b][i]->getDOF();
349 nodeMap.push_back( loop.nodes[b][i] );
350 }
351 }
352
addDynamicConstraint(const LoopWNodes & loop)353 void LengthSet::addDynamicConstraint(const LoopWNodes& loop) {
354 loops.push_back( LoopWNodes(loop) );
355 for (int b=0 ; b<2 ; b++)
356 for (int i=0; i<(int)loop.nodes[b].size(); i++)
357 if (std::find(nodeMap.begin(),nodeMap.end(),loop.nodes[b][i])
358 ==nodeMap.end())
359 {
360 // not found
361 ndofThisSet += loop.nodes[b][i]->getDOF();
362 nodeMap.push_back( loop.nodes[b][i] );
363 }
364
365 for (int i=0; i<(int)loop.ancestors.size(); i++)
366 if (std::find(nodeMap.begin(),nodeMap.end(),loop.ancestors[i])
367 ==nodeMap.end())
368 {
369 // not found
370 ndofThisSet += loop.ancestors[i]->getDOF();
371 nodeMap.push_back( loop.ancestors[i] );
372 }
373 }
374
contains(const RigidBodyNode * node)375 bool LengthSet::contains(const RigidBodyNode* node) {
376 return std::find(nodeMap.begin(),nodeMap.end(),node) != nodeMap.end();
377 }
378
379 class CalcPosB {
380 State& s;
381 const LengthSet* lengthSet;
382 public:
CalcPosB(State & ss,const LengthSet * lset)383 CalcPosB(State& ss, const LengthSet* lset)
384 : s(ss), lengthSet(lset) {}
operator ()(const Vector & pos) const385 Vector operator()(const Vector& pos) const
386 { return lengthSet->calcPosB(s,pos); }
387 };
388
389 class CalcPosZ {
390 const State& s;
391 const LengthSet* lengthSet;
392 public:
CalcPosZ(const State & ss,const LengthSet * constraint)393 CalcPosZ(const State& ss, const LengthSet* constraint)
394 : s(ss), lengthSet(constraint) {}
operator ()(const Vector & b) const395 Vector operator()(const Vector& b) const
396 { return lengthSet->calcPosZ(s, b); }
397 };
398
399 class CalcVelB {
400 State& s;
401 const LengthSet* lengthSet;
402 public:
CalcVelB(State & ss,const LengthSet * constraint)403 CalcVelB(State& ss, const LengthSet* constraint)
404 : s(ss), lengthSet(constraint) {}
operator ()(const Vector & vel)405 Vector operator()(const Vector& vel)
406 { return lengthSet->calcVelB(s,vel); }
407 };
408
409 //
410 // Calculate the position constraint violation (zero when constraint met).
411 //
412 Vector
calcPosB(State & s,const Vector & pos) const413 LengthSet::calcPosB(State& s, const Vector& pos) const
414 {
415 setPos(s, pos);
416
417 // Although we're not changing the qErr cache entry here, we access
418 // it with "upd" because setPos() will have modified the q's and
419 // thus invalidated stage Position, so a "get" would fail.
420 const Vector& qErr = getRBTree().updQErr(s);
421 Vector b((int)loops.size());
422 for (int i=0; i<(int)loops.size(); ++i)
423 b[i] = loops[i].rbDistCons->getPosErr(qErr);
424 return b;
425 }
426
427 //
428 // Calculate the velocity constraint violation (zero when constraint met).
429 //
430 Vector
calcVelB(State & s,const Vector & vel) const431 LengthSet::calcVelB(State& s, const Vector& vel) const
432 {
433 setVel(s, vel);
434
435 // Although we're not changing the uErr cache entry here, we access
436 // it with "upd" because setVel() will have modified the u's and
437 // thus invalidated stage Velocity, so a "get" would fail.
438 const Vector& uErr = getRBTree().updUErr(s);
439
440 Vector b((int)loops.size());
441 for (int i=0; i<(int)loops.size(); ++i)
442 b[i] = loops[i].rbDistCons->getVelErr(uErr);
443 return b;
444 }
445
446 //
447 // Given a vector containing violations of position constraints, calculate
448 // a state update which should drive those violations to zero (if they
449 // were linear).
450 //
451 // This is a little tricky since the gradient we have is actually the
452 // gradient of the *velocity* errors with respect to the generalized speeds,
453 // but we want the gradient of the *position* errors with respect to
454 // the generalized coordinates. The theory goes something like this:
455 //
456 // d perr d perr_dot d verr d u
457 // ------ = ---------- = ------ * -----
458 // d q d qdot d u d qdot
459 //
460 // Let P = d perr/dq, A = d verr/du, Q = d qdot/du.
461 //
462 // We want to find a change deltaq that will elimate the current error b:
463 // P deltaq = b. [WRONG:]Instead we solve A * x = b, where x = inv(Q) * deltaq,
464 // and then solve deltaq = Q * x. Conveniently Q is our friendly invertible
465 // relation between qdot's and u's: qdot = Q*u.
466 //
467 // TODO: Sherm 080101: the above is incorrect. We have to factor (PQ^-1) and
468 // solve (PQ^-1)*deltaq = b for LS deltaq, which is not the same as
469 // solving for LS x in P*x=b and then deltaq=Q*x.
470 //
471 // TODO: I have oversimplified the above since we are really looking for
472 // a least squares solution to an underdetermined system. With the
473 // pseudoinverse A+ available we can write x = A+ * b.
474 //
475 Vector
calcPosZ(const State & s,const Vector & b) const476 LengthSet::calcPosZ(const State& s, const Vector& b) const
477 {
478 const Matrix Gt = calcGrad(s);
479 const Vector x = calcPseudoInverseA(Gt) * b;
480
481 const SBStateDigest digest(s, getRBTree(), Stage::Position);
482
483 Vector zu(getRBTree().getTotalDOF(),0.);
484 Vector zq(getRBTree().getTotalQAlloc(),0.);
485
486 // map the vector dir back to the appropriate elements of z
487 int indx=0; // sherm 060222: I added this
488 for (int i=0 ; i<(int)nodeMap.size() ; i++) {
489 const int d = nodeMap[i]->getDOF();
490 const int offs = nodeMap[i]->getUIndex();
491 zu(offs,d) = x(indx,d);
492 indx += d;
493
494 // Make qdot = Q*u.
495 nodeMap[i]->calcQDot(digest,zu,zq); // change u's to qdot's
496 }
497 assert(indx == ndofThisSet);
498
499 return zq;
500 }
501
502 //
503 // Given a vector containing violations of velocity constraints, calculate
504 // a state update which would drive those violations to zero (if they
505 // are linear and well conditioned).
506 //
507 // This is simpler than CalcPosZ because we have the right gradient here (it's
508 // the same one in both places). TODO: and shouldn't be recalculated!
509 //
510 class CalcVelZ {
511 const State& s;
512 const LengthSet* lengthSet;
513 const Matrix Gt;
514 const Matrix GInverse;
515 public:
CalcVelZ(const State & ss,const LengthSet * lset)516 CalcVelZ(const State& ss, const LengthSet* lset)
517 : s(ss), lengthSet(lset), Gt(lset->calcGrad(s)),
518 GInverse(LengthSet::calcPseudoInverseA(Gt))
519 {
520 }
521
operator ()(const Vector & b)522 Vector operator()(const Vector& b) {
523 const Vector dir = GInverse * b;
524 Vector z(lengthSet->getRBTree().getTotalDOF(),0.0);
525
526 // map the vector dir back to the appropriate elements of z
527 int indx=0; // sherm 060222: I added this
528 for (int i=0 ; i<(int)lengthSet->nodeMap.size() ; i++) {
529 const RigidBodyNode* n = lengthSet->nodeMap[i];
530 const int d = n->getDOF();
531 const int offs = n->getUIndex();
532 z(offs,d) = dir(indx,d);
533 indx += d;
534 }
535 assert(indx == lengthSet->ndofThisSet);
536 return z;
537 }
538 };
539
540 // Project out the position constraint errors from the given state.
541 bool
enforcePositionConstraints(State & s,const Real & requiredTol,const Real & desiredTol) const542 LengthConstraints::enforcePositionConstraints(State& s, const Real& requiredTol, const Real& desiredTol) const
543 {
544 assert(rbTree.getStage(s) >= Stage::Position-1);
545 Vector& pos = rbTree.updQ(s);
546
547 bool anyChanges = false;
548
549 try {
550 for (int i=0 ; i<(int)pvConstraints.size() ; i++) {
551 anyChanges = true; // TODO: assuming for now
552 posMin.calc(requiredTol, desiredTol, pos,
553 CalcPosB(s, &pvConstraints[i]),
554 CalcPosZ(s, &pvConstraints[i]));
555 }
556 }
557 catch (const SimTK::Exception::NewtonRaphsonFailure& cptn ) {
558 cout << "LengthConstraints::enforcePositionConstraints: exception: "
559 << cptn.getMessage() << '\n';
560 }
561
562 return anyChanges;
563 }
564
565 // Project out the velocity constraint errors from the given state.
566 bool
enforceVelocityConstraints(State & s,const Real & requiredTol,const Real & desiredTol) const567 LengthConstraints::enforceVelocityConstraints(State& s, const Real& requiredTol, const Real& desiredTol) const
568 {
569 assert(rbTree.getStage(s) >= Stage(Stage::Velocity).prev());
570 Vector& vel = rbTree.updU(s);
571
572 bool anyChanges = false;
573
574 try {
575 for (int i=0 ; i<(int)pvConstraints.size() ; i++) {
576 anyChanges = true; // TODO: assuming for now
577 velMin.calc(requiredTol, desiredTol, vel,
578 CalcVelB(s, &pvConstraints[i]),
579 CalcVelZ(s, &pvConstraints[i]));
580 }
581 }
582 catch (const SimTK::Exception::NewtonRaphsonFailure& cptn ) {
583 cout << "LengthConstraints::enforceVelocityConstraints: exception: "
584 << cptn.getMessage() << '\n';
585 }
586
587 return anyChanges;
588 }
589
590 //
591 //// on each iteration
592 //// for each loop
593 //// -first calc all usual properties
594 //// -recursively compute phi_ni for each length constraint
595 //// from tip to outmost common body of
596 //// -compute gradient
597 //// -update theta using quasi-Newton-Raphson
598 //// -compute Cartesian coords
599 //// -check for convergence
600 //// -repeat
601 //
602
603
setPos(State & s,const Vector & pos) const604 void LengthSet::setPos(State& s, const Vector& pos) const
605 {
606 const SBStateDigest digest(s, getRBTree(), Stage::Position);
607 Vector& q = getRBTree().updQ(s);
608 SBPositionCache& pc = getRBTree().updPositionCache(s);
609 Vector& qErr = getRBTree().updQErr(s);
610
611 for (int i=0 ; i<(int)nodeMap.size() ; i++)
612 nodeMap[i]->copyQ(digest, pos, q);
613
614 // TODO: sherm this is the wrong place for the stage update!
615 s.invalidateAll(Stage::Position);
616
617 // sherm TODO: this now computes kinematics for the whole system,
618 // but it should only have to update the loop we are interested in.
619 // Schwieters had this right before because his equivalent of 'setQ'
620 // above also performed the kinematics, while ours just saves the
621 // new state variable values and calculates here:
622 getRBTree().realizeSubsystemPosition(s);
623
624 // TODO: This is redundant after realizePosition(), but I'm leaving
625 // it here because this is actually all that need be recalculated for
626 // the loop closure iterations.
627 for (int i=0; i<(int)loops.size(); ++i)
628 loops[i].calcPosInfo(qErr,pc);
629 }
630
631 // Must have called LengthSet::setPos() already.
setVel(State & s,const Vector & vel) const632 void LengthSet::setVel(State& s, const Vector& vel) const
633 {
634 const SBStateDigest digest(s, getRBTree(), Stage::Position);
635 const SBPositionCache& pc = getRBTree().getPositionCache(s);
636 Vector& u = getRBTree().updU(s);
637 SBVelocityCache& vc = getRBTree().updVelocityCache(s);
638 Vector& uErr = getRBTree().updUErr(s);
639
640 for (int i=0 ; i<(int)nodeMap.size() ; i++)
641 nodeMap[i]->copyU(digest, vel, u);
642
643 // TODO: sherm this is the wrong place for the stage update!
644 s.invalidateAll(Stage::Velocity);
645
646 getRBTree().realizeSubsystemVelocity(s);
647
648 // TODO: see comment above in setPos
649 for (int i=0; i<(int)loops.size(); ++i)
650 loops[i].calcVelInfo(pc,uErr,vc);
651 }
652
653 //
654 ////A = df / dtheta_i
655 //// = [ -(q_ni-qn0)x , 1 ] [ phi_na^T Ha , phi_nb^T Hb , ... ]^T
656 ////
657 //
658
659 // Calculate gradient by central difference for testing the analytic
660 // version. Presumes that calcEnergy has been called previously with current
661 // value of ipos.
662 void
fdgradf(State & s,const Vector & pos,Matrix & grad) const663 LengthSet::fdgradf(State& s,
664 const Vector& pos,
665 Matrix& grad) const
666 {
667 const SBModelVars& mv = getRBTree().getModelVars(s);
668
669 // Gradf gradf(tree);
670 // gradf(x,grad); return;
671 const double eps = 1e-6;
672 const CalcPosB calcB(s, this);
673 const Vector b = calcB(pos);
674 int grad_indx=0;
675 for (int i=0 ; i<(int)nodeMap.size() ; i++) {
676 int pos_indx=nodeMap[i]->getQIndex();
677 for (int j=0 ; j<nodeMap[i]->getNQInUse(mv) ; j++,pos_indx++,grad_indx++) {
678 Vector posp = pos;
679 posp(pos_indx) += eps;
680 const Vector bp = calcB(posp);
681 posp(pos_indx) -= 2.*eps;
682 const Vector bpm = calcB(posp);
683 for (int k=0 ; k<b.size() ; k++)
684 grad(grad_indx,k) = -(bp(k)-bpm(k)) / (2.*eps);
685 }
686 }
687 }
688
689 void
testGrad(State & s,const Vector & pos,const Matrix & grad) const690 LengthSet::testGrad(State& s, const Vector& pos, const Matrix& grad) const
691 {
692 double tol = 1e-4;
693
694 Matrix fdgrad(ndofThisSet,loops.size());
695 fdgradf(s,pos,fdgrad);
696
697 for (int i=0 ; i<grad.nrow() ; i++)
698 for (int j=0 ; j<grad.ncol() ; j++)
699 if (fabs(grad(i,j)-fdgrad(i,j)) > fabs(tol))
700 cout << "testGrad: error in gradient: "
701 << setw(2) << i << ' '
702 << setw(2) << j << ": "
703 << grad(i,j) << ' ' << fdgrad(i,j) << '\n';
704 cout.flush();
705 }
706
707 //
708 // unitVec(p+ - p-) * d (p+ - p-) / d (theta_i)
709 // d g / d theta for all hingenodes in nodemap
710 //
711 // sherm: this appears to calculate the transpose of G
712 //
713 // sherm 060222: OK, this routine doesn't really do what it says
714 // when the constraint includes a ball or free joint. It
715 // does not use the actual q's, which are quaternions. Instead
716 // it is something like d(v+ - v-)/du. If the u's are the
717 // angular velocity across the joint then this is
718 // d(p+ - p-)/d qbar where qbar is a 1-2-3 Euler sequence
719 // which is 0,0,0 at the current orientation. ("instant
720 // coordinates").
721 //
722 // sherm: 060303 This routine uses the joint transition matrices ~H,
723 // which can be thought of as Jacobians ~H = d V_PB_G / d uB, that is
724 // partial derivative of the cross-joint relative *spatial* velocity
725 // with respect to that joint's generalized speeds uB. This allows
726 // analytic computation of d verr / d u where verr is the set of
727 // distance constraint velocity errors for the current set of
728 // coupled loops, and u are the generalized speeds for all joints
729 // which contribute to any of those loops. Some times this is the
730 // gradient we want, but we also want to use this routine to
731 // calculate d perr / d q, which we can't get directly. But it
732 // is easily finagled into the right gradient; see CalcPosZ above.
733 // The trickiest part is that we use nice physical variables for
734 // generalized speeds, like angular velocities for ball joints,
735 // but we use awkward mathematical constructs like quaternions and
736 // Euler angles for q's.
737 //
738 Matrix
calcGrad(const State & s) const739 LengthSet::calcGrad(const State& s) const
740 {
741 // We're not updating, but need to use upd here because Position stage
742 // was invalidated by change to state.
743 const SBStateDigest digest(s, getRBTree(), Stage::Position);
744 const SBPositionCache& pc = getRBTree().updPositionCache(s);
745
746 Matrix grad(ndofThisSet,loops.size(),0.0);
747 const Mat33 one(1); //FIX: should be done once
748
749 for (int i=0 ; i<(int)loops.size() ; i++) {
750 const LoopWNodes& l = loops[i];
751 Array_<SpatialMat> phiT[2];
752 for (int b=0 ; b<2 ; b++) {
753 phiT[b].resize( l.nodes[b].size() );
754 if ( l.nodes[b].size() ) {
755 phiT[b][phiT[b].size()-1] = 1; // identity
756 for (int j=l.nodes[b].size()-2 ; j>=0 ; j-- ) {
757 const RigidBodyNode* n = l.nodes[b][j+1];
758 phiT[b][j] = phiT[b][j+1] * ~n->getPhi(pc);
759 }
760 }
761 }
762
763 // compute gradient
764 //Vec3 uBond = unitVec(l.tipPos(pc,2) - l.tipPos(pc,1));
765 const Vec3 uBond = l.tipPos(pc,2) - l.tipPos(pc,1); // p
766 Row<2,Mat33> J[2];
767 for (int b=1 ; b<=2 ; b++)
768 // TODO: get rid of this b-1; make tips 0-based
769 J[b-1] = Row<2,Mat33>(-crossMat(l.tipPos(pc,b) -
770 l.tips(b).getNode().getX_GB(pc).p()), one);
771 int g_indx=0;
772 for (int j=0 ; j<(int)nodeMap.size() ; j++) {
773 Real elem=0.0;
774
775 // We just want to get the index at which nodeMap[j] is found in the
776 // Array_ (or -1 if not found) but that's not so easy!
777 const Array_<const RigidBodyNode*>& n0 = l.nodes[0];
778 const Array_<const RigidBodyNode*>& n1 = l.nodes[1];
779 Array_<const RigidBodyNode*>::const_iterator found0 =
780 std::find(n0.begin(),n0.end(),nodeMap[j]);
781 Array_<const RigidBodyNode*>::const_iterator found1 =
782 std::find(n1.begin(),n1.end(),nodeMap[j]);
783
784 const int l1_indx = (found0==n0.end() ? -1 : found0-n0.begin());
785 const int l2_indx = (found1==n1.end() ? -1 : found1-n1.begin());
786
787 for (int k=0 ; k < nodeMap[j]->getDOF() ; k++) {
788 const SpatialVec& HtCol = ~nodeMap[j]->getHRow(digest, k);
789 if ( l1_indx >= 0 ) {
790 elem = -dot(uBond , Vec3(J[0] * phiT[0][l1_indx]*HtCol));
791 } else if ( l2_indx >= 0 ) {
792 elem = dot(uBond , Vec3(J[1] * phiT[1][l2_indx]*HtCol));
793 }
794 grad(g_indx++,i) = elem;
795 }
796 }
797 // added by sherm 060222:
798 assert(g_indx == ndofThisSet); // ??
799 }
800 return grad;
801 }
802
803 //
804 // Calculate generalized inverse which minimizes changes in soln vector.
805 // TODO (sherm) This is trying to create a pseudoinverse
806 // using normal equations which is numerically bad and can't deal with
807 // redundant constraints. Should use an SVD or (faster) QTZ factorization
808 // instead.
809 //
810 // sherm 060314:
811 // n
812 // --------------------
813 // | |
814 // A (mXn) = m | | rank(A) <= m.
815 // | |
816 // --------------------
817 //
818 // The nXm pseudo-inverse A+ of a matrix A is A+ = V S+ ~U, where A=U*S*~V
819 // is the singular value decomposition (SVD) of A, and S+ is the inverse
820 // of the diagonal matrix S with some zeroes thrown it at the end after
821 // we run out of rank(A). For an underdetermined system with full row
822 // rank, that is, assuming m < n and rank(A)=m, you can do a poor man's
823 // calculation of this as A+ = ~A*inv(A*~A). In the overdetermined
824 // case we have m > n and rank(A)=n, in which case A+ = inv(~A*A)*~A.
825 //
826 // We are given gradient G (nuXnc). We're assuming nu > nc and rank(G)=nc, i.e.,
827 // no redundant constraints (not a good assumption in general!). We would like
828 // to get a least squares solution to ~G x = b where x has dimension nu and
829 // b has dimension nc. So if we set A=~G we have the underdetermined system
830 // depicted above and want to return A+ = ~A*inv(A*~A) = G*inv(~G*G). Ergo ...
831 /*static*/ Matrix
calcPseudoInverseA(const Matrix & transposeOfA)832 LengthSet::calcPseudoInverseA(const Matrix& transposeOfA)
833 {
834 const Matrix A = ~transposeOfA; // now A is dg/dtheta
835 const int m = A.nrow(); // see picture above
836 const int n = A.ncol();
837
838 // Now calculate the pseudo inverse of A.
839 Matrix pinvA(n,m,0.);
840 if ( A.normSqr() > 1e-10 ) {
841 if (m < n)
842 pinvA = ~A * (A * ~A).invert(); // normal underdetermined case
843 else if (m > n)
844 pinvA = (A * ~A).invert() * ~A; // wow, that's a lot of constraints!
845 else
846 pinvA = A.invert(); // not likely
847 }
848 return pinvA;
849 }
850
851
852 //
853 // Given a vector in mobility space, project it along the motion constraints
854 // of this LengthSet, by removing its component normal to the constraint
855 // manifold. Here we want to obtain a least squares solution x to
856 // A x = A v, A=[ncXnu] is the constraint Jacobian.
857 // We solve with pseudo inverse (see calcPseudoInverseA):
858 // Least squares x = A+ * Av.
859 // That is the component of v which is normal to the constraint manifold.
860 // So we then set v = v - x and return.
861 //
862 // TODO: projections for integration errors should be done in the
863 // error norm. I'm not sure whether that has to be done in this routine
864 // or whether caller can scale on the way in and out.
865 //
866 void
projectUVecOntoMotionConstraints(const State & s,Vector & v)867 LengthSet::projectUVecOntoMotionConstraints(const State& s, Vector& v)
868 {
869 const Matrix transposeOfA = calcGrad(s);
870 const Matrix pinvA = calcPseudoInverseA(transposeOfA); // TODO: this should already be available
871
872 const Vector rhs = packedMatTransposeTimesVec(transposeOfA,v); // i.e., rhs = Av
873
874 //FIX: using inverse is inefficient
875 const Vector x = pinvA * rhs;
876
877 // subtract forces due to these constraints
878 subtractPackedVecFromVec(v, x);
879 }
880
881 Matrix
calcPseudoInverseAFD(const State & s) const882 LengthSet::calcPseudoInverseAFD(const State& s) const
883 {
884 Matrix grad(ndofThisSet,loops.size()); // <-- transpose of the actual dg/dtheta
885 State sTmp = s;
886 Vector pos = getRBTree().getQ(s); // a copy
887 fdgradf(sTmp,pos,grad);
888
889 const Matrix A = ~grad; // now A is dg/dtheta
890 const int m = A.nrow(); // see picture above
891 const int n = A.ncol();
892
893 // Now calculate the pseudo inverse of A.
894 Matrix pinvA(n,m,0.);
895 if ( A.normSqr() > 1e-10 ) {
896 if (m < n)
897 pinvA = ~A * (A * ~A).invert(); // normal underdetermined case
898 else if (m > n)
899 pinvA = (A * ~A).invert() * ~A; // wow, that's a lot of constraints!
900 else
901 pinvA = A.invert(); // not likely
902 }
903 return pinvA;
904 }
905
906 //acceleration:
907 // 0) after initial acceleration calculation:
908 // 1) calculate Y (block diagonal matrix) for all nodes (common body to tip)
909 // 2) for each LengthSet, calculate Lagrange multiplier(s) and add in
910 // resulting force- update accelerations
911 // Do this step from tip to base.
912 //
913
calcConstraintForces(const State & s,const Vector & udotErr,Vector & multipliers,SBAccelerationCache & ac) const914 bool LengthConstraints::calcConstraintForces(const State& s, const Vector& udotErr,
915 Vector& multipliers,
916 SBAccelerationCache& ac) const
917 {
918 if ( accConstraints.size() == 0 )
919 return false;
920
921 rbTree.calcY(s); // TODO <-- this doesn't belong here!
922
923 for (int i=accConstraints.size()-1 ; i>=0 ; i--)
924 accConstraints[i].calcConstraintForces(s,udotErr,multipliers,ac);
925
926 return true;
927 }
928
addInCorrectionForces(const State & s,const SBAccelerationCache & ac,SpatialVecList & spatialForces) const929 void LengthConstraints::addInCorrectionForces(const State& s, const SBAccelerationCache& ac,
930 SpatialVecList& spatialForces) const
931 {
932 for (int i=accConstraints.size()-1 ; i>=0 ; i--)
933 accConstraints[i].addInCorrectionForces(s, ac, spatialForces);
934 }
935
936 void
projectUVecOntoMotionConstraints(const State & s,Vector & vec)937 LengthConstraints::projectUVecOntoMotionConstraints(const State& s, Vector& vec)
938 {
939 if ( pvConstraints.size() == 0 )
940 return;
941
942 for (int i=pvConstraints.size()-1 ; i>=0 ; i--)
943 pvConstraints[i].projectUVecOntoMotionConstraints(s, vec);
944
945 for (int i=pvConstraints.size()-1 ; i>=0 ; i--)
946 pvConstraints[i].testProjectedVec(s, vec);
947 }
948
//
// Calculate the acceleration of atom assuming that the spatial acceleration
// of the body (node) it's on is available.
//
//static Vec3
//getAccel(const IVMAtom* a)
//{
//    const RigidBodyNode* n = a->node;
//    Vec3 ret( SubVec6(n->getSpatialAcc(),3,3).vector() );
//    ret += cross( SubVec6(n->getSpatialAcc(),0,3).vector() , a->pos - n->getAtom(0)->pos );
//    ret += cross( SubVec6(n->getSpatialVel(),0,3).vector() , a->vel - n->getAtom(0)->vel );
//    return ret;
//}

//            T       -1  T
// Compute  v1 *(J M  J )_mn * v2   where the indices mn are given by
// the nodes associated with stations s1 & s2. Returns 0 (after wasting
// some work) if the two stations' bodies have no common ancestor body.
//
static Real
computeA(const SBPositionCache& cc,
         const SBDynamicsCache&  dc,
         const Vec3&       v1,
         const LoopWNodes& loop1, int s1,
         const LoopWNodes& loop2, int s2,
         const Vec3&       v2)
{
    const RigidBodyNode* n1 = &loop1.tips(s1).getNode();
    const RigidBodyNode* n2 = &loop2.tips(s2).getNode();

    const Mat33 one(1);

    // Shift v1 and v2 from their stations to the corresponding body
    // origins, producing a spatial row (left factor) and a spatial
    // vector (right factor).
    SpatialRow t1 = ~v1 * Row<2,Mat33>(crossMat(n1->getX_GB(cc).p() - loop1.tipPos(cc,s1)), one);
    SpatialVec t2 = Vec<2,Mat33>(crossMat(loop2.tipPos(cc,s2) - n2->getX_GB(cc).p()), one) * v2;

    // Propagate whichever side is deeper in the tree up (through Psi)
    // until both sides are at the same level ...
    while ( n1->getLevel() > n2->getLevel() ) {
        t1 = t1 * ~n1->getPsi(dc);
        n1 = n1->getParent();
    }

    while ( n2->getLevel() > n1->getLevel() ) {
        t2 = n2->getPsi(dc) * t2;
        n2 = n2->getParent();
    }

    // ... then march both sides up in lockstep until they meet.
    while ( n1 != n2 ) {
        if (n1->isGroundNode() || n2->isGroundNode()  ) {
            t1 = 0.;  //not in same branch (or same tree -- sherm)
            t2 = 0.;
            cout << "computeA: cycles wasted calculating missed branch: "
                 << loop1.tips(s1) << " <-> " << loop2.tips(s2) << '\n';
            return 0.;
        }
        t1 = t1 * ~n1->getPsi(dc);
        t2 = n2->getPsi(dc) * t2;
        n1 = n1->getParent();
        n2 = n2->getParent();
    }

    // here n1==n2 (common ancestor)

    // Contract through the common ancestor's Y block to get the scalar
    // coupling term.
    Real ret = t1 * n1->getY(dc) * t2;
    return ret;
}
1012
//
// To be called for LengthSets consecutively from tip to base.
// Given acceleration errors for each loop contained in this LengthSet,
// this will calculate a force for every station, and store that force
// in the runtime block associated with that loop in the AccelerationCache
// output argument. It is up to the caller to do something with these forces.
//
// See Section 2.6 on p. 294 of Schwieters & Clore,
// J. Magnetic Resonance 152:288-302. Equation references below
// are to that paper. (sherm)
//
void
LengthSet::calcConstraintForces(const State& s, const Vector& udotErr,
                                Vector& multipliers, SBAccelerationCache& ac) const
{
    const SBPositionCache& pc = getRBTree().getPositionCache(s);
    const SBVelocityCache& vc = getRBTree().getVelocityCache(s); // NOTE(review): vc appears unused below -- confirm before removing
    const SBDynamicsCache& dc = getRBTree().getDynamicsCache(s);

    // This is the acceleration error for each loop constraint in this
    // LengthSet. We get a single scalar error per loop, since each
    // contains one distance constraint.
    // See Eq. [53] and the last term of Eq. [66].
    Vector rhs(loops.size());
    for (int i=0; i<(int)loops.size(); ++i)
        rhs[i] = loops[i].rbDistCons->getAccErr(udotErr);

    // Here A = Q*(J inv(M) J')*Q' where J is the kinematic Jacobian for
    // the constrained points and Q is the constraint Jacobian. See first
    // term of Eq. [66].
    // Only the upper triangle (j >= i) is filled here; it is mirrored
    // into the lower triangle below. Each (i,j) entry sums the couplings
    // between every pair of tip stations (bi,bj); the (bi==bj ? 1 : -1)
    // factor reflects the opposite signs of the forces applied at a
    // loop's two tips (see the final loop in this function).
    Matrix A(loops.size(),loops.size(),0.);
    for (int i=0 ; i<(int)loops.size() ; i++) {
        const Vec3 v1 = loops[i].tipPos(pc,2) - loops[i].tipPos(pc,1);
        for (int bi=1 ; bi<=2 ; bi++)
            for (int bj=1 ; bj<=2 ; bj++) {
                for (int j=i ; j<(int)loops.size() ; j++) {
                    const Vec3 v2 = loops[j].tipPos(pc,2) - loops[j].tipPos(pc,1);
                    Real contrib = computeA(pc, dc, v1, loops[i], bi,
                                            loops[j], bj, v2);
                    A(i,j) += contrib * (bi==bj ? 1 : -1);
                }
            }
    }

    // (sherm) Ouch -- this part is very crude. If this is known to be well
    // conditioned it should be factored with a symmetric method
    // like Cholesky, otherwise use an SVD (or better, complete orthogonal
    // factorization QTZ) to get appropriate least squares solution.

    for (int i=0 ; i<(int)loops.size() ; i++) //fill lower triangle
        for (int j=0 ; j<i ; j++)
            A(i,j) = A(j,i);

    //cout << "Solve A lambda=rhs; A=" << A;
    //cout << " rhs = " << rhs << endl;

    //FIX: using inverse is inefficient
    const Vector multipliersForThisSet = A.invert() * rhs;

    //TODO: need to copy out the right multipliers to the full multipliers array

    cout << " OLD lambda = " << multipliersForThisSet << endl;

    // Add forces due to these constraints: each multiplier scales the
    // loop's tip2->tip1 separation vector, applied with opposite signs
    // at the two tips.
    for (int i=0 ; i<(int)loops.size() ; i++) {
        const Vec3 frc = multipliersForThisSet[i] * (loops[i].tipPos(pc,2) - loops[i].tipPos(pc,1));
        loops[i].setTipForce(ac, 2, -frc);
        loops[i].setTipForce(ac, 1, frc);
    }
}
1083
addInCorrectionForces(const State & s,const SBAccelerationCache & ac,SpatialVecList & spatialForces) const1084 void LengthSet::addInCorrectionForces(const State& s, const SBAccelerationCache& ac,
1085 SpatialVecList& spatialForces) const
1086 {
1087 const SBPositionCache& pc = getRBTree().getPositionCache(s);
1088
1089 for (int i=0; i<(int)loops.size(); ++i) {
1090 for (int t=1; t<=2; ++t) {
1091 const RigidBodyNode& node = loops[i].tipNode(t);
1092 const Vec3 force = loops[i].tipForce(ac,t);
1093 const Vec3 moment = cross(loops[i].tipPos(pc,t) - node.getX_GB(pc).p(), force);
1094 spatialForces[node.getNodeNum()] += SpatialVec(moment, force);
1095 }
1096 }
1097 }
1098
testAccel(const State & s) const1099 void LengthSet::testAccel(const State& s) const
1100 {
1101 const Vector& udotErr = getRBTree().getUDotErr(s);
1102
1103 double testTol=1e-8;
1104 for (int i=0 ; i<(int)loops.size() ; i++) {
1105 const Real aerr = fabs(loops[i].rbDistCons->getAccErr(udotErr));
1106 if (aerr > testTol)
1107 cout << "LengthSet::testAccel: constraint condition between atoms "
1108 << loops[i].tips(1) << " and " << loops[i].tips(2) << " violated.\n"
1109 << "\tnorm of violation: " << aerr << '\n';
1110 }
1111 cout.flush();
1112 }
1113
1114
// This just computes ~mat*vec but first plucks out the relevant entries
// in vec to squash it down to the same size as mat.
//
// vec is a full mobility-space vector (one slot per tree DOF). The
// packed copy keeps only the u-slices belonging to this LengthSet's
// nodeMap, in nodeMap order, so it conforms with packedMat.
Vector
LengthSet::packedMatTransposeTimesVec(const Matrix& packedMat, const Vector& vec)
{
    // sherm 060222:
    assert(vec.size() == getRBTree().getTotalDOF());
    // NOTE(review): calcPseudoInverseAFD builds the gradient as
    // (ndofThisSet x loops.size()), so the ncol()==ndofThisSet check
    // below looks suspicious -- confirm against calcGrad() before
    // relying on this assertion.
    assert(packedMat.nrow() == ndofThisSet && packedMat.ncol() == ndofThisSet);

    Vector packedVec(ndofThisSet); //build vector same size as mat
    int indx=0;
    // Copy each mapped node's u-slice (d entries starting at its
    // u-index) into consecutive slots of packedVec.
    for (int j=0 ; j<(int)nodeMap.size() ; j++) {
        const int d = nodeMap[j]->getDOF();
        const int offs = nodeMap[j]->getUIndex();
        packedVec(indx,d) = vec(offs,d);
        indx += d;
    }
    // sherm 060222:
    assert(indx == ndofThisSet);

    return ~packedMat * packedVec;
}
1137
1138 // vec is a full vector in mobility space; packedVec consists of just
1139 // the mobilities used in this LengthSet.
1140 void
subtractPackedVecFromVec(Vector & vec,const Vector & packedVec)1141 LengthSet::subtractPackedVecFromVec(Vector& vec,
1142 const Vector& packedVec)
1143 {
1144 // sherm 060222:
1145 assert(vec.size() == getRBTree().getTotalDOF());
1146 assert(packedVec.size() == ndofThisSet);
1147
1148 int indx=0;
1149 for (int j=0 ; j<(int)nodeMap.size() ; j++) {
1150 const int d = nodeMap[j]->getDOF();
1151 const int offs = nodeMap[j]->getUIndex();
1152 vec(offs,d) -= packedVec(indx,d);
1153 indx += d;
1154 }
1155
1156 assert(indx == ndofThisSet);
1157 }
1158
1159 void
testProjectedVec(const State & s,const Vector & vec) const1160 LengthSet::testProjectedVec(const State& s,
1161 const Vector& vec) const
1162 {
1163 // sherm 060222:
1164 assert(vec.size() == getRBTree().getTotalDOF());
1165
1166 Vector packedVec(ndofThisSet);
1167 int indx=0;
1168 for (int j=0 ; j<(int)nodeMap.size() ; j++) {
1169 const int d = nodeMap[j]->getDOF();
1170 const int offs = nodeMap[j]->getUIndex();
1171 packedVec(indx,d) = vec(offs,d);
1172 indx += d;
1173 }
1174 assert(indx == ndofThisSet);
1175
1176 const Matrix grad = calcGrad(s);
1177 const Vector test = ~grad * packedVec;
1178
1179 double testTol=1e-8;
1180 for (int i=0 ; i<(int)loops.size() ; i++) {
1181 if ( test(i) > testTol )
1182 cout << "LengthSet::Gradient: constraint condition between atoms "
1183 << loops[i].tips(1) << " and " << loops[i].tips(2) << " violated.\n"
1184 << "\tnorm of violation: " << test(i) << '\n';
1185 }
1186 cout.flush();
1187 }
1188
1189 void
fixVel0(State & s,Vector & iVel)1190 LengthConstraints::fixVel0(State& s, Vector& iVel)
1191 {
1192 // use molecule grouping
1193 for (int i=0 ; i<(int)accConstraints.size() ; i++)
1194 accConstraints[i].fixVel0(s, iVel);
1195 }
1196
//
// Correct internal velocities such that the constraint conditions are
// met under the condition that the disturbance to station velocities is
// as small as possible.
//
// Method: for each loop m, apply a unit "probe" impulse along that
// loop's separation vector (starting from zero velocity), record the
// resulting mobility velocity change deltaIVel[m], and measure the
// velocity error it induces at every loop. That builds the Jacobian
// 'mat', which is solved for multipliers lambda; the corrected
// velocities are iVel0 - sum_m lambda[m]*deltaIVel[m].
//
void
LengthSet::fixVel0(State& s, Vector& iVel)
{
    assert(iVel.size() == getRBTree().getTotalDOF());

    const SBPositionCache& pc = getRBTree().getPositionCache(s);
    const Vector& uErr = getRBTree().getUErr(s);

    // store internal velocities so we can restore them at the end
    Vector iVel0 = iVel;

    // Allocate the vector of body impulses so we don't have to do it
    // inside the loop below. No need to initialize them here, though.
    Vector_<SpatialVec> bodyImpulses(getRBTree().getNumBodies());

    //TODO
    // Currently we do not have any constraints involving mobilities (u's)
    // directly, so we don't generate mobility impulses here. But we'll need
    // a set of zero mobility forces to apply below.
    Vector zeroMobilityImpulses(getRBTree().getTotalDOF());
    zeroMobilityImpulses.setToZero();

    // verr stores the current velocity errors, which we're assuming are valid.
    Vector verr((int)loops.size());
    for (int i=0; i<(int)loops.size(); ++i)
        verr[i] = loops[i].rbDistCons->getVelErr(uErr);

    Matrix mat(loops.size(),loops.size());
    Array_<Vector> deltaIVel(loops.size());
    for (int m=0 ; m<(int)loops.size() ; m++) {
        deltaIVel[m].resize(iVel.size());

        // Set all velocities to zero. TODO: this should just be an "ignore velocity"
        // option to realizeVelocity(); it shouldn't actually require putting zeroes everywhere.
        iVel = 0.;
        getRBTree().setU(s, iVel );
        getRBTree().realizeSubsystemVelocity(s);

        // sherm: I think the following is a unit "probe" velocity, projected
        // along the separation vector.
        // That would explain the fact that there are no velocities here!
        const Vec3 probeImpulse = loops[m].tipPos(pc,2)-loops[m].tipPos(pc,1);
        const Vec3 force1 = -probeImpulse;
        const Vec3 force2 =  probeImpulse;

        const RigidBodyNode& node1 = loops[m].tipNode(1);
        const RigidBodyNode& node2 = loops[m].tipNode(2);
        const Vec3 moment1 = cross(loops[m].tipPos(pc,1)-node1.getX_GB(pc).p(), force1);
        const Vec3 moment2 = cross(loops[m].tipPos(pc,2)-node2.getX_GB(pc).p(), force2);

        // Convert the probe impulses at the stations to spatial impulses at
        // the body origin.
        bodyImpulses.setToZero();
        bodyImpulses[node1.getNodeNum()] += SpatialVec(moment1, force1);
        bodyImpulses[node2.getNodeNum()] += SpatialVec(moment2, force2);

        // Apply probe impulse as though it were a force; the resulting "acceleration"
        // is actually the deltaV produced by this impulse, that is, a deltaV which
        // results in a change in velocity along the line between the stations.

        // calc deltaVa from k-th constraint condition
        //FIX: make sure that nodeMap is ordered by level
        // get all nodes in given molecule, ordered by level
        getRBTree().realizeZ(s, zeroMobilityImpulses, bodyImpulses);
        getRBTree().realizeTreeAccel(s);
        deltaIVel[m] = getRBTree().getUDot(s);

        // Set the new velocities.
        getRBTree().setU(s, deltaIVel[m] );
        getRBTree().realizeSubsystemVelocity(s);

        // Calculating partial(velocityError[n])/partial(deltav[m]). Any velocity
        // we see here is due to the deltav, since we started out at zero (uErr
        // was modified by realizeVelocity(), but our reference is still valid).
        for (int n=0; n<(int)loops.size(); ++n)
            mat(n,m) = loops[n].rbDistCons->getVelErr(uErr);

        //store results of m-th constraint on deltaVa-n
    }

    // Calculate the fraction of each deltaV by which we should change V to simultaneously
    // drive all the velocity errors to zero.
    // TODO: deal with a badly conditioned Jacobian to produce a least squares lambda.
    const Vector lambda = mat.invert() * verr;

    // Restore the original velocities, then apply the correction and
    // re-realize so downstream caches are consistent.
    iVel = iVel0;
    for (int m=0 ; m<(int)loops.size() ; m++)
        iVel -= lambda[m] * deltaIVel[m];
    getRBTree().setU(s, iVel);
    getRBTree().realizeSubsystemVelocity(s);
}
1293
1294
operator <<(std::ostream & o,const RBStation & s)1295 std::ostream& operator<<(std::ostream& o, const RBStation& s) {
1296 o << "station " << s.getPoint() << " on node " << s.getNode().getNodeNum();
1297 return o;
1298 }
1299
1300
operator <<(std::ostream & o,const RBDirection & d)1301 std::ostream& operator<<(std::ostream& o, const RBDirection& d) {
1302 o << "normal " << d.getUnitVec() << " on node " << d.getNode().getNodeNum();
1303 return o;
1304 }
1305
1306 ////////////////////////////
1307 // RB DISTANCE CONSTRAINT //
1308 ////////////////////////////
1309
calcStationPosInfo(int i,SBPositionCache & pc) const1310 void RBDistanceConstraint::calcStationPosInfo(int i,
1311 SBPositionCache& pc) const
1312 {
1313 updStation_G(pc,i) = getNode(i).getX_GB(pc).R() * getPoint(i);
1314 updPos_G(pc,i) = getNode(i).getX_GB(pc).p() + getStation_G(pc,i);
1315 }
1316
calcStationVelInfo(int i,const SBPositionCache & pc,SBVelocityCache & vc) const1317 void RBDistanceConstraint::calcStationVelInfo(int i,
1318 const SBPositionCache& pc,
1319 SBVelocityCache& vc) const
1320 {
1321 const Vec3& w_G = getNode(i).getSpatialAngVel(vc);
1322 const Vec3& v_G = getNode(i).getSpatialLinVel(vc);
1323 updStationVel_G(vc,i) = cross(w_G, getStation_G(pc,i));
1324 updVel_G(vc,i) = v_G + getStationVel_G(vc,i);
1325 }
1326
calcStationAccInfo(int i,const SBPositionCache & pc,const SBVelocityCache & vc,SBAccelerationCache & ac) const1327 void RBDistanceConstraint::calcStationAccInfo(int i,
1328 const SBPositionCache& pc,
1329 const SBVelocityCache& vc,
1330 SBAccelerationCache& ac) const
1331 {
1332 const Vec3& w_G = getNode(i).getSpatialAngVel(vc);
1333 const Vec3& v_G = getNode(i).getSpatialLinVel(vc);
1334 const Vec3& aa_G = getNode(i).getSpatialAngAcc(ac);
1335 const Vec3& a_G = getNode(i).getSpatialLinAcc(ac);
1336 updAcc_G(ac,i) = a_G + cross(aa_G, getStation_G(pc,i))
1337 + cross(w_G, getStationVel_G(vc,i)); // i.e., w X (wXr)
1338 }
1339
1340
calcPosInfo(Vector & qErr,SBPositionCache & pc) const1341 void RBDistanceConstraint::calcPosInfo(Vector& qErr, SBPositionCache& pc) const
1342 {
1343 assert(isValid() && distConstNum >= 0);
1344 for (int i=1; i<=2; ++i) calcStationPosInfo(i,pc);
1345
1346 const Vec3 p = getPos_G(pc,2) - getPos_G(pc,1);
1347 updFromTip1ToTip2_G(pc) = p;
1348 const Real separation = getFromTip1ToTip2_G(pc).norm();
1349 updUnitDirection_G(pc) = getFromTip1ToTip2_G(pc) / separation;
1350 //TODO: |p|-d (should be 0.5(p^2-d^2)
1351 //updPosErr(cc) = separation - distance;
1352 updPosErr(qErr) = 0.5*(p.normSqr() - distance*distance);
1353
1354 }
1355
calcVelInfo(const SBPositionCache & pc,Vector & uErr,SBVelocityCache & vc) const1356 void RBDistanceConstraint::calcVelInfo(
1357 const SBPositionCache& pc,
1358 Vector& uErr,
1359 SBVelocityCache& vc) const
1360 {
1361 assert(isValid() && distConstNum >= 0);
1362 for (int i=1; i<=2; ++i) calcStationVelInfo(i,pc,vc);
1363
1364 updRelVel_G(vc) = getVel_G(vc,2) - getVel_G(vc,1);
1365 //TODO: u.v, u=p/|p| (should be p.v)
1366 //updVelErr(mc) = ~getUnitDirection_G(pc) * getRelVel_G(vc);
1367 updVelErr(uErr) = ~getFromTip1ToTip2_G(pc) * getRelVel_G(vc);
1368 }
1369
calcAccInfo(const SBPositionCache & pc,const SBVelocityCache & vc,Vector & udotErr,SBAccelerationCache & ac) const1370 void RBDistanceConstraint::calcAccInfo(
1371 const SBPositionCache& pc,
1372 const SBVelocityCache& vc,
1373 Vector& udotErr,
1374 SBAccelerationCache& ac) const
1375 {
1376 assert(isValid() && distConstNum >= 0);
1377 for (int i=1; i<=2; ++i) calcStationAccInfo(i,pc,vc,ac);
1378
1379 // TODO: v.v + a.p (good), but would have to be
1380 // u.a + (v-(v.u).u).v/|p| to be compatible with above
1381 const Vec3 relAcc_G = getAcc_G(ac,2) - getAcc_G(ac,1);
1382 updAccErr(udotErr) = getRelVel_G(vc).normSqr() + (~relAcc_G * getFromTip1ToTip2_G(pc));
1383 }
1384
1385
1386 #endif
1387
1388