1 /* CalculiX - A 3-dimensional finite element program */
2 /* Copyright (C) 1998-2021 Guido Dhondt */
3
4 /* This program is free software; you can redistribute it and/or */
5 /* modify it under the terms of the GNU General Public License as */
/*     published by the Free Software Foundation (version 2);     */
7 /* */
8
9 /* This program is distributed in the hope that it will be useful, */
10 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
11 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
12 /* GNU General Public License for more details. */
13
14 /* You should have received a copy of the GNU General Public License */
15 /* along with this program; if not, write to the Free Software */
16 /* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
17
18 #include <unistd.h>
19 #include <stdio.h>
20 #include <math.h>
21 #include <stdlib.h>
22 #include <pthread.h>
23 #include "CalculiX.h"
24
/* file-scope copies of the mafillkmain arguments: each pthread worker
   receives only a single argument (its thread index), so all remaining
   data is shared with mafillkmt through these static variables; they
   are written once by mafillkmain before the threads are created and
   only read by the workers afterwards */

static char *lakonf1;

static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*nzs1,
*ielfa1,*ifabou1,*nbody1,*neq1,*nactdohinv1,*iau61,*iturbulent1;

static double *au1,*ad1,*b1,*vfa1,*xxn1,*area1,*vel1,
*umfa1,*alet1,*ale1,*gradtfa1,*xxi1,*body1,*volume1,*dtimef1,*velo1,
*veloo1,*cvfa1,*hcfa1,*cvel1,*gradvel1,*xload1,*gamma1,*xrlfa1,
*xxj1,*a11,*a21,*a31,*flux1,*xxni1,*xxnj1,*f11,*of21,*yy1,*umel1,
*gradkel1,*gradoel1,*sc1;
35
/* mafillkmain: multithreaded driver for the assembly (by the Fortran
   routine mafillk) of the system of equations for the turbulent
   variable k in the CFD solver.

   Determines the number of threads to use from, in order of
   precedence: CCX_NPROC_CFD, OMP_NUM_THREADS, and the detected number
   of processors (capped by NUMBER_OF_CPUS if set), copies all
   arguments into file-scope variables, then spawns num_cpus worker
   threads each running mafillkmt and waits for their completion.

   All pointer arguments are passed through unchanged to mafillk;
   au/ad/b are the matrix and right-hand-side storage filled by the
   workers (each worker writes a disjoint element range). */

void mafillkmain(ITG *nef,ITG *ipnei,ITG *neifa,
                 ITG *neiel,double *vfa,double *xxn,double *area,
                 double *au,double *ad,ITG *jq,ITG *irow,ITG *nzs,
                 double *b,double *vel,double *umfa,double *alet,
                 double *ale,double *gradtfa,double *xxi,double *body,
                 double *volume,ITG *ielfa,char *lakonf,
                 ITG *ifabou,ITG *nbody,ITG *neq,double *dtimef,double *velo,
                 double *veloo,double *cvfa,double *hcfa,double *cvel,
                 double *gradvel,double *xload,double *gamma,double *xrlfa,
                 double *xxj,ITG *nactdohinv,double *a1,double *a2,double *a3,
                 double *flux,ITG *iau6,double *xxni,double *xxnj,
                 ITG *iturbulent,double *f1,double *of2,double *yy,
                 double *umel,double *gradkel,double *gradoel,double *sc){

  ITG i;

  /* variables for multithreading procedure */

  ITG sys_cpus,*ithread=NULL;
  char *env,*envloc,*envsys;

  num_cpus=0;
  sys_cpus=0;

  /* explicit user declaration prevails */

  envsys=getenv("NUMBER_OF_CPUS");
  if(envsys){
    sys_cpus=atoi(envsys);
    if(sys_cpus<0) sys_cpus=0;
  }

  /* automatic detection of available number of processors */

  if(sys_cpus==0){
    sys_cpus=getSystemCPUs();
    if(sys_cpus<1) sys_cpus=1;
  }

  /* local declaration prevails, if strictly positive */

  envloc=getenv("CCX_NPROC_CFD");
  if(envloc){
    num_cpus=atoi(envloc);
    if(num_cpus<0){
      num_cpus=0;
    }else if(num_cpus>sys_cpus){
      num_cpus=sys_cpus;
    }
  }

  /* else global declaration, if any, applies */

  env=getenv("OMP_NUM_THREADS");
  if(num_cpus==0){
    if(env) num_cpus=atoi(env);
    if(num_cpus<1){
      num_cpus=1;
    }else if(num_cpus>sys_cpus){
      num_cpus=sys_cpus;
    }
  }

  /* never use more threads than elements; the next line is to be
     inserted in a similar way for all other parallel parts */

  if(*nef<num_cpus) num_cpus=*nef;

  pthread_t tid[num_cpus];

  /* copying the arguments into the file-scope variables read by the
     worker threads (mafillkmt) */

  nef1=nef;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;vfa1=vfa;xxn1=xxn;
  area1=area;
  jq1=jq;irow1=irow;nzs1=nzs;vel1=vel;umfa1=umfa;alet1=alet;ale1=ale;
  gradtfa1=gradtfa;xxi1=xxi;body1=body;volume1=volume;
  ielfa1=ielfa;lakonf1=lakonf;ifabou1=ifabou;
  nbody1=nbody;neq1=neq;dtimef1=dtimef;velo1=velo;veloo1=veloo;
  cvfa1=cvfa;hcfa1=hcfa;cvel1=cvel;gradvel1=gradvel;xload1=xload;
  gamma1=gamma;xrlfa1=xrlfa;xxj1=xxj;nactdohinv1=nactdohinv;a11=a1;
  a21=a2;a31=a3;flux1=flux;iau61=iau6;ad1=ad;au1=au;b1=b;xxni1=xxni;
  xxnj1=xxnj;iturbulent1=iturbulent;f11=f1;of21=of2;yy1=yy;umel1=umel;
  gradkel1=gradkel;gradoel1=gradoel;sc1=sc;

  /* create threads and wait */

  NNEW(ithread,ITG,num_cpus);
  for(i=0;i<num_cpus;i++){
    ithread[i]=i;
    pthread_create(&tid[i],NULL,(void *)mafillkmt,(void *)&ithread[i]);
  }
  for(i=0;i<num_cpus;i++) pthread_join(tid[i],NULL);

  SFREE(ithread);

  return;

}
136
137 /* subroutine for multithreading of mafillk */
138
/* worker routine for the multithreading of mafillk: *i is the thread
   index (0..num_cpus-1); computes the 1-based element range
   [nefa,nefb] this thread is responsible for and calls the Fortran
   assembly routine on that range */

void *mafillkmt(ITG *i){

  ITG nefa,nefb,nefdelta;

  /* integer division truncates towards zero, which for the positive
     operands here equals floor; nefdelta>=1 is guaranteed because the
     caller capped num_cpus at *nef1 */

  nefdelta=*nef1/num_cpus;
  nefa=*i*nefdelta+1;
  nefb=(*i+1)*nefdelta;

  /* the last thread picks up the remainder elements; the next line is
     to be inserted in all parallel sections */

  if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;

  FORTRAN(mafillk,(nef1,ipnei1,neifa1,neiel1,vfa1,xxn1,area1,
                   au1,ad1,jq1,irow1,nzs1,
                   b1,vel1,umfa1,alet1,ale1,gradtfa1,xxi1,
                   body1,volume1,ielfa1,lakonf1,ifabou1,
                   nbody1,neq1,dtimef1,velo1,veloo1,cvfa1,hcfa1,cvel1,
                   gradvel1,xload1,gamma1,xrlfa1,xxj1,nactdohinv1,
                   a11,a21,a31,flux1,&nefa,&nefb,iau61,xxni1,xxnj1,
                   iturbulent1,f11,of21,yy1,umel1,gradkel1,gradoel1,
                   sc1));

  return NULL;
}
163