#
# DISCLAIMER
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
# ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY,
# COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT,
# SOFTWARE, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT
# INFRINGE PRIVATELY OWNED RIGHTS.
#
# ACKNOWLEDGMENT
#
# This software and its documentation were produced with Government
# support under Contract Number DE-AC06-76RLO-1830 awarded by the United
# States Department of Energy. The Government retains a paid-up
# non-exclusive, irrevocable worldwide license to reproduce, prepare
#
# derivative works, perform publicly and display publicly by or for the
# Government, including the right to distribute to other Government
# contractors.
#
#======================================================================
#
#  -- PEIGS routine (version 2.1) --
#     Pacific Northwest Laboratory
#     July 28, 1995
#
#======================================================================
#
# General DEFS for making PEIGS.
#
# This Makefile requires GNU make at least version 3.62 to handle the conditionals
# Before using it you must:
#
#     - set the environment variable TARGET as follows for the type
#       of system for which you wish to build:
#
#            setenv TARGET CRAY-T3E
#
#            setenv TARGET UNIPROC   # to use one processor
#              you will need to configure the compilers and
#              such but the other targets may be of assistance
#              here
#
#       Since we do not have the resources to test it on all available platforms
#       we can only test this on the machines that we have access to.
#
#
#     - set SRC below to the path such that the peigs
#       directory is in $(SRC)/peigs.
#
#     - check the machine specific definitions,
#       e.g., paths, etc., for your machine
#
#     - If you want to use MPI, then check below for instructions
#       on how to select MPI, rather than the default of TCGMSG
#       or Intel NX.
#
# set the path to the "peigs" directory

SRC = /disk1
PEIGSDIR = $(SRC)/peigs3
PEIGSLIB = $(SRC)/peigs3/libpeigs.a

SHELL = /bin/sh

OBJDIR = $(PEIGSDIR)/o
HDIR = $(PEIGSDIR)/h
CSRC = $(PEIGSDIR)/src/c
F77SRC = $(PEIGSDIR)/src/f77
COMMDIR = $(PEIGSDIR)/comm
FTOC = $(PEIGSDIR)/ctof

# Set DEF_TIMING = -DTIMING to do timings.  Currently must have all
# allocated processers participate in computation (since use mxsync).
#
# Do not use DEF_TIMINGS = -DTIMING when you want to use
# the test codes in peigsXX/test_la.
#
# When you change DEF_TIMING you need to do a "make timing"
# to remake the modules which depend on DEF_TIMING.
#

DEF_TIMING = -DTIMING
# DEF_TIMING =

# Generic definitions for BLAS and LAPACK.  Will be OVERWRITTEN in
# machine specific codes by new values.  Currently, Make.generic in
# .../peigs/example ignores BLASLIB and LAPACKLIB and just uses
# $(PEIGSDIR)/blas.a and $(PEIGSDIR)/lapack.a.  You should do whatever
# is appropriate on your machine.

LAPACKLIB =
BLASLIB =

# MPI usage
# ---------
#
# By default PeIGS does not use MPI.
#
# To use MPI: 1) Set PEIGS_MPI_USE = ANY VALUE
#                in the statement below
#
#             2) make sure there is an "ifdef PEIGS_MPI_USE ... endif"
#                block defined for your TARGET.  If not, then
#                you need to set this up.
#
#             3) make sure the path to MPI, MPIR_HOME,
#                is set correctly for your TARGET and machine.
#
# NOTE(review): GNU make's "ifdef" is true for ANY non-empty value,
# including 0 -- so the previous "PEIGS_MPI_USE = 0" actually ENABLED
# every "ifdef PEIGS_MPI_USE" block below, contradicting the comment
# above.  Leave the variable empty for the documented default (no MPI);
# set it to any value to enable MPI.
PEIGS_MPI_USE =
# Set MPI_INCLUDE to "blank" just in case MPI is not used.

# MPI_INCLUDE = /files3/home/d3g270/mpich/include


# Generic definition for CPP.  Some of the machine specific definitions
# replace the following CPP definition by a different definition.

CPP = /usr/lib/cpp -P -C -D${NODE_TYPE} -D${COMM_PKG} -D${IO_STYLE} -D${TRACE_PKG} -D${CPU} -D${INT_TYPE} ${DEF_TIMING}

#==================================================
# Machine specific definitions.
#==================================================
# MACHINES THAT HAVE BEEN TESTED

ifeq ($(TARGET),CRAY-T3E)
#
#
AR = ar -r
RANLIB = echo
GLOB_DEFINES = -DCRAY_T3D
EXPLICITF = TRUE
CUBIX_OPTS = -node
NODE_EXT = o
HOST_EXT =
HOST = ALPHA
NODE_TYPE = ALPHA
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
CORE_SUBDIRS_EXTRA = blas lapack # Only a couple of routines not in scilib
RANLIB = echo
MAKEFLAGS = -j 1 --no-print-directory
INSTALL = @echo $@ is built
OUTPUT_OPTION =
FC = f90 $(FOPTIONS) $(FOPTIMIZE) -DTCGMSG -DALPHA -DCRAY_T3D $(DEFINES) -I$(HDIR)
F77 = f90 $(FOPTIONS) $(FOPTIMIZE) -DTCGMSG -DALPHA -DCRAY_T3D $(DEFINES) -I$(HDIR)
CCF77 = $(F77)
CC = cc $(COPTIONS) -I$(HDIR) -DSTD_DBL -I$(HDIR) -DCRAY_T3D -DALPHA ${DEF_TIMING}
CPP = /opt/ctl/CC/CC/lib/mppcpp -P -N -I$(HDIR) -P -D${COMM_PKG} -D${IO_STYLE} -DCRAY_T3D ${DEF_TIMING}
FOPTIONS = -d p -F -DCRAY_T3D -DALPHA
COPTIONS = -g -O3 scalar3,aggress,unroll2,vector3,pipeline2
FDEBUG = -O3 -O scalar1
# FOPTIMIZE = -O3 scalar3,aggress,unroll2,vector3,pipeline2
FOPTIMIZE = -O3 scalar3,unroll2,vector3,pipeline2
CDEBUG = -O 1
COPTIMIZE = -O
CODEOBJ = SINGLE
COMMLIB = $(HOME)/libtcgmsg.a

#
# to debug code you must remove the -s flag unless you know assembler
#
# what for streams and such; check with your system consultant or
# use the one that is commented out
#
# OPTIONS = -L$(LIBDIR) -Xm # -Wl"-Dstreams=on -s" -lmfastv
OPTIONS = -L$(LIBDIR) -Xm -Wl"-Dstreams=on -s" -lmfastv
DEFINES = -DCRAY_T3E -DCRAY_T3D -D__F90__
LINK = f90 $(OPTIONS)
BLASLIB = -lmfastv -L/u1/fann/peigs3 -llapack -lblas -lpeigs
FCONVERT = $(CPP) $(CPPFLAGS) $< | sed '/^\#/D' > $*.f
EXPLICITF = TRUE
ifdef PEIGS_MPI_USE
  COMM_PKG = MPI
  MPIR_HOME =
  MPI_ARCH =
  MPI_COMM =
  MPI_INCLUDE =
  COMMLIB = -lmpi
  # NOTE(review): these were written as "$(CC) += ...", which appends to a
  # variable whose NAME is the expansion of $(CC) -- not to CC itself.
  # Append to the compiler variables directly.
  CC += -htaskprivate
  FC += -htaskcommon
  F77 += -htaskcommon
endif
endif

ifeq ($(TARGET),LINUX)
#alpha/mpich
AR = ar r
RANLIB = echo
CC = gcc -I$(HDIR) -DPentium -DSTD_INT -DSTD_DBL -O3
F77 = g77 -I$(HDIR) -O3
CCF77 = $(F77)
LINK = $(F77)
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST =
NODE_TYPE = single_cpu
CPU = P5
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
COMMLIB = $(HOME)/g/tcgmsg/ipcv4.0/libtcgmsg.a
CTOFLIB = -lf2c
BLASLIB = -L$(HOME)/lapack -lblas
LAPACKLIB = -L$(HOME)/lapack -llapack
HOST_EXT = out
CPP = /usr/bin/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -D$(CPU) -DSTD_INT -DSTD_DBL -I$(HDIR)
ifdef PEIGS_MPI_USE
  COMM_PKG = MPI
  MPIR_HOME = $(HOME)/mpich
  MPI_INCLUDE = -I$(MPIR_HOME)/include
  MPI_COMM = ch_p4
  COMMLIB = -L$(MPIR_HOME)/lib/$(NODE_TYPE)/$(MPI_COMM) -lmpi
endif
endif

ifeq ($(TARGET),SP1)
## IBM SP-1, sp-2 or cluster of rs6000 wt tcgmsg ( using xlf instead of mpxlf )
#
# check your cache line the data cache and the instruction cache
#
CODEOBJ = DBLE
AR = ar r
RANLIB = ranlib
CC = mpcc -qarch=pwr2 -DSTD_INT -DSTD_DBL -DRIOS -I$(HDIR) \
-DIBM -DRS6000 \
-O3 -qstrict -qfloat=rsqrt:fltint:hssngl ${DEF_TIMING} -qinline \
-qcache=type=d:level=1:size=128:line=256:assoc=4:cost=14 -qcache=type=i:level=1:size=32:line=128
F77 = mpxlf -qEXTNAME -qarch=pwr2 -I$(HDIR) -Pv -Wp,-eaj478 -WF,-Iinclude,-DIBM,-DSTD_INT,-DSTD_DBL -O3 -qstrict -bnoquiet -qfloat=rsqrt:fltint:hssngl \
 -qinline -NQ40000 -NT80000
CCF77 = $(F77) -qcache=type=d:level=1:size=128:line=256:assoc=4:cost=14 -qcache=type=i:level=1:size=32:line=128
FC = $(F77)
LINK = $(F77)
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
NODE_TYPE = RIOS
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
CPU = RIOS
BLASLIB = -lesslp2
# NOTE(review): the next two assignments are immediately overridden by the
# later LAPACKLIB/COMMLIB assignments below (last one wins in make).
LAPACKLIB = -lesslp2 -llapack
COMMLIB = $(HOME)/g/libtcgmsg.a
LAPACKLIB = $(PEIGSDIR)/liblapack.a
COMMLIB = $(HOME)/nwchem/lib/SP1/libtcgmsg.a
CPP = /usr/lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -DIBM -DSTD_INT -DSTD_DBL -I$(HDIR)
ifdef PEIGS_MPI_USE
  COMM_PKG = MPI
  # MPI source directory for SP-2 at MHPCC
  MPIR_HOME =
  MPI_INCLUDE =
  MPI_COMM =
  MPI_COMM =
  COMMLIB = -lmpi
  CPP += -I/usr/lpp/ppe.poe/include
endif
endif

ifeq ($(TARGET),IBM)
## IBM SP-1, sp-2 or cluster of rs6000 wt tcgmsg ( using xlf instead of mpxlf )
#
# check your cache line the data cache and the instruction cache
#
CODEOBJ = DBLE
AR = ar r
RANLIB = ranlib
CC = cc -qarch=ppc -qtune=604 -DSTD_INT -DSTD_DBL -DRIOS -I$(HDIR) \
-DIBM -DRS6000 \
-O3 -qstrict -qfloat=rsqrt:fltint:hssngl ${DEF_TIMING} -qinline
# -qcache=type=d:level=1:size=128:line=256:assoc=4:cost=14 -qcache=type=i:level=1:size=32:line=128
F77 = xlf -qEXTNAME -qarch=604 -qtune=604 -I$(HDIR) -WF,-Iinclude,-DIBM,-DSTD_INT,-DSTD_DBL -O3 -qstrict -bnoquiet -qfloat=rsqrt:fltint:hssngl \
 -qinline -NQ40000 -NT80000
CCF77 = $(F77) # -qcache=type=d:level=1:size=128:line=256:assoc=4:cost=14 -qcache=type=i:level=1:size=32:line=128
FC = $(F77)
LINK = $(F77)
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
NODE_TYPE = RIOS
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
CPU = RIOS
BLASLIB = -lesslp2
# NOTE(review): the next two assignments are immediately overridden by the
# later LAPACKLIB/COMMLIB assignments below (last one wins in make).
LAPACKLIB = -lesslp2 -llapack
COMMLIB = $(HOME)/g/libtcgmsg.a
LAPACKLIB = $(PEIGSDIR)/liblapack.a
COMMLIB = $(HOME)/nwchem/lib/SP1/libtcgmsg.a
CPP = /usr/lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -DIBM -DSTD_INT -DSTD_DBL -I$(HDIR)
ifdef PEIGS_MPI_USE
  COMM_PKG = MPI
  # MPI source directory for SP-2 at MHPCC
  MPIR_HOME =
  MPI_INCLUDE =
  MPI_COMM =
  MPI_COMM =
  COMMLIB = -lmpi
  CPP += -I/usr/lpp/ppe.poe/include
endif
endif


ifeq ($(TARGET),SGITFP)
# uname -a IRIX64 irix 6.4 cpu=IP27
#
# if you have r8000 should add -r8000 to the cc and f77
# if you have r10000 should add -r10000 to the cc and f77
#SGI/TCGMSG
AR = ar r
RANLIB = echo
#
# In CC -woff 1174,1552 turns off the warning messages:
# warning(1174): variable "*" was declared but never referenced
# warning(1552): variable "*" was set but never used
#
# Should fix this code in next release
#

CPP = /usr/lib/cpp -I$(HDIR) -DSGI64 -D${NODE_TYPE} -D${COMM_PKG} -D${IO_STYLE} -D${INT_TYPE} ${DEF_TIMING} -DMIPS

CC = cc -DSTD_DBL -I$(HDIR) -DSGI64 -64 -mips4 -O3 -ansi -inline \
-OPT:const_copy_limit=20000 -TENV:X=3 \
-WK,-so=1,-o=1,-r=3,-dr=AKC -DMIPS

# NOTE(review): "-W K," in the original was a garbled "-WK," (cf. the CC
# flags above).
FC = f77 -fdefault-integer-8 -O3 -I$(HDIR) -DSGI64 -DTCGMSG -64 -mips4 \
-WK,-so=1,-o=1,-r=3,-dr=AKC \
-OPT:roundoff=3:div_split=ON:alias=typed -TENV:X=3 -DMIPS

# -OPT:fold_arith_limit=4000:const_copy_limit=20000:global_limit=20000:fprop_limit=2000

F77 = $(FC)
CCF77 = $(F77)

#

LINK = f77 -fdefault-integer-8 -I$(HDIR) -r8 -64 -mips4 -DTCGMSG

#
# 64 bit but using r8 compiler flag
#
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST =
NODE_TYPE =
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
# zodiac commlib
COMMLIB = -L/scratch/nwchem/lib/SGITFP -lcomplib.sgimath -ltcgmsg \
-llapack -lblas
CTOFLIB = -lftn -lm -lc -lutil
BLASLIB =
LAPACKLIB =
HOST_EXT = out
LIBS =
#
# no i8 for mpi c.f. SGI_N32
#
endif

ifeq ($(TARGET),SGI_N32)
AR = ar r
RANLIB = echo
#SGI/TCGMSG
CPP = /usr/lib/cpp -I$(HDIR) -DSGI64 -D${NODE_TYPE} -D${COMM_PKG} -D${IO_STYLE} -D${INT_TYPE} ${DEF_TIMING} -DMIPS
AR = ar r
RANLIB = echo
#
# In CC -woff 1174,1552 turns off the warning messages:
# warning(1174): variable "*" was declared but never referenced
# warning(1552): variable "*" was set but never used
#
# Should fix this code in next release
#
CC = cc -n32 -DSTD_DBL -I$(HDIR) -DSGI64 -DMIPS
F77 = f77 -n32 -I$(HDIR) -DTCGMSG -DMIPS
FC = f77 -n32 -I$(HDIR) -DTCGMSG -DMIPS
CCF77 = $(F77)
OPT = -O3 -WK,-so=5,-ro=3,-o=5,-ur=12,-ur2=200,-inline=daxpy:dscal:ddot:dcopy
OPT =
OPTC = -O3 -Wk,-so=5,-ro=3,-o=5,-ur=12,-ur2=200,-inline=daxpy:dscal:ddot:dcopy
OPTC =
LINK = f77 -n32 -I$(HDIR) -DTCGMSG
#
# 64 bit but using r8 compiler flag
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST = MIPS
NODE_TYPE = MIPS
CPU = SGITFP
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
# zodiac commlib
COMMLIB = /usr/people/d3g270/nwchem/lib/SGI_N32/libtcgmsg.a
CTOFLIB = -lftn -lm -lc /usr/lib/crtn.o
# BLASLIB = -lblas
# LAPACKLIB= / -lcomplib.sgimath
HOST_EXT = out
BLASLIB= -L$(HOME)/peigs3 -llapack -lblas # -lcomplib.sgimath
ifdef PEIGS_MPI_USE
CC = cc -n32 -DSTD_DBL -I$(HDIR) -DSGI64 -DMIPS -woff 1174,1552
F77 = f77 -n32 -I$(HDIR) -DMPI -DMIPS
FC = f77 -n32 -I$(HDIR) -DMPI -DMIPS
CCF77 = $(F77)
COMM_PKG = MPI
MPIR_HOME = $(HOME)/mpich
MPI_INCLUDE =
MPI_COMM =
MPI_COMM = -lmpi
COMMLIB = -lmpi
BLASLIB= -L/scratch/peigs3 -lpeigs -lcomplib.sgimath -lblas
endif
endif

ifeq ($(TARGET),SUN)
# sun using
# old sun...should work with solaris with minimal changes
#SUN/TCGMSG
AR = ar r
RANLIB = ranlib
# F77 = f77 -Bstatic -f
#
CC = gcc -ansi -static -O3 -I$(HDIR) -DSTD_DBL -DSTD_INT -DSUN -D$(CPU) ${DEF_TIMING}
#
# CC = cc -Bstatic -O1 -I$(HDIR) -DSTD_DBL -DSTD_INT -dalign
# -DDEBUG1
INT_TYPE = STD_INT
F77 = f77 -Bstatic -O3 -dalign -DTCGMSG
CCF77 = $(F77)
LINK = f77 -O
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST = SUN
NODE_TYPE = SUN
CPU = SPARC
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
COMMLIB = $(HOME)/comm/libtcgmsg.a
#
# this is the worst to find for any machine
# for the SUN we have c calling fortran library
#
CTOFLIB = /msrc/apps/lib/gcc-lib/sparc-sun-sunos4.1.3/2.4.3/libgcc.a -lF77 -lV77 -L/msrc/apps/f771.4/SC1.0
#
#replace if you are on a machine with assembly BLAS library
#
HOST_EXT = out
ifdef PEIGS_MPI_USE
CC = gcc -ansi -static -O3 -I$(HDIR) -DSTD_DBL -DSTD_INT -DSUN -D$(CPU) ${DEF_TIMING} -DMPI
#
# CC = cc -Bstatic -O1 -I$(HDIR) -DSTD_DBL -DSTD_INT -dalign
# -DDEBUG1
INT_TYPE = STD_INT
F77 = f77 -Bstatic -O3 -dalign -DMPI
CCF77 = $(F77)
LINK = f77 -O
COMM_PKG = MPI
MPIR_HOME = $(HOME)/mpich
MPI_INCLUDE =
MPI_COMM =
MPI_COMM = -lmpi
COMMLIB = -lmpi
BLASLIB= -L$(HOME)/peigs3 -llapack -lblas -lcomplib.sgimath
endif
endif



#
# machines that have not been tested since June 1997
#

ifeq ($(TARGET),CRAY-T3D)
#
# cray t3d at nersc
#
AR = ar r
RANLIB = echo
FOPT_REN = -Ccray-t3d -Wf-dp -O1
COPT_REN = -O3
F77 = cf77 $(FOPT_REN) -I$(HDIR) -DCRAY_T3D -DTCGMSG
CCF77 = $(F77)
CC = cc $(COPT_REN) -I$(HDIR) -DSTD_DBL -I$(HDIR) -DCRAY_T3D ${DEF_TIMING}
LINK = cf77 $(FOPT_REN) -I$(HDIR) -DCRAY_T3D
# -X 1 -g
FOPT =
RANLIB = echo
GLOB_DEFINES = -DCRAY_T3D
EXPLICITF = TRUE
CUBIX_OPTS = -node
NODE_EXT = o
HOST_EXT =
HOST = ALPHA
NODE_TYPE = ALPHA
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
#
# Cray t3d SNRM2 routine currently has a bug, which has been reported to cray.
# it is apparent with the geneig test routine when n > = 1500.
#BLASLIB = -lblas
#
CPP = /mpp/bin/gpp -I$(HDIR) -P -D${COMM_PKG} -D${IO_STYLE} -DCRAY_T3D ${DEF_TIMING}

# 64 bit is default
CODEOBJ = SINGLE
COMMLIB = ../../libtcgmsg.a
endif

#
# -Mvect is braindead for long vector using pgi

ifeq ($(TARGET),DELTA)
AR = ar860 r
RANLIB = echo
CPU = i860
#Delta machine, compiled on sun3 (intelisc) or delilah
IEEE = -Knoieee
CC = icc ${DEF_TIMING}
OPT = -O3 $(IEEE) -Mquad -Mr8 -Minline=100
# OPTC = -O3 $(IEEE) -Mquad -Mvect -node
#
# gcc options
#
BLASLIB = -lkmath
#
# GCC = /home/delilah5/gnu/delta-local/bin/gcc -fno-gnu-linker
# OPTC = -O2 -ffast-math -fomit-frame-pointre
# GCCLIB = /home/delilah5/gnu/lib/gcc-lib/i860-delta/2.4.3/libgcc.a
# CC = $(GCC) -c
#
CTOFLIB = -l/usr/local/delta/LAPACK -llapack -lf -kmath -lm
CUBIX_OPTS =
HOST =
LINK = if77 -node $(IEEE) -Mquad -Mr8
F77 = if77 -node
CCF77 = $(F77)
NODE_EXT = o
HOST_EXT = delta
NODE_TYPE = DELTA
COMM_PKG = iPSC_NATIVE
IO_STYLE = FILE_IO
CODEOBJ = DBLE
endif

ifeq ($(TARGET),Paragon)
#860 box -- Battelle setup, for cross-compilation
# also works for direct compilation on a paragon node, at least at caltech.
#
# -Mvect at your own risk
AR = ar860 r
RANLIB = echo
OPT = -O3 -Knoieee -Mquad -Mr8 -Minline=100
OPTC = -O2 -Knoieee -Mquad -Minline=100
OPTC2 = -O3 -Knoieee -Mquad
F77 = if77
CCF77 = $(F77)
CC = icc -D STD_DBL -D STD_INT -Di860 -DIntel -I$(HDIR) ${DEF_TIMING}
INT_TYPE = STD_INT
LINK = if77 -Knoieee -nx
CUBIX_OPTS = -node
NODE_EXT = o
HOST_EXT = i860
HOST =
NODE_TYPE = i860_NODE
COMM_PKG = iPSC_NATIVE
IO_STYLE = FILE_IO
BLASLIB = -lkmath
# single precision is 32 bits
CODEOBJ = DBLE

## following two lines iff we're building for PICL
ifdef PICLDIR
NODELIBS = ${PICLDIR}/nodelib.a
COMM_PKG = PICL
endif

ifdef PEIGS_MPI_USE
  COMM_PKG = MPI

  MPIR_HOME = /usr/local/MPI/mpich

  MPI_ARCH = paragon
  MPI_COMM = ch_nx

# MPI_INCLUDE is used when compiling peigs???/comm/mxsubs.f
  MPI_INCLUDE = -I$(MPIR_HOME)/include

  COMMLIB = -L$(MPIR_HOME)/lib/$(MPI_ARCH)/$(MPI_COMM) -lmpi -lm
endif

CPP = /usr/lib/cpp -P -D${NODE_TYPE} -D${COMM_PKG} -D${IO_STYLE} -D${INT_TYPE} ${DEF_TIMING} -I$(HDIR)
endif


ifeq ($(TARGET),iPSC_860)
#Intel DELTA
#860 box -- Battelle setup, for cross-compilation
#-Mvect=shortvect at your own risk
#
AR = ar860 r
RANLIB = echo
OPT = -O4 -Knoieee -Mquad -Mr8 -Minline=100
OPTC = -O3 -Knoieee -Mquad -Minline=100
OPTC2 = -O3 -Knoieee -Mquad
F77 = if77
CCF77 = $(F77)
CC = icc -D STD_DBL -D STD_INT -I$(HDIR) -DIntel -Di860 ${DEF_TIMING}
CODEOBJ = DBLE
INT_TYPE = STD_INT
LINK = if77 -Knoieee -node
CUBIX_OPTS = -node
NODE_EXT = o
HOST_EXT = i860
HOST =
NODE_TYPE = i860_NODE
COMM_PKG = iPSC_NATIVE
IO_STYLE = FILE_IO
#
# Warning: using -lkmath is dangerous.
#          It yields garbage on some problems
#          on the Intel DELTA.
#
BLASLIB = -lkmath
CODEOBJ = DBLE

## following two lines iff we're building for PICL
ifdef PICLDIR
NODELIBS = ${PICLDIR}/nodelib.a
COMM_PKG = PICL
endif

ifdef PEIGS_MPI_USE

  # For Intel delta
  COMM_PKG = MPI

  MPIR_HOME = /usr/local/MPI/mpich

  MPI_ARCH = intelnx
  MPI_COMM = ch_nx

  # MPI_INCLUDE is used when compiling peigs???/comm/mxsubs.f
  MPI_INCLUDE = -I$(MPIR_HOME)/include

  COMMLIB = -L$(MPIR_HOME)/lib/$(MPI_ARCH)/$(MPI_COMM) -lmpi -lm
endif

endif



ifeq ($(TARGET),SGI)
#SGI/TCGMSG
AR = ar r
RANLIB = echo
CC = cc ${DEF_TIMING} -DMIPS
F77 = f77 -static -DTCGMSG -DMIPS
CCF77 = $(F77)
LINK = f77 -O
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST = SUN
NODE_TYPE = SUN
CPU =
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
COMMLIB = $(SRC)/tcgmsg/ipcv4.0/libtcgmsg.a
CTOFLIB = -lftn -lm -lc /usr/lib/crtn.o
BLASLIB = -lblas
CPP = /usr/lib/cpp -P -D${NODE_TYPE} -D${COMM_PKG} -D${IO_STYLE} -D${INT_TYPE} ${DEF_TIMING} -I$(HDIR) -DMIPS


HOST_EXT = out
endif


ifeq ($(TARGET),KSR8)
#
# real*8 and integer*8 VERSION FOR KSR
# using the crummy optimized ksrlapk.a ksrblas.a
#
# -xfpu3 generate suspect answers for choleski
#
#KSR/TCGMSG
AR = ar r
RANLIB = echo
CC = cc -DSTD_DBL -I$(HDIR) -DKSR8 ${DEF_TIMING}
# -O2
# -DTIMING
# COPT1 = -O1
# COPT2 = -O2
CODEOBJ = DBLE
COPT1 =
COPT2 =
F77 = f77 -r8 -D${COMM_PKG}
CCF77 = $(F77)
OPTF = -O1
OPTF2 = -O2
LINK = f77
CPP = /usr/lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -DKSR ${DEF_TIMING}
CUBIX_OPTS =
NODE_EXT =o
HOST_EXT =out
HOST =
NODE_TYPE =
CPU =KSR
COMM_PKG =TCGMSG
IO_STYLE =FILE_IO
COMMLIB =/home/d3g681/TCGMSG_DISTRIB/libtcgmsg.a -lrpc -para
CTOFLIB =
#
BLASLIB = -lksrblas
LAPACKLIB = -lksrlapk
HOST_EXT = out
endif

ifeq ($(TARGET),KSR)
#
# "real*8" version of lapack and blas
# KSR/TCGMSG
#
# -xfpu3 generate suspect answers for choleski
#
AR = ar r
RANLIB = echo
CC = cc -DSTD_DBL -I$(HDIR) -DKSR -O2 ${DEF_TIMING}
# -DTIMING
# COPT1 = -O1
# COPT2 = -O2
COPT1 =
COPT2 =
CODEOBJ = DBLE
F77 = f77 -D${COMM_PKG} -r8 -O2
CCF77 = $(F77)
# COPT1 = -O1
# COPT2 = -O2
# F77 = f77 -O2 -r8 -xfpu3 -D${COMM_PKG} -DSTD_DBL
OPTF = -O1
OPTF2 = -O2
LINK = f77
# CPP = /usr/lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -DKSR ${DEF_TIMING}
LINK = f77 -O
CPP = /usr/lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -DKSR ${DEF_TIMING}
CUBIX_OPTS =
NODE_EXT =o
HOST_EXT =out
HOST =
NODE_TYPE =
CPU =KSR
COMM_PKG =TCGMSG
IO_STYLE =FILE_IO
COMMLIB =/home/d3g681/TCGMSG_DISTRIB/libtcgmsg.a -lrpc -para
CTOFLIB =
#
#BLASLIB = -lblas
#LAPACKLIB = -llapack
HOST_EXT = out
endif

ifeq ($(TARGET),SunUniproc)
##SUN/uniprocessor for debugg
AR = ar r
RANLIB = ranlib
#
# if you are using gcc with f77 you need the following combinations
#
# F77 = f77 -Bstatic -f
#
# CC = cc -g -Bstatic -I$(HDIR) -DSTD_DBL -DSTD_INT -DSUN -D$(CPU) ${DEF_TIMING}
#
# CC = gcc -g -ansi -fno-gnu-linker -static -DSTD_DBL -DSTD_INT -I$(HDIR) -D$(CPU) -pedantic
#
# F77 = f77 -Bstatic -g -dalign -I$(HDIR)
CC = cc -I$(HDIR) -DALPHA -DDEBUG1 -DSTD_DBL -trapuv -g
F77 = f77 -I$(HDIR) -trapuv -g -fdefault-integer-8 -fpe
CCF77 = $(F77)
LINK = $(F77)
CTOFLIB = -lF77 -lV77 -L/msrc/apps/f771.4/SC1.0
INT_TYPE = STD_INT
CUBIX_OPTS =
NODE_EXT = o
HOST = SUN
HOST_EXT = out
NODE_TYPE = SUN
COMM_PKG = UNIPROC
CPU = SPARC
IO_STYLE = FILE_IO
BLASLIB = ../libblas.a
LAPACKLIB = ../liblapack.a
CODEOBJ = DBLE
endif

ifeq ($(TARGET),HP)
# hp9000/700s
AR = ar r
RANLIB = echo
CC = gcc -I$(HDIR) -DPA_RISC -DSTD_INT -DSTD_DBL -O3
F77 = f77 +ppu -I$(HDIR) -O
CCF77 = $(F77)
LINK = $(F77)
CODEOBJ = DBLE
CUBIX_OPTS =
NODE_EXT = o
HOST_EXT = out
HOST =
NODE_TYPE = single_cpu
CPU = PA_RISC
COMM_PKG = TCGMSG
IO_STYLE = FILE_IO
COMMLIB = $(HOME)/george/g/tcgmsg/ipcv4.0/libtcgmsg.a
CTOFLIB = -lf2c
BLASLIB = -lblas
LAPACKLIB = -llapack
HOST_EXT = out
CPP = /lib/cpp -P -C -D${COMM_PKG} -D${IO_STYLE} -D$(CPU) -DSTD_INT -DSTD_DBL -I$(HDIR)
ifdef PEIGS_MPI_USE
  COMM_PKG = MPI
  MPIR_HOME = $(HOME)/mpich
  MPI_INCLUDE = -I$(MPIR_HOME)/include
  MPI_COMM = ch_p4
  COMMLIB = -L$(MPIR_HOME)/lib/$(NODE_TYPE)/$(MPI_COMM) -lmpi
endif
endif

export AR
export RANLIB
export LINK
export CC
export F77
export CCF77
export FC