package terraform

import (
	"context"
	"fmt"
	"log"
	"strings"
	"sync"

	"github.com/apparentlymart/go-versions/versions"
	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/instances"
	"github.com/hashicorp/terraform/internal/lang"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/providers"
	"github.com/hashicorp/terraform/internal/provisioners"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/tfdiags"
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/internal/depsfile"
	"github.com/hashicorp/terraform/internal/getproviders"
	_ "github.com/hashicorp/terraform/internal/logging"
)

// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte

const (
	// InputModeProvider asks for provider variables
	InputModeProvider InputMode = 1 << iota

	// InputModeStd is the standard operating mode and asks for both variables
	// and providers.
	InputModeStd = InputModeProvider
)

// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
	Config       *configs.Config
	Changes      *plans.Changes
	State        *states.State
	Targets      []addrs.Targetable
	ForceReplace []addrs.AbsResourceInstance
	Variables    InputValues
	Meta         *ContextMeta
	PlanMode     plans.Mode
	SkipRefresh  bool

	Hooks        []Hook
	Parallelism  int
	Providers    map[addrs.Provider]providers.Factory
	Provisioners map[string]provisioners.Factory

	// If non-nil, will apply as additional constraints on the provider
	// plugins that will be requested from the provider resolver.
	ProviderSHA256s map[string][]byte

	// If non-nil, will be verified to ensure that provider requirements from
	// configuration can be satisfied by the set of locked dependencies.
	LockedDependencies *depsfile.Locks

	// Set of providers to exclude from the requirements check process, as they
	// are marked as in local development.
	ProvidersInDevelopment map[addrs.Provider]struct{}

	UIInput UIInput
}
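
// Editorial example (not part of the original source): a hedged sketch of a
// minimal ContextOpts that a caller might assemble before calling NewContext.
// The provider name and factory shown here are placeholders; real callers
// (the CLI/backend layers) build these maps from the installed plugins.
func exampleContextOpts(cfg *configs.Config, st *states.State, newNullProvider providers.Factory) *ContextOpts {
	return &ContextOpts{
		Config:      cfg,
		State:       st,
		PlanMode:    plans.NormalMode,
		Parallelism: 10, // zero would also default to 10 inside NewContext
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("null"): newNullProvider,
		},
		Provisioners: map[string]provisioners.Factory{},
		Variables:    InputValues{},
	}
}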

// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own, but which is
// exposed into Terraform in various ways. It must be provided by the Context
// initializer.
type ContextMeta struct {
	Env string // Env is the state environment

	// OriginalWorkingDir is the working directory where the Terraform CLI
	// was run from, which may no longer actually be the current working
	// directory if the user included the -chdir=... option.
	//
	// If this string is empty then the original working directory is the same
	// as the current working directory.
	//
	// In most cases we should respect the user's override by ignoring this
	// path and just using the current working directory, but this is here
	// for some exceptional cases where the original working directory is
	// needed.
	OriginalWorkingDir string
}

// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext.
type Context struct {
	config       *configs.Config
	changes      *plans.Changes
	skipRefresh  bool
	targets      []addrs.Targetable
	forceReplace []addrs.AbsResourceInstance
	variables    InputValues
	meta         *ContextMeta
	planMode     plans.Mode

	// state, refreshState, and prevRunState simultaneously track three
	// different incarnations of the Terraform state:
	//
	// "state" is always the most "up-to-date". During planning it represents
	// our best approximation of the planned new state, and during applying
	// it represents the results of all of the actions we've taken so far.
	//
	// "refreshState" is populated and relevant only during planning, where we
	// update it to reflect a provider's sense of the current state of the
	// remote object each resource instance is bound to but don't include
	// any changes implied by the configuration.
	//
	// "prevRunState" is similar to refreshState except that it doesn't
	// include the result of the provider's refresh step; instead it reflects
	// the state as we found it prior to any changes. It does, however,
	// reflect the result of running the provider's schema upgrade actions,
	// so that if planning is successful every resource instance object
	// conforms to the _current_ resource type schema, and it is then
	// meaningful to compare prevRunState to refreshState to detect changes
	// made outside of Terraform.
	state        *states.State
	refreshState *states.State
	prevRunState *states.State

	hooks      []Hook
	components contextComponentFactory
	schemas    *Schemas
	sh         *stopHook
	uiInput    UIInput

	l                   sync.Mutex // Lock acquired during any task
	parallelSem         Semaphore
	providerInputConfig map[string]map[string]cty.Value
	providerSHA256s     map[string][]byte
	runCond             *sync.Cond
	runContext          context.Context
	runContextCancel    context.CancelFunc
}

// (additional methods on Context can be found in context_*.go files.)

// NewContext creates a new Context structure.
//
// Once a Context is created, the caller must not access or mutate any of
// the objects referenced (directly or indirectly) by the ContextOpts fields.
//
// If the returned diagnostics contains errors then the resulting context is
// invalid and must not be used.
func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
	log.Printf("[TRACE] terraform.NewContext: starting")
	diags := CheckCoreVersionRequirements(opts.Config)
	// If version constraints are not met then we'll bail early since otherwise
	// we're likely to just see a bunch of other errors related to
	// incompatibilities, which could be overwhelming for the user.
	if diags.HasErrors() {
		return nil, diags
	}

	// Copy all the hooks and add our stop hook. We don't append directly
	// to opts.Hooks so that we're not modifying the caller's slice in-place.
	sh := new(stopHook)
	hooks := make([]Hook, len(opts.Hooks)+1)
	copy(hooks, opts.Hooks)
	hooks[len(opts.Hooks)] = sh

	state := opts.State
	if state == nil {
		state = states.NewState()
	}

	// Determine parallelism, defaulting to 10. We do this both to limit
	// CPU pressure and as an extra guard against rate throttling from
	// providers. A negative parallelism value is an error.
	par := opts.Parallelism
	if par < 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid parallelism value",
			fmt.Sprintf("Parallelism must be a positive value, not %d.", par),
		))
		return nil, diags
	}

	if par == 0 {
		par = 10
	}

	// Set up the variables in the following sequence:
	//    0 - Take default values from the configuration
	//    1 - Take values from TF_VAR_x environment variables
	//    2 - Take values specified in -var flags, overriding values
	//        set by environment variables if necessary. This includes
	//        values taken from -var-file in addition.
	var variables InputValues
	if opts.Config != nil {
		// Default variables from the configuration seed our map.
		variables = DefaultVariableValues(opts.Config.Module.Variables)
	}
	// Variables provided by the caller (from CLI, environment, etc) can
	// override the defaults.
	variables = variables.Override(opts.Variables)

	components := &basicComponentFactory{
		providers:    opts.Providers,
		provisioners: opts.Provisioners,
	}

	log.Printf("[TRACE] terraform.NewContext: loading provider schemas")
	schemas, err := LoadSchemas(opts.Config, opts.State, components)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Could not load plugin",
			fmt.Sprintf(errPluginInit, err),
		))
		return nil, diags
	}

	changes := opts.Changes
	if changes == nil {
		changes = plans.NewChanges()
	}

	config := opts.Config
	if config == nil {
		config = configs.NewEmptyConfig()
	}

	// If we have a configuration and a set of locked dependencies, verify that
	// the provider requirements from the configuration can be satisfied by the
	// locked dependencies.
	if opts.LockedDependencies != nil {
		reqs, providerDiags := config.ProviderRequirements()
		diags = diags.Append(providerDiags)

		locked := opts.LockedDependencies.AllProviders()
		unmetReqs := make(getproviders.Requirements)
		for provider, versionConstraints := range reqs {
			// Builtin providers are not listed in the locks file
			if provider.IsBuiltIn() {
				continue
			}
			// Development providers must be excluded from this check
			if _, ok := opts.ProvidersInDevelopment[provider]; ok {
				continue
			}
			// If the required provider doesn't exist in the lock, or the
			// locked version doesn't meet the constraints, mark the
			// requirement unmet
			acceptable := versions.MeetingConstraints(versionConstraints)
			if lock, ok := locked[provider]; !ok || !acceptable.Has(lock.Version()) {
				unmetReqs[provider] = versionConstraints
			}
		}

		if len(unmetReqs) > 0 {
			var buf strings.Builder
			for provider, versionConstraints := range unmetReqs {
				fmt.Fprintf(&buf, "\n- %s", provider)
				if len(versionConstraints) > 0 {
					fmt.Fprintf(&buf, " (%s)", getproviders.VersionConstraintsString(versionConstraints))
				}
			}
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Provider requirements cannot be satisfied by locked dependencies",
				fmt.Sprintf("The following required providers are not installed:\n%s\n\nPlease run \"terraform init\".", buf.String()),
			))
			return nil, diags
		}
	}

	switch opts.PlanMode {
	case plans.NormalMode, plans.DestroyMode:
		// OK
	case plans.RefreshOnlyMode:
		if opts.SkipRefresh {
			// The CLI layer (and other similar callers) should prevent this
			// combination of options.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Incompatible plan options",
				"Cannot skip refreshing in refresh-only mode. This is a bug in Terraform.",
			))
			return nil, diags
		}
	default:
		// The CLI layer (and other similar callers) should not try to
		// create a context for a mode that Terraform Core doesn't support.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			fmt.Sprintf("Terraform Core doesn't know how to handle plan mode %s. This is a bug in Terraform.", opts.PlanMode),
		))
		return nil, diags
	}
	if len(opts.ForceReplace) > 0 && opts.PlanMode != plans.NormalMode {
		// The other modes don't generate the no-op or update actions that we
		// might upgrade to "replace", so it doesn't make sense to combine them.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			"Forcing resource instance replacement (with -replace=...) is allowed only in normal planning mode.",
		))
		return nil, diags
	}

	log.Printf("[TRACE] terraform.NewContext: complete")

	// By the time we get here, we should have values defined for all of
	// the root module variables, even if some of them are "unknown". It's the
	// caller's responsibility to have already handled the decoding of these
	// from the various ways the CLI allows them to be set and to produce
	// user-friendly error messages if they are not all present, and so
	// the error message from checkInputVariables should never be seen and
	// includes language asking the user to report a bug.
	if config != nil {
		varDiags := checkInputVariables(config.Module.Variables, variables)
		diags = diags.Append(varDiags)
	}

	return &Context{
		components:   components,
		schemas:      schemas,
		planMode:     opts.PlanMode,
		changes:      changes,
		hooks:        hooks,
		meta:         opts.Meta,
		config:       config,
		state:        state,
		refreshState: state.DeepCopy(),
		prevRunState: state.DeepCopy(),
		skipRefresh:  opts.SkipRefresh,
		targets:      opts.Targets,
		forceReplace: opts.ForceReplace,
		uiInput:      opts.UIInput,
		variables:    variables,

		parallelSem:         NewSemaphore(par),
		providerInputConfig: make(map[string]map[string]cty.Value),
		providerSHA256s:     opts.ProviderSHA256s,
		sh:                  sh,
	}, diags
}
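
// Editorial usage sketch (not part of the original source): the typical
// sequence a caller such as the CLI backend follows is NewContext, then
// Plan, then Apply, checking diagnostics after each step. This is a hedged
// illustration of the API in this file, not a verbatim copy of any real caller.
func examplePlanAndApply(opts *ContextOpts) (*states.State, tfdiags.Diagnostics) {
	ctx, diags := NewContext(opts)
	if diags.HasErrors() {
		return nil, diags
	}

	plan, planDiags := ctx.Plan()
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return nil, diags
	}
	_ = plan // a real caller would render and/or persist the plan here

	newState, applyDiags := ctx.Apply()
	diags = diags.Append(applyDiags)
	return newState, diags
}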

func (c *Context) Schemas() *Schemas {
	return c.schemas
}

type ContextGraphOpts struct {
	// If true, validates the graph structure (checks for cycles).
	Validate bool

	// Legacy graphs only: won't prune the graph
	Verbose bool
}

// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
	if opts == nil {
		opts = &ContextGraphOpts{Validate: true}
	}

	log.Printf("[INFO] terraform: building graph: %s", typ)
	switch typ {
	case GraphTypeApply:
		return (&ApplyGraphBuilder{
			Config:       c.config,
			Changes:      c.changes,
			State:        c.state,
			Components:   c.components,
			Schemas:      c.schemas,
			Targets:      c.targets,
			ForceReplace: c.forceReplace,
			Validate:     opts.Validate,
		}).Build(addrs.RootModuleInstance)

	case GraphTypeValidate:
		// The validate graph is just a slightly modified plan graph: an empty
		// state is substituted in for Validate.
		return ValidateGraphBuilder(&PlanGraphBuilder{
			Config:     c.config,
			Components: c.components,
			Schemas:    c.schemas,
			Targets:    c.targets,
			Validate:   opts.Validate,
			State:      states.NewState(),
		}).Build(addrs.RootModuleInstance)

	case GraphTypePlan:
		// Create the plan graph builder
		return (&PlanGraphBuilder{
			Config:       c.config,
			State:        c.state,
			Components:   c.components,
			Schemas:      c.schemas,
			Targets:      c.targets,
			ForceReplace: c.forceReplace,
			Validate:     opts.Validate,
			skipRefresh:  c.skipRefresh,
		}).Build(addrs.RootModuleInstance)

	case GraphTypePlanDestroy:
		return (&DestroyPlanGraphBuilder{
			Config:      c.config,
			State:       c.state,
			Components:  c.components,
			Schemas:     c.schemas,
			Targets:     c.targets,
			Validate:    opts.Validate,
			skipRefresh: c.skipRefresh,
		}).Build(addrs.RootModuleInstance)

	case GraphTypePlanRefreshOnly:
		// Create the plan graph builder, with skipPlanChanges set to
		// activate the "refresh only" mode.
		return (&PlanGraphBuilder{
			Config:          c.config,
			State:           c.state,
			Components:      c.components,
			Schemas:         c.schemas,
			Targets:         c.targets,
			Validate:        opts.Validate,
			skipRefresh:     c.skipRefresh,
			skipPlanChanges: true, // this activates "refresh only" mode.
		}).Build(addrs.RootModuleInstance)

	case GraphTypeEval:
		return (&EvalGraphBuilder{
			Config:     c.config,
			State:      c.state,
			Components: c.components,
			Schemas:    c.schemas,
		}).Build(addrs.RootModuleInstance)

	default:
		// Should never happen, because the above is exhaustive for all graph types.
		panic(fmt.Errorf("unsupported graph type %s", typ))
	}
}
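
// Editorial note (not part of the original source): callers normally pass a
// nil opts value, which Graph treats as &ContextGraphOpts{Validate: true},
// as in this hedged sketch.
func exampleValidateGraph(c *Context) (*Graph, tfdiags.Diagnostics) {
	return c.Graph(GraphTypeValidate, nil)
}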

// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *states.State {
	return c.state.DeepCopy()
}

// Eval produces a scope in which expressions can be evaluated for
// the given module path.
//
// This method must first evaluate any ephemeral values (input variables, local
// values, and output values) in the configuration. These ephemeral values are
// not included in the persisted state, so they must be re-computed using other
// values in the state before they can be properly evaluated. The updated
// values are retained in the main state associated with the receiving context.
//
// This function takes no action against remote APIs but it does need access
// to all provider and provisioner instances in order to obtain their schemas
// for type checking.
//
// The result is an evaluation scope that can be used to resolve references
// against the root module. If the returned diagnostics contains errors then
// the returned scope may be nil. If it is not nil then it may still be used
// to attempt expression evaluation or other analysis, but some expressions
// may not behave as expected.
func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) {
	// This is intended for external callers such as the "terraform console"
	// command. Internally, we create an evaluator in c.walk before walking
	// the graph, and create scopes in ContextGraphWalker.

	var diags tfdiags.Diagnostics
	defer c.acquireRun("eval")()

	// Start with a copy of state so that we don't affect any instances
	// that other methods may have already returned.
	c.state = c.state.DeepCopy()
	var walker *ContextGraphWalker

	graph, graphDiags := c.Graph(GraphTypeEval, nil)
	diags = diags.Append(graphDiags)
	if !diags.HasErrors() {
		var walkDiags tfdiags.Diagnostics
		walker, walkDiags = c.walk(graph, walkEval)
		diags = diags.Append(walker.NonFatalDiagnostics)
		diags = diags.Append(walkDiags)
	}

	if walker == nil {
		// If we skipped walking the graph (due to errors) then we'll just
		// use a placeholder graph walker here, which'll refer to the
		// unmodified state.
		walker = c.graphWalker(walkEval)
	}

	// This is a bit weird since we don't normally evaluate outside of
	// the context of a walk, but we'll "re-enter" our desired path here
	// just to get hold of an EvalContext for it. GraphContextBuiltin
	// caches its contexts, so we should get hold of the context that was
	// previously used for evaluation here, unless we skipped walking.
	evalCtx := walker.EnterPath(path)
	return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags
}
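
// Editorial sketch (not part of the original source): how an external caller
// like "terraform console" might obtain an evaluation scope for the root
// module. Evaluating a concrete expression against the returned scope would
// additionally require an hcl.Expression, which is beyond this file's
// imports, so this sketch stops at acquiring the scope.
func exampleRootEvalScope(c *Context) (*lang.Scope, tfdiags.Diagnostics) {
	return c.Eval(addrs.RootModuleInstance)
}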

// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even when an error is returned, a state may still be returned and may be
// partially updated. In addition to returning the resulting state, this
// context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
//       State() method. Currently the helper/resource testing framework relies
//       on the absence of a returned state to determine if Destroy can be
//       called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) {
	defer c.acquireRun("apply")()

	// Copy our own state
	c.state = c.state.DeepCopy()

	// Build the graph.
	graph, diags := c.Graph(GraphTypeApply, nil)
	if diags.HasErrors() {
		return nil, diags
	}

	// Determine the operation
	operation := walkApply
	if c.planMode == plans.DestroyMode {
		operation = walkDestroy
	}

	// Walk the graph
	walker, walkDiags := c.walk(graph, operation)
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)

	if c.planMode == plans.DestroyMode && !diags.HasErrors() {
		// If we know we were trying to destroy objects anyway, and we
		// completed without any errors, then we'll also prune out any
		// leftover empty resource husks (left after all of the instances
		// of a resource with "count" or "for_each" are destroyed) to
		// help ensure we end up with an _actually_ empty state, assuming
		// we weren't destroying with -target here.
		//
		// (This doesn't actually take into account -target, but that should
		// be okay because it doesn't throw away anything we can't recompute
		// on a subsequent "terraform plan" run, if the resources are still
		// present in the configuration. However, this _will_ cause "count = 0"
		// resources to read as unknown during the next refresh walk, which
		// may cause some additional churn if used in a data resource or
		// provider block, until we remove refreshing as a separate walk and
		// just do it as part of the plan walk.)
		c.state.PruneResourceHusks()
	}

	if len(c.targets) > 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Warning,
			"Applied changes may be incomplete",
			`The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending:
    terraform plan

Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`,
		))
	}

	// This isn't technically needed, but don't leave an old refreshed state
	// around in case we re-use the context in internal tests.
	c.refreshState = c.state.DeepCopy()

	return c.state, diags
}
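
// Editorial sketch (not part of the original source): per the doc comment on
// Apply, when diagnostics contain errors the returned state may be nil, so a
// caller that still needs the (possibly partially-updated) state should fall
// back to Context.State, as illustrated here.
func exampleApplyWithRecovery(c *Context) (*states.State, tfdiags.Diagnostics) {
	newState, diags := c.Apply()
	if diags.HasErrors() {
		return c.State(), diags
	}
	return newState, diags
}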

// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) {
	defer c.acquireRun("plan")()
	c.changes = plans.NewChanges()
	var diags tfdiags.Diagnostics

	if len(c.targets) > 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Warning,
			"Resource targeting is in effect",
			`You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.

The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`,
		))
	}

	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics
	switch c.planMode {
	case plans.NormalMode:
		plan, planDiags = c.plan()
	case plans.DestroyMode:
		plan, planDiags = c.destroyPlan()
	case plans.RefreshOnlyMode:
		plan, planDiags = c.refreshOnlyPlan()
	default:
		panic(fmt.Sprintf("unsupported plan mode %s", c.planMode))
	}
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// convert the variables into the format expected for the plan
	varVals := make(map[string]plans.DynamicValue, len(c.variables))
	for k, iv := range c.variables {
		// We use cty.DynamicPseudoType here so that we'll save both the
		// value _and_ its dynamic type in the plan, so we can recover
		// exactly the same value later.
		dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to prepare variable value for plan",
				fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
			))
			continue
		}
		varVals[k] = dv
	}

	// insert the run-specific data from the context into the plan; variables,
	// targets and provider SHAs.
	plan.VariableValues = varVals
	plan.TargetAddrs = c.targets
	plan.ProviderSHA256s = c.providerSHA256s

	return plan, diags
}

func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	graph, graphDiags := c.Graph(GraphTypePlan, nil)
	diags = diags.Append(graphDiags)
	if graphDiags.HasErrors() {
		return nil, diags
	}

	// Do the walk
	walker, walkDiags := c.walk(graph, walkPlan)
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		return nil, diags
	}
	plan := &plans.Plan{
		UIMode:            plans.NormalMode,
		Changes:           c.changes,
		ForceReplaceAddrs: c.forceReplace,
		PrevRunState:      c.prevRunState.DeepCopy(),
	}

	c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()

	refreshedState := c.refreshState.DeepCopy()
	plan.PriorState = refreshedState

	// replace the working state with the updated state, so that immediate calls
	// to Apply work as expected.
	c.state = refreshedState

	return plan, diags
}

func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	destroyPlan := &plans.Plan{
		PriorState: c.state.DeepCopy(),
	}
	c.changes = plans.NewChanges()

	// A destroy plan starts by running Refresh to read any pending data
	// sources, and remove missing managed resources. This is required because
	// a "destroy plan" is only creating delete changes, and is essentially a
	// local operation.
	//
	// NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk
	// below to upgrade the prevRunState and priorState both to the latest
	// resource type schemas, so NodePlanDestroyableResourceInstance.Execute
	// must coordinate with this by taking that action only when c.skipRefresh
	// _is_ set. This coupling between the two is unfortunate but necessary
	// to work within our current structure.
	if !c.skipRefresh {
		refreshPlan, refreshDiags := c.plan()
		diags = diags.Append(refreshDiags)
		if diags.HasErrors() {
			return nil, diags
		}

		// insert the refreshed state into the destroy plan result, and discard
		// the changes recorded from the refresh.
		destroyPlan.PriorState = refreshPlan.PriorState.DeepCopy()
		destroyPlan.PrevRunState = refreshPlan.PrevRunState.DeepCopy()
		c.changes = plans.NewChanges()
	}

	graph, graphDiags := c.Graph(GraphTypePlanDestroy, nil)
	diags = diags.Append(graphDiags)
	if graphDiags.HasErrors() {
		return nil, diags
	}

	// Do the walk
	walker, walkDiags := c.walk(graph, walkPlanDestroy)
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		return nil, diags
	}

	if c.skipRefresh {
		// If we didn't do refreshing then both the previous run state and
		// the prior state are the result of upgrading the previous run state,
		// which we should've upgraded as part of the plan-destroy walk
		// in NodePlanDestroyableResourceInstance.Execute, so they'll have the
		// current schema but neither will reflect any out-of-band changes in
		// the remote system.
		destroyPlan.PrevRunState = c.prevRunState.DeepCopy()
		destroyPlan.PriorState = c.prevRunState.DeepCopy()
	}

	destroyPlan.UIMode = plans.DestroyMode
	destroyPlan.Changes = c.changes
	return destroyPlan, diags
}

func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	graph, graphDiags := c.Graph(GraphTypePlanRefreshOnly, nil)
	diags = diags.Append(graphDiags)
	if graphDiags.HasErrors() {
		return nil, diags
	}

	// Do the walk
	walker, walkDiags := c.walk(graph, walkPlan)
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		return nil, diags
	}
	plan := &plans.Plan{
		UIMode:       plans.RefreshOnlyMode,
		Changes:      c.changes,
		PrevRunState: c.prevRunState.DeepCopy(),
	}

	// If the graph builder and graph nodes correctly obeyed our directive
	// to refresh only, the set of resource changes should always be empty.
	// We'll safety-check that here so we can return a clear message about it,
	// rather than probably just generating confusing output at the UI layer.
	if len(plan.Changes.Resources) != 0 {
		// Some extra context in the logs in case the user reports this message
		// as a bug, as a starting point for debugging.
		for _, rc := range plan.Changes.Resources {
			if depKey := rc.DeposedKey; depKey == states.NotDeposed {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
			} else {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
			}
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid refresh-only plan",
			"Terraform generated planned resource changes in a refresh-only plan. This is a bug in Terraform.",
		))
	}

	c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()

	refreshedState := c.refreshState
	plan.PriorState = refreshedState.DeepCopy()

	// replace the working state with the updated state, so that immediate calls
	// to Apply work as expected. (PriorState above is a deep copy so that such
	// an apply won't mutate the state recorded in the plan.)
	c.state = refreshedState

	return plan, diags
}

// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This is done by executing a plan, and retaining the
// state while discarding the change set.
//
// In the case of an error, there is no state returned.
func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) {
	p, diags := c.Plan()
	if diags.HasErrors() {
		return nil, diags
	}

	return p.PriorState, diags
}

// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
	log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")

	c.l.Lock()
	defer c.l.Unlock()

	// If we're running, then stop
	if c.runContextCancel != nil {
		log.Printf("[WARN] terraform: run context exists, stopping")

		// Tell the hook we want to stop
		c.sh.Stop()

		// Stop the context
		c.runContextCancel()
		c.runContextCancel = nil
	}

	// Grab the condition var before we exit
	if cond := c.runCond; cond != nil {
		log.Printf("[INFO] terraform: waiting for graceful stop to complete")
		cond.Wait()
	}

	log.Printf("[WARN] terraform: stop complete")
}
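
// Editorial sketch (not part of the original source): Stop is intended to be
// called from another goroutine (for example in response to an interrupt
// signal) while an operation such as Apply is running; it blocks until the
// in-flight walk has wound down. A real caller would also clean up the
// goroutine when no interrupt arrives.
func exampleInterruptibleApply(c *Context, interrupt <-chan struct{}) (*states.State, tfdiags.Diagnostics) {
	go func() {
		<-interrupt
		c.Stop() // blocks until the running operation completes
	}()
	return c.Apply()
}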

// Validate performs semantic validation of the configuration, and returns
// any warnings or errors.
//
// Syntax and structural checks are performed by the configuration loader,
// and so are not repeated here.
func (c *Context) Validate() tfdiags.Diagnostics {
	defer c.acquireRun("validate")()

	var diags tfdiags.Diagnostics

	// If we have errors at this point then we probably won't be able to
	// construct a graph without producing redundant errors, so we'll halt early.
	if diags.HasErrors() {
		return diags
	}

	// Build the graph so we can walk it and run Validate on nodes.
	// We also validate the graph generated here, but this graph doesn't
	// necessarily match the graph that Plan will generate, so we'll validate the
	// graph again later after Planning.
	graph, graphDiags := c.Graph(GraphTypeValidate, nil)
	diags = diags.Append(graphDiags)
	if graphDiags.HasErrors() {
		return diags
	}

	// Walk
	walker, walkDiags := c.walk(graph, walkValidate)
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		return diags
	}

	return diags
}

// Config returns the configuration tree associated with this context.
func (c *Context) Config() *configs.Config {
	return c.config
}

// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() InputValues {
	return c.variables
}

// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v cty.Value) {
	c.variables[k] = &InputValue{
		Value:      v,
		SourceType: ValueFromCaller,
	}
}
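
// Editorial sketch (not part of the original source): values set via
// SetVariable are recorded with SourceType ValueFromCaller. The variable
// name "region" here is hypothetical, purely for illustration.
func exampleSetVariable(c *Context) {
	c.SetVariable("region", cty.StringVal("us-west-2"))
}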

func (c *Context) acquireRun(phase string) func() {
	// With the run lock held, grab the context lock to make changes
	// to the run context.
	c.l.Lock()
	defer c.l.Unlock()

	// Wait until we're no longer running
	for c.runCond != nil {
		c.runCond.Wait()
	}

	// Build our lock
	c.runCond = sync.NewCond(&c.l)

	// Create a new run context
	c.runContext, c.runContextCancel = context.WithCancel(context.Background())

	// Reset the stop hook so we're not stopped
	c.sh.Reset()

	return c.releaseRun
}

func (c *Context) releaseRun() {
	// Grab the context lock so that we can make modifications to fields
	c.l.Lock()
	defer c.l.Unlock()

	// End our run. We check if runContext is non-nil because it can be
	// set to nil if it was cancelled via Stop()
	if c.runContextCancel != nil {
		c.runContextCancel()
	}

	// Wake all goroutines waiting on our condition variable
	cond := c.runCond
	c.runCond = nil
	cond.Broadcast()

	// Unset the context
	c.runContext = nil
}

func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())

	walker := c.graphWalker(operation)

	// Watch for a stop so we can call the provider Stop() API.
	watchStop, watchWait := c.watchStop(walker)

	// Walk the real graph, this will block until it completes
	diags := graph.Walk(walker)

	// Close the channel so the watcher stops, and wait for it to return.
	close(watchStop)
	<-watchWait

	return walker, diags
}

func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
	var state *states.SyncState
	var refreshState *states.SyncState
	var prevRunState *states.SyncState

	switch operation {
	case walkValidate:
		// validate should not use any state
		state = states.NewState().SyncWrapper()

		// validate currently uses the plan graph, so we have to populate the
		// refreshState and the prevRunState.
		refreshState = states.NewState().SyncWrapper()
		prevRunState = states.NewState().SyncWrapper()

	case walkPlan, walkPlanDestroy:
		state = c.state.SyncWrapper()
		refreshState = c.refreshState.SyncWrapper()
		prevRunState = c.prevRunState.SyncWrapper()

	default:
		state = c.state.SyncWrapper()
	}

	return &ContextGraphWalker{
		Context:            c,
		State:              state,
		RefreshState:       refreshState,
		PrevRunState:       prevRunState,
		Changes:            c.changes.SyncWrapper(),
		InstanceExpander:   instances.NewExpander(),
		Operation:          operation,
		StopContext:        c.runContext,
		RootVariableValues: c.variables,
	}
}

// watchStop immediately returns a `stop` and a `wait` chan after dispatching
// the watchStop goroutine. This will watch the runContext for cancellation and
// stop the providers accordingly. When the watch is no longer needed, the
// `stop` chan should be closed before waiting on the `wait` chan.
// The `wait` chan is important, because without synchronizing with the end of
// the watchStop goroutine, the runContext may also be closed during the select
// incorrectly causing providers to be stopped. Even if the graph walk is done
// at that point, stopping a provider permanently cancels its StopContext which
// can cause later actions to fail.
func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
	stop := make(chan struct{})
	wait := make(chan struct{})

	// get the runContext cancellation channel now, because releaseRun will
	// write to the runContext field.
	done := c.runContext.Done()

	go func() {
		defer close(wait)
		// Wait for a stop or completion
		select {
		case <-done:
			// done means the context was canceled, so we need to try and stop
			// providers.
		case <-stop:
			// our own stop channel was closed.
			return
		}

		// If we're here, we're stopped, trigger the call.
		log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")

		{
			// Copy the providers so that a misbehaved blocking Stop doesn't
			// completely hang Terraform.
			walker.providerLock.Lock()
			ps := make([]providers.Interface, 0, len(walker.providerCache))
			for _, p := range walker.providerCache {
				ps = append(ps, p)
			}
			defer walker.providerLock.Unlock()

			for _, p := range ps {
				// We ignore the error for now since there isn't any reasonable
				// action to take if there is an error here, since the stop is still
				// advisory: Terraform will exit once the graph node completes.
				p.Stop()
			}
		}

		{
			// Call stop on all the provisioners
			walker.provisionerLock.Lock()
			ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
			for _, p := range walker.provisionerCache {
				ps = append(ps, p)
			}
			defer walker.provisionerLock.Unlock()

			for _, p := range ps {
				// We ignore the error for now since there isn't any reasonable
				// action to take if there is an error here, since the stop is still
				// advisory: Terraform will exit once the graph node completes.
				p.Stop()
			}
		}
	}()

	return stop, wait
}