use crate::back::write::{
    self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
};
use crate::llvm::archive_ro::ArchiveRO;
use crate::llvm::{self, build_string, False, True};
use crate::{LlvmCodegenBackend, ModuleLlvm};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{
    CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{FatalError, Handler};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::SymbolExportLevel;
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, Lto};
use tracing::{debug, info};

use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::iter;
use std::path::Path;
use std::ptr;
use std::slice;
use std::sync::Arc;

/// We keep track of the computed LTO cache keys from the previous
/// session to determine which CGUs we can reuse.
pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";

pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
    match crate_type {
        CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
        CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
    }
}

fn prepare_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
    let export_threshold = match cgcx.lto {
        // We're just doing LTO for our one crate
        Lto::ThinLocal => SymbolExportLevel::Rust,

        // We're doing LTO for the entire crate graph
        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),

        Lto::No => panic!("didn't request LTO but we're doing LTO"),
    };

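    // Only symbols whose export level is within the threshold computed above
    // need to survive LTO; collect their names as C strings so they can later
    // be handed to LLVM as the preserved set (everything else may be
    // internalized).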
    let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| {
        if level.is_below_threshold(export_threshold) {
            Some(CString::new(name.as_str()).unwrap())
        } else {
            None
        }
    };
    let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
    let mut symbols_below_threshold = {
        let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
        exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
    };
    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());

    // If we're performing LTO for the entire crate graph, then for each of our
    // upstream dependencies, find the corresponding rlib and load the bitcode
    // from the archive.
    //
    // We save off all the bitcode and LLVM module ids for later processing
    // with either fat or thin LTO
    let mut upstream_modules = Vec::new();
    if cgcx.lto != Lto::ThinLocal {
        if cgcx.opts.cg.prefer_dynamic {
            diag_handler
                .struct_err("cannot prefer dynamic linking when performing LTO")
                .note(
                    "only 'staticlib', 'bin', and 'cdylib' outputs are \
                               supported with LTO",
                )
                .emit();
            return Err(FatalError);
        }

        // Make sure we actually can run LTO
        for crate_type in cgcx.crate_types.iter() {
            if !crate_type_allows_lto(*crate_type) {
                let e = diag_handler.fatal(
                    "lto can only be run for executables, cdylibs and \
                                            static library outputs",
                );
                return Err(e);
            }
        }

        for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
            let exported_symbols =
                cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
            {
                let _timer =
                    cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
                symbols_below_threshold
                    .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
            }

            let archive = ArchiveRO::open(path).expect("wanted an rlib");
            let obj_files = archive
                .iter()
                .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
                .filter(|&(name, _)| looks_like_rust_object_file(name));
            for (name, child) in obj_files {
                info!("adding bitcode from {}", name);
                match get_bitcode_slice_from_object_data(child.data()) {
                    Ok(data) => {
                        let module = SerializedModule::FromRlib(data.to_vec());
                        upstream_modules.push((module, CString::new(name).unwrap()));
                    }
                    Err(msg) => return Err(diag_handler.fatal(&msg)),
                }
            }
        }
    }

    Ok((symbols_below_threshold, upstream_modules))
}

fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
    let mut len = 0;
    let data =
        unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
    if !data.is_null() {
        assert!(len != 0);
        let bc = unsafe { slice::from_raw_parts(data, len) };

        // `bc` must be a sub-slice of `obj`.
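        // We check this by comparing the start pointers and the one-past-the-end
        // pointers; the latter are obtained from empty sub-slices so that no
        // out-of-bounds pointer is ever constructed.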
        assert!(obj.as_ptr() <= bc.as_ptr());
        assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());

        Ok(bc)
    } else {
        assert!(len == 0);
        let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
        Err(format!("failed to get bitcode from object file for LTO ({})", msg))
    }
}

/// Performs fat LTO by merging all modules into a single one and returning it
/// for further optimization.
pub(crate) fn run_fat(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();
    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    fat_lto(
        cgcx,
        &diag_handler,
        modules,
        cached_modules,
        upstream_modules,
        &symbols_below_threshold,
    )
}

/// Performs thin LTO by performing necessary global analysis and returning two
/// lists, one of the modules that need optimization and another for modules that
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    modules: Vec<(String, ThinBuffer)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let diag_handler = cgcx.create_diag_handler();
    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    if cgcx.opts.cg.linker_plugin_lto.enabled() {
        unreachable!(
            "We should never reach this case if the LTO step \
                      is deferred to the linker"
        );
    }
    thin_lto(
        cgcx,
        &diag_handler,
        modules,
        upstream_modules,
        cached_modules,
        &symbols_below_threshold,
    )
}

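/// Serializes a freshly codegened module into a `ThinBuffer` so that it can
/// later take part in ThinLTO.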
pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
    let name = module.name.clone();
    let buffer = ThinBuffer::new(module.module_llvm.llmod());
    (name, buffer)
}

fn fat_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
    info!("going for a fat lto");

    // Sort out all our lists of incoming modules into two lists.
    //
    // * `serialized_modules` (also an argument to this function) contains all
    //   modules that are serialized in-memory.
    // * `in_memory` contains modules which are already parsed and in-memory,
    //   such as from multi-CGU builds.
    //
    // All of `cached_modules` (cached from previous incremental builds) can
    // immediately go onto the `serialized_modules` list and then we can
    // split the `modules` array into these two lists.
    let mut in_memory = Vec::new();
    serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
        info!("pushing cached module {:?}", wp.cgu_name);
        (buffer, CString::new(wp.cgu_name).unwrap())
    }));
    for module in modules {
        match module {
            FatLTOInput::InMemory(m) => in_memory.push(m),
            FatLTOInput::Serialized { name, buffer } => {
                info!("pushing serialized module {:?}", name);
                let buffer = SerializedModule::Local(buffer);
                serialized_modules.push((buffer, CString::new(name).unwrap()));
            }
        }
    }

    // Find the "costliest" module and merge everything into that codegen unit.
    // All the other modules will be serialized and reparsed into the new
    // context, so this hopefully avoids serializing and parsing the largest
    // codegen unit.
    //
    // Additionally use a regular module as the base here to ensure that various
    // file copy operations in the backend work correctly. The only other kind
    // of module here should be an allocator one, and if your crate is smaller
    // than the allocator module then the size doesn't really matter anyway.
    let costliest_module = in_memory
        .iter()
        .enumerate()
        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
        .map(|(i, module)| {
            let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
            (cost, i)
        })
        .max();

    // If we found a costliest module, we're good to go. Otherwise all our
    // inputs were serialized, which could happen, for example, if all our
    // inputs were incrementally reread from the cache and we're just
    // re-executing the LTO passes. If that's the case deserialize the first
    // module and create a linker with it.
    let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
        Some((_cost, i)) => in_memory.remove(i),
        None => {
            assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
            let (buffer, name) = serialized_modules.remove(0);
            info!("no in-memory regular modules to choose from, parsing {:?}", name);
            ModuleCodegen {
                module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
                name: name.into_string().unwrap(),
                kind: ModuleKind::Regular,
            }
        }
    };
    let mut serialized_bitcode = Vec::new();
    {
        let (llcx, llmod) = {
            let llvm = &module.module_llvm;
            (&llvm.llcx, llvm.llmod())
        };
        info!("using {:?} as a base module", module.name);

        // The linking steps below may produce errors and diagnostics within LLVM
        // which we'd like to handle and print, so set up our diagnostic handlers
        // (which get unregistered when they go out of scope below).
        let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);

        // For all other modules we codegened we'll need to link them into our own
        // bitcode. All modules were codegened in their own LLVM context, however,
        // and we want to move everything to the same LLVM context. Currently the
        // way we know of to do that is to serialize them to a string and then parse
        // them later. Not great but hey, that's why it's "fat" LTO, right?
        for module in in_memory {
            let buffer = ModuleBuffer::new(module.module_llvm.llmod());
            let llmod_id = CString::new(&module.name[..]).unwrap();
            serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
        }
        // Sort the modules to ensure we produce deterministic results.
        serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));

        // For all serialized bitcode files we parse them and link them in as we did
        // above, this is all mostly handled in C++. Like above, though, we don't
        // know much about the memory management here so we err on the side of being
        // safe and persist everything with the original module.
        let mut linker = Linker::new(llmod);
        for (bc_decoded, name) in serialized_modules {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_fat_lto_link_module", format!("{:?}", name));
            info!("linking {:?}", name);
            let data = bc_decoded.data();
            linker.add(data).map_err(|()| {
                let msg = format!("failed to load bc of {:?}", name);
                write::llvm_err(diag_handler, &msg)
            })?;
            serialized_bitcode.push(bc_decoded);
        }
        drop(linker);
        save_temp_bitcode(cgcx, &module, "lto.input");

        // Fat LTO also suffers from the invalid DWARF issue similar to Thin LTO.
        // Here we rewrite all `DICompileUnit` pointers if there is only one `DICompileUnit`.
        // This only works around the problem when codegen-units = 1.
        // Refer to the comments in the `optimize_thin_module` function for more details.
        let mut cu1 = ptr::null_mut();
        let mut cu2 = ptr::null_mut();
        unsafe { llvm::LLVMRustLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2) };
        if !cu2.is_null() {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_fat_lto_patch_debuginfo", &*module.name);
            unsafe { llvm::LLVMRustLTOPatchDICompileUnit(llmod, cu1) };
            save_temp_bitcode(cgcx, &module, "fat-lto-after-patch");
        }

        // Internalize everything except the symbols we computed above
        // (`symbols_below_threshold`) to help strip out more modules and such.
        unsafe {
            let ptr = symbols_below_threshold.as_ptr();
            llvm::LLVMRustRunRestrictionPass(
                llmod,
                ptr as *const *const libc::c_char,
                symbols_below_threshold.len() as libc::size_t,
            );
            save_temp_bitcode(cgcx, &module, "lto.after-restriction");
        }

        if cgcx.no_landing_pads {
            unsafe {
                llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
            }
            save_temp_bitcode(cgcx, &module, "lto.after-nounwind");
        }
    }

    Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: serialized_bitcode })
}

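/// Minimal wrapper around LLVM's bitcode linker (`LLVMRustLinkerNew` /
/// `LLVMRustLinkerAdd`), used by fat LTO to merge every serialized module into
/// the chosen base module.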
crate struct Linker<'a>(&'a mut llvm::Linker<'a>);

impl Linker<'a> {
    crate fn new(llmod: &'a llvm::Module) -> Self {
        unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
    }

    crate fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
        unsafe {
            if llvm::LLVMRustLinkerAdd(
                self.0,
                bytecode.as_ptr() as *const libc::c_char,
                bytecode.len(),
            ) {
                Ok(())
            } else {
                Err(())
            }
        }
    }
}

impl Drop for Linker<'a> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
        }
    }
}

/// Prepare "thin" LTO to get run on these modules.
///
/// The general structure of ThinLTO is quite different from the structure of
/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
/// one giant LLVM module, and then we run more optimization passes over this
/// big module after internalizing most symbols. Thin LTO, on the other hand,
/// avoids this large bottleneck through more targeted optimization.
///
/// At a high level Thin LTO looks like:
///
///     1. Prepare a "summary" of each LLVM module in question which describes
///        the values inside, cost of the values, etc.
///     2. Merge the summaries of all modules in question into one "index"
///     3. Perform some global analysis on this index
///     4. For each module, use the index and analysis calculated previously to
///        perform local transformations on the module, for example inlining
///        small functions from other modules.
///     5. Run thin-specific optimization passes over each module, and then code
///        generate everything at the end.
///
/// The summary for each module is intended to be quite cheap, and the global
/// index is relatively cheap to create as well. As a result, the goal of
/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
/// situations. For example one cheap optimization is that we can parallelize
/// all codegen modules, easily making use of all the cores on a machine.
///
/// With all that in mind, this function is designed specifically to calculate
/// the *index* for ThinLTO. This index will then be shared amongst all of the
/// `LtoModuleCodegen` units returned below and destroyed once they all go out
/// of scope.
fn thin_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    modules: Vec<(String, ThinBuffer)>,
    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
    unsafe {
        info!("going for that thin, thin LTO");

        let green_modules: FxHashMap<_, _> =
            cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();

        let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
        let mut thin_buffers = Vec::with_capacity(modules.len());
        let mut module_names = Vec::with_capacity(full_scope_len);
        let mut thin_modules = Vec::with_capacity(full_scope_len);

        for (i, (name, buffer)) in modules.into_iter().enumerate() {
            info!("local module: {} - {}", i, name);
            let cname = CString::new(name.clone()).unwrap();
            thin_modules.push(llvm::ThinLTOModule {
                identifier: cname.as_ptr(),
                data: buffer.data().as_ptr(),
                len: buffer.data().len(),
            });
            thin_buffers.push(buffer);
            module_names.push(cname);
        }

        // FIXME: All upstream crates are deserialized internally in the
        //        function below to extract their summary and modules. Note that
        //        unlike the loop above we *must* decode and/or read something
        //        here as these are all just serialized files on disk. An
        //        improvement, however, to make here would be to store the
        //        module summary separately from the actual module itself. Right
        //        now this is stored in one large bitcode file, and the entire
        //        file is deflate-compressed. We could try to bypass some of the
        //        decompression by storing the index uncompressed and only
        //        lazily decompressing the bitcode if necessary.
        //
        //        Note that truly taking advantage of this optimization will
        //        likely be further down the road. We'd have to implement
        //        incremental ThinLTO first where we could actually avoid
        //        looking at the contents of upstream modules entirely sometimes
        //        (we must always unconditionally look at the index).
        let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());

        let cached_modules =
            cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));

        for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
            info!("upstream or cached module {:?}", name);
            thin_modules.push(llvm::ThinLTOModule {
                identifier: name.as_ptr(),
                data: module.data().as_ptr(),
                len: module.data().len(),
            });
            serialized.push(module);
            module_names.push(name);
        }

        // Sanity check
        assert_eq!(thin_modules.len(), module_names.len());

        // Delegate to the C++ bindings to create some data here. Once this is a
        // tried-and-true interface we may wish to try to upstream some of this
        // to LLVM itself, right now we reimplement a lot of what they do
        // upstream...
        let data = llvm::LLVMRustCreateThinLTOData(
            thin_modules.as_ptr(),
            thin_modules.len() as u32,
            symbols_below_threshold.as_ptr(),
            symbols_below_threshold.len() as u32,
        )
        .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;

        let data = ThinData(data);

        info!("thin LTO data created");

        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
            cgcx.incr_comp_session_dir
        {
            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
            // If the previous file was deleted, or we get an IO error
            // reading the file, then we'll just use `None` as the
            // prev_key_map, which will force the code to be recompiled.
            let prev =
                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
            (Some(path), prev, curr)
        } else {
            // If we don't compile incrementally, we don't need to load the
            // import data from LLVM.
            assert!(green_modules.is_empty());
            let curr = ThinLTOKeysMap::default();
            (None, None, curr)
        };
        info!("thin LTO cache key map loaded");
        info!("prev_key_map: {:#?}", prev_key_map);
        info!("curr_key_map: {:#?}", curr_key_map);

        // Throw our data in an `Arc` as we'll be sharing it across threads. We
        // also put all memory referenced by the C++ data (buffers, ids, etc)
        // into the arc as well. After this we'll create a thin module
        // codegen per module in this data.
        let shared = Arc::new(ThinShared {
            data,
            thin_buffers,
            serialized_modules: serialized,
            module_names,
        });

        let mut copy_jobs = vec![];
        let mut opt_jobs = vec![];

        info!("checking which modules can be re-used and which have to be re-optimized.");
        for (module_index, module_name) in shared.module_names.iter().enumerate() {
            let module_name = module_name_to_str(module_name);
            if let (Some(prev_key_map), true) =
                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
            {
                assert!(cgcx.incr_comp_session_dir.is_some());

                // If a module exists in both the current and the previous session,
                // and has the same LTO cache key in both sessions, then we can re-use it
                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                    let work_product = green_modules[module_name].clone();
                    copy_jobs.push(work_product);
                    info!(" - {}: re-used", module_name);
                    assert!(cgcx.incr_comp_session_dir.is_some());
                    cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
                    continue;
                }
            }

            info!(" - {}: re-compiled", module_name);
            opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
                shared: shared.clone(),
                idx: module_index,
            }));
        }

        // Save the current ThinLTO import information for the next compilation
        // session, overwriting the previous serialized data (if any).
        if let Some(path) = key_map_path {
            if let Err(err) = curr_key_map.save_to_file(&path) {
                let msg = format!("Error while writing ThinLTO key data: {}", err);
                return Err(write::llvm_err(diag_handler, &msg));
            }
        }

        Ok((opt_jobs, copy_jobs))
    }
}

pub(crate) fn run_pass_manager(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    module: &ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
    thin: bool,
) -> Result<(), FatalError> {
    let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &module.name[..]);

    // Now we have one massive module inside of llmod. Time to run the
    // LTO-specific optimization passes that LLVM provides.
    //
    // This code is based off the code found in llvm's LTO code generator:
    //      tools/lto/LTOCodeGenerator.cpp
    debug!("running the pass manager");
    unsafe {
        if write::should_use_new_llvm_pass_manager(cgcx, config) {
            let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
            let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
            write::optimize_with_new_llvm_pass_manager(
                cgcx,
                diag_handler,
                module,
                config,
                opt_level,
                opt_stage,
            )?;
            debug!("lto done");
            return Ok(());
        }

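        // Otherwise fall back to the legacy pass manager. Pass names handed to
        // `LLVMRustFindAndCreatePass` must be NUL-terminated C strings, hence
        // the trailing `\0` in the literals below.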
        let pm = llvm::LLVMCreatePassManager();
        llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);

        if config.verify_llvm_ir {
            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
            llvm::LLVMRustAddPass(pm, pass.unwrap());
        }

        let opt_level = config
            .opt_level
            .map(|x| to_llvm_opt_settings(x).0)
            .unwrap_or(llvm::CodeGenOptLevel::None);
        with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
            if thin {
                llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
            } else {
                llvm::LLVMPassManagerBuilderPopulateLTOPassManager(
                    b, pm, /* Internalize = */ False, /* RunInliner = */ True,
                );
            }
        });

        // We always generate bitcode through ThinLTOBuffers,
        // which do not support anonymous globals
        if config.bitcode_needed() {
            let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
            llvm::LLVMRustAddPass(pm, pass.unwrap());
        }

        if config.verify_llvm_ir {
            let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
            llvm::LLVMRustAddPass(pm, pass.unwrap());
        }

        llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());

        llvm::LLVMDisposePassManager(pm);
    }
    debug!("lto done");
    Ok(())
}

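/// An owned, serialized copy of an LLVM module (as produced by
/// `LLVMRustModuleBufferCreate`); the underlying buffer is freed again on drop.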
pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);

unsafe impl Send for ModuleBuffer {}
unsafe impl Sync for ModuleBuffer {}

impl ModuleBuffer {
    pub fn new(m: &llvm::Module) -> ModuleBuffer {
        ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
    }
}

impl ModuleBufferMethods for ModuleBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
            let len = llvm::LLVMRustModuleBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ModuleBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

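/// Owning handle for the ThinLTO global analysis data produced by
/// `LLVMRustCreateThinLTOData`; it is shared across modules via `ThinShared`
/// and freed on drop.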
pub struct ThinData(&'static mut llvm::ThinLTOData);

unsafe impl Send for ThinData {}
unsafe impl Sync for ThinData {}

impl Drop for ThinData {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
        }
    }
}

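/// A serialized module used as input to the ThinLTO phase; created via
/// `LLVMRustThinLTOBufferCreate` and freed on drop.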
pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);

unsafe impl Send for ThinBuffer {}
unsafe impl Sync for ThinBuffer {}

impl ThinBuffer {
    pub fn new(m: &llvm::Module) -> ThinBuffer {
        unsafe {
            let buffer = llvm::LLVMRustThinLTOBufferCreate(m);
            ThinBuffer(buffer)
        }
    }
}

impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ThinBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

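/// Runs the per-module ThinLTO pipeline on a single module: rename, weak
/// symbol resolution, internalization, cross-module importing, `DICompileUnit`
/// patching, and finally the LTO optimization passes.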
pub unsafe fn optimize_thin_module(
    thin_module: &mut ThinModule<LlvmCodegenBackend>,
    cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    let module_name = &thin_module.shared.module_names[thin_module.idx];
    let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
    let tm =
        (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;

    // Right now the implementation we've got only works over serialized
    // modules, so we create a fresh new LLVM context and parse the module
    // into that context. One day, however, we may do this for upstream
    // crates but for locally codegened modules we may be able to reuse
    // that LLVM Context and Module.
    let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
    let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
    let module = ModuleCodegen {
        module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
        name: thin_module.name().to_string(),
        kind: ModuleKind::Regular,
    };
    {
        let target = &*module.module_llvm.tm;
        let llmod = module.module_llvm.llmod();
        save_temp_bitcode(cgcx, &module, "thin-lto-input");

        // Before we do much else find the "main" `DICompileUnit` that we'll be
        // using below. If we find more than one, though, then rustc has changed
        // in a way we're not ready for, so generate an ICE by returning
        // an error.
        let mut cu1 = ptr::null_mut();
        let mut cu2 = ptr::null_mut();
        llvm::LLVMRustLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
        if !cu2.is_null() {
            let msg = "multiple source DICompileUnits found";
            return Err(write::llvm_err(&diag_handler, msg));
        }

        // Like with "fat" LTO, get some better optimizations if landing pads
        // are disabled by removing all landing pads.
        if cgcx.no_landing_pads {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_remove_landing_pads", thin_module.name());
            llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
            save_temp_bitcode(cgcx, &module, "thin-lto-after-nounwind");
        }

        // Up next comes the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`)
        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
                let msg = "failed to prepare thin LTO module";
                return Err(write::llvm_err(&diag_handler, msg));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
                let msg = "failed to prepare thin LTO module";
                return Err(write::llvm_err(&diag_handler, msg));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
                let msg = "failed to prepare thin LTO module";
                return Err(write::llvm_err(&diag_handler, msg));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
        }

        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
                let msg = "failed to prepare thin LTO module";
                return Err(write::llvm_err(&diag_handler, msg));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
        }

        // Ok now this is a bit unfortunate. This is also something you won't
        // find upstream in LLVM's ThinLTO passes! This is a hack for now to
        // work around bugs in LLVM.
        //
        // First discovered in #45511, it was found that as part of ThinLTO
        // importing passes LLVM will import `DICompileUnit` metadata
        // information across modules. This means that we'll be working with one
        // LLVM module that has multiple `DICompileUnit` instances in it (a
        // bunch of `llvm.dbg.cu` members). Unfortunately there are a number of
        // bugs in LLVM's backend which generate invalid DWARF in a situation
        // like this:
        //
        //  https://bugs.llvm.org/show_bug.cgi?id=35212
        //  https://bugs.llvm.org/show_bug.cgi?id=35562
        //
        // While the first bug there is fixed the second ended up causing #46346
        // which was basically a resurgence of #45511 after LLVM's bug 35212 was
        // fixed.
        //
        // The function called below is a huge hack around this problem. It is
        // defined in `PassWrapper.cpp` and will basically "merge" all
        // `DICompileUnit` instances in a module: it takes all the objects,
        // rewrites all `DISubprogram` pointers to point to the first
        // `DICompileUnit`, and then deletes all the other units.
        //
        // This probably mangles the debug info slightly (but hopefully
        // not too much) but for now at least gets LLVM to emit valid DWARF (or
        // so it appears). Hopefully we can remove this once upstream bugs are
        // fixed in LLVM.
        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
            llvm::LLVMRustLTOPatchDICompileUnit(llmod, cu1);
            save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
        }

        // Alright now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        {
            info!("running thin lto passes over {}", module.name);
            let config = cgcx.config(module.kind);
            run_pass_manager(cgcx, &diag_handler, &module, config, true)?;
            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
        }
    }
    Ok(module)
}

/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
#[derive(Debug, Default)]
pub struct ThinLTOKeysMap {
    // key = llvm name of importing module, value = LLVM cache key
    keys: FxHashMap<String, String>,
}

impl ThinLTOKeysMap {
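    // The on-disk format written by `save_to_file` (and read back by
    // `load_from_file`) is one `<module name> <cache key>` pair per line,
    // separated by a single space, e.g. (hypothetical values)
    // `foo.7rcbfp3g-cgu.0 1234567890abcdef`.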
    fn save_to_file(&self, path: &Path) -> io::Result<()> {
        use std::io::Write;
        let file = File::create(path)?;
        let mut writer = io::BufWriter::new(file);
        for (module, key) in &self.keys {
            writeln!(writer, "{} {}", module, key)?;
        }
        Ok(())
    }

    fn load_from_file(path: &Path) -> io::Result<Self> {
        use std::io::BufRead;
        let mut keys = FxHashMap::default();
        let file = File::open(path)?;
        for line in io::BufReader::new(file).lines() {
            let line = line?;
            let mut split = line.split(' ');
            let module = split.next().unwrap();
            let key = split.next().unwrap();
            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
            keys.insert(module.to_string(), key.to_string());
        }
        Ok(Self { keys })
    }

    fn from_thin_lto_modules(
        data: &ThinData,
        modules: &[llvm::ThinLTOModule],
        names: &[CString],
    ) -> Self {
        let keys = iter::zip(modules, names)
            .map(|(module, name)| {
                let key = build_string(|rust_str| unsafe {
                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
                })
                .expect("Invalid ThinLTO module key");
                (name.clone().into_string().unwrap(), key)
            })
            .collect();
        Self { keys }
    }
}

fn module_name_to_str(c_str: &CStr) -> &str {
    c_str.to_str().unwrap_or_else(|e| {
        bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
    })
}

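/// Parses a serialized module's bitcode into the given LLVM context so it can
/// be optimized as part of LTO.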
pub fn parse_module<'a>(
    cx: &'a llvm::Context,
    name: &CStr,
    data: &[u8],
    diag_handler: &Handler,
) -> Result<&'a llvm::Module, FatalError> {
    unsafe {
        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
            || {
                let msg = "failed to parse bitcode for LTO module";
                write::llvm_err(diag_handler, msg)
            },
        )
    }
}