Struct rustc_trans::trans::CrateContext (Unstable)
pub struct CrateContext<'a, 'tcx> { // some fields omitted }
Methods
impl<'b, 'tcx> CrateContext<'b, 'tcx>
fn shared(&self) -> &'b SharedCrateContext<'tcx>
fn local(&self) -> &'b LocalCrateContext<'tcx>
fn rotate(&self) -> CrateContext<'b, 'tcx>
Get a (possibly) different CrateContext from the same SharedCrateContext.
fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx>
Either iterate over only self, or iterate over all CrateContexts in the SharedCrateContext. The iterator produces (ccx, is_origin) pairs, where is_origin is true if ccx is self and false otherwise. This method is useful for avoiding code duplication in cases where it may or may not be necessary to translate code into every context.
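A minimal, self-contained sketch of the (ccx, is_origin) pattern this enables. The types and helpers below (Ctx, translate, the item name) are toy stand-ins, not compiler code; only the control flow mirrors the description above.

// Toy stand-in for a codegen context; not the real CrateContext.
struct Ctx { name: &'static str }

fn translate(ctx: &Ctx, item: &str) {
    println!("translating {} into {}", item, ctx.name);
}

fn main() {
    let contexts = vec![Ctx { name: "origin" }, Ctx { name: "other" }];
    let iter_all = true; // mirrors the iter_all flag passed to maybe_iter

    // Either visit only the originating context, or all of them, and use
    // is_origin to do once-per-crate work exactly once.
    for (i, ccx) in contexts.iter().enumerate() {
        if !iter_all && i != 0 { continue; }
        let is_origin = i == 0;
        translate(ccx, "some_item");
        if is_origin {
            println!("recording the symbol once, in the originating context");
        }
    }
}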
fn tcx<'a>(&'a self) -> &'a ctxt<'tcx>
fn sess<'a>(&'a self) -> &'a Session
fn builder<'a>(&'a self) -> Builder<'a, 'tcx>
fn raw_builder<'a>(&'a self) -> BuilderRef
fn get_intrinsic(&self, key: &&'static str) -> ValueRef
fn is_split_stack_supported(&self) -> bool
fn llmod(&self) -> ModuleRef
fn llcx(&self) -> ContextRef
fn td<'a>(&'a self) -> &'a TargetData
fn tn<'a>(&'a self) -> &'a TypeNames
fn externs<'a>(&'a self) -> &'a RefCell<ExternMap>
fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>>
fn export_map<'a>(&'a self) -> &'a ExportMap
fn reachable<'a>(&'a self) -> &'a NodeSet
fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>>
fn link_meta<'a>(&'a self) -> &'a LinkMeta
fn needs_unwind_cleanup_cache(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, bool>>
fn fn_pointer_shims(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>
fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>
fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<NodeId>>>
fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<DefId>>
fn monomorphized<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>
fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>>
fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<PolyTraitRef<'tcx>, ValueRef>>
fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<InternedString, ValueRef>>
fn const_unsized<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>>
fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>>
fn const_values<'a>(&'a self) -> &'a RefCell<FnvHashMap<(NodeId, &'tcx Substs<'tcx>), ValueRef>>
fn static_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>>
fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>>
fn impl_method_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<(DefId, Name), DefId>>
fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>>
fn lltypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>>
fn llsizingtypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>>
fn adt_reprs<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Rc<Repr<'tcx>>>>
fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256>
fn type_hashcodes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>>
fn stats<'a>(&'a self) -> &'a Stats
fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<FnvHashSet<String>>
fn available_drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>>
fn int_type(&self) -> Type
fn opaque_vec_type(&self) -> Type
fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>
fn dbg_cx<'a>(&'a self) -> &'a Option<CrateDebugContext<'tcx>>
fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>>
fn count_llvm_insn(&self)
fn trait_cache(&self) -> &RefCell<FnvHashMap<PolyTraitRef<'tcx>, Vtable<'tcx, ()>>>
fn obj_size_bound(&self) -> u64
Return exclusive upper bound on object size.
The theoretical maximum object size is defined as the maximum positive int value. This ensures that the offset semantics remain well-defined by allowing it to correctly index every address within an object, along with one byte past the end, and by allowing int to store the difference between any two pointers into an object.
The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer to represent object size in bits. It would need to be 1 << 61 to account for this, but is currently conservatively bounded to 1 << 47 as that is enough to cover the current usable address space on 64-bit ARMv8 and x86_64.
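A hedged sketch of how such a bound could be selected per target pointer width. The 1 << 47 value for 64-bit targets comes from the description above; the function, its parameter, and the 32-bit arm (inferred from the "maximum positive int value" rule) are illustrative assumptions, not the actual implementation.

// Illustrative only: picks the exclusive object-size bound described above.
// pointer_width_bits is a hypothetical parameter; the 32-bit arm assumes
// 1 << 31 as the exclusive bound just above the maximum positive 32-bit int.
fn obj_size_bound(pointer_width_bits: u32) -> u64 {
    match pointer_width_bits {
        64 => 1 << 47, // conservative bound covering usable address space on ARMv8/x86_64
        32 => 1 << 31, // assumed: exclusive bound derived from the max positive 32-bit int
        _ => panic!("unsupported pointer width"),
    }
}

fn main() {
    assert_eq!(obj_size_bound(64), 140_737_488_355_328); // 2^47
    assert_eq!(obj_size_bound(32), 2_147_483_648);       // 2^31
}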