diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b55726708f34..4bae44e8fdd90 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,6 +12,11 @@ if(POLICY CMP0067) cmake_policy(SET CMP0067 NEW) endif() +# Convert relative paths to absolute for subdirectory `target_sources` +if(POLICY CMP0076) + cmake_policy(SET CMP0076 NEW) +endif() + # Add path for custom CMake modules. list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") diff --git a/benchmark/scripts/run_smoke_bench b/benchmark/scripts/run_smoke_bench index 0facbe7b344f5..03e518d344d02 100755 --- a/benchmark/scripts/run_smoke_bench +++ b/benchmark/scripts/run_smoke_bench @@ -272,6 +272,7 @@ def report_code_size(opt_level, old_dir, new_dir, platform, output_file): old_lines = "" new_lines = "" for oldfile in files: + new_dir = os.path.join(new_dir, '') newfile = oldfile.replace(old_dir, new_dir, 1) if os.path.isfile(newfile): oldsize = get_codesize(oldfile) diff --git a/cmake/modules/AddSwift.cmake b/cmake/modules/AddSwift.cmake index 2848c7c9a3a7e..09abb13345bb4 100644 --- a/cmake/modules/AddSwift.cmake +++ b/cmake/modules/AddSwift.cmake @@ -399,17 +399,16 @@ function(_add_host_variant_link_flags target) endif() endfunction() -# Add a single variant of a new Swift library. +# Add a new Swift host library. # # Usage: -# _add_swift_host_library_single( -# target +# add_swift_host_library(name # [SHARED] # [STATIC] # [LLVM_LINK_COMPONENTS comp1 ...] # source1 [source2 source3 ...]) # -# target +# name # Name of the library (e.g., swiftParse). # # SHARED @@ -422,8 +421,8 @@ endfunction() # LLVM components this library depends on. # # source1 ... -# Sources to add into this library -function(_add_swift_host_library_single target) +# Sources to add into this library. +function(add_swift_host_library name) set(options SHARED STATIC) @@ -431,214 +430,142 @@ function(_add_swift_host_library_single target) set(multiple_parameter_options LLVM_LINK_COMPONENTS) - cmake_parse_arguments(ASHLS + cmake_parse_arguments(ASHL "${options}" "${single_parameter_options}" "${multiple_parameter_options}" ${ARGN}) - set(ASHLS_SOURCES ${ASHLS_UNPARSED_ARGUMENTS}) + set(ASHL_SOURCES ${ASHL_UNPARSED_ARGUMENTS}) - translate_flags(ASHLS "${options}") + translate_flags(ASHL "${options}") - if(NOT ASHLS_SHARED AND NOT ASHLS_STATIC) + if(NOT ASHL_SHARED AND NOT ASHL_STATIC) message(FATAL_ERROR "Either SHARED or STATIC must be specified") endif() - # Include LLVM Bitcode slices for iOS, Watch OS, and Apple TV OS device libraries. 
- set(embed_bitcode_arg) - if(SWIFT_EMBED_BITCODE_SECTION) - if(SWIFT_HOST_VARIANT_SDK MATCHES "(I|TV|WATCH)OS") - list(APPEND ASHLS_C_COMPILE_FLAGS "-fembed-bitcode") - set(embed_bitcode_arg EMBED_BITCODE) - endif() - endif() - if(XCODE) - string(REGEX MATCHALL "/[^/]+" split_path ${CMAKE_CURRENT_SOURCE_DIR}) - list(GET split_path -1 dir) - file(GLOB_RECURSE ASHLS_HEADERS + get_filename_component(dir ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) + + file(GLOB_RECURSE ASHL_HEADERS ${SWIFT_SOURCE_DIR}/include/swift${dir}/*.h ${SWIFT_SOURCE_DIR}/include/swift${dir}/*.def ${CMAKE_CURRENT_SOURCE_DIR}/*.def) - - file(GLOB_RECURSE ASHLS_TDS + file(GLOB_RECURSE ASHL_TDS ${SWIFT_SOURCE_DIR}/include/swift${dir}/*.td) - set_source_files_properties(${ASHLS_HEADERS} ${ASHLS_TDS} - PROPERTIES + set_source_files_properties(${ASHL_HEADERS} ${ASHL_TDS} PROPERTIES HEADER_FILE_ONLY true) - source_group("TableGen descriptions" FILES ${ASHLS_TDS}) + source_group("TableGen descriptions" FILES ${ASHL_TDS}) - set(ASHLS_SOURCES ${ASHLS_SOURCES} ${ASHLS_HEADERS} ${ASHLS_TDS}) + set(ASHL_SOURCES ${ASHL_SOURCES} ${ASHL_HEADERS} ${ASHL_TDS}) endif() - if(ASHLS_SHARED) + if(ASHL_SHARED) set(libkind SHARED) - elseif(ASHLS_STATIC) + elseif(ASHL_STATIC) set(libkind STATIC) endif() - add_library("${target}" ${libkind} ${ASHLS_SOURCES}) - _set_target_prefix_and_suffix("${target}" "${libkind}" "${SWIFT_HOST_VARIANT_SDK}") - add_dependencies(${target} ${LLVM_COMMON_DEPENDS}) - - if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS) - swift_windows_include_for_arch(${SWIFT_HOST_VARIANT_ARCH} SWIFTLIB_INCLUDE) - target_include_directories("${target}" SYSTEM PRIVATE ${SWIFTLIB_INCLUDE}) - set_target_properties(${target} - PROPERTIES - CXX_STANDARD 14) - endif() - - if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS) - set_property(TARGET "${target}" PROPERTY NO_SONAME ON) - endif() - - llvm_update_compile_flags(${target}) - - set_output_directory(${target} + add_library(${name} ${libkind} ${ASHL_SOURCES}) + add_dependencies(${name} ${LLVM_COMMON_DEPENDS}) + llvm_update_compile_flags(${name}) + swift_common_llvm_config(${name} ${ASHL_LLVM_LINK_COMPONENTS}) + set_output_directory(${name} BINARY_DIR ${SWIFT_RUNTIME_OUTPUT_INTDIR} LIBRARY_DIR ${SWIFT_LIBRARY_OUTPUT_INTDIR}) if(SWIFT_HOST_VARIANT_SDK IN_LIST SWIFT_APPLE_PLATFORMS) - set_target_properties("${target}" + set_target_properties(${name} PROPERTIES INSTALL_NAME_DIR "@rpath") elseif(SWIFT_HOST_VARIANT_SDK STREQUAL LINUX) - set_target_properties("${target}" + set_target_properties(${name} PROPERTIES INSTALL_RPATH "$ORIGIN:/usr/lib/swift/linux") elseif(SWIFT_HOST_VARIANT_SDK STREQUAL CYGWIN) - set_target_properties("${target}" + set_target_properties(${name} PROPERTIES INSTALL_RPATH "$ORIGIN:/usr/lib/swift/cygwin") elseif(SWIFT_HOST_VARIANT_SDK STREQUAL "ANDROID") - set_target_properties("${target}" + set_target_properties(${name} PROPERTIES INSTALL_RPATH "$ORIGIN") endif() - set_target_properties("${target}" PROPERTIES BUILD_WITH_INSTALL_RPATH YES) - set_target_properties("${target}" PROPERTIES FOLDER "Swift libraries") + set_target_properties(${name} PROPERTIES + BUILD_WITH_INSTALL_RPATH YES + FOLDER "Swift libraries") - # Call llvm_config() only for libraries that are part of the compiler. 
- swift_common_llvm_config("${target}" ${ASHLS_LLVM_LINK_COMPONENTS}) - - target_compile_options(${target} PRIVATE - ${ASHLS_C_COMPILE_FLAGS}) - if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS) - if(libkind STREQUAL SHARED) - target_compile_definitions(${target} PRIVATE - _WINDLL) - endif() - endif() - - _add_host_variant_c_compile_flags(${target}) - _add_host_variant_link_flags(${target}) + _add_host_variant_c_compile_flags(${name}) + _add_host_variant_link_flags(${name}) + _set_target_prefix_and_suffix(${name} "${libkind}" "${SWIFT_HOST_VARIANT_SDK}") # Set compilation and link flags. if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS) swift_windows_include_for_arch(${SWIFT_HOST_VARIANT_ARCH} ${SWIFT_HOST_VARIANT_ARCH}_INCLUDE) - target_include_directories(${target} SYSTEM PRIVATE + target_include_directories(${name} SYSTEM PRIVATE ${${SWIFT_HOST_VARIANT_ARCH}_INCLUDE}) + if(libkind STREQUAL SHARED) + target_compile_definitions(${name} PRIVATE + _WINDLL) + endif() + if(NOT ${CMAKE_C_COMPILER_ID} STREQUAL MSVC) - swift_windows_get_sdk_vfs_overlay(ASHLS_VFS_OVERLAY) - target_compile_options(${target} PRIVATE - "SHELL:-Xclang -ivfsoverlay -Xclang ${ASHLS_VFS_OVERLAY}") + swift_windows_get_sdk_vfs_overlay(ASHL_VFS_OVERLAY) + target_compile_options(${name} PRIVATE + "SHELL:-Xclang -ivfsoverlay -Xclang ${ASHL_VFS_OVERLAY}") # MSVC doesn't support -Xclang. We don't need to manually specify # the dependent libraries as `cl` does so. - target_compile_options(${target} PRIVATE + target_compile_options(${name} PRIVATE "SHELL:-Xclang --dependent-lib=oldnames" # TODO(compnerd) handle /MT, /MTd "SHELL:-Xclang --dependent-lib=msvcrt$<$:d>") endif() + + set_target_properties(${name} PROPERTIES + CXX_STANDARD 14 + NO_SONAME YES) endif() if(${SWIFT_HOST_VARIANT_SDK} IN_LIST SWIFT_APPLE_PLATFORMS) - target_link_options(${target} PRIVATE - "LINKER:-compatibility_version,1") - if(SWIFT_COMPILER_VERSION) - target_link_options(${target} PRIVATE - "LINKER:-current_version,${SWIFT_COMPILER_VERSION}") - endif() # Include LLVM Bitcode slices for iOS, Watch OS, and Apple TV OS device libraries. if(SWIFT_EMBED_BITCODE_SECTION) - if(${SWIFT_HOST_VARIANT_SDK} MATCHES "(I|TV|WATCH)OS") - target_link_options(${target} PRIVATE - "LINKER:-bitcode_bundle" - "LINKER:-lto_library,${LLVM_LIBRARY_DIR}/libLTO.dylib") - - # Please note that using a generator expression to fit - # this in a single target_link_options does not work - # (at least in CMake 3.15 and 3.16), - # since that seems not to allow the LINKER: prefix to be - # evaluated (i.e. it will be added as-is to the linker parameters) - if(SWIFT_EMBED_BITCODE_SECTION_HIDE_SYMBOLS) - target_link_options(${target} PRIVATE - "LINKER:-bitcode_hide_symbols") - endif() + target_compile_options(${name} PRIVATE + -fembed-bitcode) + target_link_options(${name} PRIVATE + "LINKER:-bitcode_bundle" + "LINKER:-lto_library,${LLVM_LIBRARY_DIR}/libLTO.dylib") + + # Please note that using a generator expression to fit this in a single + # target_link_options does not work (at least in CMake 3.15 and 3.16), + # since that seems not to allow the LINKER: prefix to be evaluated (i.e. + # it will be added as-is to the linker parameters) + if(SWIFT_EMBED_BITCODE_SECTION_HIDE_SYMBOLS) + target_link_options(${name} PRIVATE + "LINKER:-bitcode_hide_symbols") endif() endif() - endif() - - # Do not add code here. -endfunction() -# Add a new Swift host library. -# -# Usage: -# add_swift_host_library(name -# [SHARED] -# [STATIC] -# [LLVM_LINK_COMPONENTS comp1 ...] 
-# source1 [source2 source3 ...]) -# -# name -# Name of the library (e.g., swiftParse). -# -# SHARED -# Build a shared library. -# -# STATIC -# Build a static library. -# -# LLVM_LINK_COMPONENTS -# LLVM components this library depends on. -# -# source1 ... -# Sources to add into this library. -function(add_swift_host_library name) - set(options - SHARED - STATIC) - set(single_parameter_options) - set(multiple_parameter_options - LLVM_LINK_COMPONENTS) - - cmake_parse_arguments(ASHL - "${options}" - "${single_parameter_options}" - "${multiple_parameter_options}" - ${ARGN}) - set(ASHL_SOURCES ${ASHL_UNPARSED_ARGUMENTS}) - - translate_flags(ASHL "${options}") + target_link_options(${name} PRIVATE + "LINKER:-compatibility_version,1") + if(SWIFT_COMPILER_VERSION) + target_link_options(${name} PRIVATE + "LINKER:-current_version,${SWIFT_COMPILER_VERSION}") + endif() - if(NOT ASHL_SHARED AND NOT ASHL_STATIC) - message(FATAL_ERROR "Either SHARED or STATIC must be specified") + set(DEPLOYMENT_VERSION "${SWIFT_SDK_${SWIFT_HOST_VARIANT_SDK}_DEPLOYMENT_VERSION}") + # MSVC, clang-cl, gcc don't understand -target. + if(CMAKE_C_COMPILER_ID MATCHES "Clang" AND NOT SWIFT_COMPILER_IS_MSVC_LIKE) + get_target_triple(target target_variant "${SWIFT_HOST_VARIANT_SDK}" "${SWIFT_HOST_VARIANT_ARCH}" + MACCATALYST_BUILD_FLAVOR "" + DEPLOYMENT_VERSION "${DEPLOYMENT_VERSION}") + target_link_options(${name} PRIVATE -target;${target}) + endif() endif() - _add_swift_host_library_single( - ${name} - ${ASHL_SHARED_keyword} - ${ASHL_STATIC_keyword} - ${ASHL_SOURCES} - LLVM_LINK_COMPONENTS ${ASHL_LLVM_LINK_COMPONENTS} - ) - add_dependencies(dev ${name}) if(NOT LLVM_INSTALL_TOOLCHAIN_ONLY) swift_install_in_component(TARGETS ${name} diff --git a/docs/SIL.rst b/docs/SIL.rst index c40f0fc2d14ea..947f166123d33 100644 --- a/docs/SIL.rst +++ b/docs/SIL.rst @@ -2107,6 +2107,94 @@ make the use of such types more convenient; it does not shift the ultimate responsibility for assuring the safety of unsafe language/library features away from the user. +Copy-on-Write Representation +---------------------------- + +Copy-on-Write (COW) data structures are implemented by a reference to an object +which is copied on mutation in case it's not uniquely referenced. + +A COW mutation sequence in SIL typically looks like:: + + (%uniq, %buffer) = begin_cow_mutation %immutable_buffer : $BufferClass + cond_br %uniq, bb_uniq, bb_not_unique + bb_uniq: + br bb_mutate(%buffer : $BufferClass) + bb_not_unique: + %copied_buffer = apply %copy_buffer_function(%buffer) : ... + br bb_mutate(%copied_buffer : $BufferClass) + bb_mutate(%mutable_buffer : $BufferClass): + %field = ref_element_addr %mutable_buffer : $BufferClass, #BufferClass.Field + store %value to %field : $ValueType + %new_immutable_buffer = end_cow_mutation %buffer : $BufferClass + +Loading from a COW data structure looks like:: + + %field1 = ref_element_addr [immutable] %immutable_buffer : $BufferClass, #BufferClass.Field + %value1 = load %field1 : $*FieldType + ... + %field2 = ref_element_addr [immutable] %immutable_buffer : $BufferClass, #BufferClass.Field + %value2 = load %field2 : $*FieldType + +The ``immutable`` attribute means that loading values from ``ref_element_addr`` +and ``ref_tail_addr`` instructions, which have the *same* operand, are +equivalent. +In other words, it's guaranteed that a buffer's properties are not mutated +between two ``ref_element/tail_addr [immutable]`` as long as they have the +same buffer reference as operand. +This is even true if e.g. 
the buffer 'escapes' to an unknown function. + + +In the example above, ``%value2`` is equal to ``%value1`` because the operand +of both ``ref_element_addr`` instructions is the same ``%immutable_buffer``. +Conceptually, the content of a COW buffer object can be seen as part of +the same *static* (immutable) SSA value as the buffer reference. + +The lifetime of a COW value is strictly separated into *mutable* and +*immutable* regions by ``begin_cow_mutation`` and +``end_cow_mutation`` instructions:: + + %b1 = alloc_ref $BufferClass + // The buffer %b1 is mutable + %b2 = end_cow_mutation %b1 : $BufferClass + // The buffer %b2 is immutable + (%u1, %b3) = begin_cow_mutation %b1 : $BufferClass + // The buffer %b3 is mutable + %b4 = end_cow_mutation %b3 : $BufferClass + // The buffer %b4 is immutable + ... + +Both, ``begin_cow_mutation`` and ``end_cow_mutation``, consume their operand +and return the new buffer as an *owned* value. +The ``begin_cow_mutation`` will compile down to a uniqueness check and +``end_cow_mutation`` will compile to a no-op. + +Although the physical pointer value of the returned buffer reference is the +same as the operand, it's important to generate a *new* buffer reference in +SIL. It prevents the optimizer from moving buffer accesses from a *mutable* into +a *immutable* region and vice versa. + +Because the buffer *content* is conceptually part of the +buffer *reference* SSA value, there must be a new buffer reference every time +the buffer content is mutated. + +To illustrate this, let's look at an example, where a COW value is mutated in +a loop. As with a scalar SSA value, also mutating a COW buffer will enforce a +phi-argument in the loop header block (for simplicity the code for copying a +non-unique buffer is not shown):: + + header_block(%b_phi : $BufferClass): + (%u, %b_mutate) = begin_cow_mutation %b_phi : $BufferClass + // Store something to %b_mutate + %b_immutable = end_cow_mutation %b_mutate : $BufferClass + cond_br %loop_cond, exit_block, backedge_block + backedge_block: + br header_block(b_immutable : $BufferClass) + exit_block: + +Two adjacent ``begin_cow_mutation`` and ``end_cow_mutation`` instructions +don't need to be in the same function. + + Instruction Set --------------- @@ -3199,6 +3287,56 @@ strong reference count is greater than 1. A discussion of the semantics can be found here: :ref:`arcopts.is_unique`. +begin_cow_mutation +`````````````````` + +:: + + sil-instruction ::= 'begin_cow_mutation' '[native]'? sil-operand + + (%1, %2) = begin_cow_mutation %0 : $C + // $C must be a reference-counted type + // %1 will be of type Builtin.Int1 + // %2 will be of type C + +Checks whether %0 is a unique reference to a memory object. Returns 1 in the +first result if the strong reference count is 1, and 0 if the strong reference +count is greater than 1. + +Returns the reference operand in the second result. The returned reference can +be used to mutate the object. Technically, the returned reference is the same +as the operand. But it's important that optimizations see the result as a +different SSA value than the operand. This is important to ensure the +correctness of ``ref_element_addr [immutable]``. + +The operand is consumed and the second result is returned as owned. + +The optional ``native`` attribute specifies that the operand has native Swift +reference counting. + +end_cow_mutation +```````````````` + +:: + + sil-instruction ::= 'end_cow_mutation' '[keep_unique]'? 
sil-operand + + %1 = end_cow_mutation %0 : $C + // $C must be a reference-counted type + // %1 will be of type C + +Marks the end of the mutation of a reference counted object. +Returns the reference operand. Technically, the returned reference is the same +as the operand. But it's important that optimizations see the result as a +different SSA value than the operand. This is important to ensure the +correctness of ``ref_element_addr [immutable]``. + +The operand is consumed and the result is returned as owned. The result is +guaranteed to be uniquely referenced. + +The optional ``keep_unique`` attribute indicates that the optimizer must not +replace this reference with a not uniquely reference object. + is_escaping_closure ``````````````````` @@ -4193,7 +4331,7 @@ ref_element_addr ```````````````` :: - sil-instruction ::= 'ref_element_addr' sil-operand ',' sil-decl-ref + sil-instruction ::= 'ref_element_addr' '[immutable]'? sil-operand ',' sil-decl-ref %1 = ref_element_addr %0 : $C, #C.field // %0 must be a value of class type $C @@ -4205,11 +4343,15 @@ Given an instance of a class, derives the address of a physical instance variable inside the instance. It is undefined behavior if the class value is null. +The ``immutable`` attribute specifies that all loads of the same instance +variable from the same class reference operand are guaranteed to yield the +same value. + ref_tail_addr ````````````` :: - sil-instruction ::= 'ref_tail_addr' sil-operand ',' sil-type + sil-instruction ::= 'ref_tail_addr' '[immutable]'? sil-operand ',' sil-type %1 = ref_tail_addr %0 : $C, $E // %0 must be a value of class type $C with tail-allocated elements $E @@ -4222,6 +4364,10 @@ object which is created by an ``alloc_ref`` with ``tail_elems``. It is undefined behavior if the class instance does not have tail-allocated arrays or if the element-types do not match. +The ``immutable`` attribute specifies that all loads of the same instance +variable from the same class reference operand are guaranteed to yield the +same value. + Enums ~~~~~ @@ -5315,7 +5461,7 @@ unconditional_checked_cast_addr sil-type 'in' sil-operand 'to' sil-type 'in' sil-operand - unconditional_checked_cast_addr $A in %0 : $*@thick A to $B in $*@thick B + unconditional_checked_cast_addr $A in %0 : $*@thick A to $B in %1 : $*@thick B // $A and $B must be both addresses // %1 will be of type $*B // $A is destroyed during the conversion. There is no implicit copy. diff --git a/docs/Testing.md b/docs/Testing.md index 0add7bacfd718..09306082790ed 100644 --- a/docs/Testing.md +++ b/docs/Testing.md @@ -24,6 +24,24 @@ We use multiple approaches to test the Swift toolchain. locally before committing. (Usually on a single platform, and not necessarily all tests.) * Buildbots run all tests, on all supported platforms. + [Smoke testing](ContinuousIntegration.md#smoke-testing) + skips the iOS, tvOS, and watchOS platforms. + +The [test/lit.cfg](https://github.com/apple/swift/blob/master/test/lit.cfg) +uses an iOS 10.3 simulator configuration named "iPhone 5" for 32-bit testing. + +1. Download and install the iOS 10.3 simulator runtime, in Xcode's + [Components](https://help.apple.com/xcode/#/deva7379ae35) preferences. + +2. Create an "iPhone 5" simulator configuration, either in Xcode's + [Devices and Simulators](https://help.apple.com/xcode/#/devf225e58da) + window, or with the command line: + + ```sh + xcrun simctl create 'iPhone 5' 'com.apple.CoreSimulator.SimDeviceType.iPhone-5' + ``` + +3. 
Append `--ios` to the `utils/build-script` command line (see below). ### Testsuite subsets diff --git a/docs/TypeChecker.rst b/docs/TypeChecker.rst index bd4a2f5a80861..a526e1bffd5a0 100644 --- a/docs/TypeChecker.rst +++ b/docs/TypeChecker.rst @@ -718,7 +718,7 @@ Locators During constraint generation and solving, numerous constraints are created, broken apart, and solved. During constraint application as well as during diagnostics emission, it is important to track the -relationship between the constraints and the actual expressions from +relationship between the constraints and the actual AST nodes from which they originally came. For example, consider the following type checking problem:: @@ -754,16 +754,16 @@ functions was selected to perform the conversion, so that conversion function can be called by constraint application if all else succeeds. *Locators* address both issues by tracking the location and derivation -of constraints. Each locator is anchored at a specific expression, -i.e., the function application ``f(10.5, x)``, and contains a path of -zero or more derivation steps from that anchor. For example, the -"``T(f)`` ==Fn ``T0 -> T1``" constraint has a locator that is -anchored at the function application and a path with the "apply -function" derivation step, meaning that this is the function being -applied. Similarly, the "``(T2, X) T1``" +constraint has a locator that is anchored at the function application +and a path with the "apply function" derivation step, meaning that +this is the function being applied. Similarly, the "``(T2, X) apply argument -> tuple element #0 The process of locator simplification maps a locator to its most -specific expression. Essentially, it starts at the anchor of the +specific AST node. Essentially, it starts at the anchor of the locator (in this case, the application ``f(10.5, x)``) and then walks the path, matching derivation steps to subexpressions. The "function application" derivation step extracts the argument (``(10.5, diff --git a/docs/libFuzzerIntegration.md b/docs/libFuzzerIntegration.md index 66588cfd76da7..ca77698fbf5d2 100644 --- a/docs/libFuzzerIntegration.md +++ b/docs/libFuzzerIntegration.md @@ -1,37 +1,39 @@ -libFuzzer Integration ---------------------- - -Swift compiler comes with a built-in `libFuzzer` integration. -In order to use it on a file `myfile.swift`, we define an entry point fuzzing function -with a `@_cdecl("LLVMFuzzerTestOneInput")` annotation: +# libFuzzer Integration +Custom builds of the Swift toolchain (including development snapshots) +have a built-in `libFuzzer` integration. In order to use it on a file +`myfile.swift`, define an entry point fuzzing function with a +`@_cdecl("LLVMFuzzerTestOneInput")` annotation: ```swift -@_cdecl("LLVMFuzzerTestOneInput") public func fuzzMe(Data: UnsafePointer, Size: CInt) -> CInt{ - // Test our code using provided Data. - } +@_cdecl("LLVMFuzzerTestOneInput") +public func test(_ start: UnsafeRawPointer, _ count: Int) -> CInt { + let bytes = UnsafeRawBufferPointer(start: start, count: count) + // TODO: Test the code using the provided bytes. 
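+    // As an illustrative sketch only: decode the fuzzer-provided bytes and
+    // feed them to the code under test. `Parser.parse` is a hypothetical
+    // stand-in; substitute the real entry point being fuzzed.
+    // let text = String(decoding: bytes, as: UTF8.self)
+    // _ = try? Parser.parse(text)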
+ return 0 } ``` -To compile it, we use `-sanitize=fuzzer` flag to link `libFuzzer` -and enable coverage annotation, and `-parse-as-library` flag not to insert -the `main` symbol, such that the fuzzer entry point can be used: +To compile it, use the `-sanitize=fuzzer` flag to link `libFuzzer` +and enable code coverage information; and the `-parse-as-library` flag +to omit the `main` symbol, so that the fuzzer entry point can be used: ```bash % swiftc -sanitize=fuzzer -parse-as-library myfile.swift ``` -`libFuzzer` can be also combined with other sanitizers: +`libFuzzer` can be combined with other sanitizers: ```bash % swiftc -sanitize=fuzzer,address -parse-as-library myfile.swift ``` -Finally, we launch the fuzzing process: +Finally, launch the fuzzing process: ```bash -% ./a.out +% ./myfile ``` -Refer to the official `libFuzzer` documentation at http://llvm.org/docs/LibFuzzer.html -for the description of flags the resulting binary has. +Refer to the official `libFuzzer` documentation at + +for a description of the fuzzer's command line options. diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index d57ec0db0315f..4c8e9660a31f2 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -2000,7 +2000,7 @@ struct TargetExistentialTypeMetadata } /// Retrieve the set of protocols required by the existential. - ArrayRef getProtocols() const { + llvm::ArrayRef getProtocols() const { return { this->template getTrailingObjects(), NumProtocols }; } @@ -2013,7 +2013,7 @@ struct TargetExistentialTypeMetadata } /// Retrieve the set of protocols required by the existential. - MutableArrayRef getMutableProtocols() { + llvm::MutableArrayRef getMutableProtocols() { return { this->template getTrailingObjects(), NumProtocols }; } @@ -2535,13 +2535,13 @@ struct TargetProtocolConformanceDescriptor final getWitnessTable(const TargetMetadata *type) const; /// Retrieve the resilient witnesses. - ArrayRef getResilientWitnesses() const{ + llvm::ArrayRef getResilientWitnesses() const { if (!Flags.hasResilientWitnesses()) return { }; - return ArrayRef( - this->template getTrailingObjects(), - numTrailingObjects(OverloadToken())); + return llvm::ArrayRef( + this->template getTrailingObjects(), + numTrailingObjects(OverloadToken())); } ConstTargetPointer @@ -2828,23 +2828,23 @@ class TargetGenericEnvironment public: /// Retrieve the cumulative generic parameter counts at each level of genericity. - ArrayRef getGenericParameterCounts() const { - return ArrayRef(this->template getTrailingObjects(), + llvm::ArrayRef getGenericParameterCounts() const { + return llvm::makeArrayRef(this->template getTrailingObjects(), Flags.getNumGenericParameterLevels()); } /// Retrieve the generic parameters descriptors. - ArrayRef getGenericParameters() const { - return ArrayRef( - this->template getTrailingObjects(), - getGenericParameterCounts().back()); + llvm::ArrayRef getGenericParameters() const { + return llvm::makeArrayRef( + this->template getTrailingObjects(), + getGenericParameterCounts().back()); } /// Retrieve the generic requirements. 
- ArrayRef getGenericRequirements() const { - return ArrayRef( - this->template getTrailingObjects(), - Flags.getNumGenericRequirements()); + llvm::ArrayRef getGenericRequirements() const { + return llvm::makeArrayRef( + this->template getTrailingObjects(), + Flags.getNumGenericRequirements()); } }; @@ -4604,7 +4604,8 @@ class DynamicReplacementScope DynamicReplacementDescriptor>; friend TrailingObjects; - ArrayRef getReplacementDescriptors() const { + llvm::ArrayRef + getReplacementDescriptors() const { return {this->template getTrailingObjects(), numReplacements}; } diff --git a/include/swift/ABI/TypeIdentity.h b/include/swift/ABI/TypeIdentity.h index a25b8be2d7ab8..dadbd27abc73a 100644 --- a/include/swift/ABI/TypeIdentity.h +++ b/include/swift/ABI/TypeIdentity.h @@ -184,7 +184,7 @@ class ParsedTypeIdentity { StringRef FullIdentity; /// Any extended information that type might have. - Optional> ImportInfo; + llvm::Optional> ImportInfo; /// The ABI name of the type. StringRef getABIName() const { diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index 4da3c634968e6..f72d4a1d4115b 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -121,7 +121,7 @@ namespace swift { class UnifiedStatsReporter; class IndexSubset; struct SILAutoDiffDerivativeFunctionKey; - struct SubASTContextDelegate; + struct InterfaceSubContextDelegate; enum class KnownProtocolKind : uint8_t; @@ -238,10 +238,10 @@ class ASTContext final { UnifiedStatsReporter *Stats = nullptr; /// The language options used for translation. - LangOptions &LangOpts; + const LangOptions &LangOpts; /// The type checker options. - TypeCheckerOptions &TypeCheckerOpts; + const TypeCheckerOptions &TypeCheckerOpts; /// The search path options used by this AST context. SearchPathOptions &SearchPathOpts; @@ -753,7 +753,7 @@ class ASTContext final { StringRef moduleName, bool isUnderlyingClangModule, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate); + InterfaceSubContextDelegate &delegate); /// Load extensions to the given nominal type from the external /// module loaders. diff --git a/include/swift/AST/AutoDiff.h b/include/swift/AST/AutoDiff.h index e212a273f11e4..b2297381c7015 100644 --- a/include/swift/AST/AutoDiff.h +++ b/include/swift/AST/AutoDiff.h @@ -396,9 +396,16 @@ class DerivativeFunctionTypeError : public llvm::ErrorInfo { public: enum class Kind { + /// Original function type has no semantic results. NoSemanticResults, + /// Original function type has multiple semantic results. + // TODO(TF-1250): Support function types with multiple semantic results. MultipleSemanticResults, - NonDifferentiableParameters, + /// Differentiability parmeter indices are empty. + NoDifferentiabilityParameters, + /// A differentiability parameter does not conform to `Differentiable`. + NonDifferentiableDifferentiabilityParameter, + /// The original result type does not conform to `Differentiable`. NonDifferentiableResult }; @@ -408,12 +415,13 @@ class DerivativeFunctionTypeError /// The error kind. Kind kind; + /// The type and index of a differentiability parameter or result. 
+ using TypeAndIndex = std::pair; + private: union Value { - IndexSubset *indices; - Type type; - Value(IndexSubset *indices) : indices(indices) {} - Value(Type type) : type(type) {} + TypeAndIndex typeAndIndex; + Value(TypeAndIndex typeAndIndex) : typeAndIndex(typeAndIndex) {} Value() {} } value; @@ -421,29 +429,21 @@ class DerivativeFunctionTypeError explicit DerivativeFunctionTypeError(AnyFunctionType *functionType, Kind kind) : functionType(functionType), kind(kind), value(Value()) { assert(kind == Kind::NoSemanticResults || - kind == Kind::MultipleSemanticResults); - }; - - explicit DerivativeFunctionTypeError(AnyFunctionType *functionType, Kind kind, - IndexSubset *nonDiffParameterIndices) - : functionType(functionType), kind(kind), value(nonDiffParameterIndices) { - assert(kind == Kind::NonDifferentiableParameters); + kind == Kind::MultipleSemanticResults || + kind == Kind::NoDifferentiabilityParameters); }; explicit DerivativeFunctionTypeError(AnyFunctionType *functionType, Kind kind, - Type nonDiffResultType) - : functionType(functionType), kind(kind), value(nonDiffResultType) { - assert(kind == Kind::NonDifferentiableResult); + TypeAndIndex nonDiffTypeAndIndex) + : functionType(functionType), kind(kind), value(nonDiffTypeAndIndex) { + assert(kind == Kind::NonDifferentiableDifferentiabilityParameter || + kind == Kind::NonDifferentiableResult); }; - IndexSubset *getNonDifferentiableParameterIndices() const { - assert(kind == Kind::NonDifferentiableParameters); - return value.indices; - } - - Type getNonDifferentiableResultType() const { - assert(kind == Kind::NonDifferentiableResult); - return value.type; + TypeAndIndex getNonDifferentiableTypeAndIndex() const { + assert(kind == Kind::NonDifferentiableDifferentiabilityParameter || + kind == Kind::NonDifferentiableResult); + return value.typeAndIndex; } void log(raw_ostream &OS) const override; diff --git a/include/swift/AST/Builtins.def b/include/swift/AST/Builtins.def index 1510688d37e3d..cd4aa5b46a5b4 100644 --- a/include/swift/AST/Builtins.def +++ b/include/swift/AST/Builtins.def @@ -429,6 +429,29 @@ BUILTIN_SIL_OPERATION(IsUnique, "isUnique", Special) /// BridgeObject to be treated as a native object by the runtime. BUILTIN_SIL_OPERATION(IsUnique_native, "isUnique_native", Special) +/// beginCOWMutation(inout T) -> Int1 +/// +/// Begins a copy-on-write mutation for a buffer reference which is passed as +/// inout argument. It returns a true if the buffer is uniquely referenced. +/// In this case the buffer may be mutated after calling this builtin. +/// +/// The beginCOWMutation builtin is very similar to isUnique. It just translates +/// to a different SIL instruction (begin_cow_mutation), which is the preferred +/// representation of COW in SIL. +BUILTIN_SIL_OPERATION(BeginCOWMutation, "beginCOWMutation", Special) + +/// beginCOWMutation_native(inout T) -> Int1 +/// +/// Like beginCOWMutation, but it's assumed that T has native Swift reference +/// counting. +BUILTIN_SIL_OPERATION(BeginCOWMutation_native, "beginCOWMutation_native", Special) + +/// endCOWMutation(inout T) +/// +/// Ends a copy-on-write mutation for a buffer reference which is passed as +/// inout argument. After calling this builtin, the buffer must not be mutated. 
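+///
+/// As an illustrative, source-level sketch (not the standard library's actual
+/// code, which reaches these builtins through internal entry points), the
+/// pattern they support resembles the public `isKnownUniquelyReferenced`
+/// idiom; `storage` and its `copy()` method are hypothetical:
+///
+///   mutating func append(_ element: Int) {
+///     if !isKnownUniquelyReferenced(&storage) {  // analogous to beginCOWMutation
+///       storage = storage.copy()                 // copy the non-unique buffer
+///     }
+///     storage.append(element)                    // buffer is now unique
+///   }                                            // analogous to endCOWMutation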
+BUILTIN_SIL_OPERATION(EndCOWMutation, "endCOWMutation", Special) + /// bindMemory : (Builtin.RawPointer, Builtin.Word, T.Type) -> () BUILTIN_SIL_OPERATION(BindMemory, "bindMemory", Special) @@ -651,6 +674,12 @@ BUILTIN_MISC_OPERATION(AssignCopyArrayFrontToBack, "assignCopyArrayFrontToBack", BUILTIN_MISC_OPERATION(AssignCopyArrayBackToFront, "assignCopyArrayBackToFront", "", Special) BUILTIN_MISC_OPERATION(AssignTakeArray, "assignTakeArray", "", Special) +/// COWBufferForReading has type T -> T +/// +/// Returns the buffer reference which is passed as argument. +/// This builtin indicates to the optimizer that the buffer is not mutable. +BUILTIN_MISC_OPERATION(COWBufferForReading, "COWBufferForReading", "n", Special) + // unsafeGuaranteed has type T -> (T, Builtin.Int8) BUILTIN_MISC_OPERATION(UnsafeGuaranteed, "unsafeGuaranteed", "", Special) diff --git a/include/swift/AST/Decl.h b/include/swift/AST/Decl.h index ba5001da9a7e4..55f17668cd37c 100644 --- a/include/swift/AST/Decl.h +++ b/include/swift/AST/Decl.h @@ -558,10 +558,12 @@ class alignas(1 << DeclAlignInBits) Decl { IsIncompatibleWithWeakReferences : 1 ); - SWIFT_INLINE_BITFIELD(StructDecl, NominalTypeDecl, 1, + SWIFT_INLINE_BITFIELD(StructDecl, NominalTypeDecl, 1+1, /// True if this struct has storage for fields that aren't accessible in /// Swift. - HasUnreferenceableStorage : 1 + HasUnreferenceableStorage : 1, + /// True if this struct is imported from C++ and not trivially copyable. + IsCxxNotTriviallyCopyable : 1 ); SWIFT_INLINE_BITFIELD(EnumDecl, NominalTypeDecl, 2+1, @@ -573,7 +575,7 @@ class alignas(1 << DeclAlignInBits) Decl { HasAnyUnavailableValues : 1 ); - SWIFT_INLINE_BITFIELD(ModuleDecl, TypeDecl, 1+1+1+1+1+1+1+1, + SWIFT_INLINE_BITFIELD(ModuleDecl, TypeDecl, 1+1+1+1+1+1+1+1+1, /// If the module was or is being compiled with `-enable-testing`. TestingEnabled : 1, @@ -599,7 +601,10 @@ class alignas(1 << DeclAlignInBits) Decl { /// Whether the module was imported from Clang (or, someday, maybe another /// language). - IsNonSwiftModule : 1 + IsNonSwiftModule : 1, + + /// Whether this module is the main module. + IsMainModule : 1 ); SWIFT_INLINE_BITFIELD(PrecedenceGroupDecl, Decl, 1+2, @@ -3836,6 +3841,14 @@ class StructDecl final : public NominalTypeDecl { void setHasUnreferenceableStorage(bool v) { Bits.StructDecl.HasUnreferenceableStorage = v; } + + bool isCxxNotTriviallyCopyable() const { + return Bits.StructDecl.IsCxxNotTriviallyCopyable; + } + + void setIsCxxNotTriviallyCopyable(bool v) { + Bits.StructDecl.IsCxxNotTriviallyCopyable = v; + } }; /// This is the base type for AncestryOptions. Each flag describes possible diff --git a/include/swift/AST/DeclContext.h b/include/swift/AST/DeclContext.h index 70dcf0aaabb2b..ae57b1ad9eb0e 100644 --- a/include/swift/AST/DeclContext.h +++ b/include/swift/AST/DeclContext.h @@ -51,6 +51,7 @@ namespace swift { class Expr; class GenericParamList; class LazyMemberLoader; + class GenericContext; class GenericSignature; class GenericTypeParamDecl; class GenericTypeParamType; @@ -566,41 +567,6 @@ class alignas(1 << DeclContextAlignInBits) DeclContext { LLVM_READONLY ASTContext &getASTContext() const; - /// Retrieve the set of protocols whose conformances will be - /// associated with this declaration context. - /// - /// This function differs from \c getLocalConformances() in that it - /// returns protocol declarations, not protocol conformances, and - /// therefore does not require the protocol conformances to be - /// formed. 
- /// - /// \param lookupKind The kind of lookup to perform. - /// - /// FIXME: This likely makes more sense on IterableDeclContext or - /// something similar. - SmallVector - getLocalProtocols(ConformanceLookupKind lookupKind - = ConformanceLookupKind::All) const; - - /// Retrieve the set of protocol conformances associated with this - /// declaration context. - /// - /// \param lookupKind The kind of lookup to perform. - /// - /// FIXME: This likely makes more sense on IterableDeclContext or - /// something similar. - SmallVector - getLocalConformances(ConformanceLookupKind lookupKind - = ConformanceLookupKind::All) const; - - /// Retrieve diagnostics discovered while expanding conformances for this - /// declaration context. This operation then removes those diagnostics from - /// consideration, so subsequent calls to this function with the same - /// declaration context that have not had any new extensions bound - /// will see an empty array. - SmallVector - takeConformanceDiagnostics() const; - /// Retrieves a list of separately imported overlays which are shadowing /// \p declaring. If any \p overlays are returned, qualified lookups into /// \p declaring should be performed into \p overlays instead; since they @@ -815,9 +781,40 @@ class IterableDeclContext { /// valid). bool wasDeserialized() const; + /// Retrieve the set of protocols whose conformances will be + /// associated with this declaration context. + /// + /// This function differs from \c getLocalConformances() in that it + /// returns protocol declarations, not protocol conformances, and + /// therefore does not require the protocol conformances to be + /// formed. + /// + /// \param lookupKind The kind of lookup to perform. + SmallVector + getLocalProtocols(ConformanceLookupKind lookupKind + = ConformanceLookupKind::All) const; + + /// Retrieve the set of protocol conformances associated with this + /// declaration context. + /// + /// \param lookupKind The kind of lookup to perform. + SmallVector + getLocalConformances(ConformanceLookupKind lookupKind + = ConformanceLookupKind::All) const; + + /// Retrieve diagnostics discovered while expanding conformances for this + /// declaration context. This operation then removes those diagnostics from + /// consideration, so subsequent calls to this function with the same + /// declaration context that have not had any new extensions bound + /// will see an empty array. + SmallVector takeConformanceDiagnostics() const; + /// Return 'this' as a \c Decl. const Decl *getDecl() const; + /// Return 'this' as a \c GenericContext. + const GenericContext *getAsGenericContext() const; + /// Get the DeclID this Decl was deserialized from. 
serialization::DeclID getDeclID() const { assert(wasDeserialized()); diff --git a/include/swift/AST/DiagnosticsFrontend.def b/include/swift/AST/DiagnosticsFrontend.def index 6ad68ae2b6c65..4f72e3e17a7bb 100644 --- a/include/swift/AST/DiagnosticsFrontend.def +++ b/include/swift/AST/DiagnosticsFrontend.def @@ -379,5 +379,10 @@ ERROR(expectation_missing_opening_braces,none, ERROR(expectation_missing_closing_braces,none, "didn't find '}}' to match '{{' in expectation", ()) +WARNING(module_incompatible_with_skip_function_bodies,none, + "module '%0' cannot be built with " + "-experimental-skip-non-inlinable-function-bodies; this option has " + "been automatically disabled", (StringRef)) + #define UNDEFINE_DIAGNOSTIC_MACROS #include "DefineDiagnosticMacros.h" diff --git a/include/swift/AST/DiagnosticsSema.def b/include/swift/AST/DiagnosticsSema.def index 1a87141c3d801..9502e9c1ee6f9 100644 --- a/include/swift/AST/DiagnosticsSema.def +++ b/include/swift/AST/DiagnosticsSema.def @@ -71,6 +71,11 @@ ERROR(could_not_find_value_dynamic_member_corrected,none, ERROR(could_not_find_value_dynamic_member,none, "value of type %0 has no dynamic member %2 using key path from root type %1", (Type, Type, DeclNameRef)) +ERROR(cannot_infer_contextual_keypath_type_specify_root,none, + "cannot infer key path type from context; consider explicitly specifying a root type", ()) +ERROR(cannot_infer_keypath_root_anykeypath_context,none, + "'AnyKeyPath' does not provide enough context for root type to be inferred; " + "consider explicitly specifying a root type", ()) ERROR(could_not_find_type_member,none, "type %0 has no member %1", (Type, DeclNameRef)) @@ -245,6 +250,9 @@ ERROR(no_candidates_match_result_type,none, "no '%0' candidates produce the expected contextual result type %1", (StringRef, Type)) +ERROR(cannot_infer_closure_parameter_type,none, + "unable to infer type of a closure parameter %0 in the current context", + (StringRef)) ERROR(cannot_infer_closure_type,none, "unable to infer closure type in the current context", ()) ERROR(cannot_infer_closure_result_type,none, @@ -1706,6 +1714,13 @@ ERROR(spi_attribute_on_non_public,none, "cannot be declared '@_spi' because only public and open " "declarations can be '@_spi'", (AccessLevel, DescriptiveDeclKind)) +ERROR(spi_attribute_on_protocol_requirement,none, + "protocol requirement %0 cannot be declared '@_spi' without " + "a default implementation in a protocol extension", + (DeclName)) +ERROR(spi_attribute_on_frozen_stored_properties,none, + "stored property %0 cannot be declared '@_spi' in a '@frozen' struct", + (DeclName)) // Opaque return types ERROR(opaque_type_invalid_constraint,none, @@ -2995,8 +3010,6 @@ ERROR(implements_attr_protocol_not_conformed_to,none, ERROR(differentiable_attr_no_vjp_or_jvp_when_linear,none, "cannot specify 'vjp:' or 'jvp:' for linear functions; use '@transpose' " "attribute for transpose registration instead", ()) -ERROR(differentiable_attr_void_result,none, - "cannot differentiate void function %0", (DeclName)) ERROR(differentiable_attr_overload_not_found,none, "%0 does not have expected type %1", (DeclNameRef, Type)) // TODO(TF-482): Change duplicate `@differentiable` attribute diagnostic to also @@ -3015,9 +3028,6 @@ ERROR(differentiable_attr_invalid_access,none, "derivative function %0 is required to either be public or " "'@usableFromInline' because the original function %1 is public or " "'@usableFromInline'", (DeclNameRef, DeclName)) -ERROR(differentiable_attr_result_not_differentiable,none, - "can only differentiate functions 
with results that conform to " - "'Differentiable', but %0 does not conform to 'Differentiable'", (Type)) ERROR(differentiable_attr_protocol_req_where_clause,none, "'@differentiable' attribute on protocol requirement cannot specify " "'where' clause", ()) @@ -3128,6 +3138,9 @@ ERROR(autodiff_attr_original_void_result,none, ERROR(autodiff_attr_original_multiple_semantic_results,none, "cannot differentiate functions with both an 'inout' parameter and a " "result", ()) +ERROR(autodiff_attr_result_not_differentiable,none, + "can only differentiate functions with results that conform to " + "'Differentiable', but %0 does not conform to 'Differentiable'", (Type)) // differentiation `wrt` parameters clause ERROR(diff_function_no_parameters,none, @@ -4321,8 +4334,8 @@ ERROR(objc_invalid_on_subscript,none, ERROR(objc_invalid_on_static_subscript,none, "%0 cannot be %" OBJC_ATTR_SELECT "1", (DescriptiveDeclKind, unsigned)) ERROR(objc_invalid_with_generic_params,none, - "method cannot be %" OBJC_ATTR_SELECT "0 because it has generic " - "parameters", (unsigned)) + "%0 cannot be %" OBJC_ATTR_SELECT "1 because it has generic parameters", + (DescriptiveDeclKind, unsigned)) ERROR(objc_convention_invalid,none, "%0 is not representable in Objective-C, so it cannot be used" " with '@convention(%1)'", (Type, StringRef)) diff --git a/include/swift/AST/Expr.h b/include/swift/AST/Expr.h index 3c13b029ac9aa..9168f0583e618 100644 --- a/include/swift/AST/Expr.h +++ b/include/swift/AST/Expr.h @@ -4227,9 +4227,7 @@ class DefaultArgumentExpr final : public Expr { DefaultArgsOwner(defaultArgsOwner), ParamIndex(paramIndex), Loc(loc), ContextOrCallerSideExpr(dc) { } - SourceRange getSourceRange() const { return {}; } - - SourceLoc getArgumentListLoc() const { return Loc; } + SourceRange getSourceRange() const { return Loc; } ConcreteDeclRef getDefaultArgsOwner() const { return DefaultArgsOwner; diff --git a/include/swift/AST/GenericSignature.h b/include/swift/AST/GenericSignature.h index f21c00de9a4f2..4d347fae5b9d8 100644 --- a/include/swift/AST/GenericSignature.h +++ b/include/swift/AST/GenericSignature.h @@ -101,7 +101,7 @@ class GenericTypeParamType; /// both the generic type parameters and the requirements placed on those /// generic parameters. class GenericSignature { - GenericSignatureImpl *Ptr; + const GenericSignatureImpl *Ptr; public: /// Create a new generic signature with the given type parameters and @@ -118,13 +118,13 @@ class GenericSignature { ArrayRef requirements); public: - /*implicit*/ GenericSignature(GenericSignatureImpl *P = 0) : Ptr(P) {} + /*implicit*/ GenericSignature(const GenericSignatureImpl *P = 0) : Ptr(P) {} - GenericSignatureImpl *getPointer() const { return Ptr; } + const GenericSignatureImpl *getPointer() const { return Ptr; } bool isNull() const { return Ptr == 0; } - GenericSignatureImpl *operator->() const { return Ptr; } + const GenericSignatureImpl *operator->() const { return Ptr; } explicit operator bool() const { return Ptr != 0; } @@ -152,7 +152,7 @@ class GenericSignature { TypeArrayView genericParams, ArrayRef requirements); public: - using ConformsToArray = SmallVector; + using RequiredProtocols = SmallVector; private: // Direct comparison is disabled for generic signatures. 
Canonicalize them @@ -175,7 +175,7 @@ class CanGenericSignature : public GenericSignature { public: CanGenericSignature(std::nullptr_t) : GenericSignature(nullptr) {} - explicit CanGenericSignature(GenericSignatureImpl *P = 0) + explicit CanGenericSignature(const GenericSignatureImpl *P = 0) : GenericSignature(P) { assert(isActuallyCanonicalOrNull() && "Forming a CanGenericSignature out of a non-canonical signature!"); @@ -204,8 +204,8 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final GenericSignatureImpl(const GenericSignatureImpl&) = delete; void operator=(const GenericSignatureImpl&) = delete; - unsigned NumGenericParams; - unsigned NumRequirements; + const unsigned NumGenericParams; + const unsigned NumRequirements; GenericEnvironment *GenericEnv = nullptr; @@ -220,23 +220,13 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final return NumRequirements; } - /// Retrieve a mutable version of the generic parameters. - MutableArrayRef getGenericParamsBuffer() { - return {getTrailingObjects(), NumGenericParams}; - } - - /// Retrieve a mutable version of the requirements. - MutableArrayRef getRequirementsBuffer() { - return {getTrailingObjects(), NumRequirements}; - } - GenericSignatureImpl(TypeArrayView params, ArrayRef requirements, bool isKnownCanonical); // FIXME: Making this a CanGenericSignature reveals callers are violating // the interface's invariants. - mutable llvm::PointerUnion + mutable llvm::PointerUnion CanonicalSignatureOrASTContext; void buildConformanceAccessPath( @@ -244,15 +234,15 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final ArrayRef reqs, const void /*GenericSignatureBuilder::RequirementSource*/ *source, ProtocolDecl *conformingProto, Type rootType, - ProtocolDecl *requirementSignatureProto); + ProtocolDecl *requirementSignatureProto) const; friend class ArchetypeType; public: /// Retrieve the generic parameters. TypeArrayView getGenericParams() const { - auto temp = const_cast(this); - return TypeArrayView(temp->getGenericParamsBuffer()); + return TypeArrayView( + {getTrailingObjects(), NumGenericParams}); } /// Retrieve the innermost generic parameters. @@ -263,7 +253,7 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final /// Retrieve the requirements. ArrayRef getRequirements() const { - return const_cast(this)->getRequirementsBuffer(); + return {getTrailingObjects(), NumRequirements}; } /// Only allow allocation by doing a placement new. @@ -299,7 +289,7 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final } /// Return true if these two generic signatures are equal. - bool isEqual(GenericSignature Other); + bool isEqual(GenericSignature Other) const; /// Determines whether this GenericSignature is canonical. bool isCanonical() const; @@ -310,55 +300,56 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final CanGenericSignature getCanonicalSignature() const; /// Retrieve the generic signature builder for the given generic signature. - GenericSignatureBuilder *getGenericSignatureBuilder(); + GenericSignatureBuilder *getGenericSignatureBuilder() const; /// Returns the generic environment that provides fresh contextual types /// (archetypes) that correspond to the interface types in this generic /// signature. - GenericEnvironment *getGenericEnvironment(); + GenericEnvironment *getGenericEnvironment() const; /// Uniquing for the ASTContext. 
- void Profile(llvm::FoldingSetNodeID &ID) { + void Profile(llvm::FoldingSetNodeID &ID) const { Profile(ID, getGenericParams(), getRequirements()); } /// Determine whether the given dependent type is required to be a class. - bool requiresClass(Type type); + bool requiresClass(Type type) const; /// Determine the superclass bound on the given dependent type. - Type getSuperclassBound(Type type); + Type getSuperclassBound(Type type) const; - /// Determine the set of protocols to which the given dependent type - /// must conform. - GenericSignature::ConformsToArray getConformsTo(Type type); + /// Determine the set of protocols to which the given type parameter is + /// required to conform. + GenericSignature::RequiredProtocols getRequiredProtocols(Type type) const; - /// Determine whether the given dependent type conforms to this protocol. - bool conformsToProtocol(Type type, ProtocolDecl *proto); + /// Determine whether the given type parameter is required to conform to + /// the given protocol. + bool requiresProtocol(Type type, ProtocolDecl *proto) const; /// Determine whether the given dependent type is equal to a concrete type. - bool isConcreteType(Type type); + bool isConcreteType(Type type) const; /// Return the concrete type that the given dependent type is constrained to, /// or the null Type if it is not the subject of a concrete same-type /// constraint. - Type getConcreteType(Type type); + Type getConcreteType(Type type) const; /// Return the layout constraint that the given dependent type is constrained /// to, or the null LayoutConstraint if it is not the subject of layout /// constraint. - LayoutConstraint getLayoutConstraint(Type type); + LayoutConstraint getLayoutConstraint(Type type) const; /// Return whether two type parameters represent the same type under this /// generic signature. /// /// The type parameters must be known to not be concrete within the context. - bool areSameTypeParameterInContext(Type type1, Type type2); + bool areSameTypeParameterInContext(Type type1, Type type2) const; /// Determine if \c sig can prove \c requirement, meaning that it can deduce /// T: Foo or T == U (etc.) with the information it knows. This includes /// checking against global state, if any/all of the types in the requirement /// are concrete, not type parameters. - bool isRequirementSatisfied(Requirement requirement); + bool isRequirementSatisfied(Requirement requirement) const; /// Return the requirements of this generic signature that are not also /// satisfied by \c otherSig. @@ -370,14 +361,15 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final /// Return the canonical version of the given type under this generic /// signature. - CanType getCanonicalTypeInContext(Type type); - bool isCanonicalTypeInContext(Type type); + CanType getCanonicalTypeInContext(Type type) const; + bool isCanonicalTypeInContext(Type type) const; /// Return the canonical version of the given type under this generic /// signature. CanType getCanonicalTypeInContext(Type type, - GenericSignatureBuilder &builder); - bool isCanonicalTypeInContext(Type type, GenericSignatureBuilder &builder); + GenericSignatureBuilder &builder) const; + bool isCanonicalTypeInContext(Type type, + GenericSignatureBuilder &builder) const; /// Retrieve the conformance access path used to extract the conformance of /// interface \c type to the given \c protocol. 
@@ -392,14 +384,14 @@ class alignas(1 << TypeAlignInBits) GenericSignatureImpl final /// /// \seealso ConformanceAccessPath ConformanceAccessPath getConformanceAccessPath(Type type, - ProtocolDecl *protocol); + ProtocolDecl *protocol) const; /// Get the ordinal of a generic parameter in this generic signature. /// /// For example, if you have a generic signature for a nested context like: /// /// then this will return 0 for t_0_0, 1 for t_0_1, and 2 for t_1_0. - unsigned getGenericParamOrdinal(GenericTypeParamType *param); + unsigned getGenericParamOrdinal(GenericTypeParamType *param) const; /// Get a substitution map that maps all of the generic signature's /// generic parameters to themselves. @@ -440,7 +432,7 @@ static inline raw_ostream &operator<<(raw_ostream &OS, // A GenericSignature casts like a GenericSignatureImpl*. template <> struct simplify_type { - typedef ::swift::GenericSignatureImpl *SimpleType; + typedef const ::swift::GenericSignatureImpl *SimpleType; static SimpleType getSimplifiedValue(const ::swift::GenericSignature &Val) { return Val.getPointer(); } diff --git a/include/swift/AST/GenericSignatureBuilder.h b/include/swift/AST/GenericSignatureBuilder.h index 6243b843959c2..21b9c60d65a04 100644 --- a/include/swift/AST/GenericSignatureBuilder.h +++ b/include/swift/AST/GenericSignatureBuilder.h @@ -519,11 +519,7 @@ class GenericSignatureBuilder { ProtocolConformanceRef operator()(CanType dependentType, Type conformingReplacementType, - ProtocolDecl *conformedProtocol) const { - return builder->lookupConformance(dependentType, - conformingReplacementType, - conformedProtocol); - } + ProtocolDecl *conformedProtocol) const; }; /// Retrieve a function that can perform conformance lookup for this diff --git a/include/swift/AST/Module.h b/include/swift/AST/Module.h index 936bf83f5df50..c5a70a6e66773 100644 --- a/include/swift/AST/Module.h +++ b/include/swift/AST/Module.h @@ -347,6 +347,13 @@ class ModuleDecl : public DeclContext, public TypeDecl { return new (ctx) ModuleDecl(name, ctx, importInfo); } + static ModuleDecl * + createMainModule(ASTContext &ctx, Identifier name, ImplicitImportInfo iinfo) { + auto *Mod = ModuleDecl::create(name, ctx, iinfo); + Mod->Bits.ModuleDecl.IsMainModule = true; + return Mod; + } + using Decl::getASTContext; /// Retrieves information about which modules are implicitly imported by @@ -542,6 +549,10 @@ class ModuleDecl : public DeclContext, public TypeDecl { Bits.ModuleDecl.IsNonSwiftModule = flag; } + bool isMainModule() const { + return Bits.ModuleDecl.IsMainModule; + } + /// Retrieve the top-level module. If this module is already top-level, this /// returns itself. If this is a submodule such as \c Foo.Bar.Baz, this /// returns the module \c Foo. diff --git a/include/swift/AST/ModuleDependencies.h b/include/swift/AST/ModuleDependencies.h index 5e086bfb7e5a5..e169eff011f79 100644 --- a/include/swift/AST/ModuleDependencies.h +++ b/include/swift/AST/ModuleDependencies.h @@ -68,6 +68,13 @@ class SwiftModuleDependenciesStorage : public ModuleDependenciesStorageBase { /// The Swift interface file, if it can be used to generate the module file. const Optional swiftInterfaceFile; + /// The Swift frontend invocation arguments to build the Swift module from the + /// interface. + const std::vector buildCommandLine; + + /// The hash value that will be used for the generated module + const std::string contextHash; + /// Bridging header file, if there is one. 
Optional bridgingHeaderFile; @@ -82,9 +89,13 @@ class SwiftModuleDependenciesStorage : public ModuleDependenciesStorageBase { SwiftModuleDependenciesStorage( const std::string &compiledModulePath, - const Optional &swiftInterfaceFile + const Optional &swiftInterfaceFile, + ArrayRef buildCommandLine, + StringRef contextHash ) : ModuleDependenciesStorageBase(/*isSwiftModule=*/true, compiledModulePath), - swiftInterfaceFile(swiftInterfaceFile) { } + swiftInterfaceFile(swiftInterfaceFile), + buildCommandLine(buildCommandLine.begin(), buildCommandLine.end()), + contextHash(contextHash) { } ModuleDependenciesStorageBase *clone() const override { return new SwiftModuleDependenciesStorage(*this); @@ -162,10 +173,12 @@ class ModuleDependencies { /// built from a Swift interface file (\c .swiftinterface). static ModuleDependencies forSwiftInterface( const std::string &compiledModulePath, - const std::string &swiftInterfaceFile) { + const std::string &swiftInterfaceFile, + ArrayRef buildCommands, + StringRef contextHash) { return ModuleDependencies( std::make_unique( - compiledModulePath, swiftInterfaceFile)); + compiledModulePath, swiftInterfaceFile, buildCommands, contextHash)); } /// Describe the module dependencies for a serialized or parsed Swift module. @@ -173,7 +186,7 @@ class ModuleDependencies { const std::string &compiledModulePath) { return ModuleDependencies( std::make_unique( - compiledModulePath, None)); + compiledModulePath, None, ArrayRef(), StringRef())); } /// Describe the module dependencies for a Clang module that can be diff --git a/include/swift/AST/ModuleLoader.h b/include/swift/AST/ModuleLoader.h index 0df353ba3de79..d052d5838406c 100644 --- a/include/swift/AST/ModuleLoader.h +++ b/include/swift/AST/ModuleLoader.h @@ -51,6 +51,7 @@ class ModuleDependenciesCache; class NominalTypeDecl; class SourceFile; class TypeDecl; +class CompilerInstance; enum class KnownProtocolKind : uint8_t; @@ -88,13 +89,27 @@ class DependencyTracker { std::shared_ptr getClangCollector(); }; +struct SubCompilerInstanceInfo { + StringRef CompilerVersion; + CompilerInstance* Instance; + StringRef Hash; + ArrayRef BuildArguments; +}; + /// Abstract interface to run an action in a sub ASTContext. -struct SubASTContextDelegate { - virtual bool runInSubContext(ASTContext &ctx, StringRef interfacePath, - llvm::function_ref action) { - llvm_unreachable("function should be overriden"); - } - virtual ~SubASTContextDelegate() = default; +struct InterfaceSubContextDelegate { + virtual bool runInSubContext(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref, StringRef)> action) = 0; + virtual bool runInSubCompilerInstance(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref action) = 0; + + virtual ~InterfaceSubContextDelegate() = default; }; /// Abstract interface that loads named modules into the AST. @@ -199,7 +214,8 @@ class ModuleLoader { /// if no such module exists. 
virtual Optional getModuleDependencies( StringRef moduleName, - ModuleDependenciesCache &cache, SubASTContextDelegate &delegate) = 0; + ModuleDependenciesCache &cache, + InterfaceSubContextDelegate &delegate) = 0; }; } // namespace swift diff --git a/include/swift/AST/PlatformKind.h b/include/swift/AST/PlatformKind.h index 521167f6ce2d7..f03d5adc8f7bf 100644 --- a/include/swift/AST/PlatformKind.h +++ b/include/swift/AST/PlatformKind.h @@ -54,11 +54,11 @@ StringRef prettyPlatformString(PlatformKind platform); /// If ForTargetVariant is true then for zippered builds the target-variant /// triple will be used rather than the target to determine whether the /// platform is active. -bool isPlatformActive(PlatformKind Platform, LangOptions &LangOpts, +bool isPlatformActive(PlatformKind Platform, const LangOptions &LangOpts, bool ForTargetVariant = false); /// Returns the target platform for the given language options. -PlatformKind targetPlatform(LangOptions &LangOpts); +PlatformKind targetPlatform(const LangOptions &LangOpts); /// Returns true when availability attributes from the "parent" platform /// should also apply to the "child" platform for declarations without diff --git a/include/swift/AST/SILGenRequests.h b/include/swift/AST/SILGenRequests.h index c20c014838308..3f93c5608ed7b 100644 --- a/include/swift/AST/SILGenRequests.h +++ b/include/swift/AST/SILGenRequests.h @@ -81,19 +81,16 @@ struct SILGenDescriptor { /// If the module or file contains SIL that needs parsing, returns the file /// to be parsed, or \c nullptr if parsing isn't required. SourceFile *getSourceFileToParse() const; - - /// Whether the SIL is being emitted for a whole module. - bool isWholeModule() const; }; void simple_display(llvm::raw_ostream &out, const SILGenDescriptor &d); SourceLoc extractNearestSourceLoc(const SILGenDescriptor &desc); -class SILGenSourceFileRequest : - public SimpleRequest(SILGenDescriptor), - RequestFlags::Uncached|RequestFlags::DependencySource> { +class SILGenerationRequest + : public SimpleRequest< + SILGenerationRequest, std::unique_ptr(SILGenDescriptor), + RequestFlags::Uncached | RequestFlags::DependencySource> { public: using SimpleRequest::SimpleRequest; @@ -104,33 +101,12 @@ class SILGenSourceFileRequest : std::unique_ptr evaluate(Evaluator &evaluator, SILGenDescriptor desc) const; -public: - bool isCached() const { return true; } - public: // Incremental dependencies. evaluator::DependencySource readDependencySource(const evaluator::DependencyCollector &) const; }; -class SILGenWholeModuleRequest : - public SimpleRequest(SILGenDescriptor), - RequestFlags::Uncached> { -public: - using SimpleRequest::SimpleRequest; - -private: - friend SimpleRequest; - - // Evaluation. - std::unique_ptr - evaluate(Evaluator &evaluator, SILGenDescriptor desc) const; - -public: - bool isCached() const { return true; } -}; - /// Parses a .sil file into a SILModule. class ParseSILModuleRequest : public SimpleRequest(SILGenDescriptor), - Uncached, NoLocationInfo) -SWIFT_REQUEST(SILGen, SILGenWholeModuleRequest, +SWIFT_REQUEST(SILGen, SILGenerationRequest, std::unique_ptr(SILGenDescriptor), Uncached, NoLocationInfo) SWIFT_REQUEST(SILGen, ParseSILModuleRequest, diff --git a/include/swift/AST/Type.h b/include/swift/AST/Type.h index 2691d18d461c4..2d540a9138239 100644 --- a/include/swift/AST/Type.h +++ b/include/swift/AST/Type.h @@ -147,10 +147,7 @@ enum class SubstFlags { /// Map member types to their desugared witness type. DesugarMemberTypes = 0x02, /// Substitute types involving opaque type archetypes. 
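A hedged sketch of driving the unified request, assuming `ctx` is the ASTContext and `desc` an already-formed SILGenDescriptor; the same request now serves both the single-file and whole-module cases that previously had separate requests.

// `ctx` and `desc` are assumed to exist; error handling is elided via cantFail.
std::unique_ptr<SILModule> SM =
    llvm::cantFail(ctx.evaluator(SILGenerationRequest{desc}));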
- SubstituteOpaqueArchetypes = 0x04, - /// Force substitution of opened archetypes. Normally -- without these flag -- - /// opened archetype conformances are not substituted. - ForceSubstituteOpenedExistentials = 0x08, + SubstituteOpaqueArchetypes = 0x04 }; /// Options for performing substitutions into a type. @@ -384,7 +381,7 @@ SourceLoc extractNearestSourceLoc(Type ty); class CanType : public Type { bool isActuallyCanonicalOrNull() const; - static bool isReferenceTypeImpl(CanType type, GenericSignatureImpl *sig, + static bool isReferenceTypeImpl(CanType type, const GenericSignatureImpl *sig, bool functionsCount); static bool isExistentialTypeImpl(CanType type); static bool isAnyExistentialTypeImpl(CanType type); @@ -436,7 +433,7 @@ class CanType : public Type { /// - existentials with class or class protocol bounds /// But not: /// - function types - bool allowsOwnership(GenericSignatureImpl *sig) const { + bool allowsOwnership(const GenericSignatureImpl *sig) const { return isReferenceTypeImpl(*this, sig, /*functions count*/ false); } diff --git a/include/swift/AST/TypeCheckRequests.h b/include/swift/AST/TypeCheckRequests.h index c2eadb74e3885..3d96d17924add 100644 --- a/include/swift/AST/TypeCheckRequests.h +++ b/include/swift/AST/TypeCheckRequests.h @@ -1114,7 +1114,7 @@ void simple_display(llvm::raw_ostream &out, AncestryFlags value); class AbstractGenericSignatureRequest : public SimpleRequest, SmallVector), RequestFlags::Cached> { @@ -1127,7 +1127,7 @@ class AbstractGenericSignatureRequest : // Evaluation. GenericSignature evaluate(Evaluator &evaluator, - GenericSignatureImpl *baseSignature, + const GenericSignatureImpl *baseSignature, SmallVector addedParameters, SmallVector addedRequirements) const; @@ -1144,7 +1144,7 @@ class AbstractGenericSignatureRequest : class InferredGenericSignatureRequest : public SimpleRequest, SmallVector, @@ -1160,7 +1160,7 @@ class InferredGenericSignatureRequest : GenericSignature evaluate(Evaluator &evaluator, ModuleDecl *module, - GenericSignatureImpl *baseSignature, + const GenericSignatureImpl *baseSignature, GenericParamSource paramSource, SmallVector addedRequirements, SmallVector inferenceSources, @@ -2259,7 +2259,8 @@ void simple_display(llvm::raw_ostream &out, ConformanceLookupKind kind); /// must also be reported so it can be checked as well. class LookupAllConformancesInContextRequest : public SimpleRequest { @@ -2271,7 +2272,7 @@ class LookupAllConformancesInContextRequest // Evaluation. ProtocolConformanceLookupResult - evaluate(Evaluator &evaluator, const DeclContext *DC) const; + evaluate(Evaluator &evaluator, const IterableDeclContext *IDC) const; public: // Incremental dependencies @@ -2429,6 +2430,32 @@ class ResolveTypeRequest void simple_display(llvm::raw_ostream &out, const TypeResolution *resolution); SourceLoc extractNearestSourceLoc(const TypeRepr *repr); +/// Checks to see if any of the imports in a module use `@_implementationOnly` +/// in one file and not in another. +/// +/// Like redeclaration checking, but for imports. +/// +/// This is a request purely to ensure that we don't need to perform the same +/// checking for each file we resolve imports for. +/// FIXME: Once import resolution operates at module-level, this checking can +/// integrated into it. 
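A hedged sketch of how a caller would now trigger this per-module check through the request declared just below, replacing the free function removed at the end of this patch; `ctx` and `mod` stand for the ASTContext and a ModuleDecl.

// Side-effect-only request: the result is discarded, the diagnostics matter.
(void)evaluateOrDefault(
    ctx.evaluator, CheckInconsistentImplementationOnlyImportsRequest{mod}, {});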
+class CheckInconsistentImplementationOnlyImportsRequest + : public SimpleRequest { +public: + using SimpleRequest::SimpleRequest; + +private: + friend SimpleRequest; + + evaluator::SideEffect evaluate(Evaluator &evaluator, ModuleDecl *mod) const; + +public: + // Cached. + bool isCached() const { return true; } +}; + // Allow AnyValue to compare two Type values, even though Type doesn't // support ==. template<> diff --git a/include/swift/AST/TypeCheckerTypeIDZone.def b/include/swift/AST/TypeCheckerTypeIDZone.def index 1a6aff564881b..f69a109bf4693 100644 --- a/include/swift/AST/TypeCheckerTypeIDZone.def +++ b/include/swift/AST/TypeCheckerTypeIDZone.def @@ -16,7 +16,7 @@ //===----------------------------------------------------------------------===// SWIFT_REQUEST(TypeChecker, AbstractGenericSignatureRequest, - GenericSignature (GenericSignatureImpl *, + GenericSignature (const GenericSignatureImpl *, SmallVector, SmallVector), Cached, NoLocationInfo) @@ -29,6 +29,8 @@ SWIFT_REQUEST(TypeChecker, AttachedPropertyWrappersRequest, NoLocationInfo) SWIFT_REQUEST(TypeChecker, CallerSideDefaultArgExprRequest, Expr *(DefaultArgumentExpr *), SeparatelyCached, NoLocationInfo) +SWIFT_REQUEST(TypeChecker, CheckInconsistentImplementationOnlyImportsRequest, + evaluator::SideEffect(ModuleDecl *), Cached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, CheckRedeclarationRequest, evaluator::SideEffect(ValueDecl *), Uncached, NoLocationInfo) @@ -92,7 +94,7 @@ SWIFT_REQUEST(TypeChecker, HasDynamicCallableAttributeRequest, SWIFT_REQUEST(TypeChecker, HasImplementationOnlyImportsRequest, bool(SourceFile *), Cached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, InferredGenericSignatureRequest, - GenericSignature (ModuleDecl *, GenericSignatureImpl *, + GenericSignature (ModuleDecl *, const GenericSignatureImpl *, GenericParamSource, SmallVector, SmallVector, bool), @@ -256,7 +258,7 @@ SWIFT_REQUEST(TypeChecker, ScopedImportLookupRequest, SWIFT_REQUEST(TypeChecker, ClosureHasExplicitResultRequest, bool(ClosureExpr *), Cached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, LookupAllConformancesInContextRequest, - ProtocolConformanceLookupResult(const DeclContext *), + ProtocolConformanceLookupResult(const IterableDeclContext *), Uncached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, SimpleDidSetRequest, bool(AccessorDecl *), Cached, NoLocationInfo) diff --git a/include/swift/AST/Types.h b/include/swift/AST/Types.h index f5ac2f5ada0c5..0bf41995878ab 100644 --- a/include/swift/AST/Types.h +++ b/include/swift/AST/Types.h @@ -558,7 +558,7 @@ class alignas(1 << TypeAlignInBits) TypeBase { /// allowsOwnership() - Are variables of this type permitted to have /// ownership attributes? - bool allowsOwnership(GenericSignatureImpl *sig = nullptr); + bool allowsOwnership(const GenericSignatureImpl *sig = nullptr); /// Determine whether this type involves a type variable. bool hasTypeVariable() const { @@ -2696,10 +2696,10 @@ enum class FunctionTypeRepresentation : uint8_t { /// A "thin" function that needs no context. Thin, - /// A C function pointer, which is thin and also uses the C calling - /// convention. + /// A C function pointer (or reference), which is thin and also uses the C + /// calling convention. CFunctionPointer, - + /// The value of the greatest AST function representation. Last = CFunctionPointer, }; @@ -2980,8 +2980,8 @@ class AnyFunctionType : public TypeBase { // We preserve a full clang::Type *, not a clang::FunctionType * as: // 1. We need to keep sugar in case we need to present an error to the user. // 2. 
The actual type being stored is [ignoring sugar] either a - // clang::PointerType or a clang::BlockPointerType which points to a - // clang::FunctionType. + // clang::PointerType, a clang::BlockPointerType, or a + // clang::ReferenceType which points to a clang::FunctionType. const clang::Type *ClangFunctionType; bool empty() const { return !ClangFunctionType; } diff --git a/include/swift/Basic/LLVM.h b/include/swift/Basic/LLVM.h index e3876ae81d59c..c88b322b0ecd7 100644 --- a/include/swift/Basic/LLVM.h +++ b/include/swift/Basic/LLVM.h @@ -34,14 +34,20 @@ namespace llvm { class Twine; template class SmallPtrSetImpl; template class SmallPtrSet; +#if !defined(swiftCore_EXPORTS) template class SmallVectorImpl; +#endif template class SmallVector; template class SmallString; template class SmallSetVector; +#if !defined(swiftCore_EXPORTS) template class ArrayRef; template class MutableArrayRef; +#endif template class TinyPtrVector; +#if !defined(swiftCore_EXPORTS) template class Optional; +#endif template class PointerUnion; template class iterator_range; class SmallBitVector; @@ -50,7 +56,9 @@ namespace llvm { class raw_ostream; class APInt; class APFloat; +#if !defined(swiftCore_EXPORTS) template class function_ref; +#endif } // end namespace llvm @@ -63,11 +71,15 @@ namespace swift { using llvm::cast_or_null; // Containers. +#if !defined(swiftCore_EXPORTS) using llvm::ArrayRef; - using llvm::iterator_range; using llvm::MutableArrayRef; +#endif + using llvm::iterator_range; using llvm::None; +#if !defined(swiftCore_EXPORTS) using llvm::Optional; +#endif using llvm::PointerUnion; using llvm::SmallBitVector; using llvm::SmallPtrSet; @@ -75,7 +87,9 @@ namespace swift { using llvm::SmallSetVector; using llvm::SmallString; using llvm::SmallVector; +#if !defined(swiftCore_EXPORTS) using llvm::SmallVectorImpl; +#endif using llvm::StringLiteral; using llvm::StringRef; using llvm::TinyPtrVector; @@ -84,7 +98,9 @@ namespace swift { // Other common classes. using llvm::APFloat; using llvm::APInt; +#if !defined(swiftCore_EXPORTS) using llvm::function_ref; +#endif using llvm::NoneType; using llvm::raw_ostream; } // end namespace swift diff --git a/include/swift/Basic/LangOptions.h b/include/swift/Basic/LangOptions.h index 0355ddfb52b19..1da784b10a5c6 100644 --- a/include/swift/Basic/LangOptions.h +++ b/include/swift/Basic/LangOptions.h @@ -489,11 +489,7 @@ namespace swift { /// 4.2 GHz Intel Core i7. /// (It's arbitrary, but will keep the compiler from taking too much time.) unsigned SwitchCheckingInvocationThreshold = 200000; - - /// Whether to delay checking that benefits from having the entire - /// module parsed, e.g., Objective-C method override checking. - bool DelayWholeModuleChecking = false; - + /// If true, the time it takes to type-check each function will be dumped /// to llvm::errs(). bool DebugTimeFunctionBodies = false; @@ -502,11 +498,6 @@ namespace swift { /// dumped to llvm::errs(). bool DebugTimeExpressions = false; - /// Indicate that the type checker is checking code that will be - /// immediately executed. This will suppress certain warnings - /// when executing scripts. - bool InImmediateMode = false; - /// Indicate that the type checker should skip type-checking non-inlinable /// function bodies. 
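An illustrative C++ declaration (not part of this patch) of the newly covered case: a parameter of function reference type, for which the preserved clang::Type is a ReferenceType rather than a PointerType or BlockPointerType, while still ultimately pointing at a clang::FunctionType.

// Hypothetical C++ header as seen by the ClangImporter.
using Callback = void (&)(int);
void forEach(const int *values, int count, Callback cb);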
bool SkipNonInlinableFunctionBodies = false; diff --git a/include/swift/Basic/Lazy.h b/include/swift/Basic/Lazy.h index 6c899abeeffd6..521227f4db231 100644 --- a/include/swift/Basic/Lazy.h +++ b/include/swift/Basic/Lazy.h @@ -24,6 +24,24 @@ #include "swift/Basic/Malloc.h" #include "swift/Basic/type_traits.h" +#if defined(__wasi__) +// Temporary single-threaded stub. Should be replaced with a thread-safe version +// as soon as the WASI SDK allows it. See https://bugs.swift.org/browse/SR-12766. +inline void wasi_call_once(int *flag, void *context, void (*func)(void *)) { + switch (*flag) { + case 0: + *flag = 1; + func(context); + return; + case 1: + return; + default: + assert(false && "wasi_call_once got invalid flag"); + abort(); + } +} +#endif + namespace swift { #ifdef __APPLE__ @@ -38,6 +56,10 @@ namespace swift { using OnceToken_t = unsigned long; # define SWIFT_ONCE_F(TOKEN, FUNC, CONTEXT) \ _swift_once_f(&TOKEN, CONTEXT, FUNC) +#elif defined(__wasi__) + using OnceToken_t = int; +# define SWIFT_ONCE_F(TOKEN, FUNC, CONTEXT) \ + ::wasi_call_once(&TOKEN, CONTEXT, FUNC) #else using OnceToken_t = std::once_flag; # define SWIFT_ONCE_F(TOKEN, FUNC, CONTEXT) \ diff --git a/include/swift/Basic/STLExtras.h b/include/swift/Basic/STLExtras.h index 14d48f5bafbe5..fa06044937165 100644 --- a/include/swift/Basic/STLExtras.h +++ b/include/swift/Basic/STLExtras.h @@ -71,6 +71,7 @@ struct function_traits { } // end namespace swift +#if !defined(swiftCore_EXPORTS) namespace llvm { /// @{ @@ -109,6 +110,7 @@ inline void interleave(const Container &c, UnaryFunctor each_fn, /// @} } // end namespace llvm +#endif namespace swift { @@ -526,19 +528,18 @@ makeOptionalTransformRange(Range range, OptionalTransform op) { /// the result in an optional to indicate success or failure. template struct DowncastAsOptional { - template + template auto operator()(Superclass &value) const - -> Optional(value))> { + -> llvm::Optional(value))> { if (auto result = llvm::dyn_cast(value)) return result; return None; } - template + template auto operator()(const Superclass &value) const - -> Optional(value))> - { + -> llvm::Optional(value))> { if (auto result = llvm::dyn_cast(value)) return result; diff --git a/include/swift/ClangImporter/ClangImporter.h b/include/swift/ClangImporter/ClangImporter.h index 59d26e702bfda..6cce6c7cf386a 100644 --- a/include/swift/ClangImporter/ClangImporter.h +++ b/include/swift/ClangImporter/ClangImporter.h @@ -371,7 +371,7 @@ class ClangImporter final : public ClangModuleLoader { Optional getModuleDependencies( StringRef moduleName, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate) override; + InterfaceSubContextDelegate &delegate) override; /// Add dependency information for the bridging header. 
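A minimal sketch of the pattern SWIFT_ONCE_F serves; on WASI the token is a plain int driven by the single-threaded wasi_call_once stub above, while elsewhere it is a dispatch_once_t, an unsigned long, or a std::once_flag.

static swift::OnceToken_t onceToken;
static void initializeState(void *context) { /* one-time setup */ }

void ensureInitialized() {
  // Runs initializeState exactly once, whichever platform backend is in play.
  SWIFT_ONCE_F(onceToken, initializeState, nullptr);
}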
/// diff --git a/include/swift/Demangling/Demangler.h b/include/swift/Demangling/Demangler.h index 1ca202f0585bf..30da4d715c917 100644 --- a/include/swift/Demangling/Demangler.h +++ b/include/swift/Demangling/Demangler.h @@ -565,8 +565,7 @@ class Demangler : public NodeFactory { NodePointer demangleValueWitness(); NodePointer demangleTypeMangling(); - NodePointer demangleSymbolicReference(unsigned char rawKind, - const void *at); + NodePointer demangleSymbolicReference(unsigned char rawKind); bool demangleBoundGenerics(Vector &TypeListList, NodePointer &RetroactiveConformances); diff --git a/include/swift/Demangling/TypeDecoder.h b/include/swift/Demangling/TypeDecoder.h index 9fcdadd41e0de..49ccc8534a2df 100644 --- a/include/swift/Demangling/TypeDecoder.h +++ b/include/swift/Demangling/TypeDecoder.h @@ -23,6 +23,7 @@ #include "swift/Basic/LLVM.h" #include "swift/Runtime/Unreachable.h" #include "swift/Strings.h" +#include "llvm/ADT/ArrayRef.h" #include namespace swift { @@ -93,7 +94,7 @@ enum class ImplParameterDifferentiability { NotDifferentiable }; -static inline Optional +static inline llvm::Optional getDifferentiabilityFromString(StringRef string) { if (string.empty()) return ImplParameterDifferentiability::DifferentiableOrNotApplicable; @@ -114,7 +115,7 @@ class ImplFunctionParam { using ConventionType = ImplParameterConvention; using DifferentiabilityType = ImplParameterDifferentiability; - static Optional + static llvm::Optional getConventionFromString(StringRef conventionString) { if (conventionString == "@in") return ConventionType::Indirect_In; @@ -167,7 +168,7 @@ class ImplFunctionResult { public: using ConventionType = ImplResultConvention; - static Optional + static llvm::Optional getConventionFromString(StringRef conventionString) { if (conventionString == "@out") return ConventionType::Indirect; @@ -267,8 +268,8 @@ class ImplFunctionTypeFlags { #if SWIFT_OBJC_INTEROP /// For a mangled node that refers to an Objective-C class or protocol, /// return the class or protocol name. -static inline Optional getObjCClassOrProtocolName( - NodePointer node) { +static inline llvm::Optional +getObjCClassOrProtocolName(NodePointer node) { if (node->getKind() != Demangle::Node::Kind::Class && node->getKind() != Demangle::Node::Kind::Protocol) return None; @@ -434,7 +435,7 @@ class TypeDecoder { case NodeKind::Metatype: case NodeKind::ExistentialMetatype: { unsigned i = 0; - Optional repr; + llvm::Optional repr; // Handle lowered metatypes in a hackish way. 
If the representation // was not thin, force the resulting typeref to have a non-empty @@ -660,7 +661,7 @@ class TypeDecoder { } } - Optional> errorResult; + llvm::Optional> errorResult; switch (errorResults.size()) { case 0: break; @@ -887,7 +888,7 @@ class TypeDecoder { } } genericArgsLevels.push_back(genericArgsBuf.size()); - std::vector> genericArgs; + std::vector> genericArgs; for (unsigned i = 0; i < genericArgsLevels.size() - 1; ++i) { auto start = genericArgsLevels[i], end = genericArgsLevels[i+1]; genericArgs.emplace_back(genericArgsBuf.data() + start, @@ -906,7 +907,7 @@ class TypeDecoder { private: template bool decodeImplFunctionPart(Demangle::NodePointer node, - SmallVectorImpl &results) { + llvm::SmallVectorImpl &results) { if (node->getNumChildren() != 2) return true; @@ -915,7 +916,7 @@ class TypeDecoder { return true; StringRef conventionString = node->getChild(0)->getText(); - Optional convention = + llvm::Optional convention = T::getConventionFromString(conventionString); if (!convention) return true; @@ -929,7 +930,7 @@ class TypeDecoder { bool decodeImplFunctionParam( Demangle::NodePointer node, - SmallVectorImpl> &results) { + llvm::SmallVectorImpl> &results) { // Children: `convention, differentiability?, type` if (node->getNumChildren() != 2 && node->getNumChildren() != 3) return true; @@ -1031,7 +1032,7 @@ class TypeDecoder { bool decodeMangledFunctionInputType( Demangle::NodePointer node, - SmallVectorImpl> ¶ms, + llvm::SmallVectorImpl> ¶ms, bool &hasParamFlags) { // Look through a couple of sugar nodes. if (node->getKind() == NodeKind::Type || @@ -1082,8 +1083,8 @@ class TypeDecoder { return true; }; - auto decodeParam = [&](NodePointer paramNode) - -> Optional> { + auto decodeParam = + [&](NodePointer paramNode) -> llvm::Optional> { if (paramNode->getKind() != NodeKind::TupleElement) return None; diff --git a/include/swift/Frontend/Frontend.h b/include/swift/Frontend/Frontend.h index 09dc478b67bf6..33d62e639499f 100644 --- a/include/swift/Frontend/Frontend.h +++ b/include/swift/Frontend/Frontend.h @@ -653,10 +653,11 @@ class CompilerInstance { /// Retrieve a description of which modules should be implicitly imported. 
ImplicitImportInfo getImplicitImportInfo() const; - void performSemaUpTo(SourceFile::ASTStage_t LimitStage); + void performSemaUpTo(SourceFile::ASTStage_t LimitStage, + SourceFile::ParsingOptions POpts = {}); /// Return true if had load error - bool parsePartialModulesAndInputFiles(); + bool loadPartialModulesAndImplicitImports(); void forEachFileToTypeCheck(llvm::function_ref fn); diff --git a/include/swift/Frontend/FrontendInputsAndOutputs.h b/include/swift/Frontend/FrontendInputsAndOutputs.h index b1bbeb0129457..ccc699e211e30 100644 --- a/include/swift/Frontend/FrontendInputsAndOutputs.h +++ b/include/swift/Frontend/FrontendInputsAndOutputs.h @@ -173,7 +173,7 @@ class FrontendInputsAndOutputs { private: friend class ArgsToFrontendOptionsConverter; - friend class ModuleInterfaceBuilder; + friend struct InterfaceSubContextDelegateImpl; void setMainAndSupplementaryOutputs( ArrayRef outputFiles, ArrayRef supplementaryOutputs); diff --git a/include/swift/Frontend/ModuleInterfaceLoader.h b/include/swift/Frontend/ModuleInterfaceLoader.h index b734c73dd71e4..2a793922a1605 100644 --- a/include/swift/Frontend/ModuleInterfaceLoader.h +++ b/include/swift/Frontend/ModuleInterfaceLoader.h @@ -108,6 +108,7 @@ #define SWIFT_FRONTEND_MODULEINTERFACELOADER_H #include "swift/Basic/LLVM.h" +#include "swift/Frontend/Frontend.h" #include "swift/Frontend/ModuleInterfaceSupport.h" #include "swift/Serialization/SerializedModuleLoader.h" #include "llvm/Support/StringSaver.h" @@ -203,18 +204,67 @@ class ModuleInterfaceLoader : public SerializedModuleLoaderBase { std::string getModuleCachePathFromClang(const clang::CompilerInstance &Instance); -bool extractSwiftInterfaceVersionAndArgs(SourceManager &SM, - DiagnosticEngine &Diags, - StringRef InterfacePath, - version::Version &Vers, - StringRef &CompilerVersion, - llvm::StringSaver &SubArgSaver, - SmallVectorImpl &SubArgs, - SourceLoc diagnosticLoc = SourceLoc()); - -void inheritOptionsForBuildingInterface(CompilerInvocation &Invok, - const SearchPathOptions &SearchPathOpts, - const LangOptions &LangOpts); +struct InterfaceSubContextDelegateImpl: InterfaceSubContextDelegate { +private: + SourceManager &SM; + DiagnosticEngine &Diags; + llvm::BumpPtrAllocator Allocator; + llvm::StringSaver ArgSaver; + std::vector GenericArgs; + CompilerInvocation subInvocation; + std::vector ModuleOutputPaths; + + template + InFlightDiagnostic diagnose(StringRef interfacePath, + SourceLoc diagnosticLoc, + Diag ID, + typename detail::PassArgument::type... Args) { + SourceLoc loc = diagnosticLoc; + if (diagnosticLoc.isInvalid()) { + // Diagnose this inside the interface file, if possible. 
+ loc = SM.getLocFromExternalSource(interfacePath, 1, 1); + } + return Diags.diagnose(loc, ID, std::move(Args)...); + } + void inheritOptionsForBuildingInterface(const SearchPathOptions &SearchPathOpts, + const LangOptions &LangOpts); + bool extractSwiftInterfaceVersionAndArgs(SmallVectorImpl &SubArgs, + std::string &CompilerVersion, + StringRef interfacePath, + SourceLoc diagnosticLoc); +public: + InterfaceSubContextDelegateImpl(SourceManager &SM, + DiagnosticEngine &Diags, + const SearchPathOptions &searchPathOpts, + const LangOptions &langOpts, + ClangModuleLoader *clangImporter, + bool buildModuleCacheDirIfAbsent, + StringRef moduleCachePath, + StringRef prebuiltCachePath, + bool serializeDependencyHashes, + bool trackSystemDependencies, + bool remarkOnRebuildFromInterface, + bool disableInterfaceFileLock); + bool runInSubContext(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref, StringRef)> action) override; + bool runInSubCompilerInstance(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref action) override; + + ~InterfaceSubContextDelegateImpl() = default; + + /// includes a hash of relevant key data. + StringRef computeCachedOutputPath(StringRef moduleName, + StringRef UseInterfacePath, + llvm::SmallString<256> &OutPath, + StringRef &CacheHash); + std::string getCacheHash(StringRef useInterfacePath); +}; } #endif diff --git a/include/swift/IDE/CodeCompletion.h b/include/swift/IDE/CodeCompletion.h index 142fad284ef82..4324cc9e920b8 100644 --- a/include/swift/IDE/CodeCompletion.h +++ b/include/swift/IDE/CodeCompletion.h @@ -182,6 +182,10 @@ class CodeCompletionStringChunk { /// This chunk should not be inserted into the editor buffer. TypeAnnotation, + /// Structured group version of 'TypeAnnotation'. + /// This grouped chunks should not be inserted into the editor buffer. + TypeAnnotationBegin, + /// A brace statement -- left brace and right brace. The preferred /// position to put the cursor after the completion result is inserted /// into the editor buffer is between the braces. @@ -195,7 +199,8 @@ class CodeCompletionStringChunk { return Kind == ChunkKind::CallParameterBegin || Kind == ChunkKind::GenericParameterBegin || Kind == ChunkKind::OptionalBegin || - Kind == ChunkKind::CallParameterTypeBegin; + Kind == ChunkKind::CallParameterTypeBegin || + Kind == ChunkKind::TypeAnnotationBegin; } static bool chunkHasText(ChunkKind Kind) { @@ -884,7 +889,7 @@ class CodeCompletionContext { : Cache(Cache) {} void setAnnotateResult(bool flag) { CurrentResults.annotateResult = flag; } - bool getAnnnoateResult() { return CurrentResults.annotateResult; } + bool getAnnotateResult() { return CurrentResults.annotateResult; } /// Allocate a string owned by the code completion context. 
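A hedged sketch of a loader asking the delegate for a configured sub-compiler; the delegate, module name, and paths are placeholders, and the bool-returning callback convention is an assumption.

// `delegate` is an InterfaceSubContextDelegateImpl built with the ctor above.
bool failed = delegate.runInSubCompilerInstance(
    "Foo", "/sdk/Foo.swiftinterface", "/cache/Foo-ABC.swiftmodule",
    SourceLoc(), [](SubCompilerInstanceInfo &info) {
      // Inspect info.BuildArguments or drive info.Instance here.
      return false;  // assumed convention: returning true signals an error
    });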
StringRef copyString(StringRef Str); diff --git a/include/swift/IDE/CodeCompletionResultPrinter.h b/include/swift/IDE/CodeCompletionResultPrinter.h index 5d936503effde..717d6282a4309 100644 --- a/include/swift/IDE/CodeCompletionResultPrinter.h +++ b/include/swift/IDE/CodeCompletionResultPrinter.h @@ -27,6 +27,13 @@ void printCodeCompletionResultDescription(const CodeCompletionResult &Result, void printCodeCompletionResultDescriptionAnnotated( const CodeCompletionResult &Result, llvm::raw_ostream &OS, bool leadingPunctuation); + +void printCodeCompletionResultTypeName( + const CodeCompletionResult &Result, llvm::raw_ostream &OS); + +void printCodeCompletionResultTypeNameAnnotated( + const CodeCompletionResult &Result, llvm::raw_ostream &OS); + } // namespace ide } // namespace swift diff --git a/include/swift/Parse/Parser.h b/include/swift/Parse/Parser.h index aa9f3220c5efb..0cb741076e9b4 100644 --- a/include/swift/Parse/Parser.h +++ b/include/swift/Parse/Parser.h @@ -1359,8 +1359,7 @@ class Parser { ParserResult parsePatternTuple(); ParserResult - parseOptionalPatternTypeAnnotation(ParserResult P, - bool isOptional); + parseOptionalPatternTypeAnnotation(ParserResult P); ParserResult parseMatchingPattern(bool isExprBasic); ParserResult parseMatchingPatternAsLetOrVar(bool isLet, SourceLoc VarLoc, diff --git a/include/swift/Reflection/TypeRef.h b/include/swift/Reflection/TypeRef.h index aacc09f2bb00a..505a1acbc6aba 100644 --- a/include/swift/Reflection/TypeRef.h +++ b/include/swift/Reflection/TypeRef.h @@ -346,11 +346,11 @@ class OpaqueArchetypeTypeRef final : public TypeRef { // Each ArrayRef in ArgumentLists references into the buffer owned by this // vector, which must not be modified after construction. std::vector AllArgumentsBuf; - std::vector> ArgumentLists; - - static TypeRefID Profile(StringRef idString, - StringRef description, unsigned ordinal, - ArrayRef> argumentLists) { + std::vector> ArgumentLists; + + static TypeRefID + Profile(StringRef idString, StringRef description, unsigned ordinal, + llvm::ArrayRef> argumentLists) { TypeRefID ID; ID.addString(idString.str()); ID.addInteger(ordinal); @@ -362,14 +362,13 @@ class OpaqueArchetypeTypeRef final : public TypeRef { return ID; } - + public: - OpaqueArchetypeTypeRef(StringRef id, - StringRef description, unsigned ordinal, - ArrayRef> argumentLists) - : TypeRef(TypeRefKind::OpaqueArchetype), - ID(id), Description(description), Ordinal(ordinal) - { + OpaqueArchetypeTypeRef( + StringRef id, StringRef description, unsigned ordinal, + llvm::ArrayRef> argumentLists) + : TypeRef(TypeRefKind::OpaqueArchetype), ID(id), Description(description), + Ordinal(ordinal) { std::vector argumentListLengths; for (auto argList : argumentLists) { @@ -379,25 +378,24 @@ class OpaqueArchetypeTypeRef final : public TypeRef { } auto *data = AllArgumentsBuf.data(); for (auto length : argumentListLengths) { - ArgumentLists.push_back(ArrayRef(data, length)); + ArgumentLists.push_back(llvm::ArrayRef(data, length)); data += length; } assert(data == AllArgumentsBuf.data() + AllArgumentsBuf.size()); } - + template - static const OpaqueArchetypeTypeRef *create(Allocator &A, - StringRef id, StringRef description, - unsigned ordinal, - ArrayRef> arguments) { + static const OpaqueArchetypeTypeRef * + create(Allocator &A, StringRef id, StringRef description, unsigned ordinal, + llvm::ArrayRef> arguments) { FIND_OR_CREATE_TYPEREF(A, OpaqueArchetypeTypeRef, id, description, ordinal, arguments); } - ArrayRef> getArgumentLists() const { + llvm::ArrayRef> getArgumentLists() 
const { return ArgumentLists; } - + unsigned getOrdinal() const { return Ordinal; } diff --git a/include/swift/Reflection/TypeRefBuilder.h b/include/swift/Reflection/TypeRefBuilder.h index 1e098d937a107..9d2b743f1be1d 100644 --- a/include/swift/Reflection/TypeRefBuilder.h +++ b/include/swift/Reflection/TypeRefBuilder.h @@ -248,8 +248,9 @@ class TypeRefBuilder { public: using BuiltType = const TypeRef *; - using BuiltTypeDecl = Optional; - using BuiltProtocolDecl = Optional>; + using BuiltTypeDecl = llvm::Optional; + using BuiltProtocolDecl = + llvm::Optional>; TypeRefBuilder(const TypeRefBuilder &other) = delete; TypeRefBuilder &operator=(const TypeRefBuilder &other) = delete; @@ -295,8 +296,7 @@ class TypeRefBuilder { return BuiltinTypeRef::create(*this, mangledName); } - Optional - createTypeDecl(Node *node, bool &typeAlias) { + llvm::Optional createTypeDecl(Node *node, bool &typeAlias) { return Demangle::mangleNode(node); } @@ -310,25 +310,25 @@ class TypeRefBuilder { return std::make_pair(name, true); } - Optional createTypeDecl(std::string &&mangledName, - bool &typeAlias) { + llvm::Optional createTypeDecl(std::string &&mangledName, + bool &typeAlias) { return std::move(mangledName); } - - const NominalTypeRef *createNominalType( - const Optional &mangledName) { + + const NominalTypeRef * + createNominalType(const llvm::Optional &mangledName) { return NominalTypeRef::create(*this, *mangledName, nullptr); } - const NominalTypeRef *createNominalType( - const Optional &mangledName, - const TypeRef *parent) { + const NominalTypeRef * + createNominalType(const llvm::Optional &mangledName, + const TypeRef *parent) { return NominalTypeRef::create(*this, *mangledName, parent); } - const TypeRef *createTypeAliasType( - const Optional &mangledName, - const TypeRef *parent) { + const TypeRef * + createTypeAliasType(const llvm::Optional &mangledName, + const TypeRef *parent) { // TypeRefs don't contain sugared types return nullptr; } @@ -354,21 +354,21 @@ class TypeRefBuilder { } const BoundGenericTypeRef * - createBoundGenericType(const Optional &mangledName, + createBoundGenericType(const llvm::Optional &mangledName, const std::vector &args) { return BoundGenericTypeRef::create(*this, *mangledName, args, nullptr); } const BoundGenericTypeRef * - createBoundGenericType(const Optional &mangledName, - ArrayRef args, + createBoundGenericType(const llvm::Optional &mangledName, + llvm::ArrayRef args, const TypeRef *parent) { return BoundGenericTypeRef::create(*this, *mangledName, args, parent); } - + const TypeRef * resolveOpaqueType(NodePointer opaqueDescriptor, - ArrayRef> genericArgs, + llvm::ArrayRef> genericArgs, unsigned ordinal) { // TODO: Produce a type ref for the opaque type if the underlying type isn't // available. @@ -403,26 +403,25 @@ class TypeRefBuilder { genericArgs); } - const TupleTypeRef * - createTupleType(ArrayRef elements, - std::string &&labels, bool isVariadic) { + const TupleTypeRef *createTupleType(llvm::ArrayRef elements, + std::string &&labels, bool isVariadic) { // FIXME: Add uniqueness checks in TupleTypeRef::Profile and // unittests/Reflection/TypeRef.cpp if using labels for identity. 
return TupleTypeRef::create(*this, elements, isVariadic); } const FunctionTypeRef *createFunctionType( - ArrayRef> params, + llvm::ArrayRef> params, const TypeRef *result, FunctionTypeFlags flags) { return FunctionTypeRef::create(*this, params, result, flags); } const FunctionTypeRef *createImplFunctionType( - Demangle::ImplParameterConvention calleeConvention, - ArrayRef> params, - ArrayRef> results, - Optional> errorResult, - ImplFunctionTypeFlags flags) { + Demangle::ImplParameterConvention calleeConvention, + llvm::ArrayRef> params, + llvm::ArrayRef> results, + llvm::Optional> errorResult, + ImplFunctionTypeFlags flags) { // Minimal support for lowered function types. These come up in // reflection as capture types. For the reflection library's // purposes, the only part that matters is the convention. @@ -451,9 +450,8 @@ class TypeRefBuilder { } const ProtocolCompositionTypeRef * - createProtocolCompositionType(ArrayRef protocols, - BuiltType superclass, - bool isClassBound) { + createProtocolCompositionType(llvm::ArrayRef protocols, + BuiltType superclass, bool isClassBound) { std::vector protocolRefs; for (const auto &protocol : protocols) { if (!protocol) @@ -469,14 +467,15 @@ class TypeRefBuilder { isClassBound); } - const ExistentialMetatypeTypeRef * - createExistentialMetatypeType(const TypeRef *instance, - Optional repr=None) { + const ExistentialMetatypeTypeRef *createExistentialMetatypeType( + const TypeRef *instance, + llvm::Optional repr = None) { return ExistentialMetatypeTypeRef::create(*this, instance); } - const MetatypeTypeRef *createMetatypeType(const TypeRef *instance, - Optional repr=None) { + const MetatypeTypeRef *createMetatypeType( + const TypeRef *instance, + llvm::Optional repr = None) { bool WasAbstract = (repr && *repr != ImplMetatypeRepresentation::Thin); return MetatypeTypeRef::create(*this, instance, WasAbstract); } @@ -530,7 +529,7 @@ class TypeRefBuilder { const ObjCClassTypeRef * createBoundGenericObjCClassType(const std::string &name, - ArrayRef args) { + llvm::ArrayRef args) { // Remote reflection just ignores generic arguments for Objective-C // lightweight generic types, since they don't affect layout. return createObjCClassType(name); diff --git a/include/swift/Remote/MetadataReader.h b/include/swift/Remote/MetadataReader.h index 1ea30c644c835..171ebd491cfe2 100644 --- a/include/swift/Remote/MetadataReader.h +++ b/include/swift/Remote/MetadataReader.h @@ -466,7 +466,7 @@ class MetadataReader { } /// Get the remote process's swift_isaMask. - Optional readIsaMask() { + llvm::Optional readIsaMask() { auto encoding = getIsaEncoding(); if (encoding != IsaEncodingKind::Masked) { // Still return success if there's no isa encoding at all. @@ -480,7 +480,7 @@ class MetadataReader { } /// Given a remote pointer to metadata, attempt to discover its MetadataKind. - Optional + llvm::Optional readKindFromMetadata(StoredPointer MetadataAddress) { auto meta = readMetadata(MetadataAddress); if (!meta) return None; @@ -501,8 +501,8 @@ class MetadataReader { /// Given a remote pointer to class metadata, attempt to discover its class /// instance size and whether fields should use the resilient layout strategy. 
- Optional - readInstanceStartAndAlignmentFromClassMetadata(StoredPointer MetadataAddress) { + llvm::Optional readInstanceStartAndAlignmentFromClassMetadata( + StoredPointer MetadataAddress) { auto meta = readMetadata(MetadataAddress); if (!meta || meta->getKind() != MetadataKind::Class) return None; @@ -527,7 +527,7 @@ class MetadataReader { /// Given a pointer to the metadata, attempt to read the value /// witness table. Note that it's not safe to access any non-mandatory /// members of the value witness table, like extra inhabitants or enum members. - Optional> + llvm::Optional> readValueWitnessTable(StoredPointer MetadataAddress) { // The value witness table pointer is at offset -1 from the metadata // pointer, that is, the pointer-sized word immediately before the @@ -548,7 +548,7 @@ class MetadataReader { /// pointer to its metadata address, its value address, and whether this /// is a toll-free-bridged NSError or an actual Error existential wrapper /// around a native Swift value. - Optional + llvm::Optional readMetadataAndValueErrorExistential(RemoteAddress ExistentialAddress) { // An pointer to an error existential is always an heap object. auto MetadataAddress = @@ -629,7 +629,7 @@ class MetadataReader { /// Given a known-opaque existential, attemp to discover the pointer to its /// metadata address and its value. - Optional + llvm::Optional readMetadataAndValueOpaqueExistential(RemoteAddress ExistentialAddress) { // OpaqueExistentialContainer is the layout of an opaque existential. // `Type` is the pointer to the metadata. @@ -1181,10 +1181,11 @@ class MetadataReader { } /// Read the isa pointer of an Object-C tagged pointer value. - Optional + llvm::Optional readMetadataFromTaggedPointer(StoredPointer objectAddress) { - auto readArrayElement = [&](StoredPointer base, StoredPointer tag) - -> Optional { + auto readArrayElement = + [&](StoredPointer base, + StoredPointer tag) -> llvm::Optional { StoredPointer addr = base + tag * sizeof(StoredPointer); StoredPointer isa; if (!Reader->readInteger(RemoteAddress(addr), &isa)) @@ -1210,7 +1211,7 @@ class MetadataReader { /// Read the isa pointer of a class or closure context instance and apply /// the isa mask. - Optional + llvm::Optional readMetadataFromInstance(StoredPointer objectAddress) { if (isTaggedPointer(objectAddress)) return readMetadataFromTaggedPointer(objectAddress); @@ -1281,9 +1282,8 @@ class MetadataReader { /// /// The offset is in units of words, from the start of the class's /// metadata. - Optional - readGenericArgsOffset(MetadataRef metadata, - ContextDescriptorRef descriptor) { + llvm::Optional + readGenericArgsOffset(MetadataRef metadata, ContextDescriptorRef descriptor) { switch (descriptor->getKind()) { case ContextDescriptorKind::Class: { auto type = cast>(descriptor); @@ -1319,7 +1319,7 @@ class MetadataReader { using ClassMetadataBounds = TargetClassMetadataBounds; // This follows computeMetadataBoundsForSuperclass. 
- Optional + llvm::Optional readMetadataBoundsOfSuperclass(ContextDescriptorRef subclassRef) { auto subclass = cast>(subclassRef); if (!subclass->hasResilientSuperclass()) @@ -1332,34 +1332,34 @@ class MetadataReader { } return forTypeReference( - subclass->getResilientSuperclassReferenceKind(), rawSuperclass, - [&](ContextDescriptorRef superclass) - -> Optional { - if (!isa>(superclass)) + subclass->getResilientSuperclassReferenceKind(), rawSuperclass, + [&](ContextDescriptorRef superclass) + -> llvm::Optional { + if (!isa>(superclass)) + return None; + return readMetadataBoundsOfSuperclass(superclass); + }, + [&](MetadataRef metadata) -> llvm::Optional { + auto cls = dyn_cast>(metadata); + if (!cls) + return None; + + return cls->getClassBoundsAsSwiftSuperclass(); + }, + [](StoredPointer objcClassName) -> llvm::Optional { + // We have no ability to look up an ObjC class by name. + // FIXME: add a query for this; clients may have a way to do it. return None; - return readMetadataBoundsOfSuperclass(superclass); - }, - [&](MetadataRef metadata) -> Optional { - auto cls = dyn_cast>(metadata); - if (!cls) - return None; - - return cls->getClassBoundsAsSwiftSuperclass(); - }, - [](StoredPointer objcClassName) -> Optional { - // We have no ability to look up an ObjC class by name. - // FIXME: add a query for this; clients may have a way to do it. - return None; - }); + }); } template - Optional - forTypeReference(TypeReferenceKind refKind, StoredPointer ref, - const DescriptorFn &descriptorFn, - const MetadataFn &metadataFn, - const ClassNameFn &classNameFn) { + llvm::Optional forTypeReference(TypeReferenceKind refKind, + StoredPointer ref, + const DescriptorFn &descriptorFn, + const MetadataFn &metadataFn, + const ClassNameFn &classNameFn) { switch (refKind) { case TypeReferenceKind::IndirectTypeDescriptor: { StoredPointer descriptorAddress = 0; @@ -1399,7 +1399,7 @@ class MetadataReader { /// Read a single generic type argument from a bound generic type /// metadata. - Optional + llvm::Optional readGenericArgFromMetadata(StoredPointer metadata, unsigned index) { auto Meta = readMetadata(metadata); if (!Meta) @@ -1472,7 +1472,7 @@ class MetadataReader { } /// Given a remote pointer to class metadata, attempt to read its superclass. - Optional + llvm::Optional readOffsetToFirstCaptureFromMetadata(StoredPointer MetadataAddress) { auto meta = readMetadata(MetadataAddress); if (!meta || meta->getKind() != MetadataKind::HeapLocalVariable) @@ -1481,12 +1481,13 @@ class MetadataReader { auto heapMeta = cast>(meta); return heapMeta->OffsetToFirstCapture; } - - Optional readPointer(StoredPointer address) { + + llvm::Optional readPointer(StoredPointer address) { return Reader->readPointer(RemoteAddress(address), sizeof(StoredPointer)); } - - Optional readResolvedPointerValue(StoredPointer address) { + + llvm::Optional + readResolvedPointerValue(StoredPointer address) { if (auto pointer = readPointer(address)) { if (!pointer->isResolved()) return None; @@ -1494,7 +1495,7 @@ class MetadataReader { } return None; } - + template RemoteAbsolutePointer resolvePointerField(RemoteRef base, const U &field) { @@ -1504,7 +1505,7 @@ class MetadataReader { } /// Given a remote pointer to class metadata, attempt to read its superclass. 
- Optional + llvm::Optional readCaptureDescriptorFromMetadata(StoredPointer MetadataAddress) { auto meta = readMetadata(MetadataAddress); if (!meta || meta->getKind() != MetadataKind::HeapLocalVariable) @@ -1525,16 +1526,16 @@ class MetadataReader { RemoteRef base, const Field &field) { return (StoredPointer)base.resolveRelativeFieldData(field); } - - template - Optional resolveRelativeIndirectableField( - RemoteRef base, const Field &field) { + + template + llvm::Optional + resolveRelativeIndirectableField(RemoteRef base, const Field &field) { auto fieldRef = base.getField(field); int32_t offset; memcpy(&offset, fieldRef.getLocalBuffer(), sizeof(int32_t)); if (offset == 0) - return Optional(nullptr); + return llvm::Optional(nullptr); bool indirect = offset & 1; offset &= ~1u; @@ -1764,7 +1765,7 @@ class MetadataReader { /// Returns Optional(ParentContextDescriptorRef()) if there's no parent descriptor. /// Returns None if there was an error reading the parent descriptor. - Optional + llvm::Optional readParentContextDescriptor(ContextDescriptorRef base) { auto parentAddress = resolveRelativeIndirectableField(base, base->Parent); if (!parentAddress) @@ -1798,9 +1799,9 @@ class MetadataReader { } /// Read the name from a module, type, or protocol context descriptor. - Optional readContextDescriptorName( + llvm::Optional readContextDescriptorName( ContextDescriptorRef descriptor, - Optional> &importInfo) { + llvm::Optional> &importInfo) { std::string name; auto context = descriptor.getLocalBuffer(); @@ -1964,11 +1965,10 @@ class MetadataReader { /// If we have a context whose parent context is an anonymous context /// that provides the local/private name for the current context, /// produce a mangled node describing the name of \c context. - Demangle::NodePointer - adoptAnonymousContextName(ContextDescriptorRef contextRef, - Optional &parentContextRef, - Demangler &dem, - Demangle::NodePointer &outerNode) { + Demangle::NodePointer adoptAnonymousContextName( + ContextDescriptorRef contextRef, + llvm::Optional &parentContextRef, + Demangler &dem, Demangle::NodePointer &outerNode) { outerNode = nullptr; // Bail if there is no parent, or if the parent is in another image. @@ -2016,7 +2016,7 @@ class MetadataReader { return nullptr; // Read the name of the current context. - Optional> importInfo; + llvm::Optional> importInfo; auto contextName = readContextDescriptorName(contextRef, importInfo); if (!contextName) return nullptr; @@ -2133,7 +2133,7 @@ class MetadataReader { } Demangle::Node::Kind nodeKind; - Optional> importInfo; + llvm::Optional> importInfo; auto getContextName = [&]() -> bool { if (nameNode) diff --git a/include/swift/Runtime/Concurrent.h b/include/swift/Runtime/Concurrent.h index 003088c485fbf..4d2f040db8805 100644 --- a/include/swift/Runtime/Concurrent.h +++ b/include/swift/Runtime/Concurrent.h @@ -203,7 +203,7 @@ class ConcurrentMapBase : protected Allocator { // Deallocate the node. The static_cast here is required // because LLVM's allocator API is insane. - this->Deallocate(static_cast(node), allocSize); + this->Deallocate(static_cast(node), allocSize, alignof(Node)); } }; diff --git a/include/swift/Runtime/Numeric.h b/include/swift/Runtime/Numeric.h index 6d91e13040681..0d52e97d580fd 100644 --- a/include/swift/Runtime/Numeric.h +++ b/include/swift/Runtime/Numeric.h @@ -44,9 +44,9 @@ class IntegerLiteral { /// Return the chunks of data making up this value, arranged starting from /// the least-significant chunk. The value is sign-extended to fill the /// final chunk. 
- ArrayRef getData() const { - return ArrayRef(Data, - (Flags.getBitWidth() + BitsPerChunk - 1) / BitsPerChunk); + llvm::ArrayRef getData() const { + return llvm::makeArrayRef(Data, (Flags.getBitWidth() + BitsPerChunk - 1) / + BitsPerChunk); } /// The flags for this value. diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index cb1ab417683a3..26fab8b9acd85 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -1422,9 +1422,10 @@ class SILBuilder { } RefElementAddrInst *createRefElementAddr(SILLocation Loc, SILValue Operand, - VarDecl *Field, SILType ResultTy) { + VarDecl *Field, SILType ResultTy, + bool IsImmutable = false) { return insert(new (getModule()) RefElementAddrInst( - getSILDebugLocation(Loc), Operand, Field, ResultTy)); + getSILDebugLocation(Loc), Operand, Field, ResultTy, IsImmutable)); } RefElementAddrInst *createRefElementAddr(SILLocation Loc, SILValue Operand, VarDecl *Field) { @@ -1434,9 +1435,10 @@ class SILBuilder { } RefTailAddrInst *createRefTailAddr(SILLocation Loc, SILValue Ref, - SILType ResultTy) { + SILType ResultTy, + bool IsImmutable = false) { return insert(new (getModule()) RefTailAddrInst(getSILDebugLocation(Loc), - Ref, ResultTy)); + Ref, ResultTy, IsImmutable)); } DestructureStructInst *createDestructureStruct(SILLocation Loc, @@ -1722,6 +1724,17 @@ class SILBuilder { return insert(new (getModule()) IsUniqueInst(getSILDebugLocation(Loc), operand, Int1Ty)); } + BeginCOWMutationInst *createBeginCOWMutation(SILLocation Loc, + SILValue operand, bool isNative) { + auto Int1Ty = SILType::getBuiltinIntegerType(1, getASTContext()); + return insert(BeginCOWMutationInst::create(getSILDebugLocation(Loc), operand, + Int1Ty, getFunction(), isNative)); + } + EndCOWMutationInst *createEndCOWMutation(SILLocation Loc, SILValue operand, + bool keepUnique = false) { + return insert(new (getModule()) EndCOWMutationInst(getSILDebugLocation(Loc), + operand, keepUnique)); + } IsEscapingClosureInst *createIsEscapingClosure(SILLocation Loc, SILValue operand, unsigned VerificationType) { diff --git a/include/swift/SIL/SILCloner.h b/include/swift/SIL/SILCloner.h index 5b9f2dfcea89f..474961ebaf76d 100644 --- a/include/swift/SIL/SILCloner.h +++ b/include/swift/SIL/SILCloner.h @@ -1966,8 +1966,8 @@ SILCloner::visitRefElementAddrInst(RefElementAddrInst *Inst) { getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); recordClonedInstruction( Inst, getBuilder().createRefElementAddr( - getOpLocation(Inst->getLoc()), getOpValue(Inst->getOperand()), - Inst->getField(), getOpType(Inst->getType()))); + getOpLocation(Inst->getLoc()), getOpValue(Inst->getOperand()), + Inst->getField(), getOpType(Inst->getType()), Inst->isImmutable())); } template @@ -1977,7 +1977,8 @@ SILCloner::visitRefTailAddrInst(RefTailAddrInst *Inst) { recordClonedInstruction( Inst, getBuilder().createRefTailAddr(getOpLocation(Inst->getLoc()), getOpValue(Inst->getOperand()), - getOpType(Inst->getType()))); + getOpType(Inst->getType()), + Inst->isImmutable())); } template @@ -2370,6 +2371,20 @@ void SILCloner::visitIsUniqueInst(IsUniqueInst *Inst) { Inst, getBuilder().createIsUnique(getOpLocation(Inst->getLoc()), getOpValue(Inst->getOperand()))); } +template +void SILCloner::visitBeginCOWMutationInst(BeginCOWMutationInst *Inst) { + getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); + recordClonedInstruction( + Inst, getBuilder().createBeginCOWMutation(getOpLocation(Inst->getLoc()), + getOpValue(Inst->getOperand()), Inst->isNative())); +} 
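A hedged sketch of emitting the new marker pair through SILBuilder; `B`, `loc`, and `ref` stand for an existing builder, location, and class reference, and the result accessors used here are the ones defined further down in this patch.

auto *begin = B.createBeginCOWMutation(loc, ref, /*isNative=*/ true);
SILValue isUnique = begin->getUniquenessResult();  // Int1 uniqueness check
SILValue buffer = begin->getBufferResult();        // reference to mutate through
// ... branch on isUnique and perform the mutation through buffer ...
B.createEndCOWMutation(loc, buffer, /*keepUnique=*/ false);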
+template +void SILCloner::visitEndCOWMutationInst(EndCOWMutationInst *Inst) { + getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); + recordClonedInstruction( + Inst, getBuilder().createEndCOWMutation(getOpLocation(Inst->getLoc()), + getOpValue(Inst->getOperand()), Inst->doKeepUnique())); +} template void SILCloner::visitIsEscapingClosureInst( IsEscapingClosureInst *Inst) { diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index 23a47ff99198c..99ce5b3225f64 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -1162,8 +1162,7 @@ class InstructionBase : public InstBase { } }; -/// A template base class for instructions that take a single SILValue operand -/// and has no result or a single value result. +/// A template base class for instructions that take a single SILValue operand. template class UnaryInstructionBase : public InstructionBase { // Space for 1 operand. @@ -5806,11 +5805,24 @@ class RefElementAddrInst friend SILBuilder; RefElementAddrInst(SILDebugLocation DebugLoc, SILValue Operand, - VarDecl *Field, SILType ResultTy) - : UnaryInstructionBase(DebugLoc, Operand, ResultTy, Field) {} + VarDecl *Field, SILType ResultTy, bool IsImmutable) + : UnaryInstructionBase(DebugLoc, Operand, ResultTy, Field) { + setImmutable(IsImmutable); + } public: ClassDecl *getClassDecl() const { return cast(getParentDecl()); } + + /// Returns true if all loads of the same instance variable from the same + /// class reference operand are guaranteed to yield the same value. + bool isImmutable() const { + return SILInstruction::Bits.RefElementAddrInst.Immutable; + } + + /// Sets the immutable flag. + void setImmutable(bool immutable = true) { + SILInstruction::Bits.RefElementAddrInst.Immutable = immutable; + } }; /// RefTailAddrInst - Derive the address of the first element of the first @@ -5821,8 +5833,11 @@ class RefTailAddrInst { friend SILBuilder; - RefTailAddrInst(SILDebugLocation DebugLoc, SILValue Operand, SILType ResultTy) - : UnaryInstructionBase(DebugLoc, Operand, ResultTy) {} + RefTailAddrInst(SILDebugLocation DebugLoc, SILValue Operand, SILType ResultTy, + bool IsImmutable) + : UnaryInstructionBase(DebugLoc, Operand, ResultTy) { + setImmutable(IsImmutable); + } public: ClassDecl *getClassDecl() const { @@ -5832,6 +5847,17 @@ class RefTailAddrInst } SILType getTailType() const { return getType().getObjectType(); } + + /// Returns true if all loads of the same instance variable from the same + /// class reference operand are guaranteed to yield the same value. + bool isImmutable() const { + return SILInstruction::Bits.RefTailAddrInst.Immutable; + } + + /// Sets the immutable flag. + void setImmutable(bool immutable = true) { + SILInstruction::Bits.RefTailAddrInst.Immutable = immutable; + } }; /// MethodInst - Abstract base for instructions that implement dynamic @@ -6562,6 +6588,102 @@ class IsUniqueInst : UnaryInstructionBase(DebugLoc, Operand, BoolTy) {} }; +class BeginCOWMutationInst; + +/// A result for the begin_cow_mutation instruction. See documentation for +/// begin_cow_mutation for more information. 
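A short sketch of an optimizer pass consulting the new flag (`inst` is assumed to be a SILInstruction*); an immutable ref_element_addr guarantees that repeated loads from the same instance see the same value.

if (auto *rea = dyn_cast<RefElementAddrInst>(inst)) {
  if (rea->isImmutable()) {
    // Loads through rea can be treated as invariant, e.g. hoisted or CSE'd.
  }
}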
+class BeginCOWMutationResult final : public MultipleValueInstructionResult { +public: + BeginCOWMutationResult(unsigned index, SILType type, + ValueOwnershipKind ownershipKind) + : MultipleValueInstructionResult(ValueKind::BeginCOWMutationResult, + index, type, ownershipKind) {} + + BeginCOWMutationInst *getParent(); // inline below + const BeginCOWMutationInst *getParent() const { + return const_cast(this)->getParent(); + } + + static bool classof(const SILNode *N) { + return N->getKind() == SILNodeKind::BeginCOWMutationResult; + } +}; + +/// Performs a uniqueness check of the operand for the purpose of modifying +/// a copy-on-write object. +/// +/// Returns two results: the first result is an Int1 which is the result of the +/// uniqueness check. The second result is the class reference operand, which +/// can be used for mutation. +class BeginCOWMutationInst final + : public UnaryInstructionBase, + public MultipleValueInstructionTrailingObjects< + BeginCOWMutationInst, BeginCOWMutationResult> +{ + friend SILBuilder; + friend TrailingObjects; + + BeginCOWMutationInst(SILDebugLocation loc, SILValue operand, + ArrayRef resultTypes, + ArrayRef resultOwnerships, + bool isNative); + + static BeginCOWMutationInst * + create(SILDebugLocation loc, SILValue operand, SILType BoolTy, SILFunction &F, + bool isNative); + +public: + using MultipleValueInstructionTrailingObjects::totalSizeToAlloc; + + /// Returns the result of the uniqueness check. + SILValue getUniquenessResult() const { + return &getAllResultsBuffer()[0]; + } + + /// Returns the class reference which can be used for mutation. + SILValue getBufferResult() const { + return &getAllResultsBuffer()[1]; + } + + bool isNative() const { + return SILInstruction::Bits.BeginCOWMutationInst.Native; + } + + void setNative(bool native = true) { + SILInstruction::Bits.BeginCOWMutationInst.Native = native; + } +}; + +// Out of line to work around forward declaration issues. +inline BeginCOWMutationInst *BeginCOWMutationResult::getParent() { + auto *Parent = MultipleValueInstructionResult::getParent(); + return cast(Parent); +} + +/// Marks the end of the mutation of a reference counted object. +class EndCOWMutationInst + : public UnaryInstructionBase +{ + friend SILBuilder; + + EndCOWMutationInst(SILDebugLocation DebugLoc, SILValue Operand, + bool keepUnique) + : UnaryInstructionBase(DebugLoc, Operand, Operand->getType()) { + setKeepUnique(keepUnique); + } + +public: + bool doKeepUnique() const { + return SILInstruction::Bits.EndCOWMutationInst.KeepUnique; + } + + void setKeepUnique(bool keepUnique = true) { + SILInstruction::Bits.EndCOWMutationInst.KeepUnique = keepUnique; + } +}; + /// Given an escaping closure return true iff it has a non-nil context and the /// context has a strong reference count greater than 1. class IsEscapingClosureInst diff --git a/include/swift/SIL/SILModule.h b/include/swift/SIL/SILModule.h index abfbc08e1f4de..46e2576cd6923 100644 --- a/include/swift/SIL/SILModule.h +++ b/include/swift/SIL/SILModule.h @@ -107,8 +107,6 @@ enum class SILStage { /// when a Swift compilation context is lowered to SIL. class SILModule { friend class SILFunctionBuilder; - friend class SILGenSourceFileRequest; - friend class SILGenWholeModuleRequest; public: using FunctionListType = llvm::ilist; @@ -265,10 +263,6 @@ class SILModule { /// The indexed profile data to be used for PGO, or nullptr. std::unique_ptr PGOReader; - /// True if this SILModule really contains the whole module, i.e. 
- /// optimizations can assume that they see the whole module. - bool wholeModule; - /// The options passed into this SILModule. const SILOptions &Options; @@ -283,11 +277,8 @@ class SILModule { /// invalidation message is sent. llvm::SetVector NotificationHandlers; - // Intentionally marked private so that we need to use 'constructSIL()' - // to construct a SILModule. - SILModule(ModuleDecl *M, Lowering::TypeConverter &TC, - const SILOptions &Options, const DeclContext *associatedDC, - bool wholeModule); + SILModule(llvm::PointerUnion context, + Lowering::TypeConverter &TC, const SILOptions &Options); SILModule(const SILModule&) = delete; void operator=(const SILModule&) = delete; @@ -358,23 +349,14 @@ class SILModule { /// Erase a global SIL variable from the module. void eraseGlobalVariable(SILGlobalVariable *G); - /// Construct a SIL module from an AST module. - /// - /// The module will be constructed in the Raw stage. The provided AST module - /// should contain source files. + /// Create and return an empty SIL module suitable for generating or parsing + /// SIL into. /// - /// If a source file is provided, SIL will only be emitted for decls in that - /// source file. - static std::unique_ptr - constructSIL(ModuleDecl *M, Lowering::TypeConverter &TC, - const SILOptions &Options, FileUnit *sf = nullptr); - - /// Create and return an empty SIL module that we can - /// later parse SIL bodies directly into, without converting from an AST. + /// \param context The associated decl context. This should be a FileUnit in + /// single-file mode, and a ModuleDecl in whole-module mode. static std::unique_ptr - createEmptyModule(ModuleDecl *M, Lowering::TypeConverter &TC, - const SILOptions &Options, - bool WholeModule = false); + createEmptyModule(llvm::PointerUnion context, + Lowering::TypeConverter &TC, const SILOptions &Options); /// Get the Swift module associated with this SIL module. ModuleDecl *getSwiftModule() const { return TheSwiftModule; } @@ -382,15 +364,15 @@ class SILModule { ASTContext &getASTContext() const; SourceManager &getSourceManager() const { return getASTContext().SourceMgr; } - /// Get the Swift DeclContext associated with this SIL module. + /// Get the Swift DeclContext associated with this SIL module. This is never + /// null. /// /// All AST declarations within this context are assumed to have been fully /// processed as part of generating this module. This allows certain passes /// to make additional assumptions about these declarations. /// /// If this is the same as TheSwiftModule, the entire module is being - /// compiled as a single unit. If this is null, no context-based assumptions - /// can be made. + /// compiled as a single unit. const DeclContext *getAssociatedContext() const { return AssociatedDeclContext; } @@ -398,7 +380,7 @@ class SILModule { /// Returns true if this SILModule really contains the whole module, i.e. /// optimizations can assume that they see the whole module. 
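A hedged sketch of the new context-based construction (`mainModule`, `primaryFile`, `types`, and `opts` are assumed to exist): whole-module builds pass the ModuleDecl, single-file builds pass the FileUnit, and isWholeModule() now falls out of that choice.

auto wholeSIL = SILModule::createEmptyModule(mainModule, types, opts);
auto fileSIL = SILModule::createEmptyModule(primaryFile, types, opts);
assert(wholeSIL->isWholeModule() && !fileSIL->isWholeModule());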
bool isWholeModule() const { - return wholeModule; + return isa(AssociatedDeclContext); } bool isStdlibModule() const; diff --git a/include/swift/SIL/SILNode.h b/include/swift/SIL/SILNode.h index a3b2a52f43b92..f20b19fbae1f9 100644 --- a/include/swift/SIL/SILNode.h +++ b/include/swift/SIL/SILNode.h @@ -304,6 +304,18 @@ class alignas(8) SILNode { FieldNo : 32 ); + SWIFT_INLINE_BITFIELD(RefElementAddrInst, SingleValueInstruction, 1, + Immutable : 1 + ); + + SWIFT_INLINE_BITFIELD(RefTailAddrInst, SingleValueInstruction, 1, + Immutable : 1 + ); + + SWIFT_INLINE_BITFIELD(EndCOWMutationInst, NonValueInstruction, 1, + KeepUnique : 1 + ); + SWIFT_INLINE_BITFIELD_FULL(FieldIndexCacheBase, SingleValueInstruction, 32, : NumPadBits, FieldIndex : 32); @@ -357,6 +369,12 @@ class alignas(8) SILNode { NumCases : 32 ); + SWIFT_INLINE_BITFIELD_EMPTY(MultipleValueInstruction, SILInstruction); + + SWIFT_INLINE_BITFIELD(BeginCOWMutationInst, MultipleValueInstruction, 1, + Native : 1 + ); + } Bits; enum class SILNodeStorageLocation : uint8_t { Value, Instruction }; diff --git a/include/swift/SIL/SILNodes.def b/include/swift/SIL/SILNodes.def index d85bdb0bac175..9bd877c60c166 100644 --- a/include/swift/SIL/SILNodes.def +++ b/include/swift/SIL/SILNodes.def @@ -413,6 +413,7 @@ ABSTRACT_VALUE(SILArgument, ValueBase) ABSTRACT_VALUE(MultipleValueInstructionResult, ValueBase) MULTIPLE_VALUE_INST_RESULT(BeginApplyResult, MultipleValueInstructionResult) + MULTIPLE_VALUE_INST_RESULT(BeginCOWMutationResult, MultipleValueInstructionResult) MULTIPLE_VALUE_INST_RESULT(DestructureStructResult, MultipleValueInstructionResult) MULTIPLE_VALUE_INST_RESULT(DestructureTupleResult, MultipleValueInstructionResult) VALUE_RANGE(MultipleValueInstructionResult, BeginApplyResult, DestructureTupleResult) @@ -579,6 +580,9 @@ ABSTRACT_VALUE_AND_INST(SingleValueInstruction, ValueBase, SILInstruction) SINGLE_VALUE_INST(IsUniqueInst, is_unique, SingleValueInstruction, MayHaveSideEffects, DoesNotRelease) + SINGLE_VALUE_INST(EndCOWMutationInst, end_cow_mutation, + SingleValueInstruction, None, DoesNotRelease) + SINGLE_VALUE_INST(IsEscapingClosureInst, is_escaping_closure, SingleValueInstruction, MayRead, DoesNotRelease) @@ -876,6 +880,8 @@ NODE_RANGE(NonValueInstruction, UnreachableInst, CondFailInst) ABSTRACT_INST(MultipleValueInstruction, SILInstruction) FULLAPPLYSITE_MULTIPLE_VALUE_INST(BeginApplyInst, begin_apply, MultipleValueInstruction, MayHaveSideEffects, MayRelease) +MULTIPLE_VALUE_INST(BeginCOWMutationInst, begin_cow_mutation, + MultipleValueInstruction, None, DoesNotRelease) MULTIPLE_VALUE_INST(DestructureStructInst, destructure_struct, MultipleValueInstruction, None, DoesNotRelease) MULTIPLE_VALUE_INST(DestructureTupleInst, destructure_tuple, diff --git a/include/swift/SILOptimizer/Utils/LoadStoreOptUtils.h b/include/swift/SILOptimizer/Utils/LoadStoreOptUtils.h index 5a25557693b06..b3be068d40051 100644 --- a/include/swift/SILOptimizer/Utils/LoadStoreOptUtils.h +++ b/include/swift/SILOptimizer/Utils/LoadStoreOptUtils.h @@ -169,14 +169,16 @@ class LSBase { } /// Print the LSBase. 
- virtual void print(llvm::raw_ostream &os, SILModule *Mod, - TypeExpansionContext context) { + virtual void print(llvm::raw_ostream &os) { os << Base; - Path.getValue().print(os, *Mod, context); + SILFunction *F = Base->getFunction(); + if (F) { + Path.getValue().print(os, F->getModule(), TypeExpansionContext(*F)); + } } - virtual void dump(SILModule *Mod, TypeExpansionContext context) { - print(llvm::dbgs(), Mod, context); + virtual void dump() { + print(llvm::dbgs()); } }; @@ -260,13 +262,12 @@ class LSValue : public LSBase { return Path.getValue().createExtract(Base, Inst, true); } - void print(llvm::raw_ostream &os, SILModule *Mod, - TypeExpansionContext context) { + void print(llvm::raw_ostream &os) { if (CoveringValue) { os << "Covering Value"; return; } - LSBase::print(os, Mod, context); + LSBase::print(os); } /// Expand this SILValue to all individual fields it contains. diff --git a/include/swift/Sema/SourceLoader.h b/include/swift/Sema/SourceLoader.h index 97e82568ba285..9cb8a627146b8 100644 --- a/include/swift/Sema/SourceLoader.h +++ b/include/swift/Sema/SourceLoader.h @@ -94,7 +94,7 @@ class SourceLoader : public ModuleLoader { Optional getModuleDependencies( StringRef moduleName, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate) override { + InterfaceSubContextDelegate &delegate) override { // FIXME: Implement? return None; } diff --git a/include/swift/Serialization/SerializedModuleLoader.h b/include/swift/Serialization/SerializedModuleLoader.h index 4cfb23fbf5b64..a10302168da60 100644 --- a/include/swift/Serialization/SerializedModuleLoader.h +++ b/include/swift/Serialization/SerializedModuleLoader.h @@ -195,7 +195,7 @@ class SerializedModuleLoaderBase : public ModuleLoader { virtual Optional getModuleDependencies( StringRef moduleName, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate) override; + InterfaceSubContextDelegate &delegate) override; }; /// Imports serialized Swift modules into an ASTContext. diff --git a/include/swift/Subsystems.h b/include/swift/Subsystems.h index 084e064b4cebb..1029f651a38e8 100644 --- a/include/swift/Subsystems.h +++ b/include/swift/Subsystems.h @@ -152,14 +152,6 @@ namespace swift { /// emitted. void performWholeModuleTypeChecking(SourceFile &SF); - /// Checks to see if any of the imports in \p M use `@_implementationOnly` in - /// one file and not in another. - /// - /// Like redeclaration checking, but for imports. This isn't part of - /// swift::performWholeModuleTypeChecking because it's linear in the number - /// of declarations in the module. - void checkInconsistentImplementationOnlyImports(ModuleDecl *M); - /// Recursively validate the specified type. /// /// This is used when dealing with partial source files (e.g. 
SIL parsing, diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index f430569a6360a..a69e56cd87f91 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -1631,7 +1631,7 @@ void ASTContext::addModuleLoader(std::unique_ptr loader, Optional ASTContext::getModuleDependencies( StringRef moduleName, bool isUnderlyingClangModule, - ModuleDependenciesCache &cache, SubASTContextDelegate &delegate) { + ModuleDependenciesCache &cache, InterfaceSubContextDelegate &delegate) { for (auto &loader : getImpl().ModuleLoaders) { if (isUnderlyingClangModule && loader.get() != getImpl().TheClangModuleLoader) @@ -3891,11 +3891,8 @@ OpaqueTypeArchetypeType::get(OpaqueTypeDecl *Decl, superclass = superclass.subst(Substitutions); } #endif - SmallVector protos; - for (auto proto : signature->getConformsTo(opaqueInterfaceTy)) { - protos.push_back(proto); - } - + const auto protos = signature->getRequiredProtocols(opaqueInterfaceTy); + auto mem = ctx.Allocate( OpaqueTypeArchetypeType::totalSizeToAlloc( protos.size(), superclass ? 1 : 0, layout ? 1 : 0), diff --git a/lib/AST/ASTMangler.cpp b/lib/AST/ASTMangler.cpp index 43ed5bc878eec..0ae8ccfa7b2f1 100644 --- a/lib/AST/ASTMangler.cpp +++ b/lib/AST/ASTMangler.cpp @@ -2539,8 +2539,9 @@ void ASTMangler::appendGenericSignatureParts( DependentMemberType * ASTMangler::dropProtocolFromAssociatedType(DependentMemberType *dmt) { auto baseTy = dmt->getBase(); - bool unambiguous = (!dmt->getAssocType() || - CurGenericSignature->getConformsTo(baseTy).size() <= 1); + bool unambiguous = + (!dmt->getAssocType() || + CurGenericSignature->getRequiredProtocols(baseTy).size() <= 1); if (auto *baseDMT = baseTy->getAs()) baseTy = dropProtocolFromAssociatedType(baseDMT); @@ -2573,8 +2574,8 @@ void ASTMangler::appendAssociatedTypeName(DependentMemberType *dmt) { // If the base type is known to have a single protocol conformance // in the current generic context, then we don't need to disambiguate the // associated type name by protocol. 
- if (!OptimizeProtocolNames || !CurGenericSignature - || CurGenericSignature->getConformsTo(dmt->getBase()).size() > 1) { + if (!OptimizeProtocolNames || !CurGenericSignature || + CurGenericSignature->getRequiredProtocols(dmt->getBase()).size() > 1) { appendAnyGenericType(assocTy->getProtocol()); } return; diff --git a/lib/AST/ASTPrinter.cpp b/lib/AST/ASTPrinter.cpp index df847e9836909..7c71ac36f5a03 100644 --- a/lib/AST/ASTPrinter.cpp +++ b/lib/AST/ASTPrinter.cpp @@ -2612,11 +2612,11 @@ void PrintAST::visitVarDecl(VarDecl *decl) { auto type = decl->getInterfaceType(); Printer << ": "; TypeLoc tyLoc; - if (auto *repr = decl->getTypeReprOrParentPatternTypeRepr()) + if (auto *repr = decl->getTypeReprOrParentPatternTypeRepr()) { tyLoc = TypeLoc(repr, type); - else + } else { tyLoc = TypeLoc::withoutLoc(type); - + } Printer.printDeclResultTypePre(decl, tyLoc); // HACK: When printing result types for vars with opaque result types, diff --git a/lib/AST/AutoDiff.cpp b/lib/AST/AutoDiff.cpp index 090fbcbaddef6..588c5cebb48ec 100644 --- a/lib/AST/AutoDiff.cpp +++ b/lib/AST/AutoDiff.cpp @@ -421,12 +421,20 @@ void DerivativeFunctionTypeError::log(raw_ostream &OS) const { case Kind::MultipleSemanticResults: OS << "has multiple semantic results"; break; - case Kind::NonDifferentiableParameters: - OS << "has non-differentiable parameters: "; - value.indices->print(OS); + case Kind::NoDifferentiabilityParameters: + OS << "has no differentiability parameters"; break; - case Kind::NonDifferentiableResult: - OS << "has non-differentiable result: " << value.type; + case Kind::NonDifferentiableDifferentiabilityParameter: { + auto nonDiffParam = getNonDifferentiableTypeAndIndex(); + OS << "has non-differentiable differentiability parameter " + << nonDiffParam.second << ": " << nonDiffParam.first; break; } + case Kind::NonDifferentiableResult: { + auto nonDiffResult = getNonDifferentiableTypeAndIndex(); + OS << "has non-differentiable result " << nonDiffResult.second << ": " + << nonDiffResult.first; + break; + } + } } diff --git a/lib/AST/Builtins.cpp b/lib/AST/Builtins.cpp index 411a5c558d695..429cc910aa642 100644 --- a/lib/AST/Builtins.cpp +++ b/lib/AST/Builtins.cpp @@ -431,13 +431,26 @@ createGenericParam(ASTContext &ctx, const char *name, unsigned index) { /// Create a generic parameter list with multiple generic parameters. 
static GenericParamList *getGenericParams(ASTContext &ctx, - unsigned numParameters) { + unsigned numParameters, + bool isAnyObject) { assert(numParameters <= llvm::array_lengthof(GenericParamNames)); SmallVector genericParams; for (unsigned i = 0; i != numParameters; ++i) genericParams.push_back(createGenericParam(ctx, GenericParamNames[i], i)); + + if (isAnyObject) { + CanType ao = ctx.getAnyObjectType(); + SmallVector req; + req.push_back(RequirementRepr::getTypeConstraint(TypeLoc::withoutLoc(genericParams[0]->getInterfaceType()), SourceLoc(), + TypeLoc::withoutLoc(ao))); + + auto paramList = GenericParamList::create(ctx, SourceLoc(), genericParams, + SourceLoc(), req, SourceLoc()); + return paramList; + } + auto paramList = GenericParamList::create(ctx, SourceLoc(), genericParams, SourceLoc()); return paramList; @@ -460,9 +473,10 @@ namespace { SmallVector addedRequirements; public: - BuiltinFunctionBuilder(ASTContext &ctx, unsigned numGenericParams = 1) + BuiltinFunctionBuilder(ASTContext &ctx, unsigned numGenericParams = 1, + bool isAnyObject = false) : Context(ctx) { - TheGenericParamList = getGenericParams(ctx, numGenericParams); + TheGenericParamList = getGenericParams(ctx, numGenericParams, isAnyObject); for (auto gp : TheGenericParamList->getParams()) { genericParamTypes.push_back( gp->getDeclaredInterfaceType()->castTo()); @@ -645,6 +659,14 @@ static ValueDecl *getIsUniqueOperation(ASTContext &Context, Identifier Id) { return builder.build(Id); } +static ValueDecl *getEndCOWMutation(ASTContext &Context, Identifier Id) { + // (@inout T) -> () + BuiltinFunctionBuilder builder(Context); + builder.addParameter(makeGenericParam(), ValueOwnership::InOut); + builder.setResult(makeConcrete(TupleType::getEmpty(Context))); + return builder.build(Id); +} + static ValueDecl *getBindMemoryOperation(ASTContext &Context, Identifier Id) { BuiltinFunctionBuilder builder(Context); builder.addParameter(makeConcrete(Context.TheRawPointerType)); @@ -908,6 +930,16 @@ static ValueDecl *getValueToBridgeObject(ASTContext &C, Identifier Id) { return builder.build(Id); } +static ValueDecl *getCOWBufferForReading(ASTContext &C, Identifier Id) { + // T -> T + // + BuiltinFunctionBuilder builder(C, 1, true); + auto T = makeGenericParam(); + builder.addParameter(T); + builder.setResult(T); + return builder.build(Id); +} + static ValueDecl *getUnsafeGuaranteed(ASTContext &C, Identifier Id) { // T -> (T, Int8Ty) // @@ -2248,9 +2280,16 @@ ValueDecl *swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { case BuiltinValueKind::IsUnique: case BuiltinValueKind::IsUnique_native: + case BuiltinValueKind::BeginCOWMutation: + case BuiltinValueKind::BeginCOWMutation_native: if (!Types.empty()) return nullptr; + // BeginCOWMutation has the same signature as IsUnique. 
return getIsUniqueOperation(Context, Id); + case BuiltinValueKind::EndCOWMutation: + if (!Types.empty()) return nullptr; + return getEndCOWMutation(Context, Id); + case BuiltinValueKind::BindMemory: if (!Types.empty()) return nullptr; return getBindMemoryOperation(Context, Id); @@ -2379,6 +2418,10 @@ ValueDecl *swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { if (!Types.empty()) return nullptr; return getValueToBridgeObject(Context, Id); + + case BuiltinValueKind::COWBufferForReading: + return getCOWBufferForReading(Context, Id); + case BuiltinValueKind::UnsafeGuaranteed: return getUnsafeGuaranteed(Context, Id); diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp index fee73960c4fad..dd731542837ba 100644 --- a/lib/AST/Decl.cpp +++ b/lib/AST/Decl.cpp @@ -4123,6 +4123,7 @@ StructDecl::StructDecl(SourceLoc StructLoc, Identifier Name, SourceLoc NameLoc, StructLoc(StructLoc) { Bits.StructDecl.HasUnreferenceableStorage = false; + Bits.StructDecl.IsCxxNotTriviallyCopyable = false; } bool NominalTypeDecl::hasMemberwiseInitializer() const { diff --git a/lib/AST/DeclContext.cpp b/lib/AST/DeclContext.cpp index e4f21d07caa6e..eafb59de31fe6 100644 --- a/lib/AST/DeclContext.cpp +++ b/lib/AST/DeclContext.cpp @@ -773,6 +773,16 @@ IterableDeclContext::getDecl() const { llvm_unreachable("Unhandled IterableDeclContextKind in switch."); } +const GenericContext *IterableDeclContext::getAsGenericContext() const { + switch (getIterableContextKind()) { + case IterableDeclContextKind::NominalTypeDecl: + return cast(this); + case IterableDeclContextKind::ExtensionDecl: + return cast(this); + } + llvm_unreachable("Unhandled IterableDeclContextKind in switch."); +} + ASTContext &IterableDeclContext::getASTContext() const { return getDecl()->getASTContext(); } @@ -860,7 +870,7 @@ bool IterableDeclContext::hasUnparsedMembers() const { if (AddedParsedMembers) return false; - if (!getDecl()->getDeclContext()->getParentSourceFile()) { + if (!getAsGenericContext()->getParentSourceFile()) { // There will never be any parsed members to add, so set the flag to say // we are done so we can short-circuit next time. const_cast(this)->AddedParsedMembers = 1; @@ -880,7 +890,7 @@ void IterableDeclContext::loadAllMembers() const { ASTContext &ctx = getASTContext(); // For contexts within a source file, get the list of parsed members. - if (getDecl()->getDeclContext()->getParentSourceFile()) { + if (getAsGenericContext()->getParentSourceFile()) { // Retrieve the parsed members. Even if we've already added the parsed // members to this context, this call is important for recording the // dependency edge. 
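For context on the Builtins.cpp additions above: the new BeginCOWMutation, EndCOWMutation and COWBufferForReading builtins, together with the begin_cow_mutation/end_cow_mutation SIL instructions, make the copy-on-write uniqueness check explicit, with the first result of begin_cow_mutation being the Int1 uniqueness flag and the second the buffer reference to mutate. A rough source-level sketch of the pattern they model follows; COWArray, Storage and ensureUnique are illustrative names only, not part of this patch.

final class Storage {
  var elements: [Int] = []
}

struct COWArray {
  private var storage = Storage()

  /// The uniqueness check plus conditional copy below is the pattern that
  /// begin_cow_mutation (uniqueness bit + buffer reference) and
  /// end_cow_mutation are intended to represent explicitly in SIL.
  private mutating func ensureUnique() {
    if !isKnownUniquelyReferenced(&storage) {
      let copy = Storage()
      copy.elements = storage.elements
      storage = copy
    }
  }

  mutating func append(_ element: Int) {
    ensureUnique()
    storage.elements.append(element)
  }
}
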
@@ -918,7 +928,7 @@ void IterableDeclContext::loadAllMembers() const { } bool IterableDeclContext::wasDeserialized() const { - const DeclContext *DC = cast(getDecl()); + const DeclContext *DC = getAsGenericContext(); if (auto F = dyn_cast(DC->getModuleScopeContext())) { return F->getKind() == FileUnitKind::SerializedAST; } @@ -950,7 +960,7 @@ IterableDeclContext::castDeclToIterableDeclContext(const Decl *D) { Optional IterableDeclContext::getBodyFingerprint() const { // Only makes sense for contexts in a source file - if (!getDecl()->getDeclContext()->getParentSourceFile()) + if (!getAsGenericContext()->getParentSourceFile()) return None; auto mutableThis = const_cast(this); return evaluateOrDefault(getASTContext().evaluator, diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp index 3dd7586713141..6ab5da2eb5810 100644 --- a/lib/AST/Expr.cpp +++ b/lib/AST/Expr.cpp @@ -1279,6 +1279,10 @@ SourceRange TupleExpr::getSourceRange() const { } else { // Scan backwards for a valid source loc. for (Expr *expr : llvm::reverse(getElements())) { + // Default arguments are located at the start of their parent tuple, so + // skip over them. + if (isa(expr)) + continue; end = expr->getEndLoc(); if (end.isValid()) { break; diff --git a/lib/AST/GenericSignature.cpp b/lib/AST/GenericSignature.cpp index 80a6bb5166fdd..ade97c2355846 100644 --- a/lib/AST/GenericSignature.cpp +++ b/lib/AST/GenericSignature.cpp @@ -48,15 +48,10 @@ GenericSignatureImpl::GenericSignatureImpl( ArrayRef requirements, bool isKnownCanonical) : NumGenericParams(params.size()), NumRequirements(requirements.size()), CanonicalSignatureOrASTContext() { - auto paramsBuffer = getGenericParamsBuffer(); - for (unsigned i = 0; i < NumGenericParams; ++i) { - paramsBuffer[i] = params[i]; - } - - auto reqtsBuffer = getRequirementsBuffer(); - for (unsigned i = 0; i < NumRequirements; ++i) { - reqtsBuffer[i] = requirements[i]; - } + std::uninitialized_copy(params.begin(), params.end(), + getTrailingObjects()); + std::uninitialized_copy(requirements.begin(), requirements.end(), + getTrailingObjects()); #ifndef NDEBUG // Make sure generic parameters are in the right order, and @@ -76,7 +71,7 @@ GenericSignatureImpl::GenericSignatureImpl( if (isKnownCanonical) CanonicalSignatureOrASTContext = - &GenericSignature::getASTContext(getGenericParams(), requirements); + &GenericSignature::getASTContext(params, requirements); } TypeArrayView @@ -156,7 +151,8 @@ ASTContext &GenericSignature::getASTContext( return requirements.front().getFirstType()->getASTContext(); } -GenericSignatureBuilder *GenericSignatureImpl::getGenericSignatureBuilder() { +GenericSignatureBuilder * +GenericSignatureImpl::getGenericSignatureBuilder() const { // The generic signature builder is associated with the canonical signature. if (!isCanonical()) return getCanonicalSignature()->getGenericSignatureBuilder(); @@ -166,7 +162,7 @@ GenericSignatureBuilder *GenericSignatureImpl::getGenericSignatureBuilder() { CanGenericSignature(this)); } -bool GenericSignatureImpl::isEqual(GenericSignature Other) { +bool GenericSignatureImpl::isEqual(GenericSignature Other) const { return getCanonicalSignature() == Other.getCanonicalSignature(); } @@ -339,24 +335,23 @@ CanGenericSignature GenericSignatureImpl::getCanonicalSignature() const { // A stored ASTContext indicates that this is the canonical // signature. if (CanonicalSignatureOrASTContext.is()) - // TODO: CanGenericSignature should be const-correct. 
- return CanGenericSignature(const_cast(this)); + return CanGenericSignature(this); // Otherwise, return the stored canonical signature. return CanGenericSignature( - CanonicalSignatureOrASTContext.get()); + CanonicalSignatureOrASTContext.get()); } -GenericEnvironment *GenericSignatureImpl::getGenericEnvironment() { +GenericEnvironment *GenericSignatureImpl::getGenericEnvironment() const { if (GenericEnv == nullptr) { auto *builder = getGenericSignatureBuilder(); - GenericEnv = GenericEnvironment::getIncomplete(this, builder); + const auto impl = const_cast(this); + impl->GenericEnv = GenericEnvironment::getIncomplete(this, builder); } return GenericEnv; } - ASTContext &GenericSignatureImpl::getASTContext() const { // Canonical signatures store the ASTContext directly. if (auto ctx = CanonicalSignatureOrASTContext.dyn_cast()) @@ -378,7 +373,7 @@ GenericSignatureImpl::lookupConformance(CanType type, return M->lookupConformance(type, proto); } -bool GenericSignatureImpl::requiresClass(Type type) { +bool GenericSignatureImpl::requiresClass(Type type) const { assert(type->isTypeParameter() && "Only type parameters can have superclass requirements"); @@ -410,7 +405,7 @@ bool GenericSignatureImpl::requiresClass(Type type) { } /// Determine the superclass bound on the given dependent type. -Type GenericSignatureImpl::getSuperclassBound(Type type) { +Type GenericSignatureImpl::getSuperclassBound(Type type) const { assert(type->isTypeParameter() && "Only type parameters can have superclass requirements"); @@ -429,10 +424,10 @@ Type GenericSignatureImpl::getSuperclassBound(Type type) { return equivClass->superclass; } -/// Determine the set of protocols to which the given dependent type -/// must conform. -SmallVector -GenericSignatureImpl::getConformsTo(Type type) { +/// Determine the set of protocols to which the given type parameter is +/// required to conform. +GenericSignature::RequiredProtocols +GenericSignatureImpl::getRequiredProtocols(Type type) const { if (!type->isTypeParameter()) return { }; auto &builder = *getGenericSignatureBuilder(); @@ -442,12 +437,12 @@ GenericSignatureImpl::getConformsTo(Type type) { ArchetypeResolutionKind::CompleteWellFormed); if (!equivClass) return { }; - // If this type was mapped to a concrete type, then there are no - // requirements. + // If this type parameter was mapped to a concrete type, then there + // are no requirements. if (equivClass->concreteType) return { }; // Retrieve the protocols to which this type conforms. - SmallVector result; + GenericSignature::RequiredProtocols result; for (const auto &conforms : equivClass->conformsTo) result.push_back(conforms.first); @@ -457,7 +452,8 @@ GenericSignatureImpl::getConformsTo(Type type) { return result; } -bool GenericSignatureImpl::conformsToProtocol(Type type, ProtocolDecl *proto) { +bool GenericSignatureImpl::requiresProtocol(Type type, + ProtocolDecl *proto) const { assert(type->isTypeParameter() && "Expected a type parameter"); auto &builder = *getGenericSignatureBuilder(); @@ -467,7 +463,11 @@ bool GenericSignatureImpl::conformsToProtocol(Type type, ProtocolDecl *proto) { ArchetypeResolutionKind::CompleteWellFormed); if (!equivClass) return false; - // FIXME: Deal with concrete conformances here? + // FIXME: Optionally deal with concrete conformances here + // or have a separate method do that additionally? + // + // If this type parameter was mapped to a concrete type, then there + // are no requirements. 
if (equivClass->concreteType) return false; // Check whether the representative conforms to this protocol. @@ -475,14 +475,14 @@ bool GenericSignatureImpl::conformsToProtocol(Type type, ProtocolDecl *proto) { } /// Determine whether the given dependent type is equal to a concrete type. -bool GenericSignatureImpl::isConcreteType(Type type) { +bool GenericSignatureImpl::isConcreteType(Type type) const { return bool(getConcreteType(type)); } /// Return the concrete type that the given dependent type is constrained to, /// or the null Type if it is not the subject of a concrete same-type /// constraint. -Type GenericSignatureImpl::getConcreteType(Type type) { +Type GenericSignatureImpl::getConcreteType(Type type) const { if (!type->isTypeParameter()) return Type(); auto &builder = *getGenericSignatureBuilder(); @@ -495,7 +495,7 @@ Type GenericSignatureImpl::getConcreteType(Type type) { return equivClass->concreteType; } -LayoutConstraint GenericSignatureImpl::getLayoutConstraint(Type type) { +LayoutConstraint GenericSignatureImpl::getLayoutConstraint(Type type) const { if (!type->isTypeParameter()) return LayoutConstraint(); auto &builder = *getGenericSignatureBuilder(); @@ -508,7 +508,8 @@ LayoutConstraint GenericSignatureImpl::getLayoutConstraint(Type type) { return equivClass->layout; } -bool GenericSignatureImpl::areSameTypeParameterInContext(Type type1, Type type2) { +bool GenericSignatureImpl::areSameTypeParameterInContext(Type type1, + Type type2) const { assert(type1->isTypeParameter()); assert(type2->isTypeParameter()); @@ -531,7 +532,8 @@ bool GenericSignatureImpl::areSameTypeParameterInContext(Type type1, Type type2) return equivClass1 == equivClass2; } -bool GenericSignatureImpl::isRequirementSatisfied(Requirement requirement) { +bool GenericSignatureImpl::isRequirementSatisfied( + Requirement requirement) const { auto GSB = getGenericSignatureBuilder(); auto firstType = requirement.getFirstType(); @@ -543,7 +545,7 @@ bool GenericSignatureImpl::isRequirementSatisfied(Requirement requirement) { auto protocol = protocolType->getDecl(); if (canFirstType->isTypeParameter()) - return conformsToProtocol(canFirstType, protocol); + return requiresProtocol(canFirstType, protocol); else return (bool)GSB->lookupConformance(/*dependentType=*/CanType(), canFirstType, protocol); @@ -619,7 +621,7 @@ SmallVector GenericSignatureImpl::requirementsNotSatisfiedBy( return result; } -bool GenericSignatureImpl::isCanonicalTypeInContext(Type type) { +bool GenericSignatureImpl::isCanonicalTypeInContext(Type type) const { // If the type isn't independently canonical, it's certainly not canonical // in this context. if (!type->isCanonical()) @@ -634,8 +636,8 @@ bool GenericSignatureImpl::isCanonicalTypeInContext(Type type) { return isCanonicalTypeInContext(type, builder); } -bool GenericSignatureImpl::isCanonicalTypeInContext(Type type, - GenericSignatureBuilder &builder) { +bool GenericSignatureImpl::isCanonicalTypeInContext( + Type type, GenericSignatureBuilder &builder) const { // If the type isn't independently canonical, it's certainly not canonical // in this context. 
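The entry points renamed above, getRequiredProtocols and requiresProtocol, answer questions about stated requirements on type parameters rather than about conformances of concrete types, which is why both bail out when the equivalence class has a concrete type. A small illustrative declaration, with the expected answers sketched in comments (not an exhaustive listing):

// For <T where T: Collection & Hashable, T.Index == Int>:
//   getRequiredProtocols(T)            includes Collection and Hashable
//   requiresProtocol(T, Collection)    -> true
//   getRequiredProtocols(T.Index)      -> { }    // fixed to the concrete type Int
//   requiresProtocol(T.Index, Comparable) -> false, even though Int conforms;
//   handling concrete conformances is the subject of the FIXME above.
func example<T>(_ value: T) where T: Collection & Hashable, T.Index == Int {}
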
if (!type->isCanonical()) @@ -662,8 +664,8 @@ bool GenericSignatureImpl::isCanonicalTypeInContext(Type type, }); } -CanType GenericSignatureImpl::getCanonicalTypeInContext(Type type, - GenericSignatureBuilder &builder) { +CanType GenericSignatureImpl::getCanonicalTypeInContext( + Type type, GenericSignatureBuilder &builder) const { type = type->getCanonicalType(); // All the contextual canonicality rules apply to type parameters, so if the @@ -703,7 +705,7 @@ CanType GenericSignatureImpl::getCanonicalTypeInContext(Type type, return result; } -CanType GenericSignatureImpl::getCanonicalTypeInContext(Type type) { +CanType GenericSignatureImpl::getCanonicalTypeInContext(Type type) const { type = type->getCanonicalType(); // All the contextual canonicality rules apply to type parameters, so if the @@ -835,7 +837,7 @@ void GenericSignatureImpl::buildConformanceAccessPath( SmallVectorImpl &path, ArrayRef reqs, const void *opaqueSource, ProtocolDecl *conformingProto, Type rootType, - ProtocolDecl *requirementSignatureProto) { + ProtocolDecl *requirementSignatureProto) const { auto *source = reinterpret_cast(opaqueSource); // Each protocol requirement is a step along the path. if (source->isProtocolRequirement()) { @@ -968,7 +970,8 @@ void GenericSignatureImpl::buildConformanceAccessPath( } ConformanceAccessPath -GenericSignatureImpl::getConformanceAccessPath(Type type, ProtocolDecl *protocol) { +GenericSignatureImpl::getConformanceAccessPath(Type type, + ProtocolDecl *protocol) const { assert(type->isTypeParameter() && "not a type parameter"); // Resolve this type to a potential archetype. @@ -1030,7 +1033,8 @@ SubstitutionMap GenericSignatureImpl::getIdentitySubstitutionMap() const { MakeAbstractConformanceForGenericType()); } -unsigned GenericSignatureImpl::getGenericParamOrdinal(GenericTypeParamType *param) { +unsigned GenericSignatureImpl::getGenericParamOrdinal( + GenericTypeParamType *param) const { return GenericParamKey(param->getDepth(), param->getIndex()) .findIndexIn(getGenericParams()); } diff --git a/lib/AST/GenericSignatureBuilder.cpp b/lib/AST/GenericSignatureBuilder.cpp index dae212db78f5b..68fecd1fdd1ba 100644 --- a/lib/AST/GenericSignatureBuilder.cpp +++ b/lib/AST/GenericSignatureBuilder.cpp @@ -2542,7 +2542,13 @@ static void concretizeNestedTypeFromConcreteParent( GenericSignatureBuilder &builder) { auto parentEquiv = parent->getEquivalenceClassIfPresent(); assert(parentEquiv && "can't have a concrete type without an equiv class"); + + bool isSuperclassConstrained = false; auto concreteParent = parentEquiv->concreteType; + if (!concreteParent) { + isSuperclassConstrained = true; + concreteParent = parentEquiv->superclass; + } assert(concreteParent && "attempting to resolve concrete nested type of non-concrete PA"); @@ -2564,8 +2570,14 @@ static void concretizeNestedTypeFromConcreteParent( "No conformance requirement"); const RequirementSource *parentConcreteSource = nullptr; for (const auto &constraint : parentEquiv->conformsTo.find(proto)->second) { - if (constraint.source->kind == RequirementSource::Concrete) { - parentConcreteSource = constraint.source; + if (!isSuperclassConstrained) { + if (constraint.source->kind == RequirementSource::Concrete) { + parentConcreteSource = constraint.source; + } + } else { + if (constraint.source->kind == RequirementSource::Superclass) { + parentConcreteSource = constraint.source; + } } } @@ -3501,6 +3513,19 @@ GenericSignatureBuilder::getLookupConformanceFn() return LookUpConformanceInBuilder(this); } +ProtocolConformanceRef 
+GenericSignatureBuilder::LookUpConformanceInBuilder::operator()( + CanType dependentType, Type conformingReplacementType, + ProtocolDecl *conformedProtocol) const { + // Lookup conformances for opened existential. + if (conformingReplacementType->isOpenedExistential()) { + return conformedProtocol->getModuleContext()->lookupConformance( + conformingReplacementType, conformedProtocol); + } + return builder->lookupConformance(dependentType, conformingReplacementType, + conformedProtocol); +} + ProtocolConformanceRef GenericSignatureBuilder::lookupConformance(CanType dependentType, Type conformingReplacementType, @@ -4286,6 +4311,15 @@ bool GenericSignatureBuilder::updateSuperclass( for (const auto &conforms : equivClass->conformsTo) { (void)resolveSuperConformance(type, conforms.first); } + + // Eagerly resolve any existing nested types to their concrete forms (others + // will be "concretized" as they are constructed, in getNestedType). + for (auto equivT : equivClass->members) { + for (auto nested : equivT->getNestedTypes()) { + concretizeNestedTypeFromConcreteParent(equivT, nested.second.front(), + *this); + } + } }; // If we haven't yet recorded a superclass constraint for this equivalence @@ -7175,6 +7209,12 @@ void GenericSignatureBuilder::dump(llvm::raw_ostream &out) { pa->dump(out, &Context.SourceMgr, 2); } out << "\n"; + + out << "Equivalence classes:\n"; + for (auto &equiv : Impl->EquivalenceClasses) { + equiv.dump(out, this); + } + out << "\n"; } void GenericSignatureBuilder::addGenericSignature(GenericSignature sig) { @@ -7421,7 +7461,7 @@ static bool isCanonicalRequest(GenericSignature baseSignature, GenericSignature AbstractGenericSignatureRequest::evaluate( Evaluator &evaluator, - GenericSignatureImpl *baseSignature, + const GenericSignatureImpl *baseSignature, SmallVector addedParameters, SmallVector addedRequirements) const { // If nothing is added to the base signature, just return the base @@ -7534,7 +7574,7 @@ AbstractGenericSignatureRequest::evaluate( GenericSignature InferredGenericSignatureRequest::evaluate( Evaluator &evaluator, ModuleDecl *parentModule, - GenericSignatureImpl *parentSig, + const GenericSignatureImpl *parentSig, GenericParamSource paramSource, SmallVector addedRequirements, SmallVector inferenceSources, diff --git a/lib/AST/Module.cpp b/lib/AST/Module.cpp index a766aafebf39b..c8a667ebe38ab 100644 --- a/lib/AST/Module.cpp +++ b/lib/AST/Module.cpp @@ -475,6 +475,16 @@ ModuleDecl::ModuleDecl(Identifier name, ASTContext &ctx, setInterfaceType(ModuleType::get(this)); setAccess(AccessLevel::Public); + + Bits.ModuleDecl.TestingEnabled = 0; + Bits.ModuleDecl.FailedToLoad = 0; + Bits.ModuleDecl.RawResilienceStrategy = 0; + Bits.ModuleDecl.HasResolvedImports = 0; + Bits.ModuleDecl.PrivateImportsEnabled = 0; + Bits.ModuleDecl.ImplicitDynamicEnabled = 0; + Bits.ModuleDecl.IsSystemModule = 0; + Bits.ModuleDecl.IsNonSwiftModule = 0; + Bits.ModuleDecl.IsMainModule = 0; } ArrayRef ModuleDecl::getImplicitImports() const { diff --git a/lib/AST/PlatformKind.cpp b/lib/AST/PlatformKind.cpp index 1119601d458c1..707ba4473b5fe 100644 --- a/lib/AST/PlatformKind.cpp +++ b/lib/AST/PlatformKind.cpp @@ -92,7 +92,7 @@ static bool isPlatformActiveForTarget(PlatformKind Platform, llvm_unreachable("bad PlatformKind"); } -bool swift::isPlatformActive(PlatformKind Platform, LangOptions &LangOpts, +bool swift::isPlatformActive(PlatformKind Platform, const LangOptions &LangOpts, bool ForTargetVariant) { llvm::Triple TT = LangOpts.Target; @@ -105,7 +105,7 @@ bool 
swift::isPlatformActive(PlatformKind Platform, LangOptions &LangOpts, LangOpts.EnableAppExtensionRestrictions); } -PlatformKind swift::targetPlatform(LangOptions &LangOpts) { +PlatformKind swift::targetPlatform(const LangOptions &LangOpts) { if (LangOpts.Target.isMacOSX()) { return (LangOpts.EnableAppExtensionRestrictions ? PlatformKind::OSXApplicationExtension diff --git a/lib/AST/ProtocolConformance.cpp b/lib/AST/ProtocolConformance.cpp index 30db092e0ee38..d612dff6a0574 100644 --- a/lib/AST/ProtocolConformance.cpp +++ b/lib/AST/ProtocolConformance.cpp @@ -120,12 +120,6 @@ ProtocolConformanceRef::subst(Type origType, // Otherwise, compute the substituted type. auto substType = origType.subst(subs, conformances, options); - // Opened existentials trivially conform and do not need to go through - // substitution map lookup. - if (substType->isOpenedExistential() && - !options.contains(SubstFlags::ForceSubstituteOpenedExistentials)) - return *this; - auto *proto = getRequirement(); // If the type is an existential, it must be self-conforming. @@ -1319,11 +1313,12 @@ NominalTypeDecl::getSatisfiedProtocolRequirementsForMember( } SmallVector -DeclContext::getLocalProtocols(ConformanceLookupKind lookupKind) const { +IterableDeclContext::getLocalProtocols(ConformanceLookupKind lookupKind) const { SmallVector result; // Dig out the nominal type. - NominalTypeDecl *nominal = getSelfNominalTypeDecl(); + const auto dc = getAsGenericContext(); + const auto nominal = dc->getSelfNominalTypeDecl(); if (!nominal) { return result; } @@ -1332,7 +1327,7 @@ DeclContext::getLocalProtocols(ConformanceLookupKind lookupKind) const { nominal->prepareConformanceTable(); nominal->ConformanceTable->lookupConformances( nominal, - const_cast(this), + const_cast(dc), lookupKind, &result, nullptr, @@ -1342,11 +1337,13 @@ DeclContext::getLocalProtocols(ConformanceLookupKind lookupKind) const { } SmallVector -DeclContext::getLocalConformances(ConformanceLookupKind lookupKind) const { +IterableDeclContext::getLocalConformances(ConformanceLookupKind lookupKind) + const { SmallVector result; // Dig out the nominal type. - NominalTypeDecl *nominal = getSelfNominalTypeDecl(); + const auto dc = getAsGenericContext(); + const auto nominal = dc->getSelfNominalTypeDecl(); if (!nominal) { return result; } @@ -1365,7 +1362,7 @@ DeclContext::getLocalConformances(ConformanceLookupKind lookupKind) const { nominal->prepareConformanceTable(); nominal->ConformanceTable->lookupConformances( nominal, - const_cast(this), + const_cast(dc), lookupKind, nullptr, &result, @@ -1375,25 +1372,27 @@ DeclContext::getLocalConformances(ConformanceLookupKind lookupKind) const { } SmallVector -DeclContext::takeConformanceDiagnostics() const { +IterableDeclContext::takeConformanceDiagnostics() const { SmallVector result; // Dig out the nominal type. - NominalTypeDecl *nominal = getSelfNominalTypeDecl(); + const auto dc = getAsGenericContext(); + const auto nominal = dc->getSelfNominalTypeDecl(); + if (!nominal) { - return { }; + return result; } // Protocols are not subject to the checks for supersession. if (isa(nominal)) { - return { }; + return result; } // Update to record all potential conformances. 
nominal->prepareConformanceTable(); nominal->ConformanceTable->lookupConformances( nominal, - const_cast(this), + const_cast(dc), ConformanceLookupKind::All, nullptr, nullptr, diff --git a/lib/AST/SubstitutionMap.cpp b/lib/AST/SubstitutionMap.cpp index 026111141f832..61d4695a4d9db 100644 --- a/lib/AST/SubstitutionMap.cpp +++ b/lib/AST/SubstitutionMap.cpp @@ -368,7 +368,7 @@ SubstitutionMap::lookupConformance(CanType type, ProtocolDecl *proto) const { // If the type doesn't conform to this protocol, the result isn't formed // from these requirements. - if (!genericSig->conformsToProtocol(type, proto)) + if (!genericSig->requiresProtocol(type, proto)) return ProtocolConformanceRef::forInvalid(); auto accessPath = diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp index 11fda2fd3a1c4..df297b9793478 100644 --- a/lib/AST/Type.cpp +++ b/lib/AST/Type.cpp @@ -167,7 +167,7 @@ bool TypeBase::isAnyClassReferenceType() { return getCanonicalType().isAnyClassReferenceType(); } -bool CanType::isReferenceTypeImpl(CanType type, GenericSignatureImpl *sig, +bool CanType::isReferenceTypeImpl(CanType type, const GenericSignatureImpl *sig, bool functionsCount) { switch (type->getKind()) { #define SUGARED_TYPE(id, parent) case TypeKind::id: @@ -253,7 +253,7 @@ bool CanType::isReferenceTypeImpl(CanType type, GenericSignatureImpl *sig, /// - existentials with class or class protocol bounds /// But not: /// - function types -bool TypeBase::allowsOwnership(GenericSignatureImpl *sig) { +bool TypeBase::allowsOwnership(const GenericSignatureImpl *sig) { return getCanonicalType().allowsOwnership(sig); } @@ -3044,12 +3044,21 @@ ProtocolConformanceRef ReplaceOpaqueTypesWithUnderlyingTypes:: operator()(CanType maybeOpaqueType, Type replacementType, ProtocolDecl *protocol) const { auto abstractRef = ProtocolConformanceRef(protocol); - + auto archetypeAndRoot = getArchetypeAndRootOpaqueArchetype(maybeOpaqueType); if (!archetypeAndRoot) { - assert(maybeOpaqueType->isTypeParameter() || - maybeOpaqueType->is()); - return abstractRef; + if (maybeOpaqueType->isTypeParameter() || + maybeOpaqueType->is()) + return abstractRef; + + // SIL type lowering may have already substituted away the opaque type, in + // which case we'll end up "substituting" the same type. + if (maybeOpaqueType->isEqual(replacementType)) { + return inContext->getParentModule() + ->lookupConformance(replacementType, protocol); + } + + llvm_unreachable("origType should have been an opaque type or type parameter"); } auto archetype = archetypeAndRoot->first; @@ -3376,7 +3385,8 @@ void AnyFunctionType::ExtInfo::Uncommon::printClangFunctionType( void AnyFunctionType::ExtInfo::assertIsFunctionType(const clang::Type *type) { #ifndef NDEBUG - if (!(type->isFunctionPointerType() || type->isBlockPointerType())) { + if (!(type->isFunctionPointerType() || type->isBlockPointerType() || + type->isFunctionReferenceType())) { SmallString<256> buf; llvm::raw_svector_ostream os(buf); os << "Expected a Clang function type wrapped in a pointer type or " @@ -3511,24 +3521,28 @@ static Type getMemberForBaseType(LookupConformanceFn lookupConformances, // Retrieve the type witness. 
auto witness = - conformance.getConcrete()->getTypeWitness(assocType, options); - if (!witness || witness->hasError()) + conformance.getConcrete()->getTypeWitnessAndDecl(assocType, options); + + auto witnessTy = witness.getWitnessType(); + if (!witnessTy || witnessTy->hasError()) return failed(); // This is a hacky feature allowing code completion to migrate to // using Type::subst() without changing output. if (options & SubstFlags::DesugarMemberTypes) { - if (auto *aliasType = - dyn_cast(witness.getPointer())) { - if (!aliasType->is()) - witness = aliasType->getSinglyDesugaredType(); - } + if (auto *aliasType = dyn_cast(witnessTy.getPointer())) + witnessTy = aliasType->getSinglyDesugaredType(); + + // Another hack. If the type witness is a opaque result type. They can + // only be referred using the name of the associated type. + if (witnessTy->is()) + witnessTy = witness.getWitnessDecl()->getDeclaredInterfaceType(); } - if (witness->is()) + if (witnessTy->is()) return failed(); - return witness; + return witnessTy; } return failed(); @@ -3547,6 +3561,11 @@ operator()(CanType dependentType, Type conformingReplacementType, ProtocolConformanceRef LookUpConformanceInSubstitutionMap:: operator()(CanType dependentType, Type conformingReplacementType, ProtocolDecl *conformedProtocol) const { + // Lookup conformances for opened existential. + if (conformingReplacementType->isOpenedExistential()) { + return conformedProtocol->getModuleContext()->lookupConformance( + conformingReplacementType, conformedProtocol); + } return Subs.lookupConformance(dependentType, conformedProtocol); } @@ -3558,12 +3577,23 @@ operator()(CanType dependentType, Type conformingReplacementType, || conformingReplacementType->is() || conformingReplacementType->is()) && "replacement requires looking up a concrete conformance"); + // Lookup conformances for opened existential. + if (conformingReplacementType->isOpenedExistential()) { + return conformedProtocol->getModuleContext()->lookupConformance( + conformingReplacementType, conformedProtocol); + } return ProtocolConformanceRef(conformedProtocol); } ProtocolConformanceRef LookUpConformanceInSignature:: operator()(CanType dependentType, Type conformingReplacementType, ProtocolDecl *conformedProtocol) const { + // Lookup conformances for opened existential. + if (conformingReplacementType->isOpenedExistential()) { + return conformedProtocol->getModuleContext()->lookupConformance( + conformingReplacementType, conformedProtocol); + } + // FIXME: Should pass dependentType instead, once // GenericSignature::lookupConformance() does the right thing return Sig->lookupConformance(conformingReplacementType->getCanonicalType(), @@ -5171,9 +5201,11 @@ llvm::Expected AnyFunctionType::getAutoDiffDerivativeFunctionLinearMapType( IndexSubset *parameterIndices, AutoDiffLinearMapKind kind, LookupConformanceFn lookupConformance, bool makeSelfParamFirst) { - assert(!parameterIndices->isEmpty() && - "Expected at least one differentiability parameter"); auto &ctx = getASTContext(); + // Error if differentiability parameter indices are empty. + if (parameterIndices->isEmpty()) + return llvm::make_error( + this, DerivativeFunctionTypeError::Kind::NoDifferentiabilityParameters); // Get differentiability parameters. 
SmallVector diffParams; @@ -5202,7 +5234,7 @@ AnyFunctionType::getAutoDiffDerivativeFunctionLinearMapType( if (!resultTan) { return llvm::make_error( this, DerivativeFunctionTypeError::Kind::NonDifferentiableResult, - originalResultType); + std::make_pair(originalResultType, /*index*/ 0)); } auto resultTanType = resultTan->getType(); @@ -5225,15 +5257,17 @@ AnyFunctionType::getAutoDiffDerivativeFunctionLinearMapType( // - Differential: `(T0.Tan, inout T1.Tan, ...) -> Void` SmallVector differentialParams; bool hasInoutDiffParameter = false; - for (auto diffParam : diffParams) { + for (auto i : range(diffParams.size())) { + auto diffParam = diffParams[i]; auto paramType = diffParam.getPlainType(); auto paramTan = paramType->getAutoDiffTangentSpace(lookupConformance); // Error if paraneter has no tangent space. if (!paramTan) { return llvm::make_error( this, - DerivativeFunctionTypeError::Kind::NonDifferentiableParameters, - parameterIndices); + DerivativeFunctionTypeError::Kind:: + NonDifferentiableDifferentiabilityParameter, + std::make_pair(paramType, i)); } differentialParams.push_back(AnyFunctionType::Param( paramTan->getType(), Identifier(), diffParam.getParameterFlags())); @@ -5261,15 +5295,17 @@ AnyFunctionType::getAutoDiffDerivativeFunctionLinearMapType( // - Pullback: `(inout T1.Tan) -> (T0.Tan, ...)` SmallVector pullbackResults; bool hasInoutDiffParameter = false; - for (auto diffParam : diffParams) { + for (auto i : range(diffParams.size())) { + auto diffParam = diffParams[i]; auto paramType = diffParam.getPlainType(); auto paramTan = paramType->getAutoDiffTangentSpace(lookupConformance); // Error if paraneter has no tangent space. if (!paramTan) { return llvm::make_error( this, - DerivativeFunctionTypeError::Kind::NonDifferentiableParameters, - parameterIndices); + DerivativeFunctionTypeError::Kind:: + NonDifferentiableDifferentiabilityParameter, + std::make_pair(paramType, i)); } if (diffParam.isInOut()) { hasInoutDiffParameter = true; diff --git a/lib/AST/TypeCheckRequests.cpp b/lib/AST/TypeCheckRequests.cpp index dc3187fc1c47a..76de6f6c91082 100644 --- a/lib/AST/TypeCheckRequests.cpp +++ b/lib/AST/TypeCheckRequests.cpp @@ -1390,20 +1390,16 @@ void CheckRedeclarationRequest::writeDependencySink( evaluator::DependencySource LookupAllConformancesInContextRequest::readDependencySource( const evaluator::DependencyCollector &collector) const { - auto *dc = std::get<0>(getStorage()); - AccessLevel defaultAccess; - if (auto ext = dyn_cast(dc)) { - const NominalTypeDecl *nominal = ext->getExtendedNominal(); - if (!nominal) { - return {collector.getActiveDependencySourceOrNull(), - evaluator::DependencyScope::Cascading}; - } - defaultAccess = nominal->getFormalAccess(); - } else { - defaultAccess = cast(dc)->getFormalAccess(); + const auto *nominal = std::get<0>(getStorage()) + ->getAsGenericContext() + ->getSelfNominalTypeDecl(); + if (!nominal) { + return {collector.getActiveDependencySourceOrNull(), + evaluator::DependencyScope::Cascading}; } + return {collector.getActiveDependencySourceOrNull(), - evaluator::getScopeForAccessLevel(defaultAccess)}; + evaluator::getScopeForAccessLevel(nominal->getFormalAccess())}; } void LookupAllConformancesInContextRequest::writeDependencySink( @@ -1460,24 +1456,6 @@ void TypeCheckSourceFileRequest::cacheResult(evaluator::SideEffect) const { FrontendStatsTracer tracer(Ctx.Stats, "AST verification"); // Verify the SourceFile. swift::verify(*SF); - - // Verify imported modules. - // - // Skip per-file verification in whole-module mode. 
Verifying imports - // between files could cause the importer to cache declarations without - // adding them to the ASTContext. This happens when the importer registers a - // declaration without a valid TypeChecker instance, as is the case during - // verification. A subsequent file may require that declaration to be fully - // imported (e.g. to synthesized a function body), but since it has already - // been cached, it will never be added to the ASTContext. The solution is to - // skip verification and avoid caching it. -#ifndef NDEBUG - if (!Ctx.TypeCheckerOpts.DelayWholeModuleChecking && - SF->Kind != SourceFileKind::SIL && - !Ctx.LangOpts.DebuggerSupport) { - Ctx.verifyAllLoadedModules(); - } -#endif } } diff --git a/lib/Basic/FileSystem.cpp b/lib/Basic/FileSystem.cpp index be583f94296ff..00d1e42ce104f 100644 --- a/lib/Basic/FileSystem.cpp +++ b/lib/Basic/FileSystem.cpp @@ -146,8 +146,9 @@ std::error_code swift::atomicallyWritingToFile( if (!OS.hasValue()) { std::error_code error; OS.emplace(outputPath, error, fs::F_None); - if (error) + if (error) { return error; + } } action(OS.getValue()); @@ -169,8 +170,9 @@ swift::areFilesDifferent(const llvm::Twine &source, bool allowDestinationErrors) { namespace fs = llvm::sys::fs; - if (fs::equivalent(source, destination)) + if (fs::equivalent(source, destination)) { return FileDifference::IdenticalFile; + } OpenFileRAII sourceFile; fs::file_status sourceStatus; @@ -187,8 +189,9 @@ swift::areFilesDifferent(const llvm::Twine &source, /// DifferentContents return, depending on `allowDestinationErrors`. auto convertDestinationError = [=](std::error_code error) -> llvm::ErrorOr { - if (allowDestinationErrors) + if (allowDestinationErrors){ return FileDifference::DifferentContents; + } return error; }; @@ -204,12 +207,14 @@ swift::areFilesDifferent(const llvm::Twine &source, } uint64_t size = sourceStatus.getSize(); - if (size != destStatus.getSize()) + if (size != destStatus.getSize()) { // If the files are different sizes, they must be different. return FileDifference::DifferentContents; - if (size == 0) + } + if (size == 0) { // If both files are zero size, they must be the same. return FileDifference::SameContents; + } // The two files match in size, so we have to compare the bytes to determine // if they're the same. 
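Looking back at the AutoDiff changes in lib/AST/Type.cpp: the linear map types are built from the tangent spaces of the differentiability parameters and of the result, and the reworked error cases now carry the offending type and its index (or report an empty differentiability parameter set) instead of asserting. As a concrete spelling of the two shapes for an original function type (Float, Double) -> Float with both parameters differentiable, here is an illustrative sketch; the aliases are not names used by the compiler.

// Float.TangentVector == Float and Double.TangentVector == Double, so the
// linear map types reduce to these plain function types.
typealias Original     = (Float, Double) -> Float
typealias Differential = (Float, Double) -> Float    // (T0.Tan, T1.Tan) -> R.Tan
typealias Pullback     = (Float) -> (Float, Double)  // (R.Tan) -> (T0.Tan, T1.Tan)
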
@@ -217,21 +222,24 @@ swift::areFilesDifferent(const llvm::Twine &source, fs::mapped_file_region sourceRegion(fs::convertFDToNativeFile(sourceFile.fd), fs::mapped_file_region::readonly, size, 0, sourceRegionErr); - if (sourceRegionErr) + if (sourceRegionErr) { return sourceRegionErr; + } std::error_code destRegionErr; fs::mapped_file_region destRegion(fs::convertFDToNativeFile(destFile.fd), fs::mapped_file_region::readonly, size, 0, destRegionErr); - if (destRegionErr) + if (destRegionErr) { return convertDestinationError(destRegionErr); + } - if (0 == memcmp(sourceRegion.const_data(), destRegion.const_data(), size)) - return FileDifference::SameContents; + if (memcmp(sourceRegion.const_data(), destRegion.const_data(), size) != 0) { + return FileDifference::DifferentContents; + } - return FileDifference::DifferentContents; + return FileDifference::SameContents; } std::error_code swift::moveFileIfDifferent(const llvm::Twine &source, diff --git a/lib/ClangImporter/ClangImporter.cpp b/lib/ClangImporter/ClangImporter.cpp index 0c7d39a385399..b288ff8c46bc7 100644 --- a/lib/ClangImporter/ClangImporter.cpp +++ b/lib/ClangImporter/ClangImporter.cpp @@ -817,7 +817,7 @@ bool ClangImporter::canReadPCH(StringRef PCHFilename) { CI.setInvocation(std::move(invocation)); CI.setTarget(&Impl.Instance->getTarget()); CI.setDiagnostics( - &*CompilerInstance::createDiagnostics(new clang::DiagnosticOptions())); + &*clang::CompilerInstance::createDiagnostics(new clang::DiagnosticOptions())); // Note: Reusing the file manager is safe; this is a component that's already // reused when building PCM files for the module cache. @@ -1903,7 +1903,7 @@ ClangImporter::getWrapperForModule(const clang::Module *mod, return clangUnit->getParentModule(); } -PlatformAvailability::PlatformAvailability(LangOptions &langOpts) +PlatformAvailability::PlatformAvailability(const LangOptions &langOpts) : platformKind(targetPlatform(langOpts)) { switch (platformKind) { case PlatformKind::iOS: diff --git a/lib/ClangImporter/ClangModuleDependencyScanner.cpp b/lib/ClangImporter/ClangModuleDependencyScanner.cpp index 6215984d4db2e..d333d0e966ffb 100644 --- a/lib/ClangImporter/ClangModuleDependencyScanner.cpp +++ b/lib/ClangImporter/ClangModuleDependencyScanner.cpp @@ -225,7 +225,7 @@ static void recordModuleDependencies( Optional ClangImporter::getModuleDependencies( StringRef moduleName, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate) { + InterfaceSubContextDelegate &delegate) { // Check whether there is already a cached result. 
if (auto found = cache.findDependencies( moduleName, ModuleDependenciesKind::Clang)) diff --git a/lib/ClangImporter/ImportDecl.cpp b/lib/ClangImporter/ImportDecl.cpp index 47177a0de6c79..af33f3757f3ee 100644 --- a/lib/ClangImporter/ImportDecl.cpp +++ b/lib/ClangImporter/ImportDecl.cpp @@ -48,6 +48,7 @@ #include "clang/AST/DeclCXX.h" #include "clang/Basic/CharInfo.h" #include "swift/Basic/Statistic.h" +#include "clang/Basic/Specifiers.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" @@ -3474,6 +3475,25 @@ namespace { result->setHasUnreferenceableStorage(hasUnreferenceableStorage); + if (auto cxxRecordDecl = dyn_cast(decl)) { + result->setIsCxxNotTriviallyCopyable( + !cxxRecordDecl->isTriviallyCopyable()); + + for (auto ctor : cxxRecordDecl->ctors()) { + if (ctor->isCopyConstructor() && + (ctor->isDeleted() || ctor->getAccess() != clang::AS_public)) { + result->setIsCxxNotTriviallyCopyable(true); + break; + } + } + + if (auto dtor = cxxRecordDecl->getDestructor()) { + if (dtor->isDeleted() || dtor->getAccess() != clang::AS_public) { + result->setIsCxxNotTriviallyCopyable(true); + } + } + } + return result; } diff --git a/lib/ClangImporter/ImportType.cpp b/lib/ClangImporter/ImportType.cpp index 08238b491c2a5..9b7fa76b06db5 100644 --- a/lib/ClangImporter/ImportType.cpp +++ b/lib/ClangImporter/ImportType.cpp @@ -159,6 +159,30 @@ namespace { explicit operator bool() const { return (bool) AbstractType; } }; + static ImportResult importFunctionPointerLikeType(const clang::Type &type, + const Type &pointeeType) { + auto funcTy = pointeeType->castTo(); + return {FunctionType::get( + funcTy->getParams(), funcTy->getResult(), + funcTy->getExtInfo() + .withRepresentation( + AnyFunctionType::Representation::CFunctionPointer) + .withClangFunctionType(&type)), + type.isReferenceType() ? ImportHint::None + : ImportHint::CFunctionPointer}; + } + + static ImportResult importOverAlignedFunctionPointerLikeType( + const clang::Type &type, ClangImporter::Implementation &Impl) { + auto opaquePointer = Impl.SwiftContext.getOpaquePointerDecl(); + if (!opaquePointer) { + return Type(); + } + return {opaquePointer->getDeclaredType(), + type.isReferenceType() ? ImportHint::None + : ImportHint::OtherPointer}; + } + class SwiftTypeConverter : public clang::TypeVisitor { @@ -406,23 +430,11 @@ namespace { // alignment is greater than the maximum Swift alignment, import as // OpaquePointer. 
if (!pointeeType || Impl.isOverAligned(pointeeQualType)) { - auto opaquePointer = Impl.SwiftContext.getOpaquePointerDecl(); - if (!opaquePointer) - return Type(); - return {opaquePointer->getDeclaredType(), - ImportHint::OtherPointer}; + return importOverAlignedFunctionPointerLikeType(*type, Impl); } - + if (pointeeQualType->isFunctionType()) { - auto funcTy = pointeeType->castTo(); - return { - FunctionType::get(funcTy->getParams(), funcTy->getResult(), - funcTy->getExtInfo() - .withRepresentation( - AnyFunctionType::Representation::CFunctionPointer) - .withClangFunctionType(type)), - ImportHint::CFunctionPointer - }; + return importFunctionPointerLikeType(*type, pointeeType); } PointerTypeKind pointerKind; @@ -472,7 +484,29 @@ namespace { } ImportResult VisitReferenceType(const clang::ReferenceType *type) { - return Type(); + auto pointeeQualType = type->getPointeeType(); + auto quals = pointeeQualType.getQualifiers(); + Type pointeeType = + Impl.importTypeIgnoreIUO(pointeeQualType, ImportTypeKind::Value, + AllowNSUIntegerAsInt, Bridgeability::None); + + if (pointeeQualType->isFunctionType()) { + return importFunctionPointerLikeType(*type, pointeeType); + } + + if (Impl.isOverAligned(pointeeQualType)) { + return importOverAlignedFunctionPointerLikeType(*type, Impl); + } + + PointerTypeKind pointerKind; + if (quals.hasConst()) { + pointerKind = PTK_UnsafePointer; + } else { + pointerKind = PTK_UnsafeMutablePointer; + } + + return {pointeeType->wrapInPointer(pointerKind), + ImportHint::None}; } ImportResult VisitMemberPointer(const clang::MemberPointerType *type) { @@ -2416,7 +2450,7 @@ bool ClangImporter::Implementation::matchesHashableBound(Type type) { if (auto *generic = genericTy->getDecl()) { auto genericSig = generic->getDeclContext()->getGenericSignatureOfContext(); - if (genericSig && genericSig->getConformsTo(type).empty()) { + if (genericSig && genericSig->getRequiredProtocols(type).empty()) { type = genericSig->getSuperclassBound(type); if (!type) return false; diff --git a/lib/ClangImporter/ImporterImpl.h b/lib/ClangImporter/ImporterImpl.h index b974766d1a0e5..72de3a47bcde7 100644 --- a/lib/ClangImporter/ImporterImpl.h +++ b/lib/ClangImporter/ImporterImpl.h @@ -135,9 +135,10 @@ enum class ImportTypeKind { /// Import the type of a function parameter. /// - /// This provides special treatment for C++ references (which become - /// [inout] parameters) and C pointers (which become magic [inout]-able types), - /// among other things, and enables the conversion of bridged types. + /// Special handling: + /// * C and C++ pointers become `UnsafePointer?` or `UnsafeMutablePointer?` + /// * C++ references become `UnsafePointer` or `UnsafeMutablePointer` + /// * Bridging that requires type conversions is allowed. /// Parameters are always considered CF-audited. Parameter, @@ -254,7 +255,7 @@ struct PlatformAvailability { /// API is now unavailable. 
std::string deprecatedAsUnavailableMessage; - PlatformAvailability(LangOptions &opts); + PlatformAvailability(const LangOptions &opts); private: PlatformAvailability(const PlatformAvailability&) = delete; diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp index 2eaf9e08d7a57..adde60c19973a 100644 --- a/lib/Demangling/Demangler.cpp +++ b/lib/Demangling/Demangler.cpp @@ -677,12 +677,14 @@ NodePointer Demangler::demangleTypeMangling() { return TypeMangling; } -NodePointer Demangler::demangleSymbolicReference(unsigned char rawKind, - const void *at) { +NodePointer Demangler::demangleSymbolicReference(unsigned char rawKind) { // The symbolic reference is a 4-byte machine integer encoded in the following // four bytes. + if (Pos + 4 > Text.size()) + return nullptr; + const void *at = Text.data() + Pos; int32_t value; - memcpy(&value, Text.data() + Pos, 4); + memcpy(&value, at, 4); Pos += 4; // Map the encoded kind to a specific kind and directness. @@ -734,7 +736,7 @@ NodePointer Demangler::demangleOperator() { goto recur; case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 0xA: case 0xB: case 0xC: - return demangleSymbolicReference((unsigned char)c, Text.data() + Pos); + return demangleSymbolicReference((unsigned char)c); case 'A': return demangleMultiSubstitutions(); case 'B': return demangleBuiltinType(); case 'C': return demangleAnyGenericType(Node::Kind::Class); diff --git a/lib/Demangling/OldDemangler.cpp b/lib/Demangling/OldDemangler.cpp index ebe2bec4e5823..d9d9dd4a1c54c 100644 --- a/lib/Demangling/OldDemangler.cpp +++ b/lib/Demangling/OldDemangler.cpp @@ -29,9 +29,6 @@ using namespace swift; using namespace Demangle; -using llvm::Optional; -using llvm::None; - namespace { struct FindPtr { FindPtr(Node *v) : Target(v) {} @@ -251,12 +248,12 @@ class OldDemangler { Parent->addChild(Child, Factory); } - Optional demangleDirectness() { + llvm::Optional demangleDirectness() { if (Mangled.nextIf('d')) return Directness::Direct; if (Mangled.nextIf('i')) return Directness::Indirect; - return None; + return llvm::None; } bool demangleNatural(Node::IndexType &num) { @@ -288,13 +285,13 @@ class OldDemangler { return false; } - Optional demangleValueWitnessKind() { + llvm::Optional demangleValueWitnessKind() { char Code[2]; if (!Mangled) - return None; + return llvm::None; Code[0] = Mangled.next(); if (!Mangled) - return None; + return llvm::None; Code[1] = Mangled.next(); StringRef CodeStr(Code, 2); @@ -302,7 +299,7 @@ class OldDemangler { if (CodeStr == #MANGLING) return ValueWitnessKind::NAME; #include "swift/Demangling/ValueWitnessMangling.def" - return None; + return llvm::None; } NodePointer demangleGlobal() { @@ -374,7 +371,7 @@ class OldDemangler { // Value witnesses. 
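One more note on the ImportType.cpp change earlier: VisitReferenceType no longer gives up (it used to return an empty Type); C++ references now import as unsafe pointers, with constness selecting the immutable or mutable flavor, matching the rewritten ImportTypeKind::Parameter documentation. The mapping below is an illustration under that scheme; the C++ declarations and imported signatures are examples, not taken from a real SDK.

// C++ (illustrative):
//   void increment(int &value);
//   int  peek(const int &value);
//
// Expected Swift projections under the new reference handling:
//   func increment(_ value: UnsafeMutablePointer<Int32>)   // non-const reference
//   func peek(_ value: UnsafePointer<Int32>) -> Int32      // const reference
//
// Unlike imported C pointers, the reference projections are non-optional.
var storage: Int32 = 0
withUnsafeMutablePointer(to: &storage) { pointer in
  // `pointer` has the type an imported `int &` parameter would expect.
  _ = pointer
}
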
if (Mangled.nextIf('w')) { - Optional w = demangleValueWitnessKind(); + llvm::Optional w = demangleValueWitnessKind(); if (!w.hasValue()) return nullptr; auto witness = @@ -754,7 +751,7 @@ class OldDemangler { return demangleIdentifier(); } - NodePointer demangleIdentifier(Optional kind = None) { + NodePointer demangleIdentifier(llvm::Optional kind = llvm::None) { if (!Mangled) return nullptr; diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index cd99815238afb..3ea768057ac23 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -629,6 +629,11 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, } } + if (FrontendOpts.RequestedAction == FrontendOptions::ActionType::EmitSyntax) { + Opts.BuildSyntaxTree = true; + Opts.VerifySyntaxTree = true; + } + return HadError || UnsupportedOS || UnsupportedArch; } @@ -677,6 +682,17 @@ static bool ParseTypeCheckerArgs(TypeCheckerOptions &Opts, ArgList &Args, // body skipping. Opts.SkipNonInlinableFunctionBodies |= Args.hasArg(OPT_tbd_is_installapi); + if (Opts.SkipNonInlinableFunctionBodies && + FrontendOpts.ModuleName == SWIFT_ONONE_SUPPORT) { + // Disable this optimization if we're compiling SwiftOnoneSupport, because + // we _definitely_ need to look inside every declaration to figure out + // what gets prespecialized. + Opts.SkipNonInlinableFunctionBodies = false; + Diags.diagnose(SourceLoc(), + diag::module_incompatible_with_skip_function_bodies, + SWIFT_ONONE_SUPPORT); + } + Opts.DisableConstraintSolverPerformanceHacks |= Args.hasArg(OPT_disable_constraint_solver_performance_hacks); @@ -903,12 +919,14 @@ void parseExclusivityEnforcementOptions(const llvm::opt::Arg *A, static bool ParseSILArgs(SILOptions &Opts, ArgList &Args, IRGenOptions &IRGenOpts, - FrontendOptions &FEOpts, + const FrontendOptions &FEOpts, + const TypeCheckerOptions &TCOpts, DiagnosticEngine &Diags, const llvm::Triple &Triple, ClangImporterOptions &ClangOpts) { using namespace options; + if (const Arg *A = Args.getLastArg(OPT_sil_inline_threshold)) { if (StringRef(A->getValue()).getAsInteger(10, Opts.InlineThreshold)) { Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, @@ -954,8 +972,9 @@ static bool ParseSILArgs(SILOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_sil_merge_partial_modules)) Opts.MergePartialModules = true; - if (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies)) - Opts.SkipNonInlinableFunctionBodies = true; + // Propagate the typechecker's understanding of + // -experimental-skip-non-inlinable-function-bodies to SIL. + Opts.SkipNonInlinableFunctionBodies = TCOpts.SkipNonInlinableFunctionBodies; // Parse the optimization level. // Default to Onone settings if no option is passed. @@ -1627,7 +1646,8 @@ bool CompilerInvocation::parseArgs( return true; } - if (ParseSILArgs(SILOpts, ParsedArgs, IRGenOpts, FrontendOpts, Diags, + if (ParseSILArgs(SILOpts, ParsedArgs, IRGenOpts, FrontendOpts, + TypeCheckerOpts, Diags, LangOpts.Target, ClangImporterOpts)) { return true; } diff --git a/lib/Frontend/Frontend.cpp b/lib/Frontend/Frontend.cpp index 36d5d99d0f19a..67393f42ecbd1 100644 --- a/lib/Frontend/Frontend.cpp +++ b/lib/Frontend/Frontend.cpp @@ -311,15 +311,6 @@ bool CompilerInstance::setup(const CompilerInvocation &Invok) { Invocation.getLangOptions().AttachCommentsToDecls = true; } - // Set up the type checker options. 
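The option reshuffle above makes ParseTypeCheckerArgs the single owner of the skip-function-bodies decision (including the SwiftOnoneSupport carve-out), while ParseSILArgs merely copies the result. A toy version of that "decide once, propagate afterwards" shape, with invented option structs and a plain stderr message in place of the real diagnostic:

    #include <iostream>
    #include <string>

    // Invented stand-ins for TypeCheckerOptions / SILOptions.
    struct TCOptions  { bool SkipNonInlinableFunctionBodies = false; };
    struct SILOptions { bool SkipNonInlinableFunctionBodies = false; };

    void parseTypeCheckerArgs(TCOptions &tc, bool flagGiven,
                              const std::string &moduleName) {
      tc.SkipNonInlinableFunctionBodies = flagGiven;
      if (tc.SkipNonInlinableFunctionBodies && moduleName == "SwiftOnoneSupport") {
        // Prespecialization discovery needs every body, so undo the request.
        tc.SkipNonInlinableFunctionBodies = false;
        std::cerr << "remark: skipping function bodies is incompatible with "
                     "SwiftOnoneSupport\n";
      }
    }

    void parseSILArgs(SILOptions &sil, const TCOptions &tc) {
      // Propagate the typechecker's decision instead of re-reading the flag.
      sil.SkipNonInlinableFunctionBodies = tc.SkipNonInlinableFunctionBodies;
    }

    int main() {
      TCOptions tc; SILOptions sil;
      parseTypeCheckerArgs(tc, /*flagGiven=*/true, "SwiftOnoneSupport");
      parseSILArgs(sil, tc);
      std::cout << sil.SkipNonInlinableFunctionBodies << "\n";   // 0
    }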
- auto &typeCkOpts = Invocation.getTypeCheckerOptions(); - if (isWholeModuleCompilation()) { - typeCkOpts.DelayWholeModuleChecking = true; - } - if (FrontendOptions::isActionImmediate(frontendOpts.RequestedAction)) { - typeCkOpts.InImmediateMode = true; - } - assert(Lexer::isIdentifier(Invocation.getModuleName())); if (isInSILMode()) @@ -717,7 +708,8 @@ ImplicitImportInfo CompilerInstance::getImplicitImportInfo() const { ModuleDecl *CompilerInstance::getMainModule() const { if (!MainModule) { Identifier ID = Context->getIdentifier(Invocation.getModuleName()); - MainModule = ModuleDecl::create(ID, *Context, getImplicitImportInfo()); + MainModule = ModuleDecl::createMainModule(*Context, ID, + getImplicitImportInfo()); if (Invocation.getFrontendOptions().EnableTesting) MainModule->setTestingEnabled(); if (Invocation.getFrontendOptions().EnablePrivateImports) @@ -731,6 +723,24 @@ ModuleDecl *CompilerInstance::getMainModule() const { return MainModule; } +void CompilerInstance::performParseOnly(bool EvaluateConditionals, + bool CanDelayBodies) { + const InputFileKind Kind = Invocation.getInputKind(); + assert((Kind == InputFileKind::Swift || Kind == InputFileKind::SwiftLibrary || + Kind == InputFileKind::SwiftModuleInterface) && + "only supports parsing .swift files"); + (void)Kind; + + SourceFile::ParsingOptions parsingOpts; + if (!EvaluateConditionals) + parsingOpts |= SourceFile::ParsingFlags::DisablePoundIfEvaluation; + if (!CanDelayBodies) + parsingOpts |= SourceFile::ParsingFlags::DisableDelayedBodies; + performSemaUpTo(SourceFile::Unprocessed, parsingOpts); + assert(Context->LoadedModules.size() == 1 && + "Loaded a module during parse-only"); +} + void CompilerInstance::performParseAndResolveImportsOnly() { performSemaUpTo(SourceFile::ImportsResolved); } @@ -739,33 +749,55 @@ void CompilerInstance::performSema() { performSemaUpTo(SourceFile::TypeChecked); } -void CompilerInstance::performSemaUpTo(SourceFile::ASTStage_t LimitStage) { - assert(LimitStage > SourceFile::Unprocessed); - +void CompilerInstance::performSemaUpTo(SourceFile::ASTStage_t LimitStage, + SourceFile::ParsingOptions POpts) { FrontendStatsTracer tracer(getStatsReporter(), "perform-sema"); ModuleDecl *mainModule = getMainModule(); Context->LoadedModules[mainModule->getName()] = mainModule; - if (Invocation.getImplicitStdlibKind() == ImplicitStdlibKind::Stdlib) { - if (!loadStdlib()) - return; + // If we aren't in a parse-only context, load the standard library. + if (LimitStage > SourceFile::Unprocessed && + Invocation.getImplicitStdlibKind() == ImplicitStdlibKind::Stdlib + && !loadStdlib()) { + return; } - // Force loading implicit imports. This is currently needed to allow - // deserialization to resolve cross references into bridging headers. - // FIXME: Once deserialization loads all the modules it needs for cross - // references, this can be removed. - (void)MainModule->getImplicitImports(); - // Make sure the main file is the first file in the module, so do this now. if (MainBufferID != NO_SUCH_BUFFER) { - (void)createSourceFileForMainModule(Invocation.getSourceFileKind(), - MainBufferID); + auto *mainFile = createSourceFileForMainModule( + Invocation.getSourceFileKind(), MainBufferID, POpts); + mainFile->SyntaxParsingCache = Invocation.getMainFileSyntaxParsingCache(); } - bool hadLoadError = parsePartialModulesAndInputFiles(); - if (hadLoadError) + // If we aren't in a parse-only context, load the remaining serialized inputs + // and resolve implicit imports. 
+ if (LimitStage > SourceFile::Unprocessed && + loadPartialModulesAndImplicitImports()) + return; + + // Then parse all the input files. + // FIXME: This is the only demand point for InputSourceCodeBufferIDs. We + // should compute this list of source files lazily. + for (auto BufferID : InputSourceCodeBufferIDs) { + SourceFile *SF; + if (BufferID == MainBufferID) { + // If this is the main file, we've already created it. + SF = &getMainModule()->getMainSourceFile(Invocation.getSourceFileKind()); + } else { + // Otherwise create a library file. + SF = createSourceFileForMainModule(SourceFileKind::Library, + BufferID, POpts); + } + // Trigger parsing of the file. + if (LimitStage == SourceFile::Unprocessed) { + (void)SF->getTopLevelDecls(); + } else { + performImportResolution(*SF); + } + } + + if (LimitStage == SourceFile::Unprocessed) return; assert(llvm::all_of(MainModule->getFiles(), [](const FileUnit *File) -> bool { @@ -808,9 +840,15 @@ bool CompilerInstance::loadStdlib() { return true; } -bool CompilerInstance::parsePartialModulesAndInputFiles() { +bool CompilerInstance::loadPartialModulesAndImplicitImports() { FrontendStatsTracer tracer(getStatsReporter(), - "parse-partial-modules-and-input-files"); + "load-partial-modules-and-implicit-imports"); + // Force loading implicit imports. This is currently needed to allow + // deserialization to resolve cross references into bridging headers. + // FIXME: Once deserialization loads all the modules it needs for cross + // references, this can be removed. + (void)MainModule->getImplicitImports(); + bool hadLoadError = false; // Parse all the partial modules first. for (auto &PM : PartialModules) { @@ -821,36 +859,19 @@ bool CompilerInstance::parsePartialModulesAndInputFiles() { /*treatAsPartialModule*/true)) hadLoadError = true; } - - // Then parse all the input files. - for (auto BufferID : InputSourceCodeBufferIDs) { - SourceFile *SF; - if (BufferID == MainBufferID) { - // If this is the main file, we've already created it. - SF = &getMainModule()->getMainSourceFile(Invocation.getSourceFileKind()); - } else { - // Otherwise create a library file. - SF = createSourceFileForMainModule(SourceFileKind::Library, BufferID); - } - // Import resolution will lazily trigger parsing of the file. 
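The unified flow above can be hard to follow in diff form. The toy model below (an invented Stage enum and plain prints, no real frontend types) captures only the intended ordering: parse-only stops after parsing, import resolution stops before type checking, and loading only happens once the limit is past Unprocessed.

    #include <iostream>

    enum Stage { Unprocessed, ImportsResolved, TypeChecked };

    // Toy stand-in for performSemaUpTo: one routine, early exits per stage.
    void performUpTo(Stage limit) {
      if (limit > Unprocessed)
        std::cout << "load stdlib, partial modules, implicit imports\n";
      std::cout << "parse input files\n";
      if (limit == Unprocessed)
        return;                       // performParseOnly() ends here
      std::cout << "resolve imports\n";
      if (limit == ImportsResolved)
        return;                       // performParseAndResolveImportsOnly() ends here
      std::cout << "type-check\n";    // performSema() reaches this point
    }

    int main() {
      performUpTo(Unprocessed);
      performUpTo(TypeChecked);
    }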
- performImportResolution(*SF); - } return hadLoadError; } -static void -forEachSourceFileIn(ModuleDecl *module, - llvm::function_ref fn) { - for (auto fileName : module->getFiles()) { - if (auto SF = dyn_cast(fileName)) - fn(*SF); - } -} - void CompilerInstance::forEachFileToTypeCheck( llvm::function_ref fn) { if (isWholeModuleCompilation()) { - forEachSourceFileIn(MainModule, [&](SourceFile &SF) { fn(SF); }); + for (auto fileName : MainModule->getFiles()) { + auto *SF = dyn_cast(fileName); + if (!SF) { + continue; + } + fn(*SF); + } } else { for (auto *SF : PrimarySourceFiles) { fn(*SF); @@ -859,13 +880,9 @@ void CompilerInstance::forEachFileToTypeCheck( } void CompilerInstance::finishTypeChecking() { - if (getASTContext().TypeCheckerOpts.DelayWholeModuleChecking) { - forEachSourceFileIn(MainModule, [&](SourceFile &SF) { - performWholeModuleTypeChecking(SF); - }); - } - - checkInconsistentImplementationOnlyImports(MainModule); + forEachFileToTypeCheck([](SourceFile &SF) { + performWholeModuleTypeChecking(SF); + }); } SourceFile *CompilerInstance::createSourceFileForMainModule( @@ -903,52 +920,6 @@ SourceFile *CompilerInstance::createSourceFileForMainModule( return inputFile; } -void CompilerInstance::performParseOnly(bool EvaluateConditionals, - bool CanDelayBodies) { - const InputFileKind Kind = Invocation.getInputKind(); - ModuleDecl *const MainModule = getMainModule(); - Context->LoadedModules[MainModule->getName()] = MainModule; - - assert((Kind == InputFileKind::Swift || - Kind == InputFileKind::SwiftLibrary || - Kind == InputFileKind::SwiftModuleInterface) && - "only supports parsing .swift files"); - (void)Kind; - - SourceFile::ParsingOptions parsingOpts; - if (!EvaluateConditionals) - parsingOpts |= SourceFile::ParsingFlags::DisablePoundIfEvaluation; - if (!CanDelayBodies) - parsingOpts |= SourceFile::ParsingFlags::DisableDelayedBodies; - - // Make sure the main file is the first file in the module. - if (MainBufferID != NO_SUCH_BUFFER) { - assert(Kind == InputFileKind::Swift || - Kind == InputFileKind::SwiftModuleInterface); - auto *mainFile = createSourceFileForMainModule( - Invocation.getSourceFileKind(), MainBufferID, parsingOpts); - mainFile->SyntaxParsingCache = Invocation.getMainFileSyntaxParsingCache(); - } - - // Parse all of the input files. - for (auto bufferID : InputSourceCodeBufferIDs) { - SourceFile *SF; - if (bufferID == MainBufferID) { - // If this is the main file, we've already created it. - SF = &MainModule->getMainSourceFile(Invocation.getSourceFileKind()); - } else { - // Otherwise create a library file. - SF = createSourceFileForMainModule(SourceFileKind::Library, bufferID, - parsingOpts); - } - // Force the parsing of the top level decls. 
- (void)SF->getTopLevelDecls(); - } - - assert(Context->LoadedModules.size() == 1 && - "Loaded a module during parse-only"); -} - void CompilerInstance::freeASTContext() { TheSILTypes.reset(); Context.reset(); diff --git a/lib/Frontend/ModuleInterfaceBuilder.cpp b/lib/Frontend/ModuleInterfaceBuilder.cpp index e2438bd171ad5..93628ccbf1755 100644 --- a/lib/Frontend/ModuleInterfaceBuilder.cpp +++ b/lib/Frontend/ModuleInterfaceBuilder.cpp @@ -69,137 +69,6 @@ static Optional getRelativeDepPath(StringRef DepPath, return None; } -void ModuleInterfaceBuilder::configureSubInvocationInputsAndOutputs( - StringRef OutPath) { - auto &SubFEOpts = subInvocation.getFrontendOptions(); - SubFEOpts.RequestedAction = FrontendOptions::ActionType::EmitModuleOnly; - SubFEOpts.InputsAndOutputs.addPrimaryInputFile(interfacePath); - SupplementaryOutputPaths SOPs; - SOPs.ModuleOutputPath = OutPath.str(); - - // Pick a primary output path that will cause problems to use. - std::string MainOut = "/"; - SubFEOpts.InputsAndOutputs - .setMainAndSupplementaryOutputs({MainOut}, {SOPs}); -} - -void swift::inheritOptionsForBuildingInterface( - CompilerInvocation &Invok, - const SearchPathOptions &SearchPathOpts, - const LangOptions &LangOpts) { - // Start with a SubInvocation that copies various state from our - // invoking ASTContext. - Invok.setImportSearchPaths(SearchPathOpts.ImportSearchPaths); - Invok.setFrameworkSearchPaths(SearchPathOpts.FrameworkSearchPaths); - Invok.setSDKPath(SearchPathOpts.SDKPath); - Invok.setInputKind(InputFileKind::SwiftModuleInterface); - Invok.setRuntimeResourcePath(SearchPathOpts.RuntimeResourcePath); - Invok.setTargetTriple(LangOpts.Target); - - // Inhibit warnings from the SubInvocation since we are assuming the user - // is not in a position to fix them. - Invok.getDiagnosticOptions().SuppressWarnings = true; - - // Inherit this setting down so that it can affect error diagnostics (mostly - // by making them non-fatal). - Invok.getLangOptions().DebuggerSupport = LangOpts.DebuggerSupport; - - // Disable this; deinitializers always get printed with `@objc` even in - // modules that don't import Foundation. - Invok.getLangOptions().EnableObjCAttrRequiresFoundation = false; -} - -void ModuleInterfaceBuilder::configureSubInvocation( - const SearchPathOptions &SearchPathOpts, - const LangOptions &LangOpts, - ClangModuleLoader *ClangLoader) { - inheritOptionsForBuildingInterface(subInvocation, SearchPathOpts, LangOpts); - subInvocation.setModuleName(moduleName); - subInvocation.setClangModuleCachePath(moduleCachePath); - subInvocation.getFrontendOptions().PrebuiltModuleCachePath = - prebuiltCachePath.str(); - subInvocation.getFrontendOptions().TrackSystemDeps = trackSystemDependencies; - - // Respect the detailed-record preprocessor setting of the parent context. - // This, and the "raw" clang module format it implicitly enables, are - // required by sourcekitd. - if (ClangLoader) { - auto &Opts = ClangLoader->getClangInstance().getPreprocessorOpts(); - if (Opts.DetailedRecord) { - subInvocation.getClangImporterOptions().DetailedPreprocessingRecord = true; - } - } - - // Tell the subinvocation to serialize dependency hashes if asked to do so. - auto &frontendOpts = subInvocation.getFrontendOptions(); - frontendOpts.SerializeModuleInterfaceDependencyHashes = - serializeDependencyHashes; - - // Tell the subinvocation to remark on rebuilds from an interface if asked - // to do so. 
- frontendOpts.RemarkOnRebuildFromModuleInterface = - remarkOnRebuildFromInterface; -} - -bool swift::extractSwiftInterfaceVersionAndArgs( - SourceManager &SM, - DiagnosticEngine &Diags, - StringRef InterfacePath, - version::Version &Vers, - StringRef &CompilerVersion, - llvm::StringSaver &SubArgSaver, - SmallVectorImpl &SubArgs, - SourceLoc diagnosticLoc) { - llvm::vfs::FileSystem &fs = *SM.getFileSystem(); - auto FileOrError = swift::vfs::getFileOrSTDIN(fs, InterfacePath); - if (!FileOrError) { - // Don't use this->diagnose() because it'll just try to re-open - // interfacePath. - Diags.diagnose(diagnosticLoc, diag::error_open_input_file, - InterfacePath, FileOrError.getError().message()); - return true; - } - auto SB = FileOrError.get()->getBuffer(); - auto VersRe = getSwiftInterfaceFormatVersionRegex(); - auto CompRe = getSwiftInterfaceCompilerVersionRegex(); - auto FlagRe = getSwiftInterfaceModuleFlagsRegex(); - SmallVector VersMatches, FlagMatches, CompMatches; - - if (!VersRe.match(SB, &VersMatches)) { - ModuleInterfaceBuilder::diagnose(Diags, SM, InterfacePath, diagnosticLoc, - diag::error_extracting_version_from_module_interface); - return true; - } - if (!FlagRe.match(SB, &FlagMatches)) { - ModuleInterfaceBuilder::diagnose(Diags, SM, InterfacePath, diagnosticLoc, - diag::error_extracting_version_from_module_interface); - return true; - } - assert(VersMatches.size() == 2); - assert(FlagMatches.size() == 2); - // FIXME We should diagnose this at a location that makes sense: - Vers = swift::version::Version(VersMatches[1], SourceLoc(), &Diags); - llvm::cl::TokenizeGNUCommandLine(FlagMatches[1], SubArgSaver, SubArgs); - - if (CompRe.match(SB, &CompMatches)) { - assert(CompMatches.size() == 2); - CompilerVersion = SubArgSaver.save(CompMatches[1]); - } - else { - // Don't diagnose; handwritten module interfaces don't include this field. - CompilerVersion = "(unspecified, file possibly handwritten)"; - } - - return false; -} - -bool ModuleInterfaceBuilder::extractSwiftInterfaceVersionAndArgs( - swift::version::Version &Vers, StringRef &CompilerVersion, - llvm::StringSaver &SubArgSaver, SmallVectorImpl &SubArgs) { - return swift::extractSwiftInterfaceVersionAndArgs(sourceMgr, diags, - interfacePath, Vers, CompilerVersion, SubArgSaver, SubArgs, diagnosticLoc); -} - bool ModuleInterfaceBuilder::collectDepsForSerialization( CompilerInstance &SubInstance, SmallVectorImpl &Deps, bool IsHashBased) { @@ -294,15 +163,13 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( llvm::RestorePrettyStackState(savedInnerPrettyStackState); }; - llvm::vfs::FileSystem &fs = *sourceMgr.getFileSystem(); - - // Note that we don't assume cachePath is the same as the Clang - // module cache path at this point. 
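Both the helper removed here and its replacement later in this patch read the textual prelude of a .swiftinterface file to recover the format version, the emitting compiler, and the flags for the sub-invocation. A condensed, standalone illustration of that input and of pulling out the flags line; std::regex stands in for the llvm::Regex helpers and the version strings are invented:

    #include <iostream>
    #include <regex>
    #include <string>

    // Condensed illustration only. The real code uses llvm::Regex patterns
    // (getSwiftInterfaceModuleFlagsRegex and friends) plus
    // llvm::cl::TokenizeGNUCommandLine to split the captured flags.
    int main() {
      std::string prelude =
          "// swift-interface-format-version: 1.0\n"
          "// swift-compiler-version: (some compiler version string)\n"
          "// swift-module-flags: -target x86_64-apple-macosx10.15 "
          "-swift-version 5 -module-name Foo\n";

      std::smatch m;
      if (std::regex_search(prelude, m, std::regex("// swift-module-flags:(.*)")))
        std::cout << "sub-invocation flags:" << m[1] << "\n";
    }

The captured flags are what get tokenized and fed to the sub-invocation's argument parser, which is why a module-name mismatch between the flags and the expected name is diagnosed.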
- if (!moduleCachePath.empty()) - (void)llvm::sys::fs::create_directories(moduleCachePath); - - configureSubInvocationInputsAndOutputs(OutPath); - + SubError = subASTDelegate.runInSubCompilerInstance(moduleName, + interfacePath, + OutPath, + diagnosticLoc, + [&](SubCompilerInstanceInfo &info) { + auto &SubInstance = *info.Instance; + auto subInvocation = SubInstance.getInvocation(); FrontendOptions &FEOpts = subInvocation.getFrontendOptions(); const auto &InputInfo = FEOpts.InputsAndOutputs.firstInput(); StringRef InPath = InputInfo.file(); @@ -310,54 +177,11 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( InputInfo.getPrimarySpecificPaths().SupplementaryOutputs; StringRef OutPath = OutputInfo.ModuleOutputPath; - llvm::BumpPtrAllocator SubArgsAlloc; - llvm::StringSaver SubArgSaver(SubArgsAlloc); - SmallVector SubArgs; - swift::version::Version Vers; - StringRef emittedByCompiler; - if (extractSwiftInterfaceVersionAndArgs(Vers, emittedByCompiler, - SubArgSaver, SubArgs)) { - SubError = true; - return; - } - - // For now: we support anything with the same "major version" and assume - // minor versions might be interesting for debugging, or special-casing a - // compatible field variant. - if (Vers.asMajorVersion() != InterfaceFormatVersion.asMajorVersion()) { - diagnose(diag::unsupported_version_of_module_interface, interfacePath, - Vers); - SubError = true; - return; - } - - SmallString<32> ExpectedModuleName = subInvocation.getModuleName(); - if (subInvocation.parseArgs(SubArgs, diags)) { - SubError = true; - return; - } - - if (subInvocation.getModuleName() != ExpectedModuleName) { - auto DiagKind = diag::serialization_name_mismatch; - if (subInvocation.getLangOptions().DebuggerSupport) - DiagKind = diag::serialization_name_mismatch_repl; - diagnose(DiagKind, subInvocation.getModuleName(), ExpectedModuleName); - SubError = true; - return; - } - // Build the .swiftmodule; this is a _very_ abridged version of the logic // in performCompile in libFrontendTool, specialized, to just the one // module-serialization task we're trying to do here. 
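With this change the builder no longer owns a CompilerInstance directly; it hands a callback to the delegate's runInSubCompilerInstance and reports failure by returning true. The standalone sketch below keeps only that control flow, with std::string and std::function replacing StringRef and llvm::function_ref and every other detail invented:

    #include <functional>
    #include <iostream>
    #include <string>

    struct SubInstanceInfo {
      std::string compilerVersion;   // stand-in for the extracted metadata
    };

    // The delegate owns sub-invocation setup; the caller only supplies work to
    // run against the configured instance, and 'true' means failure throughout.
    bool runInSubInstance(const std::string &interfacePath,
                          const std::function<bool(SubInstanceInfo &)> &action) {
      SubInstanceInfo info;
      info.compilerVersion = "(extracted from " + interfacePath + ")";
      // ... diagnostics forwarding, dependency tracker, setup() would go here ...
      return action(info);
    }

    int main() {
      bool failed = runInSubInstance("Foo.swiftinterface",
                                     [](SubInstanceInfo &info) {
        std::cout << "building with " << info.compilerVersion << "\n";
        return false;                                           // success
      });
      std::cout << (failed ? "failed" : "ok") << "\n";
    }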
LLVM_DEBUG(llvm::dbgs() << "Setting up instance to compile " << InPath << " to " << OutPath << "\n"); - CompilerInstance SubInstance; - SubInstance.getSourceMgr().setFileSystem(&fs); - - ForwardingDiagnosticConsumer FDC(diags); - SubInstance.addDiagnosticConsumer(&FDC); - - SubInstance.createDependencyTracker(FEOpts.TrackSystemDeps); SWIFT_DEFER { // Make sure to emit a generic top-level error if a module fails to @@ -368,23 +192,18 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( auto builtByCompiler = getSwiftInterfaceCompilerVersionForCurrentCompiler( SubInstance.getASTContext()); + StringRef emittedByCompiler = info.CompilerVersion; diagnose(diag::module_interface_build_failed, moduleName, emittedByCompiler == builtByCompiler, emittedByCompiler, builtByCompiler); } }; - if (SubInstance.setup(subInvocation)) { - SubError = true; - return; - } - LLVM_DEBUG(llvm::dbgs() << "Performing sema\n"); SubInstance.performSema(); if (SubInstance.getASTContext().hadError()) { LLVM_DEBUG(llvm::dbgs() << "encountered errors\n"); - SubError = true; - return; + return true; } SILOptions &SILOpts = subInvocation.getSILOptions(); @@ -393,8 +212,7 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( auto SILMod = performSILGeneration(Mod, TC, SILOpts); if (!SILMod) { LLVM_DEBUG(llvm::dbgs() << "SILGen did not produce a module\n"); - SubError = true; - return; + return true; } // Setup the callbacks for serialization, which can occur during the @@ -414,8 +232,7 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( SmallVector Deps; bool serializeHashes = FEOpts.SerializeModuleInterfaceDependencyHashes; if (collectDepsForSerialization(SubInstance, Deps, serializeHashes)) { - SubError = true; - return; + return true; } if (ShouldSerializeDeps) SerializationOpts.Dependencies = Deps; @@ -431,11 +248,10 @@ bool ModuleInterfaceBuilder::buildSwiftModuleInternal( LLVM_DEBUG(llvm::dbgs() << "Running SIL processing passes\n"); if (SubInstance.performSILProcessing(SILMod.get())) { LLVM_DEBUG(llvm::dbgs() << "encountered errors\n"); - SubError = true; - return; + return true; } - - SubError = SubInstance.getDiags().hadAnyError(); + return SubInstance.getDiags().hadAnyError(); + }); }); return !RunSuccess || SubError; } diff --git a/lib/Frontend/ModuleInterfaceBuilder.h b/lib/Frontend/ModuleInterfaceBuilder.h index 9b250e93658f3..e07fa18f08b1b 100644 --- a/lib/Frontend/ModuleInterfaceBuilder.h +++ b/lib/Frontend/ModuleInterfaceBuilder.h @@ -16,6 +16,7 @@ #include "swift/Basic/LLVM.h" #include "swift/Basic/SourceLoc.h" #include "swift/Frontend/Frontend.h" +#include "swift/AST/ModuleLoader.h" #include "swift/Serialization/SerializationOptions.h" #include "llvm/Support/StringSaver.h" @@ -35,17 +36,14 @@ class DependencyTracker; class ModuleInterfaceBuilder { SourceManager &sourceMgr; DiagnosticEngine &diags; + InterfaceSubContextDelegate &subASTDelegate; const StringRef interfacePath; const StringRef moduleName; const StringRef moduleCachePath; const StringRef prebuiltCachePath; - const bool serializeDependencyHashes; - const bool trackSystemDependencies; - const bool remarkOnRebuildFromInterface; const bool disableInterfaceFileLock; const SourceLoc diagnosticLoc; DependencyTracker *const dependencyTracker; - CompilerInvocation subInvocation; SmallVector extraDependencies; public: @@ -75,12 +73,6 @@ class ModuleInterfaceBuilder { ID, std::move(Args)...); } - void configureSubInvocationInputsAndOutputs(StringRef OutPath); - - void configureSubInvocation(const SearchPathOptions &SearchPathOpts, - const 
LangOptions &LangOpts, - ClangModuleLoader *ClangLoader); - /// Populate the provided \p Deps with \c FileDependency entries for all /// dependencies \p SubInstance's DependencyTracker recorded while compiling /// the module, excepting .swiftmodules in \p moduleCachePath or @@ -92,41 +84,24 @@ class ModuleInterfaceBuilder { SmallVectorImpl &Deps, bool IsHashBased); - bool extractSwiftInterfaceVersionAndArgs( - version::Version &Vers, StringRef &CompilerVersion, - llvm::StringSaver &SubArgSaver, SmallVectorImpl &SubArgs); - bool buildSwiftModuleInternal(StringRef OutPath, bool ShouldSerializeDeps, std::unique_ptr *ModuleBuffer); public: ModuleInterfaceBuilder(SourceManager &sourceMgr, DiagnosticEngine &diags, - const SearchPathOptions &searchPathOpts, - const LangOptions &langOpts, - ClangModuleLoader *clangImporter, + InterfaceSubContextDelegate &subASTDelegate, StringRef interfacePath, StringRef moduleName, StringRef moduleCachePath, StringRef prebuiltCachePath, - bool serializeDependencyHashes = false, - bool trackSystemDependencies = false, - bool remarkOnRebuildFromInterface = false, bool disableInterfaceFileLock = false, SourceLoc diagnosticLoc = SourceLoc(), DependencyTracker *tracker = nullptr) : sourceMgr(sourceMgr), diags(diags), + subASTDelegate(subASTDelegate), interfacePath(interfacePath), moduleName(moduleName), moduleCachePath(moduleCachePath), prebuiltCachePath(prebuiltCachePath), - serializeDependencyHashes(serializeDependencyHashes), - trackSystemDependencies(trackSystemDependencies), - remarkOnRebuildFromInterface(remarkOnRebuildFromInterface), disableInterfaceFileLock(disableInterfaceFileLock), - diagnosticLoc(diagnosticLoc), dependencyTracker(tracker) { - configureSubInvocation(searchPathOpts, langOpts, clangImporter); - } - - const CompilerInvocation &getSubInvocation() const { - return subInvocation; - } + diagnosticLoc(diagnosticLoc), dependencyTracker(tracker) {} /// Ensures the requested file name is added as a dependency of the resulting /// module. diff --git a/lib/Frontend/ModuleInterfaceLoader.cpp b/lib/Frontend/ModuleInterfaceLoader.cpp index 67c3aa2f2469a..6419b805edb90 100644 --- a/lib/Frontend/ModuleInterfaceLoader.cpp +++ b/lib/Frontend/ModuleInterfaceLoader.cpp @@ -15,6 +15,7 @@ #include "swift/AST/ASTContext.h" #include "swift/AST/DiagnosticsFrontend.h" #include "swift/AST/FileSystem.h" +#include "swift/AST/DiagnosticsSema.h" #include "swift/AST/Module.h" #include "swift/Basic/Platform.h" #include "swift/Frontend/Frontend.h" @@ -28,6 +29,7 @@ #include "clang/Frontend/CompilerInstance.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/APInt.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/xxhash.h" #include "llvm/Support/Debug.h" #include "llvm/Support/CommandLine.h" @@ -371,64 +373,6 @@ class ModuleInterfaceLoaderImpl { remarkOnRebuildFromInterface(remarkOnRebuildFromInterface), disableInterfaceLock(disableInterfaceLock) {} - /// Construct a cache key for the .swiftmodule being generated. There is a - /// balance to be struck here between things that go in the cache key and - /// things that go in the "up to date" check of the cache entry. We want to - /// avoid fighting over a single cache entry too much when (say) running - /// different compiler versions on the same machine or different inputs - /// that happen to have the same short module name, so we will disambiguate - /// those in the key. 
But we want to invalidate and rebuild a cache entry - /// -- rather than making a new one and potentially filling up the cache - /// with dead entries -- when other factors change, such as the contents of - /// the .swiftinterface input or its dependencies. - std::string getCacheHash(const CompilerInvocation &SubInvocation) { - auto normalizedTargetTriple = - getTargetSpecificModuleTriple(SubInvocation.getLangOptions().Target); - - llvm::hash_code H = hash_combine( - // Start with the compiler version (which will be either tag names or - // revs). Explicitly don't pass in the "effective" language version -- - // this would mean modules built in different -swift-version modes would - // rebuild their dependencies. - swift::version::getSwiftFullVersion(), - - // Simplest representation of input "identity" (not content) is just a - // pathname, and probably all we can get from the VFS in this regard - // anyways. - interfacePath, - - // Include the normalized target triple. In practice, .swiftinterface - // files will be in target-specific subdirectories and would have - // target-specific pieces #if'd out. However, it doesn't hurt to include - // it, and it guards against mistakenly reusing cached modules across - // targets. Note that this normalization explicitly doesn't include the - // minimum deployment target (e.g. the '12.0' in 'ios12.0'). - normalizedTargetTriple.str(), - - // The SDK path is going to affect how this module is imported, so - // include it. - SubInvocation.getSDKPath(), - - // Whether or not we're tracking system dependencies affects the - // invalidation behavior of this cache item. - SubInvocation.getFrontendOptions().TrackSystemDeps); - - return llvm::APInt(64, H).toString(36, /*Signed=*/false); - } - - /// Calculate an output filename in \p SubInvocation's cache path that - /// includes a hash of relevant key data. - void computeCachedOutputPath(const CompilerInvocation &SubInvocation, - llvm::SmallString<256> &OutPath) { - OutPath = SubInvocation.getClangModuleCachePath(); - llvm::sys::path::append(OutPath, SubInvocation.getModuleName()); - OutPath.append("-"); - OutPath.append(getCacheHash(SubInvocation)); - OutPath.append("."); - auto OutExt = file_types::getExtension(file_types::TY_SwiftModuleFile); - OutPath.append(OutExt); - } - /// Constructs the full path of the dependency \p dep by prepending the SDK /// path if necessary. StringRef getFullDependencyPath(const FileDependency &dep, @@ -898,21 +842,29 @@ class ModuleInterfaceLoaderImpl { auto ClangDependencyTracker = dependencyTracker->getClangCollector(); trackSystemDependencies = ClangDependencyTracker->needSystemDependencies(); } - + InterfaceSubContextDelegateImpl astDelegate(ctx.SourceMgr, ctx.Diags, + ctx.SearchPathOpts, ctx.LangOpts, + ctx.getClangModuleLoader(), + /*buildModuleCacheDirIfAbsent*/true, + cacheDir, + prebuiltCacheDir, + /*serializeDependencyHashes*/false, + trackSystemDependencies, + remarkOnRebuildFromInterface, + disableInterfaceLock); // Set up a builder if we need to build the module. It'll also set up // the subinvocation we'll need to use to compute the cache paths. 
ModuleInterfaceBuilder builder( - ctx.SourceMgr, ctx.Diags, ctx.SearchPathOpts, ctx.LangOpts, - ctx.getClangModuleLoader(), interfacePath, moduleName, cacheDir, - prebuiltCacheDir, /*serializeDependencyHashes*/false, - trackSystemDependencies, remarkOnRebuildFromInterface, + ctx.SourceMgr, ctx.Diags, astDelegate, interfacePath, moduleName, cacheDir, + prebuiltCacheDir, disableInterfaceLock, diagnosticLoc, dependencyTracker); - auto &subInvocation = builder.getSubInvocation(); // Compute the output path if we're loading or emitting a cached module. llvm::SmallString<256> cachedOutputPath; - computeCachedOutputPath(subInvocation, cachedOutputPath); + StringRef CacheHash; + astDelegate.computeCachedOutputPath(moduleName, interfacePath, + cachedOutputPath, CacheHash); // Try to find the right module for this interface, either alongside it, // in the cache, or in the prebuilt cache. @@ -1068,13 +1020,18 @@ bool ModuleInterfaceLoader::buildSwiftModuleFromSwiftInterface( StringRef ModuleName, StringRef InPath, StringRef OutPath, bool SerializeDependencyHashes, bool TrackSystemDependencies, bool RemarkOnRebuildFromInterface, bool DisableInterfaceFileLock) { - ModuleInterfaceBuilder builder(SourceMgr, Diags, SearchPathOpts, LangOpts, - /*clangImporter*/nullptr, InPath, - ModuleName, CacheDir, PrebuiltCacheDir, - SerializeDependencyHashes, - TrackSystemDependencies, - RemarkOnRebuildFromInterface, - DisableInterfaceFileLock); + InterfaceSubContextDelegateImpl astDelegate(SourceMgr, Diags, + SearchPathOpts, LangOpts, + /*clangImporter*/nullptr, + /*CreateCacheDirIfAbsent*/true, + CacheDir, PrebuiltCacheDir, + SerializeDependencyHashes, + TrackSystemDependencies, + RemarkOnRebuildFromInterface, + DisableInterfaceFileLock); + ModuleInterfaceBuilder builder(SourceMgr, Diags, astDelegate, InPath, + ModuleName, CacheDir, PrebuiltCacheDir, + DisableInterfaceFileLock); // FIXME: We really only want to serialize 'important' dependencies here, if // we want to ship the built swiftmodules to another machine. return builder.buildSwiftModule(OutPath, /*shouldSerializeDeps*/true, @@ -1087,3 +1044,347 @@ void ModuleInterfaceLoader::collectVisibleTopLevelModuleNames( names, file_types::getExtension(file_types::TY_SwiftModuleInterfaceFile)); } + +void InterfaceSubContextDelegateImpl::inheritOptionsForBuildingInterface( + const SearchPathOptions &SearchPathOpts, + const LangOptions &LangOpts) { + // Start with a SubInvocation that copies various state from our + // invoking ASTContext. + GenericArgs.push_back("-compile-module-from-interface"); + subInvocation.setTargetTriple(LangOpts.Target); + + auto triple = ArgSaver.save(subInvocation.getTargetTriple()); + if (!triple.empty()) { + GenericArgs.push_back("-target"); + GenericArgs.push_back(triple); + } + + subInvocation.setImportSearchPaths(SearchPathOpts.ImportSearchPaths); + llvm::for_each(SearchPathOpts.ImportSearchPaths, + [&](const std::string &path) { + GenericArgs.push_back("-I"); + GenericArgs.push_back(path); + }); + subInvocation.setFrameworkSearchPaths(SearchPathOpts.FrameworkSearchPaths); + llvm::for_each(SearchPathOpts.FrameworkSearchPaths, + [&](const SearchPathOptions::FrameworkSearchPath &path) { + GenericArgs.push_back(path.IsSystem? 
"-Fsystem": "-F"); + GenericArgs.push_back(path.Path); + }); + if (!SearchPathOpts.SDKPath.empty()) { + subInvocation.setSDKPath(SearchPathOpts.SDKPath); + GenericArgs.push_back("-sdk"); + GenericArgs.push_back(SearchPathOpts.SDKPath); + } + + subInvocation.setInputKind(InputFileKind::SwiftModuleInterface); + if (!SearchPathOpts.RuntimeResourcePath.empty()) { + subInvocation.setRuntimeResourcePath(SearchPathOpts.RuntimeResourcePath); + GenericArgs.push_back("-resource-dir"); + GenericArgs.push_back(SearchPathOpts.RuntimeResourcePath); + } + + // Inhibit warnings from the SubInvocation since we are assuming the user + // is not in a position to fix them. + subInvocation.getDiagnosticOptions().SuppressWarnings = true; + GenericArgs.push_back("-suppress-warnings"); + + // Inherit this setting down so that it can affect error diagnostics (mostly + // by making them non-fatal). + subInvocation.getLangOptions().DebuggerSupport = LangOpts.DebuggerSupport; + if (LangOpts.DebuggerSupport) { + GenericArgs.push_back("-debugger-support"); + } + + // Disable this; deinitializers always get printed with `@objc` even in + // modules that don't import Foundation. + subInvocation.getLangOptions().EnableObjCAttrRequiresFoundation = false; + GenericArgs.push_back("-disable-objc-attr-requires-foundation-module"); +} + +bool InterfaceSubContextDelegateImpl::extractSwiftInterfaceVersionAndArgs( + SmallVectorImpl &SubArgs, + std::string &CompilerVersion, + StringRef interfacePath, + SourceLoc diagnosticLoc) { + llvm::vfs::FileSystem &fs = *SM.getFileSystem(); + auto FileOrError = swift::vfs::getFileOrSTDIN(fs, interfacePath); + if (!FileOrError) { + // Don't use this->diagnose() because it'll just try to re-open + // interfacePath. + Diags.diagnose(diagnosticLoc, diag::error_open_input_file, + interfacePath, FileOrError.getError().message()); + return true; + } + auto SB = FileOrError.get()->getBuffer(); + auto VersRe = getSwiftInterfaceFormatVersionRegex(); + auto CompRe = getSwiftInterfaceCompilerVersionRegex(); + auto FlagRe = getSwiftInterfaceModuleFlagsRegex(); + SmallVector VersMatches, FlagMatches, CompMatches; + + if (!VersRe.match(SB, &VersMatches)) { + diagnose(interfacePath, diagnosticLoc, + diag::error_extracting_version_from_module_interface); + return true; + } + if (!FlagRe.match(SB, &FlagMatches)) { + diagnose(interfacePath, diagnosticLoc, + diag::error_extracting_version_from_module_interface); + return true; + } + assert(VersMatches.size() == 2); + assert(FlagMatches.size() == 2); + // FIXME We should diagnose this at a location that makes sense: + auto Vers = swift::version::Version(VersMatches[1], SourceLoc(), &Diags); + llvm::cl::TokenizeGNUCommandLine(FlagMatches[1], ArgSaver, SubArgs); + + if (CompRe.match(SB, &CompMatches)) { + assert(CompMatches.size() == 2); + CompilerVersion = ArgSaver.save(CompMatches[1]); + } + else { + // Don't diagnose; handwritten module interfaces don't include this field. + CompilerVersion = "(unspecified, file possibly handwritten)"; + } + + // For now: we support anything with the same "major version" and assume + // minor versions might be interesting for debugging, or special-casing a + // compatible field variant. 
+ if (Vers.asMajorVersion() != InterfaceFormatVersion.asMajorVersion()) { + diagnose(interfacePath, diagnosticLoc, + diag::unsupported_version_of_module_interface, interfacePath, Vers); + return true; + } + + SmallString<32> ExpectedModuleName = subInvocation.getModuleName(); + if (subInvocation.parseArgs(SubArgs, Diags)) { + return true; + } + + if (subInvocation.getModuleName() != ExpectedModuleName) { + auto DiagKind = diag::serialization_name_mismatch; + if (subInvocation.getLangOptions().DebuggerSupport) + DiagKind = diag::serialization_name_mismatch_repl; + diagnose(interfacePath, diagnosticLoc, + DiagKind, subInvocation.getModuleName(), ExpectedModuleName); + return true; + } + + return false; +} + +InterfaceSubContextDelegateImpl::InterfaceSubContextDelegateImpl( + SourceManager &SM, + DiagnosticEngine &Diags, + const SearchPathOptions &searchPathOpts, + const LangOptions &langOpts, + ClangModuleLoader *clangImporter, + bool buildModuleCacheDirIfAbsent, + StringRef moduleCachePath, + StringRef prebuiltCachePath, + bool serializeDependencyHashes, + bool trackSystemDependencies, + bool remarkOnRebuildFromInterface, + bool disableInterfaceFileLock): SM(SM), Diags(Diags), ArgSaver(Allocator) { + inheritOptionsForBuildingInterface(searchPathOpts, langOpts); + // Configure front-end input. + auto &SubFEOpts = subInvocation.getFrontendOptions(); + SubFEOpts.RequestedAction = FrontendOptions::ActionType::EmitModuleOnly; + if (!moduleCachePath.empty()) { + subInvocation.setClangModuleCachePath(moduleCachePath); + GenericArgs.push_back("-module-cache-path"); + GenericArgs.push_back(moduleCachePath); + } + if (!prebuiltCachePath.empty()) { + subInvocation.getFrontendOptions().PrebuiltModuleCachePath = + prebuiltCachePath.str(); + GenericArgs.push_back("-prebuilt-module-cache-path"); + GenericArgs.push_back(prebuiltCachePath); + } + subInvocation.getFrontendOptions().TrackSystemDeps = trackSystemDependencies; + if (trackSystemDependencies) { + GenericArgs.push_back("-track-system-dependencies"); + } + // Respect the detailed-record preprocessor setting of the parent context. + // This, and the "raw" clang module format it implicitly enables, are + // required by sourcekitd. + if (clangImporter) { + auto &Opts = clangImporter->getClangInstance().getPreprocessorOpts(); + if (Opts.DetailedRecord) { + subInvocation.getClangImporterOptions().DetailedPreprocessingRecord = true; + } + } + + // Tell the subinvocation to serialize dependency hashes if asked to do so. + auto &frontendOpts = subInvocation.getFrontendOptions(); + frontendOpts.SerializeModuleInterfaceDependencyHashes = + serializeDependencyHashes; + if (serializeDependencyHashes) { + GenericArgs.push_back("-serialize-module-interface-dependency-hashes"); + } + + // Tell the subinvocation to remark on rebuilds from an interface if asked + // to do so. + frontendOpts.RemarkOnRebuildFromModuleInterface = + remarkOnRebuildFromInterface; + if (remarkOnRebuildFromInterface) { + GenericArgs.push_back("-Rmodule-interface-rebuild"); + } + + // Note that we don't assume cachePath is the same as the Clang + // module cache path at this point. + if (buildModuleCacheDirIfAbsent && !moduleCachePath.empty()) + (void)llvm::sys::fs::create_directories(moduleCachePath); +} + +/// Calculate an output filename in \p SubInvocation's cache path that +/// includes a hash of relevant key data. 
+StringRef InterfaceSubContextDelegateImpl::computeCachedOutputPath( + StringRef moduleName, + StringRef useInterfacePath, + llvm::SmallString<256> &OutPath, + StringRef &CacheHash) { + OutPath = subInvocation.getClangModuleCachePath(); + llvm::sys::path::append(OutPath, moduleName); + OutPath.append("-"); + auto hashStart = OutPath.size(); + OutPath.append(getCacheHash(useInterfacePath)); + CacheHash = OutPath.str().substr(hashStart); + OutPath.append("."); + auto OutExt = file_types::getExtension(file_types::TY_SwiftModuleFile); + OutPath.append(OutExt); + return OutPath.str(); +} + +/// Construct a cache key for the .swiftmodule being generated. There is a +/// balance to be struck here between things that go in the cache key and +/// things that go in the "up to date" check of the cache entry. We want to +/// avoid fighting over a single cache entry too much when (say) running +/// different compiler versions on the same machine or different inputs +/// that happen to have the same short module name, so we will disambiguate +/// those in the key. But we want to invalidate and rebuild a cache entry +/// -- rather than making a new one and potentially filling up the cache +/// with dead entries -- when other factors change, such as the contents of +/// the .swiftinterface input or its dependencies. +std::string +InterfaceSubContextDelegateImpl::getCacheHash(StringRef useInterfacePath) { + auto normalizedTargetTriple = + getTargetSpecificModuleTriple(subInvocation.getLangOptions().Target); + + llvm::hash_code H = hash_combine( + // Start with the compiler version (which will be either tag names or + // revs). Explicitly don't pass in the "effective" language version -- + // this would mean modules built in different -swift-version modes would + // rebuild their dependencies. + swift::version::getSwiftFullVersion(), + + // Simplest representation of input "identity" (not content) is just a + // pathname, and probably all we can get from the VFS in this regard + // anyways. + useInterfacePath, + + // Include the normalized target triple. In practice, .swiftinterface + // files will be in target-specific subdirectories and would have + // target-specific pieces #if'd out. However, it doesn't hurt to include + // it, and it guards against mistakenly reusing cached modules across + // targets. Note that this normalization explicitly doesn't include the + // minimum deployment target (e.g. the '12.0' in 'ios12.0'). + normalizedTargetTriple.str(), + + // The SDK path is going to affect how this module is imported, so + // include it. + subInvocation.getSDKPath(), + + // Whether or not we're tracking system dependencies affects the + // invalidation behavior of this cache item. 
+ subInvocation.getFrontendOptions().TrackSystemDeps); + + return llvm::APInt(64, H).toString(36, /*Signed=*/false); +} + +bool InterfaceSubContextDelegateImpl::runInSubContext(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref, StringRef)> action) { + return runInSubCompilerInstance(moduleName, interfacePath, outputPath, diagLoc, + [&](SubCompilerInstanceInfo &info){ + return action(info.Instance->getASTContext(), info.BuildArguments, + info.Hash); + }); +} + +bool InterfaceSubContextDelegateImpl::runInSubCompilerInstance(StringRef moduleName, + StringRef interfacePath, + StringRef outputPath, + SourceLoc diagLoc, + llvm::function_ref action) { + std::vector BuildArgs(GenericArgs.begin(), GenericArgs.end()); + assert(BuildArgs.size() == GenericArgs.size()); + // Configure inputs + subInvocation.getFrontendOptions().InputsAndOutputs + .addPrimaryInputFile(interfacePath); + BuildArgs.push_back(interfacePath); + subInvocation.setModuleName(moduleName); + BuildArgs.push_back("-module-name"); + BuildArgs.push_back(moduleName); + + // Calculate output path of the module. + llvm::SmallString<256> buffer; + StringRef CacheHash; + auto hashedOutput = computeCachedOutputPath(moduleName, interfacePath, buffer, + CacheHash); + // If no specific output path is given, use the hashed output path. + if (outputPath.empty()) { + outputPath = hashedOutput; + } + + // Configure the outputs in front-end options. There must be an equal number of + // primary inputs and outputs. + auto N = subInvocation.getFrontendOptions().InputsAndOutputs + .primaryInputCount(); + std::vector outputFiles(N, "/"); + ModuleOutputPaths.emplace_back(); + ModuleOutputPaths.back().ModuleOutputPath = outputPath.str(); + assert(N == ModuleOutputPaths.size()); + subInvocation.getFrontendOptions().InputsAndOutputs + .setMainAndSupplementaryOutputs(outputFiles, ModuleOutputPaths); + + // Add -o for building the module explicitly. + BuildArgs.push_back("-o"); + BuildArgs.push_back(outputPath); + + SmallVector SubArgs; + std::string CompilerVersion; + // Extract compiler arguments from the interface file and use them to configure + // the compiler invocation. + if (extractSwiftInterfaceVersionAndArgs(SubArgs, + CompilerVersion, + interfacePath, + diagLoc)) { + return true; + } + // Insert arguments collected from the interface file. + BuildArgs.insert(BuildArgs.end(), SubArgs.begin(), SubArgs.end()); + if (subInvocation.parseArgs(SubArgs, Diags)) { + return true; + } + CompilerInstance subInstance; + SubCompilerInstanceInfo info; + info.Instance = &subInstance; + info.CompilerVersion = CompilerVersion; + + subInstance.getSourceMgr().setFileSystem(SM.getFileSystem()); + + ForwardingDiagnosticConsumer FDC(Diags); + subInstance.addDiagnosticConsumer(&FDC); + subInstance.createDependencyTracker(subInvocation.getFrontendOptions() + .TrackSystemDeps); + if (subInstance.setup(subInvocation)) { + return true; + } + info.BuildArguments = BuildArgs; + info.Hash = CacheHash; + // Run the action under the sub compiler instance. + return action(info); +} diff --git a/lib/Frontend/SerializedDiagnosticConsumer.cpp b/lib/Frontend/SerializedDiagnosticConsumer.cpp index ae66990fcd7ac..43273199f62da 100644 --- a/lib/Frontend/SerializedDiagnosticConsumer.cpp +++ b/lib/Frontend/SerializedDiagnosticConsumer.cpp @@ -2,7 +2,7 @@ // // This source file is part of the Swift.org open source project // -// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information @@ -14,6 +14,8 @@ // //===----------------------------------------------------------------------===// +#include "clang/Frontend/SerializedDiagnostics.h" + #include "swift/Frontend/SerializedDiagnosticConsumer.h" #include "swift/AST/DiagnosticConsumer.h" #include "swift/AST/DiagnosticsFrontend.h" @@ -30,39 +32,8 @@ #include "llvm/ADT/SmallString.h" #include "llvm/Bitstream/BitstreamWriter.h" -// For constant values only. -#include "clang/Frontend/SerializedDiagnosticPrinter.h" - using namespace swift; - -//===----------------------------------------------------------------------===// -// These must match Clang's diagnostic IDs. We can consider sharing the -// header files to avoid this copy-paste. -//===----------------------------------------------------------------------===// - -enum BlockIDs { - /// A top-level block which represents any meta data associated - /// with the diagnostics, including versioning of the format. - BLOCK_META = llvm::bitc::FIRST_APPLICATION_BLOCKID, - - /// The this block acts as a container for all the information - /// for a specific diagnostic. - BLOCK_DIAG -}; - -enum RecordIDs { - RECORD_VERSION = 1, - RECORD_DIAG, - RECORD_SOURCE_RANGE, - RECORD_DIAG_FLAG, - RECORD_CATEGORY, - RECORD_FILENAME, - RECORD_FIXIT, - RECORD_FIRST = RECORD_VERSION, - RECORD_LAST = RECORD_FIXIT -}; - -//===----------------------------------------------------------------------===// +using namespace clang::serialized_diags; namespace { class AbbreviationMap { @@ -193,9 +164,6 @@ class SerializedDiagnosticConsumer : public DiagnosticConsumer { void handleDiagnostic(SourceManager &SM, const DiagnosticInfo &Info) override; - /// The version of the diagnostics file. - enum { Version = 1 }; - private: /// Emit bitcode for the preamble. void emitPreamble(); @@ -334,7 +302,7 @@ void SerializedDiagnosticConsumer::emitMetaBlock() { Stream.EnterSubblock(BLOCK_META, 3); Record.clear(); Record.push_back(RECORD_VERSION); - Record.push_back(Version); + Record.push_back(clang::serialized_diags::VersionNumber); Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record); Stream.ExitBlock(); } diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index 316df9056c85b..704aa40bf3a5c 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -653,9 +653,6 @@ static void debugFailWithCrash() { LLVM_BUILTIN_TRAP; } -static void emitIndexDataIfNeeded(SourceFile *PrimarySourceFile, - const CompilerInstance &Instance); - static void countStatsOfSourceFile(UnifiedStatsReporter &Stats, const CompilerInstance &Instance, SourceFile *SF) { @@ -1115,13 +1112,16 @@ static bool performCompileStepsPostSema(CompilerInstance &Instance, return result; } +static void emitIndexDataForSourceFile(SourceFile *PrimarySourceFile, + const CompilerInstance &Instance); + /// Emits index data for all primary inputs, or the main module. 
static void emitIndexData(const CompilerInstance &Instance) { if (Instance.getPrimarySourceFiles().empty()) { - emitIndexDataIfNeeded(nullptr, Instance); + emitIndexDataForSourceFile(nullptr, Instance); } else { for (SourceFile *SF : Instance.getPrimarySourceFiles()) - emitIndexDataIfNeeded(SF, Instance); + emitIndexDataForSourceFile(SF, Instance); } } @@ -1200,12 +1200,7 @@ static bool performCompile(CompilerInstance &Instance, FrontendObserver *observer) { const auto &Invocation = Instance.getInvocation(); const auto &opts = Invocation.getFrontendOptions(); - FrontendOptions::ActionType Action = opts.RequestedAction; - - if (Action == FrontendOptions::ActionType::EmitSyntax) { - Instance.getASTContext().LangOpts.BuildSyntaxTree = true; - Instance.getASTContext().LangOpts.VerifySyntaxTree = true; - } + const FrontendOptions::ActionType Action = opts.RequestedAction; // We've been asked to precompile a bridging header or module; we want to // avoid touching any other inputs and just parse, emit and exit. @@ -1288,12 +1283,13 @@ static bool performCompile(CompilerInstance &Instance, SWIFT_DEFER { // We might have freed the ASTContext already, but in that case we must have - // emitted the dependencies first. - if (Instance.hasASTContext()) + // emitted the dependencies and index first. + if (Instance.hasASTContext()) { emitReferenceDependenciesForAllPrimaryInputsIfNeeded(Instance); + emitIndexData(Instance); + } }; - emitIndexData(Instance); if (Context.hadError()) return true; @@ -1489,9 +1485,10 @@ static void freeASTContextIfPossible(CompilerInstance &Instance) { return; } - // Make sure we emit dependencies now, because we can't do it after the - // context is gone. + // Make sure we emit dependencies and index now, because we can't do it after + // the context is gone. emitReferenceDependenciesForAllPrimaryInputsIfNeeded(Instance); + emitIndexData(Instance); Instance.freeASTContext(); } @@ -1664,8 +1661,8 @@ static bool performCompileStepsPostSILGen(CompilerInstance &Instance, HadError; } -static void emitIndexDataIfNeeded(SourceFile *PrimarySourceFile, - const CompilerInstance &Instance) { +static void emitIndexDataForSourceFile(SourceFile *PrimarySourceFile, + const CompilerInstance &Instance) { const auto &Invocation = Instance.getInvocation(); const auto &opts = Invocation.getFrontendOptions(); diff --git a/lib/FrontendTool/ScanDependencies.cpp b/lib/FrontendTool/ScanDependencies.cpp index c9598a0777253..0778dcf3dc757 100644 --- a/lib/FrontendTool/ScanDependencies.cpp +++ b/lib/FrontendTool/ScanDependencies.cpp @@ -59,51 +59,26 @@ static void findAllImportedClangModules(ASTContext &ctx, StringRef moduleName, } } -struct InterfaceSubASTContextDelegate: SubASTContextDelegate { - bool runInSubContext(ASTContext &ctx, StringRef interfacePath, - llvm::function_ref action) override { - // Parse the interface file using the current context to get the additional - // compiler arguments we should use when creating a sub-ASTContext. - // These arguments are in "swift-module-flags:" - version::Version Vers; - StringRef CompilerVersion; - llvm::BumpPtrAllocator Allocator; - llvm::StringSaver SubArgSaver(Allocator); - SmallVector SubArgs; - if (extractSwiftInterfaceVersionAndArgs(ctx.SourceMgr, ctx.Diags, - interfacePath, Vers, CompilerVersion, - SubArgSaver, SubArgs)) { - return true; - } - CompilerInvocation invok; - - // Inherit options from the parent ASTContext so we have all search paths, etc. 
- inheritOptionsForBuildingInterface(invok, ctx.SearchPathOpts, ctx.LangOpts); - CompilerInstance inst; - // Use the additional flags to setup the compiler instance. - if (invok.parseArgs(SubArgs, ctx.Diags)) { - return true; - } - if (inst.setup(invok)) { - return true; - } - // Add the diag consumers to the sub context to make sure we don't lose - // diagnostics. - for (auto *consumer: ctx.Diags.getConsumers()) { - inst.getDiags().addConsumer(*consumer); - } - // Run the action under the sub-ASTContext. - return action(inst.getASTContext()); - } -}; - /// Resolve the direct dependencies of the given module. static std::vector resolveDirectDependencies( - ASTContext &ctx, ModuleDependencyID module, + CompilerInstance &instance, ModuleDependencyID module, ModuleDependenciesCache &cache) { + auto &ctx = instance.getASTContext(); auto knownDependencies = *cache.findDependencies(module.first, module.second); auto isSwift = knownDependencies.isSwiftModule(); - InterfaceSubASTContextDelegate ASTDelegate; + auto ModuleCachePath = getModuleCachePathFromClang(ctx + .getClangModuleLoader()->getClangInstance()); + auto &FEOpts = instance.getInvocation().getFrontendOptions(); + InterfaceSubContextDelegateImpl ASTDelegate(ctx.SourceMgr, ctx.Diags, + ctx.SearchPathOpts, ctx.LangOpts, + ctx.getClangModuleLoader(), + /*buildModuleCacheDirIfAbsent*/false, + ModuleCachePath, + FEOpts.PrebuiltModuleCachePath, + FEOpts.SerializeModuleInterfaceDependencyHashes, + FEOpts.TrackSystemDeps, + FEOpts.RemarkOnRebuildFromModuleInterface, + FEOpts.DisableInterfaceFileLock); // Find the dependencies of every module this module directly depends on. std::vector result; for (auto dependsOn : knownDependencies.getModuleDependencies()) { @@ -248,7 +223,7 @@ namespace { } static void writeJSON(llvm::raw_ostream &out, - ASTContext &ctx, + CompilerInstance &instance, ModuleDependenciesCache &cache, ArrayRef allModules) { // Write out a JSON description of all of the dependencies. @@ -268,7 +243,7 @@ static void writeJSON(llvm::raw_ostream &out, }; for (const auto &module : allModules) { auto directDependencies = resolveDirectDependencies( - ctx, ModuleDependencyID(module.first, module.second), cache); + instance, ModuleDependencyID(module.first, module.second), cache); // Grab the completed module dependencies. auto moduleDeps = *cache.findDependencies(module.first, module.second); @@ -315,7 +290,21 @@ static void writeJSON(llvm::raw_ostream &out, writeJSONSingleField( out, "moduleInterfacePath", *swiftDeps->swiftInterfaceFile, 5, - /*trailingComma=*/swiftDeps->bridgingHeaderFile.hasValue()); + /*trailingComma=*/true); + writeJSONSingleField(out, "contextHash", + swiftDeps->contextHash, 5, + /*trailingComma=*/true); + out.indent(5 * 2); + out << "\"commandLine\": [\n"; + for (auto &arg :swiftDeps->buildCommandLine) { + out.indent(6 * 2); + out << "\"" << arg << "\""; + if (&arg != &swiftDeps->buildCommandLine.back()) + out << ","; + out << "\n"; + } + out.indent(5 * 2); + out << "]\n"; } /// Bridging header and its source file dependencies, if any. @@ -462,12 +451,12 @@ bool swift::scanDependencies(CompilerInstance &instance) { ++currentModuleIdx) { auto module = allModules[currentModuleIdx]; auto discoveredModules = - resolveDirectDependencies(Context, module, cache); + resolveDirectDependencies(instance, module, cache); allModules.insert(discoveredModules.begin(), discoveredModules.end()); } // Write out the JSON description. 
- writeJSON(out, Context, cache, allModules.getArrayRef()); + writeJSON(out, instance, cache, allModules.getArrayRef()); // Update the dependency tracker. if (auto depTracker = instance.getDependencyTracker()) { diff --git a/lib/IDE/CodeCompletion.cpp b/lib/IDE/CodeCompletion.cpp index ecfbed03d874f..f6856c91e2e3c 100644 --- a/lib/IDE/CodeCompletion.cpp +++ b/lib/IDE/CodeCompletion.cpp @@ -356,12 +356,14 @@ CodeCompletionString *CodeCompletionString::create(llvm::BumpPtrAllocator &Alloc void CodeCompletionString::print(raw_ostream &OS) const { unsigned PrevNestingLevel = 0; - for (auto C : getChunks()) { + auto chunks = getChunks(); + for (auto I = chunks.begin(), E = chunks.end(); I != E; ++I) { bool AnnotatedTextChunk = false; - if (C.getNestingLevel() < PrevNestingLevel) { + + if (I->getNestingLevel() < PrevNestingLevel) { OS << "#}"; } - switch (C.getKind()) { + switch (I->getKind()) { using ChunkKind = Chunk::ChunkKind; case ChunkKind::AccessControlKeyword: case ChunkKind::DeclAttrKeyword: @@ -390,7 +392,7 @@ void CodeCompletionString::print(raw_ostream &OS) const { case ChunkKind::BaseName: case ChunkKind::TypeIdSystem: case ChunkKind::TypeIdUser: - AnnotatedTextChunk = C.isAnnotation(); + AnnotatedTextChunk = I->isAnnotation(); LLVM_FALLTHROUGH; case ChunkKind::CallParameterName: case ChunkKind::CallParameterInternalName: @@ -401,11 +403,11 @@ void CodeCompletionString::print(raw_ostream &OS) const { case ChunkKind::GenericParameterName: if (AnnotatedTextChunk) OS << "['"; - else if (C.getKind() == ChunkKind::CallParameterInternalName) + else if (I->getKind() == ChunkKind::CallParameterInternalName) OS << "("; - else if (C.getKind() == ChunkKind::CallParameterClosureType) + else if (I->getKind() == ChunkKind::CallParameterClosureType) OS << "##"; - for (char Ch : C.getText()) { + for (char Ch : I->getText()) { if (Ch == '\n') OS << "\\n"; else @@ -413,7 +415,7 @@ void CodeCompletionString::print(raw_ostream &OS) const { } if (AnnotatedTextChunk) OS << "']"; - else if (C.getKind() == ChunkKind::CallParameterInternalName) + else if (I->getKind() == ChunkKind::CallParameterInternalName) OS << ")"; break; case ChunkKind::OptionalBegin: @@ -424,21 +426,32 @@ void CodeCompletionString::print(raw_ostream &OS) const { break; case ChunkKind::DynamicLookupMethodCallTail: case ChunkKind::OptionalMethodCallTail: - OS << C.getText(); + OS << I->getText(); break; + case ChunkKind::TypeAnnotationBegin: { + OS << "[#"; + ++I; + auto level = I->getNestingLevel(); + for (; I != E && !I->endsPreviousNestedGroup(level); ++I) + if (I->hasText()) + OS << I->getText(); + --I; + OS << "#]"; + continue; + } case ChunkKind::TypeAnnotation: OS << "[#"; - OS << C.getText(); + OS << I->getText(); OS << "#]"; break; case ChunkKind::CallParameterClosureExpr: - OS << " {" << C.getText() << "|}"; + OS << " {" << I->getText() << "|}"; break; case ChunkKind::BraceStmtWithCursor: OS << " {|}"; break; } - PrevNestingLevel = C.getNestingLevel(); + PrevNestingLevel = I->getNestingLevel(); } while (PrevNestingLevel > 0) { OS << "#}"; @@ -779,6 +792,15 @@ static ArrayRef> copyStringPairArray( return llvm::makeArrayRef(Buff, Arr.size()); } +void CodeCompletionResultBuilder::withNestedGroup( + CodeCompletionString::Chunk::ChunkKind Kind, + llvm::function_ref body) { + CurrentNestingLevel++; + addSimpleChunk(Kind); + body(); + CurrentNestingLevel--; +} + void CodeCompletionResultBuilder::addChunkWithText( CodeCompletionString::Chunk::ChunkKind Kind, StringRef Text) { addChunkWithTextNoCopy(Kind, copyString(*Sink.Allocator, 
Text)); @@ -826,22 +848,26 @@ class AnnotatedTypePrinter : public ASTPrinter { } } + void flush() { + if (Buffer.empty()) + return; + Builder.addChunkWithText(CurrChunkKind, Buffer); + Buffer.clear(); + } + public: AnnotatedTypePrinter(CodeCompletionResultBuilder &Builder) : Builder(Builder) {} ~AnnotatedTypePrinter() { - // Add remaining buffer. - Builder.addChunkWithText(CurrChunkKind, Buffer); + // Flush the remainings. + flush(); } void printText(StringRef Text) override { if (CurrChunkKind != NextChunkKind) { - // If the next desired kind is different from the current buffer, flush the - // current buffer. - if (!Buffer.empty()) { - Builder.addChunkWithText(CurrChunkKind, Buffer); - Buffer.clear(); - } + // If the next desired kind is different from the current buffer, flush + // the current buffer. + flush(); CurrChunkKind = NextChunkKind; } Buffer.append(Text); @@ -961,13 +987,10 @@ void CodeCompletionResultBuilder::addCallParameter(Identifier Name, if (ContextTy) PO.setBaseType(ContextTy); if (shouldAnnotateResults()) { - CurrentNestingLevel++; - addSimpleChunk(ChunkKind::CallParameterTypeBegin); - { + withNestedGroup(ChunkKind::CallParameterTypeBegin, [&]() { AnnotatedTypePrinter printer(*this); Ty->print(printer, PO); - } - CurrentNestingLevel--; + }); } else { std::string TypeName = Ty->getString(PO); addChunkWithText(ChunkKind::CallParameterType, TypeName); @@ -1039,6 +1062,30 @@ void CodeCompletionResultBuilder::addCallParameter(Identifier Name, CurrentNestingLevel--; } +void CodeCompletionResultBuilder::addTypeAnnotation(Type T, PrintOptions PO, + StringRef suffix) { + T = T->getReferenceStorageReferent(); + + // Replace '()' with 'Void'. + if (T->isVoid()) + T = T->getASTContext().getVoidDecl()->getDeclaredInterfaceType(); + + if (shouldAnnotateResults()) { + withNestedGroup(CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin, + [&]() { + AnnotatedTypePrinter printer(*this); + T->print(printer, PO); + if (!suffix.empty()) + printer.printText(suffix); + }); + } else { + auto str = T.getString(PO); + if (!suffix.empty()) + str += suffix.str(); + addTypeAnnotation(str); + } +} + StringRef CodeCompletionContext::copyString(StringRef Str) { return ::copyString(*CurrentResults.Allocator, Str); } @@ -1396,6 +1443,7 @@ Optional CodeCompletionString::getFirstTextChunkIndex( case ChunkKind::DynamicLookupMethodCallTail: case ChunkKind::OptionalMethodCallTail: case ChunkKind::TypeAnnotation: + case ChunkKind::TypeAnnotationBegin: continue; case ChunkKind::BraceStmtWithCursor: @@ -1417,17 +1465,25 @@ void CodeCompletionString::getName(raw_ostream &OS) const { auto FirstTextChunk = getFirstTextChunkIndex(); int TextSize = 0; if (FirstTextChunk.hasValue()) { - for (auto C : getChunks().slice(*FirstTextChunk)) { + auto chunks = getChunks().slice(*FirstTextChunk); + + for (auto i = chunks.begin(), e = chunks.end(); i != e; ++i) { using ChunkKind = Chunk::ChunkKind; - bool shouldPrint = !C.isAnnotation(); - switch (C.getKind()) { + bool shouldPrint = !i->isAnnotation(); + switch (i->getKind()) { case ChunkKind::TypeAnnotation: case ChunkKind::CallParameterClosureType: case ChunkKind::CallParameterClosureExpr: case ChunkKind::DeclAttrParamColon: case ChunkKind::OptionalMethodCallTail: continue; + case ChunkKind::TypeAnnotationBegin: { + auto level = i->getNestingLevel(); + do { ++i; } while (i != e && !i->endsPreviousNestedGroup(level)); + --i; + continue; + } case ChunkKind::ThrowsKeyword: case ChunkKind::RethrowsKeyword: shouldPrint = true; // Even when they're annotations. 
@@ -1436,9 +1492,9 @@ void CodeCompletionString::getName(raw_ostream &OS) const { break; } - if (C.hasText() && shouldPrint) { - TextSize += C.getText().size(); - OS << C.getText(); + if (i->hasText() && shouldPrint) { + TextSize += i->getText().size(); + OS << i->getText(); } } } @@ -1526,8 +1582,8 @@ class CodeCompletionCallbacksImpl : public CodeCompletionCallbacks { SemanticContextKind::CurrentNominal, {}); Builder.setKeywordKind(CodeCompletionKeywordKind::kw_super); - Builder.addTextChunk("super"); - Builder.addTypeAnnotation(ST.getString()); + Builder.addKeyword("super"); + Builder.addTypeAnnotation(ST, PrintOptions()); } Optional> typeCheckParsedExpr() { @@ -1693,6 +1749,29 @@ protocolForLiteralKind(CodeCompletionLiteralKind kind) { llvm_unreachable("Unhandled CodeCompletionLiteralKind in switch."); } +static Type +defaultTypeLiteralKind(CodeCompletionLiteralKind kind, ASTContext &Ctx) { + switch (kind) { + case CodeCompletionLiteralKind::BooleanLiteral: + return Ctx.getBoolDecl()->getDeclaredType(); + case CodeCompletionLiteralKind::IntegerLiteral: + return Ctx.getIntDecl()->getDeclaredType(); + case CodeCompletionLiteralKind::StringLiteral: + return Ctx.getStringDecl()->getDeclaredType(); + case CodeCompletionLiteralKind::ArrayLiteral: + return Ctx.getArrayDecl()->getDeclaredType(); + case CodeCompletionLiteralKind::DictionaryLiteral: + return Ctx.getDictionaryDecl()->getDeclaredType(); + case CodeCompletionLiteralKind::NilLiteral: + case CodeCompletionLiteralKind::ColorLiteral: + case CodeCompletionLiteralKind::ImageLiteral: + case CodeCompletionLiteralKind::Tuple: + return Type(); + } + + llvm_unreachable("Unhandled CodeCompletionLiteralKind in switch."); +} + /// Whether funcType has a single argument (not including defaulted arguments) /// that is of type () -> (). 
static bool hasTrivialTrailingClosure(const FuncDecl *FD, @@ -2193,17 +2272,12 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { } void addTypeAnnotation(CodeCompletionResultBuilder &Builder, Type T) { - T = T->getReferenceStorageReferent(); - if (T->isVoid()) { - Builder.addTypeAnnotation("Void"); - } else { - PrintOptions PO; - PO.OpaqueReturnTypePrinting = - PrintOptions::OpaqueReturnTypePrintingMode::WithoutOpaqueKeyword; - if (auto typeContext = CurrDeclContext->getInnermostTypeContext()) - PO.setBaseType(typeContext->getDeclaredTypeInContext()); - Builder.addTypeAnnotation(T.getString(PO)); - } + PrintOptions PO; + PO.OpaqueReturnTypePrinting = + PrintOptions::OpaqueReturnTypePrintingMode::WithoutOpaqueKeyword; + if (auto typeContext = CurrDeclContext->getInnermostTypeContext()) + PO.setBaseType(typeContext->getDeclaredTypeInContext()); + Builder.addTypeAnnotation(T, PO); } void addTypeAnnotationForImplicitlyUnwrappedOptional( @@ -2218,14 +2292,13 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { suffix = "?"; } - T = T->getReferenceStorageReferent(); PrintOptions PO; PO.PrintOptionalAsImplicitlyUnwrapped = true; PO.OpaqueReturnTypePrinting = PrintOptions::OpaqueReturnTypePrintingMode::WithoutOpaqueKeyword; if (auto typeContext = CurrDeclContext->getInnermostTypeContext()) PO.setBaseType(typeContext->getDeclaredTypeInContext()); - Builder.addTypeAnnotation(T.getString(PO) + suffix); + Builder.addTypeAnnotation(T, PO, suffix); } /// For printing in code completion results, replace archetypes with @@ -2271,7 +2344,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { } if (t->isTypeParameter()) { - auto protos = genericSig->getConformsTo(t); + const auto protos = genericSig->getRequiredProtocols(t); if (!protos.empty()) return buildProtocolComposition(protos); } @@ -2889,12 +2962,8 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { else if (FD->getAttrs().hasAttribute()) Builder.addOptionalMethodCallTail(); - llvm::SmallString<32> TypeStr; - if (!AFT) { - llvm::raw_svector_ostream OS(TypeStr); - FunctionType.print(OS); - Builder.addTypeAnnotation(OS.str()); + Builder.addTypeAnnotation(FunctionType, PrintOptions()); return; } @@ -2915,10 +2984,41 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { addThrows(Builder, AFT, FD); } + // Build type annotation. Type ResultType = AFT->getResult(); + // As we did with parameters in addParamPatternFromFunction, + // for regular methods we'll print '!' after implicitly + // unwrapped optional results. + bool IsIUO = + !IsImplicitlyCurriedInstanceMethod && + FD->isImplicitlyUnwrappedOptional(); - // Build type annotation. - { + PrintOptions PO; + PO.OpaqueReturnTypePrinting = + PrintOptions::OpaqueReturnTypePrintingMode::WithoutOpaqueKeyword; + PO.PrintOptionalAsImplicitlyUnwrapped = IsIUO; + if (auto typeContext = CurrDeclContext->getInnermostTypeContext()) + PO.setBaseType(typeContext->getDeclaredTypeInContext()); + + if (Builder.shouldAnnotateResults()) { + Builder.withNestedGroup( + CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin, [&] { + AnnotatedTypePrinter printer(Builder); + if (IsImplicitlyCurriedInstanceMethod) { + auto *FnType = ResultType->castTo(); + AnyFunctionType::printParams(FnType->getParams(), printer, + PrintOptions()); + ResultType = FnType->getResult(); + printer.printText(" -> "); + } + + // What's left is the result type. 
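// Both this annotated branch and the plain-text fallback below substitute the
// stdlib's Void typealias for an empty tuple result, so the annotation reads
// "Void" rather than "()"; PO carries the IUO flag so non-curried methods with
// implicitly unwrapped optional results still print as "T!".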
+ if (ResultType->isVoid()) + ResultType = Ctx.getVoidDecl()->getDeclaredInterfaceType(); + ResultType.print(printer, PO); + }); + } else { + llvm::SmallString<32> TypeStr; llvm::raw_svector_ostream OS(TypeStr); if (IsImplicitlyCurriedInstanceMethod) { auto *FnType = ResultType->castTo(); @@ -2928,26 +3028,11 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { } // What's left is the result type. - if (ResultType->isVoid()) { - OS << "Void"; - } else { - // As we did with parameters in addParamPatternFromFunction, - // for regular methods we'll print '!' after implicitly - // unwrapped optional results. - bool IsIUO = - !IsImplicitlyCurriedInstanceMethod && - FD->isImplicitlyUnwrappedOptional(); - - PrintOptions PO; - PO.OpaqueReturnTypePrinting = - PrintOptions::OpaqueReturnTypePrintingMode::WithoutOpaqueKeyword; - PO.PrintOptionalAsImplicitlyUnwrapped = IsIUO; - if (auto typeContext = CurrDeclContext->getInnermostTypeContext()) - PO.setBaseType(typeContext->getDeclaredTypeInContext()); - ResultType.print(OS, PO); - } + if (ResultType->isVoid()) + ResultType = Ctx.getVoidDecl()->getDeclaredInterfaceType(); + ResultType.print(OS, PO); + Builder.addTypeAnnotation(TypeStr); } - Builder.addTypeAnnotation(TypeStr); if (isUnresolvedMemberIdealType(ResultType)) Builder.setSemanticContext(SemanticContextKind::ExpressionSpecific); @@ -3910,8 +3995,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { } void addTypeRelationFromProtocol(CodeCompletionResultBuilder &builder, - CodeCompletionLiteralKind kind, - StringRef defaultTypeName) { + CodeCompletionLiteralKind kind) { // Check for matching ExpectedTypes. auto *P = Ctx.getProtocol(protocolForLiteralKind(kind)); for (auto T : expectedTypeContext.possibleTypes) { @@ -3940,8 +4024,8 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { } // Fallback to showing the default type. - if (!defaultTypeName.empty()) { - builder.addTypeAnnotation(defaultTypeName); + if (auto defaultTy = defaultTypeLiteralKind(kind, Ctx)) { + builder.addTypeAnnotation(defaultTy, PrintOptions()); builder.setExpectedTypeRelation( expectedTypeContext.possibleTypes.empty() ? CodeCompletionResult::ExpectedTypeRelation::Unknown @@ -3952,8 +4036,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { /// Add '#file', '#line', et at. 
void addPoundLiteralCompletions(bool needPound) { auto addFromProto = [&](StringRef name, CodeCompletionKeywordKind kwKind, - CodeCompletionLiteralKind literalKind, - StringRef defaultTypeName) { + CodeCompletionLiteralKind literalKind) { if (!needPound) name = name.substr(1); @@ -3963,21 +4046,21 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { builder.setLiteralKind(literalKind); builder.setKeywordKind(kwKind); builder.addBaseName(name); - addTypeRelationFromProtocol(builder, literalKind, defaultTypeName); + addTypeRelationFromProtocol(builder, literalKind); }; addFromProto("#function", CodeCompletionKeywordKind::pound_function, - CodeCompletionLiteralKind::StringLiteral, "String"); + CodeCompletionLiteralKind::StringLiteral); addFromProto("#file", CodeCompletionKeywordKind::pound_file, - CodeCompletionLiteralKind::StringLiteral, "String"); + CodeCompletionLiteralKind::StringLiteral); if (Ctx.LangOpts.EnableConcisePoundFile) { addFromProto("#filePath", CodeCompletionKeywordKind::pound_file, - CodeCompletionLiteralKind::StringLiteral, "String"); + CodeCompletionLiteralKind::StringLiteral); } addFromProto("#line", CodeCompletionKeywordKind::pound_line, - CodeCompletionLiteralKind::IntegerLiteral, "Int"); + CodeCompletionLiteralKind::IntegerLiteral); addFromProto("#column", CodeCompletionKeywordKind::pound_column, - CodeCompletionLiteralKind::IntegerLiteral, "Int"); + CodeCompletionLiteralKind::IntegerLiteral); addKeyword(needPound ? "#dsohandle" : "dsohandle", "UnsafeRawPointer", CodeCompletionKeywordKind::pound_dsohandle); @@ -3987,7 +4070,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { auto &context = CurrDeclContext->getASTContext(); auto addFromProto = [&]( - CodeCompletionLiteralKind kind, StringRef defaultTypeName, + CodeCompletionLiteralKind kind, llvm::function_ref consumer, bool isKeyword = false) { @@ -3996,7 +4079,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { builder.setLiteralKind(kind); consumer(builder); - addTypeRelationFromProtocol(builder, kind, defaultTypeName); + addTypeRelationFromProtocol(builder, kind); }; // FIXME: the pedantically correct way is to resolve Swift.*LiteralType. @@ -4005,29 +4088,29 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { using Builder = CodeCompletionResultBuilder; // Add literal completions that conform to specific protocols. 
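// The hard-coded default type names ("Int", "Bool", "String", ...) are gone:
// addTypeRelationFromProtocol now derives the fallback type through
// defaultTypeLiteralKind(kind, Ctx) and renders it as a real type annotation.
// Sketch of the effect for an integer literal:
//   builder.addTypeAnnotation(Ctx.getIntDecl()->getDeclaredType(), PrintOptions());
// rather than builder.addTypeAnnotation("Int").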
- addFromProto(LK::IntegerLiteral, "Int", [](Builder &builder) { + addFromProto(LK::IntegerLiteral, [](Builder &builder) { builder.addTextChunk("0"); }); - addFromProto(LK::BooleanLiteral, "Bool", [](Builder &builder) { + addFromProto(LK::BooleanLiteral, [](Builder &builder) { builder.addBaseName("true"); }, /*isKeyword=*/true); - addFromProto(LK::BooleanLiteral, "Bool", [](Builder &builder) { + addFromProto(LK::BooleanLiteral, [](Builder &builder) { builder.addBaseName("false"); }, /*isKeyword=*/true); - addFromProto(LK::NilLiteral, "", [](Builder &builder) { + addFromProto(LK::NilLiteral, [](Builder &builder) { builder.addBaseName("nil"); }, /*isKeyword=*/true); - addFromProto(LK::StringLiteral, "String", [&](Builder &builder) { + addFromProto(LK::StringLiteral, [&](Builder &builder) { builder.addTextChunk("\""); builder.addSimpleNamedParameter("abc"); builder.addTextChunk("\""); }); - addFromProto(LK::ArrayLiteral, "Array", [&](Builder &builder) { + addFromProto(LK::ArrayLiteral, [&](Builder &builder) { builder.addLeftBracket(); builder.addSimpleNamedParameter("values"); builder.addRightBracket(); }); - addFromProto(LK::DictionaryLiteral, "Dictionary", [&](Builder &builder) { + addFromProto(LK::DictionaryLiteral, [&](Builder &builder) { builder.addLeftBracket(); builder.addSimpleNamedParameter("key"); builder.addTextChunk(": "); @@ -4036,7 +4119,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { }); auto floatType = context.getFloatDecl()->getDeclaredType(); - addFromProto(LK::ColorLiteral, "", [&](Builder &builder) { + addFromProto(LK::ColorLiteral, [&](Builder &builder) { builder.addBaseName("#colorLiteral"); builder.addLeftParen(); builder.addCallParameter(context.getIdentifier("red"), floatType); @@ -4050,7 +4133,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { }); auto stringType = context.getStringDecl()->getDeclaredType(); - addFromProto(LK::ImageLiteral, "", [&](Builder &builder) { + addFromProto(LK::ImageLiteral, [&](Builder &builder) { builder.addBaseName("#imageLiteral"); builder.addLeftParen(); builder.addCallParameter(context.getIdentifier("resourceName"), @@ -4652,7 +4735,7 @@ class CompletionOverrideLookup : public swift::VisibleDeclConsumer { bool hasExplicitAnyObject = false; if (auto superTy = genericSig->getSuperclassBound(ResultT)) opaqueTypes.push_back(superTy); - for (auto proto : genericSig->getConformsTo(ResultT)) + for (const auto proto : genericSig->getRequiredProtocols(ResultT)) opaqueTypes.push_back(proto->getDeclaredInterfaceType()); if (auto layout = genericSig->getLayoutConstraint(ResultT)) hasExplicitAnyObject = layout->isClass(); @@ -5398,8 +5481,13 @@ static void addOpaqueTypeKeyword(CodeCompletionResultSink &Sink) { addKeyword(Sink, "some", CodeCompletionKeywordKind::None, "some"); } -static void addAnyTypeKeyword(CodeCompletionResultSink &Sink) { - addKeyword(Sink, "Any", CodeCompletionKeywordKind::None, "Any"); +static void addAnyTypeKeyword(CodeCompletionResultSink &Sink, Type T) { + CodeCompletionResultBuilder Builder(Sink, + CodeCompletionResult::ResultKind::Keyword, + SemanticContextKind::None, {}); + Builder.setKeywordKind(CodeCompletionKeywordKind::None); + Builder.addKeyword("Any"); + Builder.addTypeAnnotation(T, PrintOptions()); } void CodeCompletionCallbacksImpl::addKeywords(CodeCompletionResultSink &Sink, @@ -5454,7 +5542,7 @@ void CodeCompletionCallbacksImpl::addKeywords(CodeCompletionResultSink &Sink, addSuperKeyword(Sink); addLetVarKeywords(Sink); addExprKeywords(Sink); - 
addAnyTypeKeyword(Sink); + addAnyTypeKeyword(Sink, CurDeclContext->getASTContext().TheAnyType); break; case CompletionKind::CaseStmtKeyword: @@ -5479,7 +5567,7 @@ void CodeCompletionCallbacksImpl::addKeywords(CodeCompletionResultSink &Sink, LLVM_FALLTHROUGH; } case CompletionKind::TypeSimpleBeginning: - addAnyTypeKeyword(Sink); + addAnyTypeKeyword(Sink, CurDeclContext->getASTContext().TheAnyType); break; case CompletionKind::NominalMemberBeginning: { @@ -6011,7 +6099,7 @@ void CodeCompletionCallbacksImpl::doneParsing() { addSuperKeyword(Sink); addLetVarKeywords(Sink); addExprKeywords(Sink); - addAnyTypeKeyword(Sink); + addAnyTypeKeyword(Sink, CurDeclContext->getASTContext().TheAnyType); DoPostfixExprBeginning(); } else { // foo() {} @@ -6156,7 +6244,7 @@ void CodeCompletionCallbacksImpl::doneParsing() { AccessLevel::Internal, TheModule, SourceFile::ImportQueryKind::PrivateOnly), Ctx.LangOpts.CodeCompleteInitsInPostfixExpr, - CompletionContext.getAnnnoateResult(), + CompletionContext.getAnnotateResult(), }; using PairType = llvm::DenseSetgetKind() == CodeCompletionResult::Keyword) continue; Result->printPrefix(OS); - if (PrintAnnotatedDescription) + if (PrintAnnotatedDescription) { printCodeCompletionResultDescriptionAnnotated(*Result, OS, /*leadingPunctuation=*/false); - else + OS << "; typename="; + printCodeCompletionResultTypeNameAnnotated(*Result, OS); + } else { Result->getCompletionString()->print(OS); + } llvm::SmallString<64> Name; llvm::raw_svector_ostream NameOs(Name); @@ -6357,7 +6448,7 @@ void SimpleCachingCodeCompletionConsumer::handleResultsAndModules( if (!V.hasValue()) { // No cached results found. Fill the cache. V = context.Cache.createValue(); - (*V)->Sink.annotateResult = context.getAnnnoateResult(); + (*V)->Sink.annotateResult = context.getAnnotateResult(); lookupCodeCompletionResultsFromModule( (*V)->Sink, R.TheModule, R.Key.AccessPath, R.Key.ResultsHaveLeadingDot, DCForModules); diff --git a/lib/IDE/CodeCompletionResultBuilder.h b/lib/IDE/CodeCompletionResultBuilder.h index bbf6955b11692..bb286e2273c34 100644 --- a/lib/IDE/CodeCompletionResultBuilder.h +++ b/lib/IDE/CodeCompletionResultBuilder.h @@ -81,13 +81,6 @@ class CodeCompletionResultBuilder { CodeCompletionResult::NotRecommendedReason NotRecReason = CodeCompletionResult::NotRecommendedReason::NoReason; - /// Annotated results are requested by the client. - /// - /// This affects the structure of the CodeCompletionString. - bool shouldAnnotateResults() { - return Sink.annotateResult; - } - void addChunkWithText(CodeCompletionString::Chunk::ChunkKind Kind, StringRef Text); @@ -126,6 +119,13 @@ class CodeCompletionResultBuilder { Cancelled = true; } + /// Annotated results are requested by the client. + /// + /// This affects the structure of the CodeCompletionString. + bool shouldAnnotateResults() { + return Sink.annotateResult; + } + void setNumBytesToErase(unsigned N) { NumBytesToErase = N; } @@ -148,6 +148,9 @@ class CodeCompletionResultBuilder { ExpectedTypeRelation = relation; } + void withNestedGroup(CodeCompletionString::Chunk::ChunkKind Kind, + llvm::function_ref body); + void addAccessControlKeyword(AccessLevel Access) { switch (Access) { case AccessLevel::Private: @@ -357,23 +360,23 @@ class CodeCompletionResultBuilder { } void addSimpleNamedParameter(StringRef name) { - CurrentNestingLevel++; - addSimpleChunk(CodeCompletionString::Chunk::ChunkKind::CallParameterBegin); - // Use internal, since we don't want the name to be outside the placeholder. 
- addChunkWithText( - CodeCompletionString::Chunk::ChunkKind::CallParameterInternalName, - name); - CurrentNestingLevel--; + withNestedGroup(CodeCompletionString::Chunk::ChunkKind::CallParameterBegin, [&] { + // Use internal, since we don't want the name to be outside the + // placeholder. + addChunkWithText( + CodeCompletionString::Chunk::ChunkKind::CallParameterInternalName, + name); + }); } void addSimpleTypedParameter(StringRef Annotation, bool IsVarArg = false) { - CurrentNestingLevel++; - addSimpleChunk(CodeCompletionString::Chunk::ChunkKind::CallParameterBegin); - addChunkWithText(CodeCompletionString::Chunk::ChunkKind::CallParameterType, - Annotation); - if (IsVarArg) - addEllipsis(); - CurrentNestingLevel--; + withNestedGroup(CodeCompletionString::Chunk::ChunkKind::CallParameterBegin, [&] { + addChunkWithText( + CodeCompletionString::Chunk::ChunkKind::CallParameterType, + Annotation); + if (IsVarArg) + addEllipsis(); + }); } void addCallParameter(Identifier Name, Identifier LocalName, Type Ty, @@ -389,12 +392,11 @@ class CodeCompletionResultBuilder { } void addGenericParameter(StringRef Name) { - CurrentNestingLevel++; - addSimpleChunk( - CodeCompletionString::Chunk::ChunkKind::GenericParameterBegin); - addChunkWithText( - CodeCompletionString::Chunk::ChunkKind::GenericParameterName, Name); - CurrentNestingLevel--; + withNestedGroup(CodeCompletionString::Chunk::ChunkKind::GenericParameterBegin, + [&] { + addChunkWithText( + CodeCompletionString::Chunk::ChunkKind::GenericParameterName, Name); + }); } void addDynamicLookupMethodCallTail() { @@ -415,6 +417,8 @@ class CodeCompletionResultBuilder { getLastChunk().setIsAnnotation(); } + void addTypeAnnotation(Type T, PrintOptions PO, StringRef suffix = ""); + void addBraceStmtWithCursor(StringRef Description = "") { addChunkWithText( CodeCompletionString::Chunk::ChunkKind::BraceStmtWithCursor, diff --git a/lib/IDE/CodeCompletionResultPrinter.cpp b/lib/IDE/CodeCompletionResultPrinter.cpp index 9d3a5e76ee340..e74ab3a3d6e91 100644 --- a/lib/IDE/CodeCompletionResultPrinter.cpp +++ b/lib/IDE/CodeCompletionResultPrinter.cpp @@ -43,16 +43,23 @@ void swift::ide::printCodeCompletionResultDescription( C.is(ChunkKind::Whitespace)) continue; + // Skip TypeAnnotation group. + if (C.is(ChunkKind::TypeAnnotationBegin)) { + auto level = I->getNestingLevel(); + do { ++I; } while (I != E && !I->endsPreviousNestedGroup(level)); + --I; + continue; + } + if (isOperator && C.is(ChunkKind::CallParameterType)) continue; if (isOperator && C.is(ChunkKind::CallParameterTypeBegin)) { - auto nestingLevel = C.getNestingLevel(); - ++I; - while (I != E && I->endsPreviousNestedGroup(nestingLevel)) - ++I; + auto level = I->getNestingLevel(); + do { ++I; } while (I != E && !I->endsPreviousNestedGroup(level)); --I; continue; } + if (C.hasText()) { TextSize += C.getText().size(); OS << C.getText(); @@ -64,7 +71,7 @@ void swift::ide::printCodeCompletionResultDescription( } namespace { -class AnnotatingDescriptionPrinter { +class AnnotatingResultPrinter { raw_ostream &OS; /// Print \p content enclosing with \p tag. 
@@ -154,9 +161,9 @@ class AnnotatingDescriptionPrinter { } public: - AnnotatingDescriptionPrinter(raw_ostream &OS) : OS(OS) {} + AnnotatingResultPrinter(raw_ostream &OS) : OS(OS) {} - void print(const CodeCompletionResult &result, bool leadingPunctuation) { + void printDescription(const CodeCompletionResult &result, bool leadingPunctuation) { auto str = result.getCompletionString(); bool isOperator = result.isOperator(); @@ -165,23 +172,52 @@ class AnnotatingDescriptionPrinter { auto chunks = str->getChunks().slice(*FirstTextChunk); for (auto i = chunks.begin(), e = chunks.end(); i != e; ++i) { using ChunkKind = CodeCompletionString::Chunk::ChunkKind; + + // Skip the type annotation. + if (i->is(ChunkKind::TypeAnnotationBegin)) { + auto level = i->getNestingLevel(); + do { ++i; } while (i != e && !i->endsPreviousNestedGroup(level)); + --i; + continue; + } + + // Print call argument group. if (i->is(ChunkKind::CallParameterBegin)) { - auto start = i++; - for (; i != e; ++i) { - if (i->endsPreviousNestedGroup(start->getNestingLevel())) - break; - } + auto start = i; + auto level = i->getNestingLevel(); + do { ++i; } while (i != e && !i->endsPreviousNestedGroup(level)); if (!isOperator) printCallArg({start, i}); - if (i == e) - break; + --i; + continue; } + if (isOperator && i->is(ChunkKind::CallParameterType)) continue; printTextChunk(*i); } } } + + void printTypeName(const CodeCompletionResult &result) { + auto Chunks = result.getCompletionString()->getChunks(); + + for (auto i = Chunks.begin(), e = Chunks.end(); i != e; ++i) { + + if (i->is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotation)) + OS << i->getText(); + + if (i->is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin)) { + auto nestingLevel = i->getNestingLevel(); + ++i; + for (; i != e && !i->endsPreviousNestedGroup(nestingLevel); ++i) { + if (i->hasText()) + printTextChunk(*i); + } + --i; + } + } + } }; } // namespace @@ -189,6 +225,33 @@ class AnnotatingDescriptionPrinter { void swift::ide::printCodeCompletionResultDescriptionAnnotated( const CodeCompletionResult &Result, raw_ostream &OS, bool leadingPunctuation) { - AnnotatingDescriptionPrinter printer(OS); - printer.print(Result, leadingPunctuation); + AnnotatingResultPrinter printer(OS); + printer.printDescription(Result, leadingPunctuation); +} + + +void swift::ide::printCodeCompletionResultTypeName(const CodeCompletionResult &Result, + llvm::raw_ostream &OS) { + auto Chunks = Result.getCompletionString()->getChunks(); + + for (auto i = Chunks.begin(), e = Chunks.end(); i != e; ++i) { + + if (i->is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotation)) + OS << i->getText(); + + if (i->is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin)) { + auto nestingLevel = i->getNestingLevel(); + i++; + for (; i != e && !i->endsPreviousNestedGroup(nestingLevel); ++i) { + if (i->hasText()) + OS << i->getText(); + } + --i; + } + } +} + +void swift::ide::printCodeCompletionResultTypeNameAnnotated(const CodeCompletionResult &Result, llvm::raw_ostream &OS) { + AnnotatingResultPrinter printer(OS); + printer.printTypeName(Result); } diff --git a/lib/IDE/CompletionInstance.cpp b/lib/IDE/CompletionInstance.cpp index 09d0d6708c0c6..27da23c6c82cd 100644 --- a/lib/IDE/CompletionInstance.cpp +++ b/lib/IDE/CompletionInstance.cpp @@ -315,10 +315,12 @@ bool CompletionInstance::performCachedOperationIfPossible( auto tmpBufferID = tmpSM.addMemBufferCopy(completionBuffer); tmpSM.setCodeCompletionPoint(tmpBufferID, Offset); - LangOptions langOpts; + LangOptions langOpts = 
CI.getASTContext().LangOpts; langOpts.DisableParserLookup = true; - TypeCheckerOptions typeckOpts; - SearchPathOptions searchPathOpts; + // Ensure all non-function-body tokens are hashed into the interface hash + langOpts.EnableTypeFingerprints = false; + TypeCheckerOptions typeckOpts = CI.getASTContext().TypeCheckerOpts; + SearchPathOptions searchPathOpts = CI.getASTContext().SearchPathOpts; DiagnosticEngine tmpDiags(tmpSM); std::unique_ptr tmpCtx( ASTContext::get(langOpts, typeckOpts, searchPathOpts, tmpSM, tmpDiags)); @@ -327,13 +329,18 @@ bool CompletionInstance::performCachedOperationIfPossible( registerTypeCheckerRequestFunctions(tmpCtx->evaluator); registerSILGenRequestFunctions(tmpCtx->evaluator); ModuleDecl *tmpM = ModuleDecl::create(Identifier(), *tmpCtx); - SourceFile *tmpSF = new (*tmpCtx) SourceFile(*tmpM, oldSF->Kind, tmpBufferID); + SourceFile *tmpSF = new (*tmpCtx) + SourceFile(*tmpM, oldSF->Kind, tmpBufferID, /*KeepParsedTokens=*/false, + /*BuildSyntaxTree=*/false, oldSF->getParsingOptions()); tmpSF->enableInterfaceHash(); - // Ensure all non-function-body tokens are hashed into the interface hash - tmpCtx->LangOpts.EnableTypeFingerprints = false; - // Couldn't find any completion token? + // FIXME: Since we don't setup module loaders on the temporary AST context, + // 'canImport()' conditional compilation directive always fails. That causes + // interface hash change and prevents fast-completion. + + // Parse and get the completion context. auto *newState = tmpSF->getDelayedParserState(); + // Couldn't find any completion token? if (!newState->hasCodeCompletionDelayedDeclState()) return false; diff --git a/lib/IDE/ExprContextAnalysis.cpp b/lib/IDE/ExprContextAnalysis.cpp index f34fa82665b14..a8288abf7a917 100644 --- a/lib/IDE/ExprContextAnalysis.cpp +++ b/lib/IDE/ExprContextAnalysis.cpp @@ -393,12 +393,16 @@ static void collectPossibleCalleesByQualifiedLookup( tyExpr->setType(nullptr); } - auto baseTyOpt = getTypeOfCompletionContextExpr( - DC.getASTContext(), &DC, CompletionTypeCheckKind::Normal, baseExpr, ref); - if (!baseTyOpt) - return; - - auto baseTy = (*baseTyOpt)->getWithoutSpecifierType(); + Type baseTy = baseExpr->getType(); + if (!baseTy || baseTy->is()) { + auto baseTyOpt = getTypeOfCompletionContextExpr( + DC.getASTContext(), &DC, CompletionTypeCheckKind::Normal, baseExpr, + ref); + if (!baseTyOpt) + return; + baseTy = *baseTyOpt; + } + baseTy = baseTy->getWithoutSpecifierType(); if (!baseTy->getMetatypeInstanceType()->mayHaveMembers()) return; diff --git a/lib/IDE/REPLCodeCompletion.cpp b/lib/IDE/REPLCodeCompletion.cpp index b6c0025ebf3a5..34bad673dfc98 100644 --- a/lib/IDE/REPLCodeCompletion.cpp +++ b/lib/IDE/REPLCodeCompletion.cpp @@ -81,6 +81,7 @@ static std::string toInsertableString(CodeCompletionResult *Result) { case CodeCompletionString::Chunk::ChunkKind::GenericParameterBegin: case CodeCompletionString::Chunk::ChunkKind::GenericParameterName: case CodeCompletionString::Chunk::ChunkKind::TypeAnnotation: + case CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin: return Str; case CodeCompletionString::Chunk::ChunkKind::CallParameterClosureExpr: @@ -108,7 +109,8 @@ static void toDisplayString(CodeCompletionResult *Result, OS << C.getText(); continue; } - if (C.getKind() == CodeCompletionString::Chunk::ChunkKind::TypeAnnotation) { + if (C.is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotation) || + C.is(CodeCompletionString::Chunk::ChunkKind::TypeAnnotationBegin)) { if (Result->getKind() == CodeCompletionResult::Declaration) { switch 
(Result->getAssociatedDeclKind()) { case CodeCompletionDeclKind::Module: @@ -150,7 +152,8 @@ static void toDisplayString(CodeCompletionResult *Result, } else { OS << ": "; } - OS << C.getText(); + if (C.hasText()) + OS << C.getText(); } } } diff --git a/lib/IDE/Refactoring.cpp b/lib/IDE/Refactoring.cpp index 469fb6bef7fa7..8481c06a5c55a 100644 --- a/lib/IDE/Refactoring.cpp +++ b/lib/IDE/Refactoring.cpp @@ -2801,7 +2801,8 @@ bool RefactoringActionConvertToTernaryExpr::performChange() { /// these stubs should be filled. class FillProtocolStubContext { - std::vector getUnsatisfiedRequirements(const DeclContext *DC); + std::vector + getUnsatisfiedRequirements(const IterableDeclContext *IDC); /// Context in which the content should be filled; this could be either a /// nominal type declaraion or an extension declaration. @@ -2872,12 +2873,12 @@ getContextFromCursorInfo(ResolvedCursorInfo CursorInfo) { } std::vector FillProtocolStubContext:: -getUnsatisfiedRequirements(const DeclContext *DC) { +getUnsatisfiedRequirements(const IterableDeclContext *IDC) { // The results to return. std::vector NonWitnessedReqs; // For each conformance of the extended nominal. - for(ProtocolConformance *Con : DC->getLocalConformances()) { + for(ProtocolConformance *Con : IDC->getLocalConformances()) { // Collect non-witnessed requirements. Con->forEachNonWitnessedRequirement( diff --git a/lib/IRGen/ClassMetadataVisitor.h b/lib/IRGen/ClassMetadataVisitor.h index 3c82701e78448..0b9b5e145d30c 100644 --- a/lib/IRGen/ClassMetadataVisitor.h +++ b/lib/IRGen/ClassMetadataVisitor.h @@ -49,6 +49,9 @@ template class ClassMetadataVisitor public: void layout() { + static_assert(MetadataAdjustmentIndex::Class == 2, + "Adjustment index must be synchronized with this layout"); + // HeapMetadata header. asImpl().addDestructorFunction(); diff --git a/lib/IRGen/EnumMetadataVisitor.h b/lib/IRGen/EnumMetadataVisitor.h index 83a0d6d288bed..9d21ba830447f 100644 --- a/lib/IRGen/EnumMetadataVisitor.h +++ b/lib/IRGen/EnumMetadataVisitor.h @@ -44,6 +44,9 @@ template class EnumMetadataVisitor public: void layout() { + static_assert(MetadataAdjustmentIndex::ValueType == 1, + "Adjustment index must be synchronized with this layout"); + // Metadata header. super::layout(); diff --git a/lib/IRGen/Fulfillment.h b/lib/IRGen/Fulfillment.h index 6ea5e56d3153d..c43b3b3f39ce2 100644 --- a/lib/IRGen/Fulfillment.h +++ b/lib/IRGen/Fulfillment.h @@ -68,7 +68,7 @@ class FulfillmentMap { virtual bool hasLimitedInterestingConformances(CanType type) const = 0; /// Return the limited interesting conformances for an interesting type. - virtual GenericSignature::ConformsToArray + virtual GenericSignature::RequiredProtocols getInterestingConformances(CanType type) const = 0; /// Return the limited interesting conformances for an interesting type. diff --git a/lib/IRGen/GenBuiltin.cpp b/lib/IRGen/GenBuiltin.cpp index cccd52fe0ec5d..c33d650f11239 100644 --- a/lib/IRGen/GenBuiltin.cpp +++ b/lib/IRGen/GenBuiltin.cpp @@ -122,6 +122,13 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin, Identifier FnId, SILType resultType, Explosion &args, Explosion &out, SubstitutionMap substitutions) { + if (Builtin.ID == BuiltinValueKind::COWBufferForReading) { + // Just forward the incoming argument. + assert(args.size() == 1 && "Expecting one incoming argument"); + out = std::move(args); + return; + } + if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteedEnd) { // Just consume the incoming argument. 
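// (Unlike COWBufferForReading above, which moves its single incoming value
// straight into the result explosion, UnsafeGuaranteedEnd only needs to drop
// its argument.)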
assert(args.size() == 1 && "Expecting one incoming argument"); diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp index f759e17cc9f5e..5c1d736fb99e4 100644 --- a/lib/IRGen/GenClass.cpp +++ b/lib/IRGen/GenClass.cpp @@ -1116,9 +1116,9 @@ namespace { /// Gather protocol records for all of the explicitly-specified Objective-C /// protocol conformances. - void visitConformances(DeclContext *dc) { + void visitConformances(const IterableDeclContext *idc) { llvm::SmallSetVector protocols; - for (auto conformance : dc->getLocalConformances( + for (auto conformance : idc->getLocalConformances( ConformanceLookupKind::OnlyExplicit)) { ProtocolDecl *proto = conformance->getProtocol(); getObjCProtocols(proto, protocols); diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp index 92704b474ffb8..f058afdbe91bf 100644 --- a/lib/IRGen/GenDecl.cpp +++ b/lib/IRGen/GenDecl.cpp @@ -913,9 +913,9 @@ IRGenModule::getConstantReferenceForProtocolDescriptor(ProtocolDecl *proto) { LinkEntity::forProtocolDescriptor(proto)); } -void IRGenModule::addLazyConformances(DeclContext *dc) { +void IRGenModule::addLazyConformances(const IterableDeclContext *idc) { for (const ProtocolConformance *conf : - dc->getLocalConformances(ConformanceLookupKind::All)) { + idc->getLocalConformances(ConformanceLookupKind::All)) { IRGen.addLazyWitnessTable(conf); } } @@ -1527,17 +1527,23 @@ void IRGenerator::emitDynamicReplacements() { llvm::SmallSet newUniqueOpaqueTypes; llvm::SmallSet origUniqueOpaqueTypes; for (auto *newFunc : DynamicReplacements) { - if (!newFunc->getLoweredFunctionType()->hasOpaqueArchetype()) + auto newResultTy = newFunc->getLoweredFunctionType() + ->getAllResultsInterfaceType() + .getASTType(); + if (!newResultTy->hasOpaqueArchetype()) continue; - CanType(newFunc->getLoweredFunctionType()).visit([&](CanType ty) { + newResultTy.visit([&](CanType ty) { if (auto opaque = ty->getAs()) if (newUniqueOpaqueTypes.insert(opaque).second) newFuncTypes.push_back(opaque); }); auto *origFunc = newFunc->getDynamicallyReplacedFunction(); assert(origFunc); - assert(origFunc->getLoweredFunctionType()->hasOpaqueArchetype()); - CanType(origFunc->getLoweredFunctionType()).visit([&](CanType ty) { + auto origResultTy = origFunc->getLoweredFunctionType() + ->getAllResultsInterfaceType() + .getASTType(); + assert(origResultTy->hasOpaqueArchetype()); + origResultTy.visit([&](CanType ty) { if (auto opaque = ty->getAs()) if (origUniqueOpaqueTypes.insert(opaque).second) origFuncTypes.push_back(opaque); diff --git a/lib/IRGen/GenExistential.cpp b/lib/IRGen/GenExistential.cpp index 4374b43d4794b..31238f50c0dac 100644 --- a/lib/IRGen/GenExistential.cpp +++ b/lib/IRGen/GenExistential.cpp @@ -1075,6 +1075,10 @@ class ClassExistentialTypeInfo final (void)e.claim(getNumStoredProtocols()); } + virtual ReferenceCounting getReferenceCountingType() const override { + return Refcounting; + } + // We can just re-use the reference storage types. 
#define NEVER_LOADABLE_CHECKED_REF_STORAGE_HELPER(Name, name) \ void name##LoadStrong(IRGenFunction &IGF, Address existential, \ diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp index 27a1fbf837a4f..2cb7ba801893d 100644 --- a/lib/IRGen/GenProto.cpp +++ b/lib/IRGen/GenProto.cpp @@ -101,8 +101,8 @@ class PolymorphicConvention { FulfillmentMap Fulfillments; - GenericSignature::ConformsToArray getConformsTo(Type t) { - return Generics->getConformsTo(t); + GenericSignature::RequiredProtocols getRequiredProtocols(Type t) { + return Generics->getRequiredProtocols(t); } CanType getSuperclassBound(Type t) { @@ -166,9 +166,9 @@ class PolymorphicConvention { bool hasLimitedInterestingConformances(CanType type) const override { return true; } - GenericSignature::ConformsToArray + GenericSignature::RequiredProtocols getInterestingConformances(CanType type) const override { - return Self.getConformsTo(type); + return Self.getRequiredProtocols(type); } CanType getSuperclassBound(CanType type) const override { return Self.getSuperclassBound(type); @@ -1203,7 +1203,7 @@ class AccessorConformanceInfo : public ConformanceInfo { bool hasLimitedInterestingConformances(CanType type) const override { return false; } - GenericSignature::ConformsToArray + GenericSignature::RequiredProtocols getInterestingConformances(CanType type) const override { llvm_unreachable("no limits"); } diff --git a/lib/IRGen/HeapTypeInfo.h b/lib/IRGen/HeapTypeInfo.h index ca4f3e9dfcf8f..1b6640c844607 100644 --- a/lib/IRGen/HeapTypeInfo.h +++ b/lib/IRGen/HeapTypeInfo.h @@ -228,6 +228,10 @@ class HeapTypeInfo return LoadedRef(ptr, true); } + ReferenceCounting getReferenceCountingType() const override { + return asDerived().getReferenceCounting(); + } + // Extra inhabitants of heap object pointers. bool mayHaveExtraInhabitants(IRGenModule &IGM) const override { diff --git a/lib/IRGen/IRGenDebugInfo.cpp b/lib/IRGen/IRGenDebugInfo.cpp index a921d9176a32f..41a640f3e96c3 100644 --- a/lib/IRGen/IRGenDebugInfo.cpp +++ b/lib/IRGen/IRGenDebugInfo.cpp @@ -30,11 +30,11 @@ #include "swift/ClangImporter/ClangImporter.h" #include "swift/ClangImporter/ClangModule.h" #include "swift/Demangling/ManglingMacros.h" -#include "swift/Serialization/SerializedModuleLoader.h" #include "swift/SIL/SILArgument.h" #include "swift/SIL/SILBasicBlock.h" #include "swift/SIL/SILDebugScope.h" #include "swift/SIL/SILModule.h" +#include "swift/Serialization/SerializedModuleLoader.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/ExternalASTSource.h" @@ -140,7 +140,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { llvm::DenseSet PreviousLineEntries; SILLocation::DebugLoc PreviousDebugLoc; #endif - + public: IRGenDebugInfoImpl(const IRGenOptions &Opts, ClangImporter &CI, IRGenModule &IGM, llvm::Module &M, @@ -484,8 +484,8 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { } SmallVector Buf; - StringRef Name = (VD->getBaseName().userFacingName() + - Twine(Kind)).toStringRef(Buf); + StringRef Name = + (VD->getBaseName().userFacingName() + Twine(Kind)).toStringRef(Buf); return BumpAllocatedString(Name); } @@ -518,8 +518,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto FnTy = SILTy.getAs(); if (!FnTy) { LLVM_DEBUG(llvm::dbgs() << "Unexpected function type: "; - SILTy.print(llvm::dbgs()); - llvm::dbgs() << "\n"); + SILTy.print(llvm::dbgs()); llvm::dbgs() << "\n"); return CanSILFunctionType(); } @@ -564,7 +563,8 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { // Create a Forward-declared type. 
auto Loc = getDebugLoc(*this, NTD); auto File = getOrCreateFile(Loc.Filename); - auto Line = Loc.Line; + // No line numbers are attached to type forward declarations. + auto Line = 0; auto FwdDecl = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_structure_type, NTD->getName().str(), getOrCreateContext(DC->getParent()), File, Line, @@ -581,7 +581,8 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { void createParameterType(llvm::SmallVectorImpl &Parameters, SILType type) { auto RealType = type.getASTType(); - auto DbgTy = DebugTypeInfo::getFromTypeInfo(RealType, IGM.getTypeInfo(type)); + auto DbgTy = + DebugTypeInfo::getFromTypeInfo(RealType, IGM.getTypeInfo(type)); Parameters.push_back(getOrCreateType(DbgTy)); } @@ -720,7 +721,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { return Desc; return None; } - + llvm::DIModule *getOrCreateModule(ModuleDecl::ImportedModule IM) { ModuleDecl *M = IM.importedModule; if (Optional ModuleDesc = getClangModule(*M)) @@ -794,15 +795,15 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { break; } - + // TODO: Eliminate substitutions in SILFunctionTypes for now. // On platforms where the substitutions affect representation, we will need // to preserve this info and teach type reconstruction about it. - Ty = Ty->replaceSubstitutedSILFunctionTypesWithUnsubstituted(IGM.getSILModule()); + Ty = Ty->replaceSubstitutedSILFunctionTypesWithUnsubstituted( + IGM.getSILModule()); Mangle::ASTMangler Mangler; - std::string Result = Mangler.mangleTypeForDebugger( - Ty, nullptr); + std::string Result = Mangler.mangleTypeForDebugger(Ty, nullptr); if (!Opts.DisableRoundTripDebugTypes) { // Make sure we can reconstruct mangled types for the debugger. @@ -844,17 +845,17 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { return DITy; } - llvm::DINodeArray - getTupleElements(TupleType *TupleTy, llvm::DIScope *Scope, llvm::DIFile *File, - llvm::DINode::DIFlags Flags, unsigned &SizeInBits) { + llvm::DINodeArray getTupleElements(TupleType *TupleTy, llvm::DIScope *Scope, + llvm::DIFile *File, + llvm::DINode::DIFlags Flags, + unsigned &SizeInBits) { SmallVector Elements; unsigned OffsetInBits = 0; auto genericSig = IGM.getCurGenericContext(); for (auto ElemTy : TupleTy->getElementTypes()) { auto &elemTI = IGM.getTypeInfoForUnlowered( AbstractionPattern(genericSig, ElemTy->getCanonicalType()), ElemTy); - auto DbgTy = - DebugTypeInfo::getFromTypeInfo(ElemTy, elemTI); + auto DbgTy = DebugTypeInfo::getFromTypeInfo(ElemTy, elemTI); Elements.push_back(createMemberType(DbgTy, StringRef(), OffsetInBits, Scope, File, Flags)); } @@ -1016,14 +1017,14 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { /// anchor any typedefs that may appear in parameters so they can be /// resolved in the debugger without needing to query the Swift module. llvm::DINodeArray collectGenericParams(BoundGenericType *BGT) { - SmallVector TemplateParams; - for (auto Param : BGT->getGenericArgs()) { - TemplateParams.push_back(DBuilder.createTemplateTypeParameter( - TheCU, "", getOrCreateType(DebugTypeInfo::getForwardDecl(Param)))); - } - return DBuilder.getOrCreateArray(TemplateParams); + SmallVector TemplateParams; + for (auto Param : BGT->getGenericArgs()) { + TemplateParams.push_back(DBuilder.createTemplateTypeParameter( + TheCU, "", getOrCreateType(DebugTypeInfo::getForwardDecl(Param)))); + } + return DBuilder.getOrCreateArray(TemplateParams); } - + /// Create a sized container for a sizeless type. 
Used to represent /// BoundGenericEnums that may have different sizes depending on what they are /// bound to, but still share a mangled name. @@ -1047,9 +1048,8 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { InnerTypeCache[UID] = llvm::TrackingMDNodeRef(UniqueType); } - llvm::Metadata *Elements[] = { - DBuilder.createMemberType(Scope, "", File, 0, SizeInBits, - AlignInBits, 0, Flags, UniqueType)}; + llvm::Metadata *Elements[] = {DBuilder.createMemberType( + Scope, "", File, 0, SizeInBits, AlignInBits, 0, Flags, UniqueType)}; return DBuilder.createStructType( Scope, "", File, Line, SizeInBits, AlignInBits, Flags, /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements), @@ -1229,8 +1229,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { if (!BaseTy) { LLVM_DEBUG(llvm::dbgs() << "Type without TypeBase: "; - DbgTy.getType()->dump(llvm::dbgs()); - llvm::dbgs() << "\n"); + DbgTy.getType()->dump(llvm::dbgs()); llvm::dbgs() << "\n"); if (!InternalType) { StringRef Name = ""; InternalType = DBuilder.createForwardDecl( @@ -1301,14 +1300,19 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *Decl = StructTy->getDecl(); auto L = getDebugLoc(*this, Decl); auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) return createStructType(DbgTy, Decl, StructTy, Scope, File, L.Line, - SizeInBits, AlignInBits, Flags, - nullptr, // DerivedFrom + SizeInBits, AlignInBits, Flags, nullptr, llvm::dwarf::DW_LANG_Swift, MangledName); else - return createOpaqueStruct(Scope, Decl->getName().str(), File, L.Line, - SizeInBits, AlignInBits, Flags, MangledName); + // No line numbers are attached to type forward declarations. This is + // intentional: It interfers with the efficacy of incremental builds. We + // don't want a whitespace change to an secondary file trigger a + // recompilation of the debug info of a primary source file. + return createOpaqueStruct(Scope, Decl->getName().str(), File, + FwdDeclLine, SizeInBits, AlignInBits, Flags, + MangledName); } case TypeKind::Class: { @@ -1318,10 +1322,11 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *ClassTy = BaseTy->castTo(); auto *Decl = ClassTy->getDecl(); auto L = getDebugLoc(*this, Decl); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; assert(SizeInBits == CI.getTargetInfo().getPointerWidth(0)); - return createPointerSizedStruct(Scope, Decl->getNameStr(), - getOrCreateFile(L.Filename), L.Line, - Flags, MangledName); + return createPointerSizedStruct(Scope, Decl->getNameStr(), File, + FwdDeclLine, Flags, MangledName); } case TypeKind::Protocol: { @@ -1329,40 +1334,43 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *Decl = ProtocolTy->getDecl(); // FIXME: (LLVM branch) This should probably be a DW_TAG_interface_type. auto L = getDebugLoc(*this, Decl); - auto File = getOrCreateFile(L.Filename); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; return createOpaqueStruct(Scope, Decl ? Decl->getNameStr() : MangledName, - File, L.Line, SizeInBits, AlignInBits, Flags, - MangledName); + File, FwdDeclLine, SizeInBits, AlignInBits, + Flags, MangledName); } case TypeKind::ProtocolComposition: { auto *Decl = DbgTy.getDecl(); auto L = getDebugLoc(*this, Decl); - auto File = getOrCreateFile(L.Filename); - - // FIXME: emit types - // auto ProtocolCompositionTy = BaseTy->castTo(); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; return createOpaqueStruct(Scope, Decl ? 
Decl->getNameStr() : MangledName, - File, L.Line, SizeInBits, AlignInBits, Flags, - MangledName); + File, FwdDeclLine, SizeInBits, AlignInBits, + Flags, MangledName); } case TypeKind::UnboundGeneric: { auto *UnboundTy = BaseTy->castTo(); auto *Decl = UnboundTy->getDecl(); auto L = getDebugLoc(*this, Decl); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; assert(SizeInBits == CI.getTargetInfo().getPointerWidth(0)); return createPointerSizedStruct(Scope, Decl ? Decl->getNameStr() : MangledName, - File, L.Line, Flags, MangledName); + File, FwdDeclLine, Flags, MangledName); } case TypeKind::BoundGenericStruct: { auto *StructTy = BaseTy->castTo(); auto *Decl = StructTy->getDecl(); auto L = getDebugLoc(*this, Decl); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; return createOpaqueStructWithSizedContainer( - Scope, Decl ? Decl->getNameStr() : "", File, L.Line, SizeInBits, + Scope, Decl ? Decl->getNameStr() : "", File, FwdDeclLine, SizeInBits, AlignInBits, Flags, MangledName, collectGenericParams(StructTy)); } @@ -1370,16 +1378,20 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *ClassTy = BaseTy->castTo(); auto *Decl = ClassTy->getDecl(); auto L = getDebugLoc(*this, Decl); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; + // TODO: We may want to peek at Decl->isObjC() and set this // attribute accordingly. assert(SizeInBits == CI.getTargetInfo().getPointerWidth(0)); return createPointerSizedStruct(Scope, Decl ? Decl->getNameStr() : MangledName, - File, L.Line, Flags, MangledName); + File, FwdDeclLine, Flags, MangledName); } case TypeKind::Tuple: { - // Tuples are also represented as structs. + // Tuples are also represented as structs. Since tuples are ephemeral + // (not nominal) they don't have a source location. if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) return createTuple(DbgTy, Scope, SizeInBits, AlignInBits, Flags, MangledName); @@ -1400,20 +1412,22 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { if (auto nested = dyn_cast(Archetype)) assocType = nested->getAssocType(); auto L = getDebugLoc(*this, assocType); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; auto Superclass = Archetype->getSuperclass(); auto DerivedFrom = Superclass.isNull() ? nullptr : getOrCreateDesugaredType(Superclass, DbgTy); auto FwdDecl = llvm::TempDIType(DBuilder.createReplaceableCompositeType( - llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, File, L.Line, - llvm::dwarf::DW_LANG_Swift, SizeInBits, AlignInBits, Flags, - MangledName)); + llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, File, + FwdDeclLine, llvm::dwarf::DW_LANG_Swift, SizeInBits, AlignInBits, + Flags, MangledName)); // Emit the protocols the archetypes conform to. 
SmallVector Protocols; for (auto *ProtocolDecl : Archetype->getConformsTo()) { - auto PTy = IGM.getLoweredType(ProtocolDecl->getInterfaceType()) - .getASTType(); + auto PTy = + IGM.getLoweredType(ProtocolDecl->getInterfaceType()).getASTType(); auto PDbgTy = DebugTypeInfo::getFromTypeInfo( ProtocolDecl->getInterfaceType(), IGM.getTypeInfoForLowered(PTy)); auto PDITy = getOrCreateType(PDbgTy); @@ -1421,7 +1435,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { DBuilder.createInheritance(FwdDecl.get(), PDITy, 0, 0, Flags)); } auto DITy = DBuilder.createStructType( - Scope, MangledName, File, L.Line, SizeInBits, AlignInBits, Flags, + Scope, MangledName, File, FwdDeclLine, SizeInBits, AlignInBits, Flags, DerivedFrom, DBuilder.getOrCreateArray(Protocols), llvm::dwarf::DW_LANG_Swift, nullptr, MangledName); @@ -1435,9 +1449,11 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { // storage. Flags |= llvm::DINode::FlagArtificial; auto L = getDebugLoc(*this, DbgTy.getDecl()); - auto File = getOrCreateFile(L.Filename); + auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; + return DBuilder.createStructType( - Scope, MangledName, File, L.Line, SizeInBits, AlignInBits, Flags, + Scope, MangledName, File, FwdDeclLine, SizeInBits, AlignInBits, Flags, nullptr, nullptr, llvm::dwarf::DW_LANG_Swift, nullptr, MangledName); } @@ -1457,12 +1473,15 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *Decl = EnumTy->getDecl(); auto L = getDebugLoc(*this, Decl); auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; + if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) return createEnumType(DbgTy, Decl, MangledName, Scope, File, L.Line, Flags); else - return createOpaqueStruct(Scope, Decl->getName().str(), File, L.Line, - SizeInBits, AlignInBits, Flags, MangledName); + return createOpaqueStruct(Scope, Decl->getName().str(), File, + FwdDeclLine, SizeInBits, AlignInBits, Flags, + MangledName); } case TypeKind::BoundGenericEnum: { @@ -1470,52 +1489,58 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { auto *Decl = EnumTy->getDecl(); auto L = getDebugLoc(*this, Decl); auto *File = getOrCreateFile(L.Filename); + unsigned FwdDeclLine = 0; + return createOpaqueStructWithSizedContainer( - Scope, Decl->getName().str(), File, L.Line, SizeInBits, AlignInBits, - Flags, MangledName, collectGenericParams(EnumTy)); + Scope, Decl->getName().str(), File, FwdDeclLine, SizeInBits, + AlignInBits, Flags, MangledName, collectGenericParams(EnumTy)); } case TypeKind::BuiltinVector: { - (void)MangledName; // FIXME emit the name somewhere. + // FIXME: Emit the name somewhere. + (void)MangledName; auto *BuiltinVectorTy = BaseTy->castTo(); auto ElemTy = BuiltinVectorTy->getElementType(); auto ElemDbgTy = DebugTypeInfo::getFromTypeInfo( ElemTy, IGM.getTypeInfoForUnlowered(ElemTy)); unsigned Count = BuiltinVectorTy->getNumElements(); auto Subscript = DBuilder.getOrCreateSubrange(0, Count ? Count : -1); - return DBuilder.createVectorType(SizeInBits, - AlignInBits, getOrCreateType(ElemDbgTy), + return DBuilder.createVectorType(SizeInBits, AlignInBits, + getOrCreateType(ElemDbgTy), DBuilder.getOrCreateArray(Subscript)); } // Reference storage types. -#define REF_STORAGE(Name, ...) \ - case TypeKind::Name##Storage: +#define REF_STORAGE(Name, ...) 
case TypeKind::Name##Storage: #include "swift/AST/ReferenceStorage.def" - { - auto *ReferenceTy = cast(BaseTy); - auto CanTy = ReferenceTy->getReferentType(); - auto L = getDebugLoc(*this, DbgTy.getDecl()); - auto File = getOrCreateFile(L.Filename); - return DBuilder.createTypedef(getOrCreateDesugaredType(CanTy, DbgTy), - MangledName, File, L.Line, File); - } + { + auto *ReferenceTy = cast(BaseTy); + auto CanTy = ReferenceTy->getReferentType(); + auto L = getDebugLoc(*this, DbgTy.getDecl()); + auto *File = getOrCreateFile(L.Filename); + unsigned CompilerGeneratedLine = 0; + + return DBuilder.createTypedef(getOrCreateDesugaredType(CanTy, DbgTy), + MangledName, File, CompilerGeneratedLine, + File); + } - // Sugared types. + // Sugared types. case TypeKind::TypeAlias: { auto *TypeAliasTy = cast(BaseTy); auto *Decl = TypeAliasTy->getDecl(); auto L = getDebugLoc(*this, Decl); auto AliasedTy = TypeAliasTy->getSinglyDesugaredType(); - auto File = getOrCreateFile(L.Filename); + auto *File = getOrCreateFile(L.Filename); + // For TypeAlias types, the DeclContext for the aliased type is // in the decl of the alias type. DebugTypeInfo AliasedDbgTy(AliasedTy, DbgTy.getStorageType(), DbgTy.getSize(), DbgTy.getAlignment(), DbgTy.hasDefaultAlignment(), false); return DBuilder.createTypedef(getOrCreateType(AliasedDbgTy), MangledName, - File, L.Line, Scope); + File, 0, Scope); } case TypeKind::Paren: { @@ -1536,9 +1561,8 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { case TypeKind::GenericTypeParam: { // FIXME: Provide a more meaningful debug type. return DBuilder.createStructType( - Scope, MangledName, File, 0, SizeInBits, AlignInBits, Flags, - nullptr, nullptr, - llvm::dwarf::DW_LANG_Swift, nullptr, MangledName); + Scope, MangledName, File, 0, SizeInBits, AlignInBits, Flags, nullptr, + nullptr, llvm::dwarf::DW_LANG_Swift, nullptr, MangledName); } // The following types exist primarily for internal use by the type @@ -1554,8 +1578,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { case TypeKind::BuiltinUnsafeValueBuffer: LLVM_DEBUG(llvm::dbgs() << "Unhandled type: "; - DbgTy.getType()->dump(llvm::dbgs()); - llvm::dbgs() << "\n"); + DbgTy.getType()->dump(llvm::dbgs()); llvm::dbgs() << "\n"); MangledName = ""; } return DBuilder.createBasicType(MangledName, SizeInBits, Encoding); @@ -1676,7 +1699,7 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { llvm::dwarf::DW_LANG_Swift, 0, 0, llvm::DINode::FlagFwdDecl, MangledName); ReplaceMap.emplace_back( - std::piecewise_construct, std::make_tuple(DbgTy.getType()), + std::piecewise_construct, std::make_tuple(DbgTy.getType()), std::make_tuple(static_cast(FwdDecl))); return FwdDecl; } @@ -1751,12 +1774,11 @@ IRGenDebugInfoImpl::IRGenDebugInfoImpl(const IRGenOptions &Opts, } TheCU = DBuilder.createCompileUnit( - Lang, MainFile, - Producer, Opts.shouldOptimize(), Opts.getDebugFlags(PD), + Lang, MainFile, Producer, Opts.shouldOptimize(), Opts.getDebugFlags(PD), MajorRuntimeVersion, SplitName, Opts.DebugInfoLevel > IRGenDebugInfoLevel::LineTables ? 
llvm::DICompileUnit::FullDebug - : llvm::DICompileUnit::LineTablesOnly, + : llvm::DICompileUnit::LineTablesOnly, /* DWOId */ 0, /* SplitDebugInlining */ true, /* DebugInfoForProfiling */ false, llvm::DICompileUnit::DebugNameTableKind::Default, @@ -1792,9 +1814,14 @@ IRGenDebugInfoImpl::IRGenDebugInfoImpl(const IRGenOptions &Opts, OS << '"'; for (char c : Macro) switch (c) { - case '\\': OS << "\\\\"; break; - case '"': OS << "\\\""; break; - default: OS << c; + case '\\': + OS << "\\\\"; + break; + case '"': + OS << "\\\""; + break; + default: + OS << c; } OS << '"'; } @@ -1926,20 +1953,20 @@ void IRGenDebugInfoImpl::addFailureMessageToCurrentLoc(IRBuilder &Builder, FuncName += failureMsg; llvm::DISubprogram *TrapSP = DBuilder.createFunction( - MainModule, FuncName, StringRef(), TrapLoc->getFile(), 0, DIFnTy, 0, - llvm::DINode::FlagArtificial, llvm::DISubprogram::SPFlagDefinition, - nullptr, nullptr, nullptr); + MainModule, FuncName, StringRef(), TrapLoc->getFile(), 0, DIFnTy, 0, + llvm::DINode::FlagArtificial, llvm::DISubprogram::SPFlagDefinition, + nullptr, nullptr, nullptr); ScopeCache[TrapSc] = llvm::TrackingMDNodeRef(TrapSP); LastScope = TrapSc; - + assert(parentScopesAreSane(TrapSc) && "parent scope sanity check failed"); - + // Wrap the existing TrapLoc into the failure function. auto DL = llvm::DebugLoc::get(0, 0, TrapSP, TrapLoc); Builder.SetCurrentDebugLocation(DL); } - + void IRGenDebugInfoImpl::clearLoc(IRBuilder &Builder) { LastDebugLoc = {}; LastScope = nullptr; @@ -2259,7 +2286,7 @@ void IRGenDebugInfoImpl::emitVariableDeclaration( llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero; if (Artificial || DITy->isArtificial() || DITy == InternalType) Flags |= llvm::DINode::FlagArtificial; - + // This could be Opts.Optimize if we would also unique DIVariables here. bool Optimized = false; // Create the descriptor for the variable. @@ -2385,8 +2412,7 @@ void IRGenDebugInfoImpl::emitGlobalVariableDeclaration( void IRGenDebugInfoImpl::emitTypeMetadata(IRGenFunction &IGF, llvm::Value *Metadata, unsigned Depth, - unsigned Index, - StringRef Name) { + unsigned Index, StringRef Name) { if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables) return; @@ -2424,12 +2450,10 @@ std::unique_ptr IRGenDebugInfo::createIRGenDebugInfo( const IRGenOptions &Opts, ClangImporter &CI, IRGenModule &IGM, llvm::Module &M, StringRef MainOutputFilenameForDebugInfo, StringRef PrivateDiscriminator) { - return std::make_unique(Opts, CI, IGM, M, - MainOutputFilenameForDebugInfo, - PrivateDiscriminator); + return std::make_unique( + Opts, CI, IGM, M, MainOutputFilenameForDebugInfo, PrivateDiscriminator); } - IRGenDebugInfo::~IRGenDebugInfo() {} // Forwarding to the private implementation. 
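A minimal sketch of the pattern used by the debug-info hunks above, which pass a fixed FwdDeclLine of 0 instead of the declaration's source line when emitting opaque or forward-declared struct debug types. It assumes LLVM's DIBuilder API as already used in this patch; the helper name emitOpaqueStruct is hypothetical.

    #include "llvm/BinaryFormat/Dwarf.h"
    #include "llvm/IR/DIBuilder.h"

    // Sketch: emit an opaque struct debug type with no source line.  DWARF
    // conventionally treats line 0 as "no location", so the forward
    // declaration is not attributed to a possibly unrelated source line.
    static llvm::DICompositeType *
    emitOpaqueStruct(llvm::DIBuilder &DBuilder, llvm::DIScope *Scope,
                     llvm::DIFile *File, llvm::StringRef MangledName,
                     uint64_t SizeInBits, uint32_t AlignInBits,
                     llvm::DINode::DIFlags Flags) {
      unsigned FwdDeclLine = 0; // deliberately not the decl's line
      return DBuilder.createStructType(
          Scope, MangledName, File, FwdDeclLine, SizeInBits, AlignInBits,
          Flags, /*DerivedFrom=*/nullptr, /*Elements=*/nullptr,
          llvm::dwarf::DW_LANG_Swift, /*VTableHolder=*/nullptr, MangledName);
    }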
@@ -2444,8 +2468,8 @@ void IRGenDebugInfo::setCurrentLoc(IRBuilder &Builder, const SILDebugScope *DS, void IRGenDebugInfo::addFailureMessageToCurrentLoc(IRBuilder &Builder, StringRef failureMsg) { - static_cast(this)-> - addFailureMessageToCurrentLoc(Builder, failureMsg); + static_cast(this)->addFailureMessageToCurrentLoc( + Builder, failureMsg); } void IRGenDebugInfo::clearLoc(IRBuilder &Builder) { @@ -2485,10 +2509,9 @@ void IRGenDebugInfo::emitImport(ImportDecl *D) { llvm::DISubprogram * IRGenDebugInfo::emitFunction(const SILDebugScope *DS, llvm::Function *Fn, SILFunctionTypeRepresentation Rep, SILType Ty, - DeclContext *DeclCtx, - GenericEnvironment *GE) { + DeclContext *DeclCtx, GenericEnvironment *GE) { return static_cast(this)->emitFunction(DS, Fn, Rep, Ty, - DeclCtx); + DeclCtx); } llvm::DISubprogram *IRGenDebugInfo::emitFunction(SILFunction &SILFn, @@ -2497,10 +2520,9 @@ llvm::DISubprogram *IRGenDebugInfo::emitFunction(SILFunction &SILFn, } void IRGenDebugInfo::emitArtificialFunction(IRBuilder &Builder, - llvm::Function *Fn, - SILType SILTy) { - static_cast(this)->emitArtificialFunction(Builder, - Fn, SILTy); + llvm::Function *Fn, SILType SILTy) { + static_cast(this)->emitArtificialFunction(Builder, Fn, + SILTy); } void IRGenDebugInfo::emitVariableDeclaration( @@ -2531,8 +2553,8 @@ void IRGenDebugInfo::emitGlobalVariableDeclaration( void IRGenDebugInfo::emitTypeMetadata(IRGenFunction &IGF, llvm::Value *Metadata, unsigned Depth, unsigned Index, StringRef Name) { - static_cast(this)->emitTypeMetadata( - IGF, Metadata, Depth, Index, Name); + static_cast(this)->emitTypeMetadata(IGF, Metadata, + Depth, Index, Name); } llvm::DIBuilder &IRGenDebugInfo::getBuilder() { diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index 5f5a644923558..e8d72437d1106 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -55,6 +55,7 @@ #include "ConformanceDescription.h" #include "GenDecl.h" #include "GenEnum.h" +#include "GenMeta.h" #include "GenPointerAuth.h" #include "GenIntegerLiteral.h" #include "GenType.h" @@ -342,6 +343,8 @@ IRGenModule::IRGenModule(IRGenerator &irgen, // A full type metadata record is basically just an adjustment to the // address point of a type metadata. Resilience may cause // additional data to be laid out prior to this address point. + static_assert(MetadataAdjustmentIndex::ValueType == 1, + "Adjustment index must be synchronized with this layout"); FullTypeMetadataStructTy = createStructType(*this, "swift.full_type", { WitnessTablePtrTy, TypeMetadataStructTy @@ -354,6 +357,8 @@ IRGenModule::IRGenModule(IRGenerator &irgen, // A full heap metadata is basically just an additional small prefix // on a full metadata, used for metadata corresponding to heap // allocations. 
+ static_assert(MetadataAdjustmentIndex::Class == 2, + "Adjustment index must be synchronized with this layout"); FullHeapMetadataStructTy = createStructType(*this, "swift.full_heapmetadata", { dtorPtrTy, @@ -1214,14 +1219,9 @@ static bool isFirstObjectFileInModule(IRGenModule &IGM) { if (IGM.getSILModule().isWholeModule()) return IGM.IRGen.getPrimaryIGM() == &IGM; - const DeclContext *DC = IGM.getSILModule().getAssociatedContext(); - if (!DC) - return false; - - assert(!isa(DC) && "that would be a whole module build"); - assert(isa(DC) && "compiling something smaller than a file?"); - ModuleDecl *containingModule = cast(DC)->getParentModule(); - return containingModule->getFiles().front() == DC; + auto *file = cast(IGM.getSILModule().getAssociatedContext()); + auto *containingModule = file->getParentModule(); + return containingModule->getFiles().front() == file; } void IRGenModule::emitAutolinkInfo() { diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 125efea4d0d4d..a04212ba9b958 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -1594,8 +1594,9 @@ private: \ void addRuntimeResolvableType(GenericTypeDecl *nominal); void maybeEmitOpaqueTypeDecl(OpaqueTypeDecl *opaque); - /// Add all conformances of the given \c DeclContext LazyWitnessTables. - void addLazyConformances(DeclContext *dc); + /// Add all conformances of the given \c IterableDeclContext + /// LazyWitnessTables. + void addLazyConformances(const IterableDeclContext *idc); //--- Global context emission -------------------------------------------------- public: diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index 6728276874615..04406818b278f 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -975,6 +975,8 @@ class IRGenSILFunction : void visitStrongRetainInst(StrongRetainInst *i); void visitStrongReleaseInst(StrongReleaseInst *i); void visitIsUniqueInst(IsUniqueInst *i); + void visitBeginCOWMutationInst(BeginCOWMutationInst *i); + void visitEndCOWMutationInst(EndCOWMutationInst *i); void visitIsEscapingClosureInst(IsEscapingClosureInst *i); void visitDeallocStackInst(DeallocStackInst *i); void visitDeallocBoxInst(DeallocBoxInst *i); @@ -2659,16 +2661,20 @@ void IRGenSILFunction::visitPartialApplyInst(swift::PartialApplyInst *i) { if (isSimplePartialApply(*this, i)) { Explosion function; - auto schema = IGM.getTypeInfo(v->getType()).getSchema(); + auto &ti = IGM.getTypeInfo(v->getType()); + auto schema = ti.getSchema(); assert(schema.size() == 2); auto calleeTy = schema[0].getScalarType(); auto contextTy = schema[1].getScalarType(); - auto callee = getLoweredExplosion(i->getCallee()); auto calleeValue = callee.claimNext(); assert(callee.empty()); calleeValue = Builder.CreateBitOrPointerCast(calleeValue, calleeTy); - function.add(calleeValue); + + // Re-sign the implementation pointer as a closure entry point. 
+ auto calleeFn = FunctionPointer::forExplosionValue(*this, calleeValue, + i->getOrigCalleeType()); + function.add(calleeFn.getExplosionValue(*this, i->getFunctionType())); Explosion context; for (auto arg : i->getArguments()) { @@ -4159,6 +4165,42 @@ void IRGenSILFunction::visitIsUniqueInst(swift::IsUniqueInst *i) { setLoweredExplosion(i, out); } +void IRGenSILFunction::visitBeginCOWMutationInst(BeginCOWMutationInst *i) { + SILValue ref = i->getOperand(); + Explosion bufferEx = getLoweredExplosion(ref); + llvm::Value *buffer = *bufferEx.begin(); + setLoweredExplosion(i->getBufferResult(), bufferEx); + + Explosion isUnique; + if (hasReferenceSemantics(*this, ref->getType())) { + if (i->getUniquenessResult()->use_empty()) { + // No need to call isUnique if the result is not used. + isUnique.add(llvm::UndefValue::get(IGM.Int1Ty)); + } else { + ReferenceCounting style = cast( + getTypeInfo(ref->getType())).getReferenceCountingType(); + if (i->isNative()) + style = ReferenceCounting::Native; + + llvm::Value *castBuffer = + Builder.CreateBitCast(buffer, IGM.getReferenceType(style)); + + isUnique.add(emitIsUniqueCall(castBuffer, i->getLoc().getSourceLoc(), + /*isNonNull*/ true)); + } + } else { + emitTrap("beginCOWMutation called for a non-reference", + /*EmitUnreachable=*/false); + isUnique.add(llvm::UndefValue::get(IGM.Int1Ty)); + } + setLoweredExplosion(i->getUniquenessResult(), isUnique); +} + +void IRGenSILFunction::visitEndCOWMutationInst(EndCOWMutationInst *i) { + Explosion v = getLoweredExplosion(i->getOperand()); + setLoweredExplosion(i, v); +} + void IRGenSILFunction::visitIsEscapingClosureInst( swift::IsEscapingClosureInst *i) { // The closure operand is allowed to be an optional closure. diff --git a/lib/IRGen/LocalTypeData.cpp b/lib/IRGen/LocalTypeData.cpp index 28d0b71fc7e70..9270cb335ac77 100644 --- a/lib/IRGen/LocalTypeData.cpp +++ b/lib/IRGen/LocalTypeData.cpp @@ -497,7 +497,7 @@ void LocalTypeDataCache::addAbstractForTypeMetadata(IRGenFunction &IGF, bool hasLimitedInterestingConformances(CanType type) const override { return false; } - GenericSignature::ConformsToArray + GenericSignature::RequiredProtocols getInterestingConformances(CanType type) const override { llvm_unreachable("no limits"); } diff --git a/lib/IRGen/MetadataRequest.cpp b/lib/IRGen/MetadataRequest.cpp index 8d89f64ac4b98..5ab92e9dbd286 100644 --- a/lib/IRGen/MetadataRequest.cpp +++ b/lib/IRGen/MetadataRequest.cpp @@ -741,7 +741,7 @@ bool irgen::isNominalGenericContextTypeMetadataAccessTrivial( auto allWitnessTablesAreReferenceable = llvm::all_of(environment->getGenericParams(), [&](auto parameter) { auto signature = environment->getGenericSignature(); - auto protocols = signature->getConformsTo(parameter); + const auto protocols = signature->getRequiredProtocols(parameter); auto argument = ((Type *)parameter)->subst(substitutions); auto canonicalType = argument->getCanonicalType(); auto witnessTablesAreReferenceable = [&]() { diff --git a/lib/IRGen/NominalMetadataVisitor.h b/lib/IRGen/NominalMetadataVisitor.h index 212d7c65640c9..5670aed1755ed 100644 --- a/lib/IRGen/NominalMetadataVisitor.h +++ b/lib/IRGen/NominalMetadataVisitor.h @@ -20,6 +20,7 @@ #include "GenericRequirement.h" #include "GenProto.h" +#include "GenMeta.h" #include "IRGenModule.h" #include "MetadataVisitor.h" diff --git a/lib/IRGen/ReferenceTypeInfo.h b/lib/IRGen/ReferenceTypeInfo.h index 0595b85650223..78c65d00cb6dc 100644 --- a/lib/IRGen/ReferenceTypeInfo.h +++ b/lib/IRGen/ReferenceTypeInfo.h @@ -45,6 +45,10 @@ class ReferenceTypeInfo : 
public LoadableTypeInfo { virtual void strongRelease(IRGenFunction &IGF, Explosion &in, Atomicity atomicity) const = 0; + virtual ReferenceCounting getReferenceCountingType() const { + llvm_unreachable("not supported"); + } + #define REF_STORAGE_HELPER(Name) \ virtual const TypeInfo *create##Name##StorageType(TypeConverter &TC, \ bool isOptional) const = 0; diff --git a/lib/IRGen/StructMetadataVisitor.h b/lib/IRGen/StructMetadataVisitor.h index 91efa15f306b6..bf778cbe0949e 100644 --- a/lib/IRGen/StructMetadataVisitor.h +++ b/lib/IRGen/StructMetadataVisitor.h @@ -43,6 +43,9 @@ template class StructMetadataVisitor public: void layout() { + static_assert(MetadataAdjustmentIndex::ValueType == 1, + "Adjustment index must be synchronized with this layout"); + // Metadata header. super::layout(); diff --git a/lib/Index/Index.cpp b/lib/Index/Index.cpp index 63d696b14ab30..719f383871292 100644 --- a/lib/Index/Index.cpp +++ b/lib/Index/Index.cpp @@ -777,11 +777,12 @@ bool IndexSwiftASTWalker::visitImports( } bool IndexSwiftASTWalker::handleWitnesses(Decl *D, SmallVectorImpl &explicitWitnesses) { - auto DC = dyn_cast(D); - if (!DC) + const auto *const IDC = dyn_cast(D); + if (!IDC) return true; - for (auto *conf : DC->getLocalConformances()) { + const auto DC = IDC->getAsGenericContext(); + for (auto *conf : IDC->getLocalConformances()) { if (conf->isInvalid()) continue; diff --git a/lib/Parse/ParseIfConfig.cpp b/lib/Parse/ParseIfConfig.cpp index f8799b2ec9c43..ac68043a60193 100644 --- a/lib/Parse/ParseIfConfig.cpp +++ b/lib/Parse/ParseIfConfig.cpp @@ -676,6 +676,10 @@ ParserResult Parser::parseIfConfig( SmallVector Elements; llvm::SaveAndRestore S(InInactiveClauseEnvironment, InInactiveClauseEnvironment || !isActive); + // Disable updating the interface hash inside inactive blocks. + llvm::SaveAndRestore> T( + CurrentTokenHash, isActive ? CurrentTokenHash : nullptr); + if (isActive || !isVersionCondition) { parseElements(Elements, isActive); } else if (SyntaxContext->isEnabled()) { diff --git a/lib/Parse/ParsePattern.cpp b/lib/Parse/ParsePattern.cpp index 8b29e0e10d365..5a552a69dd7c7 100644 --- a/lib/Parse/ParsePattern.cpp +++ b/lib/Parse/ParsePattern.cpp @@ -1124,8 +1124,7 @@ ParserResult Parser::parsePatternTuple() { /// pattern-type-annotation ::= (':' type)? /// ParserResult Parser:: -parseOptionalPatternTypeAnnotation(ParserResult result, - bool isOptional) { +parseOptionalPatternTypeAnnotation(ParserResult result) { if (!Tok.is(tok::colon)) return result; @@ -1152,11 +1151,6 @@ parseOptionalPatternTypeAnnotation(ParserResult result, if (!repr) repr = new (Context) ErrorTypeRepr(PreviousLoc); - // In an if-let, the actual type of the expression is Optional of whatever - // was written. 
- if (isOptional) - repr = new (Context) OptionalTypeRepr(repr, SourceLoc()); - return makeParserResult(status, new (Context) TypedPattern(P, repr)); } diff --git a/lib/Parse/ParseRequests.cpp b/lib/Parse/ParseRequests.cpp index 73b92abe67b57..c424b959e9dde 100644 --- a/lib/Parse/ParseRequests.cpp +++ b/lib/Parse/ParseRequests.cpp @@ -48,7 +48,7 @@ void swift::simple_display(llvm::raw_ostream &out, FingerprintAndMembers ParseMembersRequest::evaluate(Evaluator &evaluator, IterableDeclContext *idc) const { - SourceFile &sf = *idc->getDecl()->getDeclContext()->getParentSourceFile(); + SourceFile &sf = *idc->getAsGenericContext()->getParentSourceFile(); unsigned bufferID = *sf.getBufferID(); // Lexer diaganostics have been emitted during skipping, so we disable lexer's diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp index 5c6dfdc92d8c2..414f6ef2d4c18 100644 --- a/lib/Parse/ParseStmt.cpp +++ b/lib/Parse/ParseStmt.cpp @@ -1502,8 +1502,7 @@ Parser::parseStmtConditionElement(SmallVectorImpl &result, P->setImplicit(); } - ThePattern = parseOptionalPatternTypeAnnotation(ThePattern, - BindingKindStr != "case"); + ThePattern = parseOptionalPatternTypeAnnotation(ThePattern); if (ThePattern.hasCodeCompletion()) Status.setHasCodeCompletion(); @@ -2123,7 +2122,7 @@ ParserResult Parser::parseStmtForEach(LabeledStmtInfo LabelInfo) { llvm::SaveAndRestore T(InVarOrLetPattern, Parser::IVOLP_InMatchingPattern); pattern = parseMatchingPattern(/*isExprBasic*/true); - pattern = parseOptionalPatternTypeAnnotation(pattern, /*isOptional*/false); + pattern = parseOptionalPatternTypeAnnotation(pattern); } else if (!IsCStyleFor || Tok.is(tok::kw_var)) { // Change the parser state to know that the pattern we're about to parse is // implicitly mutable. Bound variables can be changed to mutable explicitly diff --git a/lib/PrintAsObjC/ModuleContentsWriter.cpp b/lib/PrintAsObjC/ModuleContentsWriter.cpp index d850955c51e4b..a9b36541f31b2 100644 --- a/lib/PrintAsObjC/ModuleContentsWriter.cpp +++ b/lib/PrintAsObjC/ModuleContentsWriter.cpp @@ -71,8 +71,7 @@ class ReferencedTypeFinder : public TypeDeclFinder { if (sig->getSuperclassBound(paramTy)) return true; - auto conformsTo = sig->getConformsTo(paramTy); - return !conformsTo.empty(); + return !sig->getRequiredProtocols(paramTy).empty(); } Action visitBoundGenericType(BoundGenericType *boundGeneric) override { diff --git a/lib/SIL/CMakeLists.txt b/lib/SIL/CMakeLists.txt index c05e0dd9bb708..74ca0507393d3 100644 --- a/lib/SIL/CMakeLists.txt +++ b/lib/SIL/CMakeLists.txt @@ -1,39 +1,16 @@ - -set(SIL_SOURCES) - -function(_list_transform newvar) - set(sources ${ARGN}) - set(dir ${CMAKE_CURRENT_SOURCE_DIR}) - set(tmp) - foreach (s ${sources}) - list(APPEND tmp "${dir}/${s}") - endforeach() - set(${newvar} "${tmp}" PARENT_SCOPE) -endfunction() - -macro(sil_register_sources) - precondition(new_transformed_sources - NEGATE - MESSAGE "Expected this to be empty since we clear after each run") - _list_transform(new_transformed_sources ${ARGN}) - list_union("${SIL_SOURCES}" "${new_transformed_sources}" out) - set(SIL_SOURCES "${out}" PARENT_SCOPE) - set(new_transformed_sources) -endmacro() - -add_subdirectory(IR) -add_subdirectory(Utils) -add_subdirectory(Verifier) -add_subdirectory(Parser) - add_swift_host_library(swiftSIL STATIC - ${SIL_SOURCES}) + SIL.cpp) target_link_libraries(swiftSIL PUBLIC swiftDemangling) target_link_libraries(swiftSIL PRIVATE swiftSema swiftSerialization) +add_subdirectory(IR) +add_subdirectory(Utils) +add_subdirectory(Verifier) 
+add_subdirectory(Parser) + # intrinsics_gen is the LLVM tablegen target that generates the include files # where intrinsics and attributes are declared. swiftSIL depends on these # headers. diff --git a/lib/SIL/IR/AbstractionPattern.cpp b/lib/SIL/IR/AbstractionPattern.cpp index ce5d24fb5ab37..fd89e9d9d8d54 100644 --- a/lib/SIL/IR/AbstractionPattern.cpp +++ b/lib/SIL/IR/AbstractionPattern.cpp @@ -319,6 +319,8 @@ getClangFunctionType(const clang::Type *clangType) { clangType = ptrTy->getPointeeType().getTypePtr(); } else if (auto blockTy = clangType->getAs()) { clangType = blockTy->getPointeeType().getTypePtr(); + } else if (auto refTy = clangType->getAs()) { + clangType = refTy->getPointeeType().getTypePtr(); } return clangType->castAs(); } diff --git a/lib/SIL/IR/CMakeLists.txt b/lib/SIL/IR/CMakeLists.txt index 6c3c1b6ce6b05..d5b426ed47358 100644 --- a/lib/SIL/IR/CMakeLists.txt +++ b/lib/SIL/IR/CMakeLists.txt @@ -1,4 +1,4 @@ -sil_register_sources( +target_sources(swiftSIL PRIVATE AbstractionPattern.cpp Bridging.cpp Linker.cpp @@ -31,5 +31,4 @@ sil_register_sources( SILValue.cpp SILWitnessTable.cpp TypeLowering.cpp - ValueOwnership.cpp -) + ValueOwnership.cpp) diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp index dc5ba790c88b3..4befbd0596293 100644 --- a/lib/SIL/IR/OperandOwnership.cpp +++ b/lib/SIL/IR/OperandOwnership.cpp @@ -171,6 +171,8 @@ CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, DeallocExistentialBox) CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, DeallocRef) CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, DestroyValue) CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, EndLifetime) +CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, BeginCOWMutation) +CONSTANT_OWNERSHIP_INST(Owned, MustBeInvalidated, EndCOWMutation) CONSTANT_OWNERSHIP_INST(None, MustBeLive, AbortApply) CONSTANT_OWNERSHIP_INST(None, MustBeLive, AddressToPointer) CONSTANT_OWNERSHIP_INST(None, MustBeLive, BeginAccess) @@ -1021,6 +1023,7 @@ ANY_OWNERSHIP_BUILTIN(TypePtrAuthDiscriminator) ValueOwnershipKind::OWNERSHIP, \ UseLifetimeConstraint::USE_LIFETIME_CONSTRAINT); \ } +CONSTANT_OWNERSHIP_BUILTIN(Owned, MustBeInvalidated, COWBufferForReading) CONSTANT_OWNERSHIP_BUILTIN(Owned, MustBeInvalidated, UnsafeGuaranteed) #undef CONSTANT_OWNERSHIP_BUILTIN diff --git a/lib/SIL/IR/SIL.cpp b/lib/SIL/IR/SIL.cpp index 3cf225f96d411..e9c739af20805 100644 --- a/lib/SIL/IR/SIL.cpp +++ b/lib/SIL/IR/SIL.cpp @@ -127,12 +127,6 @@ bool SILModule::isTypeMetadataAccessible(CanType type) { // Private declarations are inaccessible from different files unless // this is WMO and we're in the same module. case FormalLinkage::Private: { - // The only time we don't have an associated DC is in the - // integrated REPL, where we also don't have a concept of other - // source files within the current module. - if (!AssociatedDeclContext) - return (decl->getModuleContext() != getSwiftModule()); - // The associated DC should be either a SourceFile or, in WMO mode, // a ModuleDecl. In the WMO modes, IRGen will ensure that private // declarations are usable throughout the module. Therefore, in diff --git a/lib/SIL/IR/SILBasicBlock.cpp b/lib/SIL/IR/SILBasicBlock.cpp index a817c15b1eab9..e352a97924f79 100644 --- a/lib/SIL/IR/SILBasicBlock.cpp +++ b/lib/SIL/IR/SILBasicBlock.cpp @@ -226,7 +226,9 @@ SILPhiArgument *SILBasicBlock::replacePhiArgumentAndReplaceAllUses( // any uses. 
SmallVector operands; SILValue undef = SILUndef::get(ty, *getParent()); - for (auto *use : getArgument(i)->getUses()) { + SILArgument *arg = getArgument(i); + while (!arg->use_empty()) { + Operand *use = *arg->use_begin(); use->set(undef); operands.push_back(use); } diff --git a/lib/SIL/IR/SILInstruction.cpp b/lib/SIL/IR/SILInstruction.cpp index 593858ce03784..24ab6b8fb1925 100644 --- a/lib/SIL/IR/SILInstruction.cpp +++ b/lib/SIL/IR/SILInstruction.cpp @@ -383,6 +383,16 @@ namespace { } return true; } + + bool visitBeginCOWMutationInst(const BeginCOWMutationInst *RHS) { + auto *left = cast(LHS); + return left->isNative() == RHS->isNative(); + } + + bool visitEndCOWMutationInst(const EndCOWMutationInst *RHS) { + auto *left = cast(LHS); + return left->doKeepUnique() == RHS->doKeepUnique(); + } bool visitAllocRefDynamicInst(const AllocRefDynamicInst *RHS) { return true; @@ -1126,6 +1136,7 @@ bool SILInstruction::mayRelease() const { bool SILInstruction::mayReleaseOrReadRefCount() const { switch (getKind()) { case SILInstructionKind::IsUniqueInst: + case SILInstructionKind::BeginCOWMutationInst: case SILInstructionKind::IsEscapingClosureInst: return true; default: diff --git a/lib/SIL/IR/SILInstructions.cpp b/lib/SIL/IR/SILInstructions.cpp index f72f53628ad8a..235b5098eb149 100644 --- a/lib/SIL/IR/SILInstructions.cpp +++ b/lib/SIL/IR/SILInstructions.cpp @@ -2166,6 +2166,36 @@ OpenExistentialValueInst::OpenExistentialValueInst(SILDebugLocation DebugLoc, SILType SelfTy) : UnaryInstructionBase(DebugLoc, Operand, SelfTy) {} +BeginCOWMutationInst::BeginCOWMutationInst(SILDebugLocation loc, + SILValue operand, + ArrayRef resultTypes, + ArrayRef resultOwnerships, + bool isNative) + : UnaryInstructionBase(loc, operand), + MultipleValueInstructionTrailingObjects(this, resultTypes, + resultOwnerships) { + assert(resultTypes.size() == 2 && resultOwnerships.size() == 2); + assert(operand->getType() == resultTypes[1]); + setNative(isNative); +} + +BeginCOWMutationInst * +BeginCOWMutationInst::create(SILDebugLocation loc, SILValue operand, + SILType boolTy, SILFunction &F, bool isNative) { + + SILType resultTypes[2] = { boolTy, operand->getType() }; + ValueOwnershipKind ownerships[2] = { ValueOwnershipKind::None, ValueOwnershipKind::Owned }; + + void *buffer = + allocateTrailingInst( + F, 1, 2); + return ::new(buffer) BeginCOWMutationInst(loc, operand, + ArrayRef(resultTypes, 2), + ArrayRef(ownerships, 2), + isNative); +} + UncheckedRefCastInst * UncheckedRefCastInst::create(SILDebugLocation DebugLoc, SILValue Operand, SILType Ty, SILFunction &F, diff --git a/lib/SIL/IR/SILModule.cpp b/lib/SIL/IR/SILModule.cpp index b061f08484d08..50aadb293da66 100644 --- a/lib/SIL/IR/SILModule.cpp +++ b/lib/SIL/IR/SILModule.cpp @@ -92,13 +92,18 @@ class SILModule::SerializationCallback final } }; -SILModule::SILModule(ModuleDecl *SwiftModule, TypeConverter &TC, - const SILOptions &Options, const DeclContext *associatedDC, - bool wholeModule) - : TheSwiftModule(SwiftModule), - AssociatedDeclContext(associatedDC), - Stage(SILStage::Raw), wholeModule(wholeModule), Options(Options), - serialized(false), SerializeSILAction(), Types(TC) { +SILModule::SILModule(llvm::PointerUnion context, + Lowering::TypeConverter &TC, const SILOptions &Options) + : Stage(SILStage::Raw), Options(Options), serialized(false), + SerializeSILAction(), Types(TC) { + assert(!context.isNull()); + if (auto *file = context.dyn_cast()) { + AssociatedDeclContext = file; + } else { + AssociatedDeclContext = context.get(); + } + TheSwiftModule = 
AssociatedDeclContext->getParentModule(); + // We always add the base SILModule serialization callback. std::unique_ptr callback( new SILModule::SerializationCallback()); @@ -122,11 +127,10 @@ SILModule::~SILModule() { } } -std::unique_ptr -SILModule::createEmptyModule(ModuleDecl *M, TypeConverter &TC, const SILOptions &Options, - bool WholeModule) { - return std::unique_ptr( - new SILModule(M, TC, Options, M, WholeModule)); +std::unique_ptr SILModule::createEmptyModule( + llvm::PointerUnion context, + Lowering::TypeConverter &TC, const SILOptions &Options) { + return std::unique_ptr(new SILModule(context, TC, Options)); } ASTContext &SILModule::getASTContext() const { diff --git a/lib/SIL/IR/SILPrinter.cpp b/lib/SIL/IR/SILPrinter.cpp index 452aca6f0e59e..4cb789766a4d6 100644 --- a/lib/SIL/IR/SILPrinter.cpp +++ b/lib/SIL/IR/SILPrinter.cpp @@ -1775,13 +1775,15 @@ class SILPrinter : public SILInstructionVisitor { *this << EI->getField()->getName().get(); } void visitRefElementAddrInst(RefElementAddrInst *EI) { - *this << getIDAndType(EI->getOperand()) << ", #"; + *this << (EI->isImmutable() ? "[immutable] " : "") + << getIDAndType(EI->getOperand()) << ", #"; printFullContext(EI->getField()->getDeclContext(), PrintState.OS); *this << EI->getField()->getName().get(); } void visitRefTailAddrInst(RefTailAddrInst *RTAI) { - *this << getIDAndType(RTAI->getOperand()) << ", " << RTAI->getTailType(); + *this << (RTAI->isImmutable() ? "[immutable] " : "") + << getIDAndType(RTAI->getOperand()) << ", " << RTAI->getTailType(); } void visitDestructureStructInst(DestructureStructInst *DSI) { @@ -1939,6 +1941,16 @@ class SILPrinter : public SILInstructionVisitor { void visitIsUniqueInst(IsUniqueInst *CUI) { *this << getIDAndType(CUI->getOperand()); } + void visitBeginCOWMutationInst(BeginCOWMutationInst *BCMI) { + if (BCMI->isNative()) + *this << "[native] "; + *this << getIDAndType(BCMI->getOperand()); + } + void visitEndCOWMutationInst(EndCOWMutationInst *ECMI) { + if (ECMI->doKeepUnique()) + *this << "[keep_unique] "; + *this << getIDAndType(ECMI->getOperand()); + } void visitIsEscapingClosureInst(IsEscapingClosureInst *CUI) { if (CUI->getVerificationType()) *this << "[objc] "; @@ -3520,6 +3532,9 @@ ID SILPrintContext::getID(const SILNode *node) { return {ID::SILUndef, 0}; SILBasicBlock *BB = node->getParentBlock(); + if (!BB) { + return { ID::Null, 0 }; + } if (SILFunction *F = BB->getParent()) { setContext(F); // Lazily initialize the instruction -> ID mapping. diff --git a/lib/SIL/IR/TypeLowering.cpp b/lib/SIL/IR/TypeLowering.cpp index 60b8274e90b08..576d1c4f75109 100644 --- a/lib/SIL/IR/TypeLowering.cpp +++ b/lib/SIL/IR/TypeLowering.cpp @@ -1446,6 +1446,10 @@ namespace { if (handleResilience(structType, D, properties)) return handleAddressOnly(structType, properties); + if (D->isCxxNotTriviallyCopyable()) { + properties.setAddressOnly(); + } + auto subMap = structType->getContextSubstitutionMap(&TC.M, D); // Classify the type according to its stored properties. 
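A note on the SILBasicBlock.cpp hunk above: the range-based loop over getArgument(i)->getUses() is replaced by a loop that drains the use list, because Operand::set() unlinks the operand from the old value's use list and can invalidate an in-flight iterator. A minimal sketch of the safe pattern, assuming the Swift SIL headers; the helper name detachUses is hypothetical.

    #include "swift/SIL/SILArgument.h"
    #include "llvm/ADT/SmallVector.h"

    // Re-point every use of `arg` at `undef`, remembering each operand so it
    // can later be attached to a replacement argument.  Re-reading
    // use_begin() on each iteration is safe because set() removes the
    // operand from arg's use list.
    static void detachUses(swift::SILArgument *arg, swift::SILValue undef,
                           llvm::SmallVectorImpl<swift::Operand *> &operands) {
      while (!arg->use_empty()) {
        swift::Operand *use = *arg->use_begin();
        use->set(undef);
        operands.push_back(use);
      }
    }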
diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index 992a481bea876..88981eddc91f0 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -74,6 +74,7 @@ CONSTANT_OWNERSHIP_INST(None, AllocValueBuffer) CONSTANT_OWNERSHIP_INST(Owned, CopyBlock) CONSTANT_OWNERSHIP_INST(Owned, CopyBlockWithoutEscaping) CONSTANT_OWNERSHIP_INST(Owned, CopyValue) +CONSTANT_OWNERSHIP_INST(Owned, EndCOWMutation) CONSTANT_OWNERSHIP_INST(Owned, KeyPath) CONSTANT_OWNERSHIP_INST(Owned, InitExistentialValue) CONSTANT_OWNERSHIP_INST(Owned, GlobalValue) // TODO: is this correct? @@ -299,6 +300,11 @@ ValueOwnershipKind ValueOwnershipKindClassifier::visitBeginApplyResult( return Result->getOwnershipKind(); } +ValueOwnershipKind ValueOwnershipKindClassifier::visitBeginCOWMutationResult( + BeginCOWMutationResult *Result) { + return Result->getOwnershipKind(); +} + ValueOwnershipKind ValueOwnershipKindClassifier::visitSILFunctionArgument( SILFunctionArgument *Arg) { return Arg->getOwnershipKind(); @@ -386,6 +392,7 @@ struct ValueOwnershipKindBuiltinVisitor } // This returns a value at +1 that is destroyed strictly /after/ the // UnsafeGuaranteedEnd. This provides the guarantee that we want. +CONSTANT_OWNERSHIP_BUILTIN(Owned, COWBufferForReading) CONSTANT_OWNERSHIP_BUILTIN(Owned, UnsafeGuaranteed) CONSTANT_OWNERSHIP_BUILTIN(None, AShr) CONSTANT_OWNERSHIP_BUILTIN(None, GenericAShr) diff --git a/lib/SIL/Parser/CMakeLists.txt b/lib/SIL/Parser/CMakeLists.txt index e864b5d499568..914bf44b479ba 100644 --- a/lib/SIL/Parser/CMakeLists.txt +++ b/lib/SIL/Parser/CMakeLists.txt @@ -1,4 +1,2 @@ -sil_register_sources( - ParseSIL.cpp -) - +target_sources(swiftSIL PRIVATE + ParseSIL.cpp) diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index 63d19c2f2107a..7aa8308e3b4ed 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -117,9 +117,8 @@ ParseSILModuleRequest::evaluate(Evaluator &evaluator, auto bufferID = SF->getBufferID(); assert(bufferID); - auto *mod = SF->getParentModule(); - auto silMod = SILModule::createEmptyModule(mod, desc.conv, desc.opts, - desc.isWholeModule()); + auto silMod = SILModule::createEmptyModule(desc.context, desc.conv, + desc.opts); SILParserState parserState(silMod.get()); Parser parser(*bufferID, *SF, parserState.Impl.get()); PrettyStackTraceParser StackTrace(parser); @@ -128,8 +127,7 @@ ParseSILModuleRequest::evaluate(Evaluator &evaluator, if (hadError) { // The rest of the SIL pipeline expects well-formed SIL, so if we encounter // a parsing error, just return an empty SIL module. 
- return SILModule::createEmptyModule(mod, desc.conv, desc.opts, - desc.isWholeModule()); + return SILModule::createEmptyModule(desc.context, desc.conv, desc.opts); } return silMod; } @@ -2996,6 +2994,24 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, #undef UNARY_INSTRUCTION #undef REFCOUNTING_INSTRUCTION + case SILInstructionKind::BeginCOWMutationInst: { + bool native = false; + if (parseSILOptional(native, *this, "native") || + parseTypedValueRef(Val, B) || + parseSILDebugLocation(InstLoc, B)) + return true; + ResultVal = B.createBeginCOWMutation(InstLoc, Val, native); + break; + } + case SILInstructionKind::EndCOWMutationInst: { + bool keepUnique = false; + if (parseSILOptional(keepUnique, *this, "keep_unique") || + parseTypedValueRef(Val, B) || + parseSILDebugLocation(InstLoc, B)) + return true; + ResultVal = B.createEndCOWMutation(InstLoc, Val, keepUnique); + break; + } case SILInstructionKind::IsEscapingClosureInst: { bool IsObjcVerifcationType = false; if (parseSILOptional(IsObjcVerifcationType, *this, "objc")) @@ -4459,7 +4475,9 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, case SILInstructionKind::RefElementAddrInst: { ValueDecl *FieldV; SourceLoc NameLoc; - if (parseTypedValueRef(Val, B) || + bool IsImmutable = false; + if (parseSILOptional(IsImmutable, *this, "immutable") || + parseTypedValueRef(Val, B) || P.parseToken(tok::comma, diag::expected_tok_in_sil_instr, ",") || parseSILDottedPath(FieldV) || parseSILDebugLocation(InstLoc, B)) return true; @@ -4470,18 +4488,21 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, VarDecl *Field = cast(FieldV); auto ResultTy = Val->getType().getFieldType(Field, SILMod, B.getTypeExpansionContext()); - ResultVal = B.createRefElementAddr(InstLoc, Val, Field, ResultTy); + ResultVal = B.createRefElementAddr(InstLoc, Val, Field, ResultTy, + IsImmutable); break; } case SILInstructionKind::RefTailAddrInst: { SourceLoc NameLoc; SILType ResultObjTy; - if (parseTypedValueRef(Val, B) || + bool IsImmutable = false; + if (parseSILOptional(IsImmutable, *this, "immutable") || + parseTypedValueRef(Val, B) || P.parseToken(tok::comma, diag::expected_tok_in_sil_instr, ",") || parseSILType(ResultObjTy) || parseSILDebugLocation(InstLoc, B)) return true; SILType ResultTy = ResultObjTy.getAddressType(); - ResultVal = B.createRefTailAddr(InstLoc, Val, ResultTy); + ResultVal = B.createRefTailAddr(InstLoc, Val, ResultTy, IsImmutable); break; } case SILInstructionKind::IndexAddrInst: { diff --git a/lib/SIL/SIL.cpp b/lib/SIL/SIL.cpp new file mode 100644 index 0000000000000..eb3bfb201fe4d --- /dev/null +++ b/lib/SIL/SIL.cpp @@ -0,0 +1,15 @@ +//===--- SIL.cpp ---------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// DO NOT MODIFY THIS FILE! +// The SIL library is split into sub-components, modify the respective +// sub-component. 
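A note on the new begin_cow_mutation instruction parsed above: it is a multiple-value instruction whose first result is the uniqueness bit and whose second result is the buffer reference itself (see BeginCOWMutationInst::create earlier in the patch). A minimal sketch of how a client might build and consume it, based only on the builder and accessor calls visible in the ParseSIL and IRGenSIL hunks; the helper name lowerBeginCOW is hypothetical.

    #include "swift/SIL/SILBuilder.h"

    // Build a begin_cow_mutation and pull out its two results.
    static void lowerBeginCOW(swift::SILBuilder &B, swift::SILLocation loc,
                              swift::SILValue buffer) {
      auto *bcm = B.createBeginCOWMutation(loc, buffer, /*isNative=*/true);
      swift::SILValue isUnique = bcm->getUniquenessResult();  // uniqueness bit
      swift::SILValue mutableBuf = bcm->getBufferResult();    // same type as buffer
      (void)isUnique;
      (void)mutableBuf;
    }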
diff --git a/lib/SIL/Utils/CMakeLists.txt b/lib/SIL/Utils/CMakeLists.txt index ddfee2ba3a31d..503af715ecab2 100644 --- a/lib/SIL/Utils/CMakeLists.txt +++ b/lib/SIL/Utils/CMakeLists.txt @@ -1,4 +1,4 @@ -sil_register_sources( +target_sources(swiftSIL PRIVATE BasicBlockUtils.cpp DebugUtils.cpp Dominance.cpp @@ -13,5 +13,4 @@ sil_register_sources( SILInstructionWorklist.cpp SILOpenedArchetypesTracker.cpp SILRemarkStreamer.cpp - ValueUtils.cpp -) + ValueUtils.cpp) diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 973cc216d8237..6f041f59995e3 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -528,6 +528,7 @@ static void visitBuiltinAddress(BuiltinInst *builtin, case BuiltinValueKind::Unreachable: case BuiltinValueKind::CondUnreachable: case BuiltinValueKind::DestroyArray: + case BuiltinValueKind::COWBufferForReading: case BuiltinValueKind::UnsafeGuaranteed: case BuiltinValueKind::UnsafeGuaranteedEnd: case BuiltinValueKind::Swift3ImplicitObjCEntrypoint: diff --git a/lib/SIL/Verifier/CMakeLists.txt b/lib/SIL/Verifier/CMakeLists.txt index 844eacf6d69c9..b9e7fb1fb5910 100644 --- a/lib/SIL/Verifier/CMakeLists.txt +++ b/lib/SIL/Verifier/CMakeLists.txt @@ -1,7 +1,6 @@ -sil_register_sources( +target_sources(swiftSIL PRIVATE LoadBorrowInvalidationChecker.cpp LinearLifetimeChecker.cpp MemoryLifetime.cpp SILOwnershipVerifier.cpp - SILVerifier.cpp -) + SILVerifier.cpp) diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp index 28f25217d8fed..227d33df6ff7e 100644 --- a/lib/SIL/Verifier/SILVerifier.cpp +++ b/lib/SIL/Verifier/SILVerifier.cpp @@ -2936,9 +2936,8 @@ class SILVerifier : public SILVerifierBase { require(selfRequirement && selfRequirement->getKind() == RequirementKind::Conformance, "first non-same-typerequirement should be conformance requirement"); - auto conformsTo = genericSig->getConformsTo(selfGenericParam); - require(std::find(conformsTo.begin(), conformsTo.end(), protocol) - != conformsTo.end(), + const auto protos = genericSig->getRequiredProtocols(selfGenericParam); + require(std::find(protos.begin(), protos.end(), protocol) != protos.end(), "requirement Self parameter must conform to called protocol"); auto lookupType = AMI->getLookupType(); diff --git a/lib/SILGen/ManagedValue.h b/lib/SILGen/ManagedValue.h index f96525d8a223e..1187c82593437 100644 --- a/lib/SILGen/ManagedValue.h +++ b/lib/SILGen/ManagedValue.h @@ -121,6 +121,13 @@ class ManagedValue { return ManagedValue::forOwnedObjectRValue(value, cleanup); } + static ManagedValue + forExclusivelyBorrowedOwnedObjectRValue(SILValue value, + CleanupHandle cleanup) { + assert(value->getType().isObject()); + return ManagedValue::forOwnedObjectRValue(value, cleanup); + } + /// Create a managed value for a +0 borrowed non-trivial rvalue object. static ManagedValue forBorrowedObjectRValue(SILValue value) { diff --git a/lib/SILGen/SILGen.cpp b/lib/SILGen/SILGen.cpp index df74a38f66dfd..9154b366cd97d 100644 --- a/lib/SILGen/SILGen.cpp +++ b/lib/SILGen/SILGen.cpp @@ -531,7 +531,7 @@ static SILFunction *getFunctionToInsertAfter(SILGenModule &SGM, // be inserted after. 
auto foundDelayed = SGM.delayedFunctions.find(insertAfter); if (foundDelayed != SGM.delayedFunctions.end()) { - insertAfter = foundDelayed->second.insertAfter; + insertAfter = foundDelayed->second; } else { break; } @@ -578,18 +578,39 @@ static bool isEmittedOnDemand(SILModule &M, SILDeclRef constant) { return false; auto *d = constant.getDecl(); - auto *dc = d->getDeclContext()->getModuleScopeContext(); + auto *dc = d->getDeclContext(); - if (isa(dc)) - return true; + switch (constant.kind) { + case SILDeclRef::Kind::Func: { + auto *fd = cast(d); + if (!fd->hasBody()) + return false; - if (auto *func = dyn_cast(d)) - if (func->hasForcedStaticDispatch()) + if (isa(dc->getModuleScopeContext())) return true; - if (auto *sf = dyn_cast(dc)) - if (M.isWholeModule() || M.getAssociatedContext() == dc) - return false; + if (fd->hasForcedStaticDispatch()) + return true; + + break; + } + case SILDeclRef::Kind::Allocator: { + auto *cd = cast(d); + // For factories, we don't need to emit a special thunk; the normal + // foreign-to-native thunk is sufficient. + if (isa(dc->getModuleScopeContext()) && + !cd->isFactoryInit() && + (dc->getSelfClassDecl() || + cd->hasBody())) + return true; + + break; + } + case SILDeclRef::Kind::EnumElement: + return true; + default: + break; + } return false; } @@ -618,18 +639,10 @@ SILFunction *SILGenModule::getFunction(SILDeclRef constant, emittedFunctions[constant] = F; - if (isEmittedOnDemand(M, constant) && - !delayedFunctions.count(constant)) { - auto *d = constant.getDecl(); - if (auto *func = dyn_cast(d)) { - if (constant.kind == SILDeclRef::Kind::Func) - emitFunction(func); - } else if (auto *ctor = dyn_cast(d)) { - // For factories, we don't need to emit a special thunk; the normal - // foreign-to-native thunk is sufficient. - if (!ctor->isFactoryInit() && - constant.kind == SILDeclRef::Kind::Allocator) - emitConstructor(ctor); + if (!delayedFunctions.count(constant)) { + if (isEmittedOnDemand(M, constant)) { + forcedFunctions.push_back(constant); + return F; } } @@ -639,14 +652,14 @@ SILFunction *SILGenModule::getFunction(SILDeclRef constant, // Move the function to its proper place within the module. 
M.functions.remove(F); SILFunction *insertAfter = getFunctionToInsertAfter(*this, - foundDelayed->second.insertAfter); + foundDelayed->second); if (!insertAfter) { M.functions.push_front(F); } else { M.functions.insertAfter(insertAfter->getIterator(), F); } - forcedFunctions.push_back(*foundDelayed); + forcedFunctions.push_back(constant); delayedFunctions.erase(foundDelayed); } else { // We would have registered a delayed function as "last emitted" when we @@ -663,30 +676,196 @@ bool SILGenModule::hasFunction(SILDeclRef constant) { void SILGenModule::visitFuncDecl(FuncDecl *fd) { emitFunction(fd); } +static void emitDelayedFunction(SILGenModule &SGM, + SILDeclRef constant, + SILFunction *f) { + switch (constant.kind) { + case SILDeclRef::Kind::Func: { + auto *fd = cast(constant.getDecl()); + + SGM.preEmitFunction(constant, fd, f, fd); + PrettyStackTraceSILFunction X("silgen emitFunction", f); + SILGenFunction(SGM, *f, fd).emitFunction(fd); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::Allocator: { + auto *decl = cast(constant.getDecl()); + + if (decl->getDeclContext()->getSelfClassDecl() && + (decl->isDesignatedInit() || + decl->isObjC())) { + SGM.preEmitFunction(constant, decl, f, decl); + PrettyStackTraceSILFunction X("silgen emitConstructor", f); + SILGenFunction(SGM, *f, decl).emitClassConstructorAllocator(decl); + SGM.postEmitFunction(constant, f); + } else { + SGM.preEmitFunction(constant, decl, f, decl); + PrettyStackTraceSILFunction X("silgen emitConstructor", f); + f->createProfiler(decl, constant, ForDefinition); + SILGenFunction(SGM, *f, decl).emitValueConstructor(decl); + SGM.postEmitFunction(constant, f); + } + break; + } + + case SILDeclRef::Kind::Initializer: { + auto *decl = cast(constant.getDecl()); + assert(decl->getDeclContext()->getSelfClassDecl()); + + SGM.preEmitFunction(constant, decl, f, decl); + PrettyStackTraceSILFunction X("silgen constructor initializer", f); + f->createProfiler(decl, constant, ForDefinition); + SILGenFunction(SGM, *f, decl).emitClassConstructorInitializer(decl); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::DefaultArgGenerator: { + auto *decl = constant.getDecl(); + auto *param = getParameterAt(decl, constant.defaultArgIndex); + auto *initDC = param->getDefaultArgumentInitContext(); + + switch (param->getDefaultArgumentKind()) { + case DefaultArgumentKind::Normal: { + auto arg = param->getTypeCheckedDefaultExpr(); + SGM.preEmitFunction(constant, arg, f, arg); + PrettyStackTraceSILFunction X("silgen emitDefaultArgGenerator ", f); + SILGenFunction SGF(SGM, *f, initDC); + SGF.emitGeneratorFunction(constant, arg); + SGM.postEmitFunction(constant, f); + break; + } + + case DefaultArgumentKind::StoredProperty: { + auto arg = param->getStoredProperty(); + SGM.preEmitFunction(constant, arg, f, arg); + PrettyStackTraceSILFunction X("silgen emitDefaultArgGenerator ", f); + SILGenFunction SGF(SGM, *f, initDC); + SGF.emitGeneratorFunction(constant, arg); + SGM.postEmitFunction(constant, f); + break; + } + + default: + llvm_unreachable("Bad default argument kind"); + } + + break; + } + + case SILDeclRef::Kind::StoredPropertyInitializer: { + auto *var = cast(constant.getDecl()); + + auto *pbd = var->getParentPatternBinding(); + unsigned idx = pbd->getPatternEntryIndexForVarDecl(var); + auto *init = pbd->getInit(idx); + auto *initDC = pbd->getInitContext(idx); + auto captureInfo = pbd->getCaptureInfo(idx); + assert(!pbd->isInitializerSubsumed(idx)); + + // If this is the backing storage for a 
property with an attached wrapper + // that was initialized with `=`, use that expression as the initializer. + if (auto originalProperty = var->getOriginalWrappedProperty()) { + if (originalProperty + ->isPropertyMemberwiseInitializedWithWrappedType()) { + auto wrapperInfo = + originalProperty->getPropertyWrapperBackingPropertyInfo(); + assert(wrapperInfo.originalInitialValue); + init = wrapperInfo.originalInitialValue; + } + } + + SGM.preEmitFunction(constant, init, f, init); + PrettyStackTraceSILFunction X("silgen emitStoredPropertyInitialization", f); + f->createProfiler(init, constant, ForDefinition); + SILGenFunction SGF(SGM, *f, initDC); + + // If this is a stored property initializer inside a type at global scope, + // it may close over a global variable. If we're emitting top-level code, + // then emit a "mark_function_escape" that lists the captured global + // variables so that definite initialization can reason about this + // escape point. + if (!var->getDeclContext()->isLocalContext() && + SGM.TopLevelSGF && SGM.TopLevelSGF->B.hasValidInsertionPoint()) { + SGM.emitMarkFunctionEscapeForTopLevelCodeGlobals(var, captureInfo); + } + + SGF.emitGeneratorFunction(constant, init, /*EmitProfilerIncrement=*/true); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::PropertyWrapperBackingInitializer: { + auto *var = cast(constant.getDecl()); + + SGM.preEmitFunction(constant, var, f, var); + PrettyStackTraceSILFunction X( + "silgen emitPropertyWrapperBackingInitializer", f); + auto wrapperInfo = var->getPropertyWrapperBackingPropertyInfo(); + assert(wrapperInfo.initializeFromOriginal); + f->createProfiler(wrapperInfo.initializeFromOriginal, constant, + ForDefinition); + auto varDC = var->getInnermostDeclContext(); + SILGenFunction SGF(SGM, *f, varDC); + SGF.emitGeneratorFunction(constant, wrapperInfo.initializeFromOriginal); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::GlobalAccessor: { + auto *global = cast(constant.getDecl()); + auto found = SGM.delayedGlobals.find(global); + assert(found != SGM.delayedGlobals.end()); + + auto *onceToken = found->second.first; + auto *onceFunc = found->second.second; + + SGM.preEmitFunction(constant, global, f, global); + PrettyStackTraceSILFunction X("silgen emitGlobalAccessor", f); + SILGenFunction(SGM, *f, global->getDeclContext()) + .emitGlobalAccessor(global, onceToken, onceFunc); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::EnumElement: { + auto *decl = cast(constant.getDecl()); + + SGM.preEmitFunction(constant, decl, f, decl); + PrettyStackTraceSILFunction X("silgen enum constructor", f); + SILGenFunction(SGM, *f, decl->getDeclContext()).emitEnumConstructor(decl); + SGM.postEmitFunction(constant, f); + break; + } + + case SILDeclRef::Kind::Destroyer: + case SILDeclRef::Kind::Deallocator: + case SILDeclRef::Kind::IVarInitializer: + case SILDeclRef::Kind::IVarDestroyer: + llvm_unreachable("Cannot emit as a delayed function"); + break; + } +} + /// Emit a function now, if it's externally usable or has been referenced in /// the current TU, or remember how to emit it later if not. -template static void emitOrDelayFunction(SILGenModule &SGM, SILDeclRef constant, - Fn &&emitter, bool forceEmission = false) { + assert(!constant.isThunk()); + assert(!constant.isClangImported()); + auto emitAfter = SGM.lastEmittedFunction; SILFunction *f = nullptr; - // If the function is explicit or may be externally referenced, or if we're - // forcing emission, we must emit it. 
- bool mayDelay; - // Shared thunks and Clang-imported definitions can always be delayed. - if (constant.isThunk() || constant.isClangImported()) { - mayDelay = !forceEmission; // Implicit decls may be delayed if they can't be used externally. - } else { - auto linkage = constant.getLinkage(ForDefinition); - mayDelay = !forceEmission && - (constant.isImplicit() && - !isPossiblyUsedExternally(linkage, SGM.M.isWholeModule())); - } + auto linkage = constant.getLinkage(ForDefinition); + bool mayDelay = !forceEmission && + (constant.isImplicit() && + !isPossiblyUsedExternally(linkage, SGM.M.isWholeModule())); // Avoid emitting a delayable definition if it hasn't already been referenced. if (mayDelay) @@ -696,8 +875,7 @@ static void emitOrDelayFunction(SILGenModule &SGM, // If we don't want to emit now, remember how for later. if (!f) { - SGM.delayedFunctions.insert({constant, {emitAfter, - std::forward(emitter)}}); + SGM.delayedFunctions.insert({constant, emitAfter}); // Even though we didn't emit the function now, update the // lastEmittedFunction so that we preserve the original ordering that // the symbols would have been emitted in. @@ -705,7 +883,7 @@ static void emitOrDelayFunction(SILGenModule &SGM, return; } - emitter(f); + emitDelayedFunction(SGM, constant, f); } void SILGenModule::preEmitFunction(SILDeclRef constant, @@ -927,20 +1105,10 @@ void SILGenModule::emitFunction(FuncDecl *fd) { emitAbstractFuncDecl(fd); if (fd->hasBody()) { - FrontendStatsTracer Tracer(getASTContext().Stats, - "SILGen-funcdecl", fd); - PrettyStackTraceDecl stackTrace("emitting SIL for", fd); - SILDeclRef constant(decl); - bool ForCoverageMapping = doesASTRequireProfiling(M, fd); - - emitOrDelayFunction(*this, constant, [this,constant,fd](SILFunction *f){ - preEmitFunction(constant, fd, f, fd); - PrettyStackTraceSILFunction X("silgen emitFunction", f); - SILGenFunction(*this, *f, fd).emitFunction(fd); - postEmitFunction(constant, f); - }, /*forceEmission=*/ForCoverageMapping); + emitOrDelayFunction(*this, constant, + /*forceEmission=*/ForCoverageMapping); } } @@ -958,13 +1126,6 @@ void SILGenModule::emitConstructor(ConstructorDecl *decl) { if (isa(decl->getDeclContext())) return; - // Always-unavailable imported constructors are factory methods - // that have been imported as constructors and then hidden by an - // imported init method. - if (decl->hasClangNode() && - decl->getAttrs().isUnavailable(decl->getASTContext())) - return; - SILDeclRef constant(decl); DeclContext *declCtx = decl->getDeclContext(); @@ -975,31 +1136,14 @@ void SILGenModule::emitConstructor(ConstructorDecl *decl) { // initializers, have have separate entry points for allocation and // initialization. if (decl->isDesignatedInit() || decl->isObjC()) { - emitOrDelayFunction( - *this, constant, [this, constant, decl](SILFunction *f) { - preEmitFunction(constant, decl, f, decl); - PrettyStackTraceSILFunction X("silgen emitConstructor", f); - SILGenFunction(*this, *f, decl).emitClassConstructorAllocator(decl); - postEmitFunction(constant, f); - }); - - // Constructors may not have bodies if they've been imported, or if they've - // been parsed from a module interface. 
+ emitOrDelayFunction(*this, constant); + if (decl->hasBody()) { SILDeclRef initConstant(decl, SILDeclRef::Kind::Initializer); - emitOrDelayFunction( - *this, initConstant, - [this, initConstant, decl](SILFunction *initF) { - preEmitFunction(initConstant, decl, initF, decl); - PrettyStackTraceSILFunction X("silgen constructor initializer", - initF); - initF->createProfiler(decl, initConstant, ForDefinition); - SILGenFunction(*this, *initF, decl) - .emitClassConstructorInitializer(decl); - postEmitFunction(initConstant, initF); - }, - /*forceEmission=*/ForCoverageMapping); + emitOrDelayFunction(*this, initConstant, + /*forceEmission=*/ForCoverageMapping); } + return; } } @@ -1007,28 +1151,10 @@ void SILGenModule::emitConstructor(ConstructorDecl *decl) { // Struct and enum constructors do everything in a single function, as do // non-@objc convenience initializers for classes. if (decl->hasBody()) { - emitOrDelayFunction( - *this, constant, [this, constant, decl](SILFunction *f) { - preEmitFunction(constant, decl, f, decl); - PrettyStackTraceSILFunction X("silgen emitConstructor", f); - f->createProfiler(decl, constant, ForDefinition); - SILGenFunction(*this, *f, decl).emitValueConstructor(decl); - postEmitFunction(constant, f); - }); + emitOrDelayFunction(*this, constant); } } -void SILGenModule::emitEnumConstructor(EnumElementDecl *decl) { - // Enum element constructors are always emitted by need, so don't need - // delayed emission. - SILDeclRef constant(decl); - SILFunction *f = getFunction(constant, ForDefinition); - preEmitFunction(constant, decl, f, decl); - PrettyStackTraceSILFunction X("silgen enum constructor", f); - SILGenFunction(*this, *f, decl->getDeclContext()).emitEnumConstructor(decl); - postEmitFunction(constant, f); -} - SILFunction *SILGenModule::emitClosure(AbstractClosureExpr *ce) { SILDeclRef constant(ce); SILFunction *f = getFunction(constant, ForDefinition); @@ -1177,37 +1303,14 @@ void SILGenModule::emitDestructor(ClassDecl *cd, DestructorDecl *dd) { void SILGenModule::emitDefaultArgGenerator(SILDeclRef constant, ParamDecl *param) { - auto initDC = param->getDefaultArgumentInitContext(); - switch (param->getDefaultArgumentKind()) { case DefaultArgumentKind::None: llvm_unreachable("No default argument here?"); - case DefaultArgumentKind::Normal: { - auto arg = param->getTypeCheckedDefaultExpr(); - emitOrDelayFunction(*this, constant, - [this,constant,arg,initDC](SILFunction *f) { - preEmitFunction(constant, arg, f, arg); - PrettyStackTraceSILFunction X("silgen emitDefaultArgGenerator ", f); - SILGenFunction SGF(*this, *f, initDC); - SGF.emitGeneratorFunction(constant, arg); - postEmitFunction(constant, f); - }); - return; - } - - case DefaultArgumentKind::StoredProperty: { - auto arg = param->getStoredProperty(); - emitOrDelayFunction(*this, constant, - [this,constant,arg,initDC](SILFunction *f) { - preEmitFunction(constant, arg, f, arg); - PrettyStackTraceSILFunction X("silgen emitDefaultArgGenerator ", f); - SILGenFunction SGF(*this, *f, initDC); - SGF.emitGeneratorFunction(constant, arg); - postEmitFunction(constant, f); - }); - return; - } + case DefaultArgumentKind::Normal: + case DefaultArgumentKind::StoredProperty: + emitOrDelayFunction(*this, constant); + break; case DefaultArgumentKind::Inherited: case DefaultArgumentKind::Column: @@ -1219,69 +1322,21 @@ void SILGenModule::emitDefaultArgGenerator(SILDeclRef constant, case DefaultArgumentKind::NilLiteral: case DefaultArgumentKind::EmptyArray: case DefaultArgumentKind::EmptyDictionary: - return; + break; } } 
void SILGenModule:: emitStoredPropertyInitialization(PatternBindingDecl *pbd, unsigned i) { auto *var = pbd->getAnchoringVarDecl(i); - auto *init = pbd->getInit(i); - auto *initDC = pbd->getInitContext(i); - auto captureInfo = pbd->getCaptureInfo(i); - assert(!pbd->isInitializerSubsumed(i)); - - // If this is the backing storage for a property with an attached wrapper - // that was initialized with `=`, use that expression as the initializer. - if (auto originalProperty = var->getOriginalWrappedProperty()) { - if (originalProperty - ->isPropertyMemberwiseInitializedWithWrappedType()) { - auto wrapperInfo = - originalProperty->getPropertyWrapperBackingPropertyInfo(); - assert(wrapperInfo.originalInitialValue); - init = wrapperInfo.originalInitialValue; - } - } - SILDeclRef constant(var, SILDeclRef::Kind::StoredPropertyInitializer); - emitOrDelayFunction(*this, constant, - [this,var,captureInfo,constant,init,initDC](SILFunction *f) { - preEmitFunction(constant, init, f, init); - PrettyStackTraceSILFunction X("silgen emitStoredPropertyInitialization", f); - f->createProfiler(init, constant, ForDefinition); - SILGenFunction SGF(*this, *f, initDC); - - // If this is a stored property initializer inside a type at global scope, - // it may close over a global variable. If we're emitting top-level code, - // then emit a "mark_function_escape" that lists the captured global - // variables so that definite initialization can reason about this - // escape point. - if (!var->getDeclContext()->isLocalContext() && - TopLevelSGF && TopLevelSGF->B.hasValidInsertionPoint()) { - emitMarkFunctionEscapeForTopLevelCodeGlobals(var, captureInfo); - } - - SGF.emitGeneratorFunction(constant, init, /*EmitProfilerIncrement=*/true); - postEmitFunction(constant, f); - }); + emitOrDelayFunction(*this, constant); } void SILGenModule:: emitPropertyWrapperBackingInitializer(VarDecl *var) { SILDeclRef constant(var, SILDeclRef::Kind::PropertyWrapperBackingInitializer); - emitOrDelayFunction(*this, constant, [this, constant, var](SILFunction *f) { - preEmitFunction(constant, var, f, var); - PrettyStackTraceSILFunction X( - "silgen emitPropertyWrapperBackingInitializer", f); - auto wrapperInfo = var->getPropertyWrapperBackingPropertyInfo(); - assert(wrapperInfo.initializeFromOriginal); - f->createProfiler(wrapperInfo.initializeFromOriginal, constant, - ForDefinition); - auto varDC = var->getInnermostDeclContext(); - SILGenFunction SGF(*this, *f, varDC); - SGF.emitGeneratorFunction(constant, wrapperInfo.initializeFromOriginal); - postEmitFunction(constant, f); - }); + emitOrDelayFunction(*this, constant); } SILFunction *SILGenModule::emitLazyGlobalInitializer(StringRef funcName, @@ -1314,14 +1369,8 @@ void SILGenModule::emitGlobalAccessor(VarDecl *global, SILGlobalVariable *onceToken, SILFunction *onceFunc) { SILDeclRef accessor(global, SILDeclRef::Kind::GlobalAccessor); - emitOrDelayFunction(*this, accessor, - [this,accessor,global,onceToken,onceFunc](SILFunction *f){ - preEmitFunction(accessor, global, f, global); - PrettyStackTraceSILFunction X("silgen emitGlobalAccessor", f); - SILGenFunction(*this, *f, global->getDeclContext()) - .emitGlobalAccessor(global, onceToken, onceFunc); - postEmitFunction(accessor, f); - }); + delayedGlobals[global] = std::make_pair(onceToken, onceFunc); + emitOrDelayFunction(*this, accessor); } void SILGenModule::emitDefaultArgGenerators(SILDeclRef::Loc decl, @@ -1818,6 +1867,8 @@ class SILGenModuleRAII { public: void emitSourceFile(SourceFile *sf) { + assert(sf->ASTStage == 
SourceFile::TypeChecked); + SourceFileScope scope(SGM, sf); for (Decl *D : sf->getTopLevelDecls()) { FrontendStatsTracer StatsTracer(SGM.getASTContext().Stats, @@ -1836,7 +1887,7 @@ class SILGenModuleRAII { } } - SILGenModuleRAII(SILModule &M, ModuleDecl *SM) : SGM{M, SM} {} + explicit SILGenModuleRAII(SILModule &M) : SGM{M, M.getSwiftModule()} {} ~SILGenModuleRAII() { // Emit any delayed definitions that were forced. @@ -1846,7 +1897,8 @@ class SILGenModuleRAII { || !SGM.pendingConformances.empty()) { while (!SGM.forcedFunctions.empty()) { auto &front = SGM.forcedFunctions.front(); - front.second.emitter(SGM.getFunction(front.first, ForDefinition)); + emitDelayedFunction(SGM, front, + SGM.getEmittedFunction(front, ForDefinition)); SGM.forcedFunctions.pop_front(); } while (!SGM.pendingConformances.empty()) { @@ -1859,60 +1911,35 @@ class SILGenModuleRAII { } // end anonymous namespace std::unique_ptr -SILGenSourceFileRequest::evaluate(Evaluator &evaluator, - SILGenDescriptor desc) const { - // If we have a .sil file to parse, defer to the parsing request. - if (desc.getSourceFileToParse()) { - return llvm::cantFail(evaluator(ParseSILModuleRequest{desc})); - } - - auto *unit = desc.context.get(); - auto *mod = unit->getParentModule(); - auto M = std::unique_ptr( - new SILModule(mod, desc.conv, desc.opts, unit, /*wholeModule*/ false)); - SILGenModuleRAII scope(*M, mod); - - if (auto *file = dyn_cast(unit)) { - scope.emitSourceFile(file); - } else if (auto *file = dyn_cast(unit)) { - if (file->isSIB()) - M->getSILLoader()->getAllForModule(mod->getName(), file); - } - - return M; -} - -std::unique_ptr -SILGenWholeModuleRequest::evaluate(Evaluator &evaluator, - SILGenDescriptor desc) const { +SILGenerationRequest::evaluate(Evaluator &evaluator, + SILGenDescriptor desc) const { // If we have a .sil file to parse, defer to the parsing request. if (desc.getSourceFileToParse()) { return llvm::cantFail(evaluator(ParseSILModuleRequest{desc})); } - auto *mod = desc.context.get(); - auto M = std::unique_ptr( - new SILModule(mod, desc.conv, desc.opts, mod, /*wholeModule*/ true)); - SILGenModuleRAII scope(*M, mod); + // Otherwise perform SIL generation of the passed SourceFiles. + auto silMod = SILModule::createEmptyModule(desc.context, desc.conv, + desc.opts); + SILGenModuleRAII scope(*silMod); - for (auto file : mod->getFiles()) { - auto nextSF = dyn_cast(file); - if (!nextSF || nextSF->ASTStage != SourceFile::TypeChecked) - continue; - scope.emitSourceFile(nextSF); + for (auto file : desc.getFiles()) { + if (auto *nextSF = dyn_cast(file)) + scope.emitSourceFile(nextSF); } - // Also make sure to process any intermediate files that may contain SIL - bool hasSIB = std::any_of(mod->getFiles().begin(), - mod->getFiles().end(), - [](const FileUnit *File) -> bool { + // Also make sure to process any intermediate files that may contain SIL. 
+ bool hasSIB = llvm::any_of(desc.getFiles(), [](const FileUnit *File) -> bool { auto *SASTF = dyn_cast(File); return SASTF && SASTF->isSIB(); }); - if (hasSIB) - M->getSILLoader()->getAllForModule(mod->getName(), nullptr); + if (hasSIB) { + auto primary = desc.context.dyn_cast(); + silMod->getSILLoader()->getAllForModule(silMod->getSwiftModule()->getName(), + primary); + } - return M; + return silMod; } std::unique_ptr @@ -1920,7 +1947,7 @@ swift::performSILGeneration(ModuleDecl *mod, Lowering::TypeConverter &tc, const SILOptions &options) { auto desc = SILGenDescriptor::forWholeModule(mod, tc, options); return llvm::cantFail( - mod->getASTContext().evaluator(SILGenWholeModuleRequest{desc})); + mod->getASTContext().evaluator(SILGenerationRequest{desc})); } std::unique_ptr @@ -1928,5 +1955,5 @@ swift::performSILGeneration(FileUnit &sf, Lowering::TypeConverter &tc, const SILOptions &options) { auto desc = SILGenDescriptor::forFile(sf, tc, options); return llvm::cantFail( - sf.getASTContext().evaluator(SILGenSourceFileRequest{desc})); + sf.getASTContext().evaluator(SILGenerationRequest{desc})); } diff --git a/lib/SILGen/SILGen.h b/lib/SILGen/SILGen.h index 287d656fe1261..0f3a72b595156 100644 --- a/lib/SILGen/SILGen.h +++ b/lib/SILGen/SILGen.h @@ -60,19 +60,16 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// Mapping from ProtocolConformances to emitted SILWitnessTables. llvm::DenseMap emittedWitnessTables; - struct DelayedFunction { - /// Insert the entity after the given function when it's emitted. - SILDeclRef insertAfter; - /// Code that generates the function. - std::function emitter; - }; - - /// Mapping from SILDeclRefs to delayed SILFunction generators for - /// non-externally-visible symbols. - llvm::DenseMap delayedFunctions; + /// Mapping from SILDeclRefs to where the given function will be inserted + /// when it's emitted. Used for non-externally visible symbols. + llvm::DenseMap delayedFunctions; /// Queue of delayed SILFunctions that need to be forced. - std::deque> forcedFunctions; + std::deque forcedFunctions; + + /// Mapping global VarDecls to their onceToken and onceFunc, respectively. + llvm::DenseMap> delayedGlobals; /// The most recent declaration we considered for emission. SILDeclRef lastEmittedFunction; @@ -287,10 +284,6 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// SILDeclRef(cd, Destructor). void emitDestructor(ClassDecl *cd, DestructorDecl *dd); - /// Generates the enum constructor for the given - /// EnumElementDecl under the name SILDeclRef(decl). - void emitEnumConstructor(EnumElementDecl *decl); - /// Emits the default argument generator with the given expression. 
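With the SILGen changes above, stored-property initializers, property-wrapper backing initializers, and global accessors are all queued through the same `emitOrDelayFunction(*this, constant)` entry point: the `SILDeclRef` alone identifies what to emit, `delayedFunctions` only records where to insert the function, and `delayedGlobals` carries the once-token/once-function pair a global accessor needs. For orientation, a hedged sketch of the Swift declarations that exercise these `SILDeclRef` kinds (example code, not part of the patch):

```swift
@propertyWrapper
struct Clamped {
    private var storage: Int
    init(wrappedValue: Int) { storage = min(max(wrappedValue, 0), 100) }
    var wrappedValue: Int {
        get { storage }
        set { storage = min(max(newValue, 0), 100) }
    }
}

struct Widget {
    // `= 1` is emitted as SILDeclRef::Kind::StoredPropertyInitializer.
    var count: Int = 1

    // The `= 150` spelled with `=` is the "original initial value" used for
    // SILDeclRef::Kind::PropertyWrapperBackingInitializer.
    @Clamped var level: Int = 150
}

// A global is initialized lazily behind a once token; its accessor is
// SILDeclRef::Kind::GlobalAccessor, now paired with the token and init
// function through `delayedGlobals`.
let defaultWidget = Widget()
```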
void emitDefaultArgGenerator(SILDeclRef constant, ParamDecl *param); diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index 0a101dd2f0c76..7d1798d5fc803 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -1243,7 +1243,7 @@ class SILGenApply : public Lowering::ExprVisitor { if (superMV.getValue() != SGF.InitDelegationSelf.getValue()) { SILValue underlyingSelf = SGF.InitDelegationSelf.getValue(); SGF.InitDelegationSelf = ManagedValue::forUnmanaged(underlyingSelf); - CleanupHandle newWriteback = SGF.enterDelegateInitSelfWritebackCleanup( + CleanupHandle newWriteback = SGF.enterOwnedValueWritebackCleanup( SGF.InitDelegationLoc.getValue(), SGF.InitDelegationSelfBox, superMV.forward(SGF)); SGF.SuperInitDelegationSelf = @@ -3818,7 +3818,7 @@ RValue CallEmission::applyEnumElementConstructor(SGFContext C) { resultFnType.getParams(), /*canonicalVararg*/ true); auto arg = RValue(SGF, argVals, payloadTy->getCanonicalType()); - payload = ArgumentSource(element, std::move(arg)); + payload = ArgumentSource(uncurriedLoc, std::move(arg)); formalResultType = cast(formalResultType).getResult(); origFormalType = origFormalType.getFunctionResultType(); } else { diff --git a/lib/SILGen/SILGenBuiltin.cpp b/lib/SILGen/SILGenBuiltin.cpp index 45e94b5575123..8ed54c56622c5 100644 --- a/lib/SILGen/SILGenBuiltin.cpp +++ b/lib/SILGen/SILGenBuiltin.cpp @@ -901,6 +901,60 @@ emitBuiltinIsUnique_native(SILGenFunction &SGF, return ManagedValue::forUnmanaged(result); } +static ManagedValue +emitBuiltinBeginCOWMutation(SILGenFunction &SGF, + SILLocation loc, + SubstitutionMap subs, + ArrayRef args, + SGFContext C) { + + assert(subs.getReplacementTypes().size() == 1 && + "BeginCOWMutation should have one sub."); + assert(args.size() == 1 && "isUnique_native should have one arg."); + + SILValue refAddr = args[0].getValue(); + auto *ref = SGF.B.createLoad(loc, refAddr, LoadOwnershipQualifier::Take); + BeginCOWMutationInst *beginCOW = SGF.B.createBeginCOWMutation(loc, ref, /*isNative*/ false); + SGF.B.createStore(loc, beginCOW->getBufferResult(), refAddr, StoreOwnershipQualifier::Init); + return ManagedValue::forUnmanaged(beginCOW->getUniquenessResult()); +} + +static ManagedValue +emitBuiltinBeginCOWMutation_native(SILGenFunction &SGF, + SILLocation loc, + SubstitutionMap subs, + ArrayRef args, + SGFContext C) { + + assert(subs.getReplacementTypes().size() == 1 && + "BeginCOWMutation should have one sub."); + assert(args.size() == 1 && "isUnique_native should have one arg."); + + SILValue refAddr = args[0].getValue(); + auto *ref = SGF.B.createLoad(loc, refAddr, LoadOwnershipQualifier::Take); + BeginCOWMutationInst *beginCOW = SGF.B.createBeginCOWMutation(loc, ref, /*isNative*/ true); + SGF.B.createStore(loc, beginCOW->getBufferResult(), refAddr, StoreOwnershipQualifier::Init); + return ManagedValue::forUnmanaged(beginCOW->getUniquenessResult()); +} + +static ManagedValue +emitBuiltinEndCOWMutation(SILGenFunction &SGF, + SILLocation loc, + SubstitutionMap subs, + ArrayRef args, + SGFContext C) { + + assert(subs.getReplacementTypes().size() == 1 && + "EndCOWMutation should have one sub."); + assert(args.size() == 1 && "isUnique_native should have one arg."); + + SILValue refAddr = args[0].getValue(); + auto ref = SGF.B.createLoad(loc, refAddr, LoadOwnershipQualifier::Take); + auto endRef = SGF.B.createEndCOWMutation(loc, ref); + SGF.B.createStore(loc, endRef, refAddr, StoreOwnershipQualifier::Init); + return ManagedValue::forUnmanaged(SGF.emitEmptyTuple(loc)); +} + static ManagedValue 
emitBuiltinBindMemory(SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, diff --git a/lib/SILGen/SILGenConstructor.cpp b/lib/SILGen/SILGenConstructor.cpp index ac0e18ca3e4f2..6e1a94e9a2eff 100644 --- a/lib/SILGen/SILGenConstructor.cpp +++ b/lib/SILGen/SILGenConstructor.cpp @@ -252,6 +252,8 @@ static void emitImplicitValueConstructor(SILGenFunction &SGF, selfTy.getFieldType(field, SGF.SGM.M, SGF.getTypeExpansionContext()); RValue value; + FullExpr scope(SGF.Cleanups, field->getParentPatternBinding()); + // If it's memberwise initialized, do so now. if (field->isMemberwiseInitialized(/*preferDeclaredProperties=*/false)) { assert(elti != eltEnd && "number of args does not match number of fields"); @@ -276,7 +278,6 @@ static void emitImplicitValueConstructor(SILGenFunction &SGF, } // Cleanup after this initialization. - FullExpr scope(SGF.Cleanups, field->getParentPatternBinding()); SILValue v = maybeEmitPropertyWrapperInitFromValue(SGF, Loc, field, subs, std::move(value)) .forwardAsSingleStorageValue(SGF, fieldTy, Loc); diff --git a/lib/SILGen/SILGenExpr.cpp b/lib/SILGen/SILGenExpr.cpp index d04d6aa71cdfa..58f6c3396929d 100644 --- a/lib/SILGen/SILGenExpr.cpp +++ b/lib/SILGen/SILGenExpr.cpp @@ -717,11 +717,11 @@ SILValue SILGenFunction::emitEmptyTuple(SILLocation loc) { namespace { -/// This is a simple cleanup class that is only meant to help with delegating -/// initializers. Specifically, if the delegating initializer fails to consume -/// the loaded self, we want to write back self into the slot to ensure that -/// ownership is preserved. -struct DelegateInitSelfWritebackCleanup : Cleanup { +/// This is a simple cleanup class that at the end of a lexical scope consumes +/// an owned value by writing it back to memory. The user can forward this +/// cleanup to take ownership of the value and thus prevent it from being +/// written back. +struct OwnedValueWritebackCleanup final : Cleanup { + /// We store our own loc so that we can ensure that DI ignores our writeback.
SILLocation loc; @@ -729,8 +729,8 @@ struct DelegateInitSelfWritebackCleanup : Cleanup { SILValue lvalueAddress; SILValue value; - DelegateInitSelfWritebackCleanup(SILLocation loc, SILValue lvalueAddress, - SILValue value) + OwnedValueWritebackCleanup(SILLocation loc, SILValue lvalueAddress, + SILValue value) : loc(loc), lvalueAddress(lvalueAddress), value(value) {} void emit(SILGenFunction &SGF, CleanupLocation l, ForUnwind_t forUnwind) override { @@ -749,14 +749,13 @@ struct DelegateInitSelfWritebackCleanup : Cleanup { lvalueObjTy); } - auto &lowering = SGF.B.getTypeLowering(lvalueAddress->getType()); - lowering.emitStore(SGF.B, loc, valueToStore, lvalueAddress, - StoreOwnershipQualifier::Init); + SGF.B.emitStoreValueOperation(loc, valueToStore, lvalueAddress, + StoreOwnershipQualifier::Init); } void dump(SILGenFunction &) const override { #ifndef NDEBUG - llvm::errs() << "SimpleWritebackCleanup " + llvm::errs() << "OwnedValueWritebackCleanup " << "State:" << getState() << "\n" << "lvalueAddress:" << lvalueAddress << "value:" << value << "\n"; @@ -766,10 +765,9 @@ struct DelegateInitSelfWritebackCleanup : Cleanup { } // end anonymous namespace -CleanupHandle SILGenFunction::enterDelegateInitSelfWritebackCleanup( +CleanupHandle SILGenFunction::enterOwnedValueWritebackCleanup( SILLocation loc, SILValue address, SILValue newValue) { - Cleanups.pushCleanup(loc, address, - newValue); + Cleanups.pushCleanup(loc, address, newValue); return Cleanups.getTopCleanup(); } @@ -815,8 +813,8 @@ RValue SILGenFunction::emitRValueForSelfInDelegationInit(SILLocation loc, // Forward our initial value for init delegation self and create a new // cleanup that performs a writeback at the end of lexical scope if our // value is not consumed. - InitDelegationSelf = ManagedValue( - self, enterDelegateInitSelfWritebackCleanup(*InitDelegationLoc, addr, self)); + InitDelegationSelf = ManagedValue::forExclusivelyBorrowedOwnedObjectRValue( + self, enterOwnedValueWritebackCleanup(*InitDelegationLoc, addr, self)); InitDelegationSelfBox = addr; return RValue(*this, loc, refType, InitDelegationSelf); } @@ -4099,8 +4097,6 @@ static bool isVerbatimNullableTypeInC(SILModule &M, Type ty) { // Other types like UnsafePointer can also be nullable. 
const DeclContext *DC = M.getAssociatedContext(); - if (!DC) - DC = M.getSwiftModule(); ty = OptionalType::get(ty); return ty->isTriviallyRepresentableIn(ForeignLanguage::C, DC); } diff --git a/lib/SILGen/SILGenFunction.h b/lib/SILGen/SILGenFunction.h index 305a2a12086d2..854fa87b50c0e 100644 --- a/lib/SILGen/SILGenFunction.h +++ b/lib/SILGen/SILGenFunction.h @@ -1188,9 +1188,9 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction CleanupHandle enterDeallocateUninitializedArrayCleanup(SILValue array); void emitUninitializedArrayDeallocation(SILLocation loc, SILValue array); - CleanupHandle enterDelegateInitSelfWritebackCleanup(SILLocation loc, - SILValue address, - SILValue newValue); + CleanupHandle enterOwnedValueWritebackCleanup(SILLocation loc, + SILValue address, + SILValue newValue); SILValue emitConversionToSemanticRValue(SILLocation loc, SILValue value, const TypeLowering &valueTL); diff --git a/lib/SILGen/SILGenRequests.cpp b/lib/SILGen/SILGenRequests.cpp index 96a17359ec328..a854ebe37a5ee 100644 --- a/lib/SILGen/SILGenRequests.cpp +++ b/lib/SILGen/SILGenRequests.cpp @@ -46,14 +46,18 @@ SourceLoc swift::extractNearestSourceLoc(const SILGenDescriptor &desc) { return SourceLoc(); } -evaluator::DependencySource SILGenSourceFileRequest::readDependencySource( +evaluator::DependencySource SILGenerationRequest::readDependencySource( const evaluator::DependencyCollector &e) const { auto &desc = std::get<0>(getStorage()); + + // We don't track dependencies in whole-module mode. + if (auto *mod = desc.context.dyn_cast()) { + return {nullptr, e.getActiveSourceScope()}; + } + + // If we have a single source file, it's the source of dependencies. auto *unit = desc.context.get(); - return { - dyn_cast_or_null(unit), - evaluator::DependencyScope::Cascading - }; + return {dyn_cast(unit), evaluator::DependencyScope::Cascading}; } ArrayRef SILGenDescriptor::getFiles() const { @@ -65,10 +69,6 @@ ArrayRef SILGenDescriptor::getFiles() const { return llvm::makeArrayRef(*context.getAddrOfPtr1()); } -bool SILGenDescriptor::isWholeModule() const { - return context.is(); -} - SourceFile *SILGenDescriptor::getSourceFileToParse() const { #ifndef NDEBUG auto sfCount = llvm::count_if(getFiles(), [](FileUnit *file) { diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp index c8fce513fa347..d0087194d217f 100644 --- a/lib/SILGen/SILGenThunk.cpp +++ b/lib/SILGen/SILGenThunk.cpp @@ -138,8 +138,6 @@ SILGenFunction::emitGlobalFunctionRef(SILLocation loc, SILDeclRef constant, SGM.emitForeignToNativeThunk(constant); } else if (constant.isNativeToForeignThunk()) { SGM.emitNativeToForeignThunk(constant); - } else if (constant.kind == SILDeclRef::Kind::EnumElement) { - SGM.emitEnumConstructor(cast(constant.getDecl())); } } diff --git a/lib/SILOptimizer/ARC/CMakeLists.txt b/lib/SILOptimizer/ARC/CMakeLists.txt index 2a05a36f27826..2d3c592834b1b 100644 --- a/lib/SILOptimizer/ARC/CMakeLists.txt +++ b/lib/SILOptimizer/ARC/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE ARCBBState.cpp ARCLoopOpts.cpp ARCMatchingSet.cpp @@ -8,5 +8,4 @@ silopt_register_sources( GlobalLoopARCSequenceDataflow.cpp RCStateTransition.cpp RCStateTransitionVisitors.cpp - RefCountState.cpp -) + RefCountState.cpp) diff --git a/lib/SILOptimizer/Analysis/ARCAnalysis.cpp b/lib/SILOptimizer/Analysis/ARCAnalysis.cpp index 68d06dab96b1f..c64298fad8890 100644 --- a/lib/SILOptimizer/Analysis/ARCAnalysis.cpp +++ b/lib/SILOptimizer/Analysis/ARCAnalysis.cpp @@ -474,6 +474,10 @@ 
mayGuaranteedUseValue(SILInstruction *User, SILValue Ptr, AliasAnalysis *AA) { // FIXME: this is overly conservative. It should return true only of the // RC identity of the single operand matches Ptr. return true; + case SILInstructionKind::BeginCOWMutationInst: + // begin_cow_mutation takes the argument as owned and produces a new + // owned result. + return false; default: llvm_unreachable("Unexpected check-ref-count instruction."); } diff --git a/lib/SILOptimizer/Analysis/CMakeLists.txt b/lib/SILOptimizer/Analysis/CMakeLists.txt index 89ef0cdbc55c6..bd79d58025cd4 100644 --- a/lib/SILOptimizer/Analysis/CMakeLists.txt +++ b/lib/SILOptimizer/Analysis/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE ARCAnalysis.cpp AccessSummaryAnalysis.cpp AccessedStorageAnalysis.cpp @@ -25,5 +25,4 @@ silopt_register_sources( SideEffectAnalysis.cpp SimplifyInstruction.cpp TypeExpansionAnalysis.cpp - ValueTracking.cpp -) + ValueTracking.cpp) diff --git a/lib/SILOptimizer/CMakeLists.txt b/lib/SILOptimizer/CMakeLists.txt index 544c554d14340..4bf45e0ebd8db 100644 --- a/lib/SILOptimizer/CMakeLists.txt +++ b/lib/SILOptimizer/CMakeLists.txt @@ -1,25 +1,7 @@ - -set(SILOPTIMIZER_SOURCES) - -function(_list_transform newvar) - set(sources ${ARGN}) - set(dir ${CMAKE_CURRENT_SOURCE_DIR}) - set(tmp) - foreach (s ${sources}) - list(APPEND tmp "${dir}/${s}") - endforeach() - set(${newvar} "${tmp}" PARENT_SCOPE) -endfunction() - -macro(silopt_register_sources) - precondition(new_transformed_sources - NEGATE - MESSAGE "Expected this to be empty since we clear after each run") - _list_transform(new_transformed_sources ${ARGN}) - list_union("${SILOPTIMIZER_SOURCES}" "${new_transformed_sources}" out) - set(SILOPTIMIZER_SOURCES "${out}" PARENT_SCOPE) - set(new_transformed_sources) -endmacro() +add_swift_host_library(swiftSILOptimizer STATIC + SILOptimizer.cpp) +target_link_libraries(swiftSILOptimizer PRIVATE + swiftSIL) add_subdirectory(ARC) add_subdirectory(Analysis) @@ -33,8 +15,3 @@ add_subdirectory(SILCombiner) add_subdirectory(Transforms) add_subdirectory(UtilityPasses) add_subdirectory(Utils) - -add_swift_host_library(swiftSILOptimizer STATIC - ${SILOPTIMIZER_SOURCES}) -target_link_libraries(swiftSILOptimizer PRIVATE - swiftSIL) diff --git a/lib/SILOptimizer/Differentiation/CMakeLists.txt b/lib/SILOptimizer/Differentiation/CMakeLists.txt index 1fac810bd483b..ce2d9571b95c6 100644 --- a/lib/SILOptimizer/Differentiation/CMakeLists.txt +++ b/lib/SILOptimizer/Differentiation/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE ADContext.cpp Common.cpp DifferentiationInvoker.cpp @@ -6,5 +6,4 @@ silopt_register_sources( LinearMapInfo.cpp PullbackEmitter.cpp Thunk.cpp - VJPEmitter.cpp -) + VJPEmitter.cpp) diff --git a/lib/SILOptimizer/FunctionSignatureTransforms/CMakeLists.txt b/lib/SILOptimizer/FunctionSignatureTransforms/CMakeLists.txt index 56ea0c3542ace..fa1601acfabe3 100644 --- a/lib/SILOptimizer/FunctionSignatureTransforms/CMakeLists.txt +++ b/lib/SILOptimizer/FunctionSignatureTransforms/CMakeLists.txt @@ -1,8 +1,7 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE FunctionSignatureOpts.cpp DeadArgumentTransform.cpp ArgumentExplosionTransform.cpp OwnedToGuaranteedTransform.cpp ExistentialSpecializer.cpp - ExistentialTransform.cpp -) + ExistentialTransform.cpp) diff --git a/lib/SILOptimizer/IPO/CMakeLists.txt b/lib/SILOptimizer/IPO/CMakeLists.txt index 57cdb7d60904e..e746c95dec22f 100644 --- 
a/lib/SILOptimizer/IPO/CMakeLists.txt +++ b/lib/SILOptimizer/IPO/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE CapturePromotion.cpp CapturePropagation.cpp ClosureSpecializer.cpp diff --git a/lib/SILOptimizer/LoopTransforms/CMakeLists.txt b/lib/SILOptimizer/LoopTransforms/CMakeLists.txt index 32a9dc6d7d88b..8b9985a2f1ab5 100644 --- a/lib/SILOptimizer/LoopTransforms/CMakeLists.txt +++ b/lib/SILOptimizer/LoopTransforms/CMakeLists.txt @@ -1,9 +1,8 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE ArrayBoundsCheckOpts.cpp ArrayPropertyOpt.cpp COWArrayOpt.cpp LoopRotate.cpp LoopUnroll.cpp LICM.cpp - ForEachLoopUnroll.cpp -) + ForEachLoopUnroll.cpp) diff --git a/lib/SILOptimizer/LoopTransforms/LICM.cpp b/lib/SILOptimizer/LoopTransforms/LICM.cpp index 171a10abd863a..a70f93d764c29 100644 --- a/lib/SILOptimizer/LoopTransforms/LICM.cpp +++ b/lib/SILOptimizer/LoopTransforms/LICM.cpp @@ -963,6 +963,41 @@ static bool storesCommonlyDominateLoopExits(SILValue addr, SILLoop *loop, if (stores.count(header) != 0) return true; + // Also a store in the pre-header dominates all exits. Although the situation + // is a bit different here: the store in the pre-header remains - it's not + // (re)moved by the LICM transformation. + // But even if the loop-stores are not dominating the loop exits, it + // makes sense to move them out of the loop in this case. When this is done, + // dead-store-elimination can then most likely eliminate the store in the + // pre-header. + // + // pre_header: + // store %v1 to %addr + // header: + // cond_br %cond, then, tail + // then: + // store %v2 to %addr // a conditional store in the loop + // br tail + // tail: + // cond_br %loop_cond, header, exit + // exit: + // + // will be transformed to + // + // pre_header: + // store %v1 to %addr // <- can be removed by DSE afterwards + // header: + // cond_br %cond, then, tail + // then: + // br tail + // tail(%phi): + // cond_br %loop_cond, header, exit + // exit: + // store %phi to %addr + // + if (stores.count(loop->getLoopPreheader()) != 0) + return true; + // Propagate the store-is-not-alive flag through the control flow in the loop, // starting at the header.
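The SIL sketch in the LICM comment above corresponds to a source pattern with an unconditional store before the loop and a conditional store inside it. A rough Swift-level analogue, illustrative only (the pass works on the SIL shown in the comment, not on source):

```swift
// After the new hoisting, the loop's conditional store becomes a single store
// of the final value at the loop exit, and dead-store elimination can usually
// remove the pre-header store of 0.
func lastNegative(in values: [Int], result: inout Int) {
    result = 0                  // store in the pre-header
    for v in values {
        if v < 0 {
            result = v          // conditional store inside the loop
        }
    }
}
```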
SmallPtrSet storesNotAlive; diff --git a/lib/SILOptimizer/Mandatory/CMakeLists.txt b/lib/SILOptimizer/Mandatory/CMakeLists.txt index 91902fd5323a0..ab367289f536c 100644 --- a/lib/SILOptimizer/Mandatory/CMakeLists.txt +++ b/lib/SILOptimizer/Mandatory/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE AccessEnforcementSelection.cpp AccessMarkerElimination.cpp AddressLowering.cpp @@ -25,5 +25,4 @@ silopt_register_sources( YieldOnceCheck.cpp MandatoryCombine.cpp OSLogOptimization.cpp - OwnershipModelEliminator.cpp -) + OwnershipModelEliminator.cpp) diff --git a/lib/SILOptimizer/PassManager/CMakeLists.txt b/lib/SILOptimizer/PassManager/CMakeLists.txt index 6db4ea12d8504..c86ce80a756dc 100644 --- a/lib/SILOptimizer/PassManager/CMakeLists.txt +++ b/lib/SILOptimizer/PassManager/CMakeLists.txt @@ -1,7 +1,6 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE PassManager.cpp Passes.cpp PassPipeline.cpp PrettyStackTrace.cpp - SILOptimizerRequests.cpp -) + SILOptimizerRequests.cpp) diff --git a/lib/SILOptimizer/SILCombiner/CMakeLists.txt b/lib/SILOptimizer/SILCombiner/CMakeLists.txt index 64cc710bd48ef..679c7bcc86fe8 100644 --- a/lib/SILOptimizer/SILCombiner/CMakeLists.txt +++ b/lib/SILOptimizer/SILCombiner/CMakeLists.txt @@ -1,7 +1,6 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE SILCombine.cpp SILCombinerApplyVisitors.cpp SILCombinerBuiltinVisitors.cpp SILCombinerCastVisitors.cpp - SILCombinerMiscVisitors.cpp -) + SILCombinerMiscVisitors.cpp) diff --git a/lib/SILOptimizer/SILCombiner/SILCombiner.h b/lib/SILOptimizer/SILCombiner/SILCombiner.h index 31585fee4ee03..8a7435272e26c 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombiner.h +++ b/lib/SILOptimizer/SILCombiner/SILCombiner.h @@ -225,6 +225,8 @@ class SILCombiner : // Optimize the "isConcrete" builtin. SILInstruction *optimizeBuiltinIsConcrete(BuiltinInst *I); + SILInstruction *optimizeBuiltinCOWBufferForReading(BuiltinInst *BI); + // Optimize the "trunc_N1_M2" builtin. if N1 is a result of "zext_M1_*" and // the following holds true: N1 > M1 and M2>= M1 SILInstruction *optimizeBuiltinTruncOrBitCast(BuiltinInst *I); diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp index d18d165842e7d..6dfeffba7d1bc 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp @@ -207,9 +207,9 @@ SILCombiner::optimizeApplyOfConvertFunctionInst(FullApplySite AI, // we got this far it is legal to perform the transformation (since // otherwise, we would be creating malformed SIL). 
bool setNonThrowing = FRI->getFunctionType()->hasErrorResult(); - SILInstruction *NAI = Builder.createApply(AI.getLoc(), FRI, SubstitutionMap(), - Args, setNonThrowing); - assert(FullApplySite::isa(NAI).getSubstCalleeType()->getAllResultsSubstType( + ApplyInst *NAI = Builder.createApply(AI.getLoc(), FRI, SubstitutionMap(), + Args, setNonThrowing); + assert(FullApplySite(NAI).getSubstCalleeType()->getAllResultsSubstType( AI.getModule(), AI.getFunction()->getTypeExpansionContext()) == AI.getSubstCalleeType()->getAllResultsSubstType( AI.getModule(), AI.getFunction()->getTypeExpansionContext()) && @@ -952,8 +952,7 @@ SILInstruction *SILCombiner::createApplyWithConcreteType( return CEI.lookupExistentialConformance(proto); } return ProtocolConformanceRef(proto); - }, - SubstFlags::ForceSubstituteOpenedExistentials); + }); } // We need to make sure that we can a) update Apply to use the new args and b) diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerBuiltinVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerBuiltinVisitors.cpp index bd5fd485adb83..d8fb87f60f5fd 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerBuiltinVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerBuiltinVisitors.cpp @@ -109,6 +109,49 @@ SILInstruction *SILCombiner::optimizeBuiltinIsConcrete(BuiltinInst *BI) { return Builder.createIntegerLiteral(BI->getLoc(), BI->getType(), 1); } +/// Replace +/// \code +/// %b = builtin "COWBufferForReading" %r +/// %a = ref_element_addr %b +/// \endcode +/// with +/// \code +/// %a = ref_element_addr [immutable] %r +/// \endcode +/// The same for ref_tail_addr. +SILInstruction *SILCombiner::optimizeBuiltinCOWBufferForReading(BuiltinInst *BI) { + auto useIter = BI->use_begin(); + while (useIter != BI->use_end()) { + auto nextIter = std::next(useIter); + SILInstruction *user = useIter->getUser(); + SILValue ref = BI->getOperand(0); + switch (user->getKind()) { + case SILInstructionKind::RefElementAddrInst: { + auto *REAI = cast(user); + REAI->setOperand(ref); + REAI->setImmutable(); + break; + } + case SILInstructionKind::RefTailAddrInst: { + auto *RTAI = cast(user); + RTAI->setOperand(ref); + RTAI->setImmutable(); + break; + } + case SILInstructionKind::StrongReleaseInst: + cast(user)->setOperand(ref); + break; + default: + break; + } + useIter = nextIter; + } + // If there are unknown users, keep the builtin, and IRGen will handle it. 
+ if (BI->use_empty()) + return eraseInstFromFunction(*BI); + return nullptr; +} + static unsigned getTypeWidth(SILType Ty) { if (auto BuiltinIntTy = Ty.getAs()) { if (BuiltinIntTy->isFixedWidth()) { @@ -541,6 +584,8 @@ SILInstruction *SILCombiner::visitBuiltinInst(BuiltinInst *I) { return optimizeBuiltinCanBeObjCClass(I); if (I->getBuiltinInfo().ID == BuiltinValueKind::IsConcrete) return optimizeBuiltinIsConcrete(I); + if (I->getBuiltinInfo().ID == BuiltinValueKind::COWBufferForReading) + return optimizeBuiltinCOWBufferForReading(I); if (I->getBuiltinInfo().ID == BuiltinValueKind::TakeArrayFrontToBack || I->getBuiltinInfo().ID == BuiltinValueKind::TakeArrayBackToFront || I->getBuiltinInfo().ID == BuiltinValueKind::TakeArrayNoAlias || diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp index e68a0ba7cfc5a..e370a62e16e8e 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp @@ -440,6 +440,11 @@ SILCombiner::visitThickToObjCMetatypeInst(ThickToObjCMetatypeInst *TTOCMI) { if (TTOCMI->getFunction()->hasOwnership()) return nullptr; + if (auto *OCTTMI = dyn_cast(TTOCMI->getOperand())) { + TTOCMI->replaceAllUsesWith(OCTTMI->getOperand()); + return eraseInstFromFunction(*TTOCMI); + } + // Perform the following transformations: // (thick_to_objc_metatype (metatype @thick)) -> // (metatype @objc_metatype) @@ -460,6 +465,11 @@ SILCombiner::visitObjCToThickMetatypeInst(ObjCToThickMetatypeInst *OCTTMI) { if (OCTTMI->getFunction()->hasOwnership()) return nullptr; + if (auto *TTOCMI = dyn_cast(OCTTMI->getOperand())) { + OCTTMI->replaceAllUsesWith(TTOCMI->getOperand()); + return eraseInstFromFunction(*OCTTMI); + } + // Perform the following transformations: // (objc_to_thick_metatype (metatype @objc_metatype)) -> // (metatype @thick) diff --git a/lib/SILOptimizer/SILOptimizer.cpp b/lib/SILOptimizer/SILOptimizer.cpp new file mode 100644 index 0000000000000..eb03eeea9abf6 --- /dev/null +++ b/lib/SILOptimizer/SILOptimizer.cpp @@ -0,0 +1,15 @@ +//===--- SILOptimizer.cpp -------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// DO NOT MODIFY THIS FILE! +// The SILOptimizer library is split into sub-components, modify the respective +// sub-component. 
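Several changes in this patch are pieces of one copy-on-write story: SILGen lowers the new begin/end COW-mutation builtins to `begin_cow_mutation`/`end_cow_mutation` by loading the buffer reference, bracketing the mutation, and storing the buffer back; ARC analysis knows that `begin_cow_mutation` takes its operand as owned; and SILCombine (just above) folds `COWBufferForReading` into `[immutable]` `ref_element_addr`/`ref_tail_addr` accesses. At the source level this is the familiar uniqueness-checked copy-on-write pattern. The sketch below uses the public `isKnownUniquelyReferenced` API instead of the builtins (which only standard-library-style code built with `-parse-stdlib` can reach), so treat it as an analogy rather than the stdlib's actual implementation:

```swift
final class Storage {
    var elements: [Int]
    init(_ elements: [Int]) { self.elements = elements }
}

struct COWStack {
    private var storage = Storage([])

    // A pure read: once the optimizer knows the buffer is only read, the
    // element accesses can be marked immutable.
    var top: Int? { storage.elements.last }

    mutating func push(_ x: Int) {
        // Uniqueness check plus copy: the "not unique" path after
        // begin_cow_mutation.
        if !isKnownUniquelyReferenced(&storage) {
            storage = Storage(storage.elements)
        }
        storage.elements.append(x)
        // end_cow_mutation marks the buffer immutable again at this point.
    }
}
```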
diff --git a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp index ee73a35d78a92..0ff7b4c2f1d9b 100644 --- a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp +++ b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp @@ -138,6 +138,7 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::PoundAssert: case BuiltinValueKind::TypePtrAuthDiscriminator: case BuiltinValueKind::GlobalStringTablePointer: + case BuiltinValueKind::COWBufferForReading: return false; // Handle some rare builtins that may be sensitive to object lifetime diff --git a/lib/SILOptimizer/Transforms/CMakeLists.txt b/lib/SILOptimizer/Transforms/CMakeLists.txt index e060c0208c1e6..305fbc3dca4c0 100644 --- a/lib/SILOptimizer/Transforms/CMakeLists.txt +++ b/lib/SILOptimizer/Transforms/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE ARCCodeMotion.cpp AccessEnforcementDom.cpp AccessEnforcementOpts.cpp @@ -37,5 +37,4 @@ silopt_register_sources( SpeculativeDevirtualizer.cpp StackPromotion.cpp TempRValueElimination.cpp - UnsafeGuaranteedPeephole.cpp -) + UnsafeGuaranteedPeephole.cpp) diff --git a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp index f9225d72085a6..350db1805b13d 100644 --- a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp +++ b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp @@ -835,9 +835,7 @@ static bool getDeadInstsAfterInitializerRemoved( bool DeadObjectElimination::processAllocApply(ApplyInst *AI, DeadEndBlocks &DEBlocks) { // Currently only handle array.uninitialized - if (ArraySemanticsCall(AI).getKind() != ArrayCallKind::kArrayUninitialized && - ArraySemanticsCall(AI).getKind() != - ArrayCallKind::kArrayUninitializedIntrinsic) + if (!isAllocatingApply(AI)) return false; llvm::SmallVector instsDeadAfterInitializerRemoved; diff --git a/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp b/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp index 5ae552dd8db2a..e7bfef35bbb4e 100644 --- a/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp +++ b/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp @@ -1585,8 +1585,7 @@ bool RLEContext::run() { LLVM_DEBUG(for (unsigned i = 0; i < LocationVault.size(); ++i) { llvm::dbgs() << "LSLocation #" << i; - getLocation(i).print(llvm::dbgs(), &Fn->getModule(), - TypeExpansionContext(*Fn)); + getLocation(i).print(llvm::dbgs()); }); if (Optimistic) diff --git a/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp b/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp index 6913c3a3c94ee..1991379ee6481 100644 --- a/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp +++ b/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp @@ -285,8 +285,6 @@ static bool isDefaultCaseKnown(ClassHierarchyAnalysis *CHA, auto *Method = CMI->getMember().getAbstractFunctionDecl(); assert(Method && "not a function"); - const DeclContext *DC = AI.getModule().getAssociatedContext(); - if (CD->isFinal()) return true; @@ -295,13 +293,8 @@ static bool isDefaultCaseKnown(ClassHierarchyAnalysis *CHA, if (CD->checkAncestry(AncestryFlags::ObjC)) return false; - // Without an associated context we cannot perform any - // access-based optimizations. - if (!DC) - return false; - // Only handle classes defined within the SILModule's associated context. 
- if (!CD->isChildContextOf(DC)) + if (!CD->isChildContextOf(AI.getModule().getAssociatedContext())) return false; if (!CD->hasAccess()) diff --git a/lib/SILOptimizer/UtilityPasses/CMakeLists.txt b/lib/SILOptimizer/UtilityPasses/CMakeLists.txt index 24f268320b8da..2fa9f1fa0f2ea 100644 --- a/lib/SILOptimizer/UtilityPasses/CMakeLists.txt +++ b/lib/SILOptimizer/UtilityPasses/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE AADumper.cpp AccessSummaryDumper.cpp AccessedStorageDumper.cpp @@ -31,5 +31,4 @@ silopt_register_sources( SimplifyUnreachableContainingBlocks.cpp StripDebugInfo.cpp OwnershipDumper.cpp - OwnershipVerifierTextualErrorDumper.cpp -) + OwnershipVerifierTextualErrorDumper.cpp) diff --git a/lib/SILOptimizer/UtilityPasses/LSLocationPrinter.cpp b/lib/SILOptimizer/UtilityPasses/LSLocationPrinter.cpp index e30cbc9cce79f..b99ba4fb1cc4c 100644 --- a/lib/SILOptimizer/UtilityPasses/LSLocationPrinter.cpp +++ b/lib/SILOptimizer/UtilityPasses/LSLocationPrinter.cpp @@ -171,7 +171,7 @@ class LSLocationPrinter : public SILModuleTransform { llvm::outs() << "#" << Counter++ << II; for (auto &Loc : Locs) { - Loc.print(llvm::outs(), &Fn.getModule(), TypeExpansionContext(Fn)); + Loc.print(llvm::outs()); } Locs.clear(); } @@ -227,7 +227,7 @@ class LSLocationPrinter : public SILModuleTransform { LSLocation::reduce(L, &Fn.getModule(), TypeExpansionContext(Fn), SLocs); llvm::outs() << "#" << Counter++ << II; for (auto &Loc : SLocs) { - Loc.print(llvm::outs(), &Fn.getModule(), TypeExpansionContext(Fn)); + Loc.print(llvm::outs()); } L.reset(); Locs.clear(); diff --git a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp index bac10dffc61f1..11604e211c9d8 100644 --- a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp +++ b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp @@ -332,6 +332,8 @@ static bool hasOpaqueArchetype(TypeExpansionContext context, case SILInstructionKind::LinearFunctionInst: case SILInstructionKind::LinearFunctionExtractInst: case SILInstructionKind::DifferentiabilityWitnessFunctionInst: + case SILInstructionKind::BeginCOWMutationInst: + case SILInstructionKind::EndCOWMutationInst: // Handle by operand and result check. break; diff --git a/lib/SILOptimizer/Utils/CMakeLists.txt b/lib/SILOptimizer/Utils/CMakeLists.txt index 72587675a5500..40359206beeb2 100644 --- a/lib/SILOptimizer/Utils/CMakeLists.txt +++ b/lib/SILOptimizer/Utils/CMakeLists.txt @@ -1,4 +1,4 @@ -silopt_register_sources( +target_sources(swiftSILOptimizer PRIVATE BasicBlockOptUtils.cpp CFGOptUtils.cpp CanonicalizeInstruction.cpp @@ -21,5 +21,4 @@ silopt_register_sources( SILSSAUpdater.cpp SpecializationMangler.cpp StackNesting.cpp - ValueLifetime.cpp -) + ValueLifetime.cpp) diff --git a/lib/SILOptimizer/Utils/Devirtualize.cpp b/lib/SILOptimizer/Utils/Devirtualize.cpp index f07a35cf7d47e..498a4ea2d3b59 100644 --- a/lib/SILOptimizer/Utils/Devirtualize.cpp +++ b/lib/SILOptimizer/Utils/Devirtualize.cpp @@ -89,13 +89,6 @@ static bool isEffectivelyFinalMethod(FullApplySite applySite, CanType classType, if (cd && cd->isFinal()) return true; - const DeclContext *dc = applySite.getModule().getAssociatedContext(); - - // Without an associated context we cannot perform any - // access-based optimizations. 
- if (!dc) - return false; - auto *cmi = cast(applySite.getCallee()); if (!calleesAreStaticallyKnowable(applySite.getModule(), cmi->getMember())) @@ -149,18 +142,11 @@ static bool isEffectivelyFinalMethod(FullApplySite applySite, CanType classType, /// it is a whole-module compilation. static bool isKnownFinalClass(ClassDecl *cd, SILModule &module, ClassHierarchyAnalysis *cha) { - const DeclContext *dc = module.getAssociatedContext(); - if (cd->isFinal()) return true; - // Without an associated context we cannot perform any - // access-based optimizations. - if (!dc) - return false; - // Only handle classes defined within the SILModule's associated context. - if (!cd->isChildContextOf(dc)) + if (!cd->isChildContextOf(module.getAssociatedContext())) return false; if (!cd->hasAccess()) diff --git a/lib/SILOptimizer/Utils/InstOptUtils.cpp b/lib/SILOptimizer/Utils/InstOptUtils.cpp index dfef347c29333..46ff29ceffe3b 100644 --- a/lib/SILOptimizer/Utils/InstOptUtils.cpp +++ b/lib/SILOptimizer/Utils/InstOptUtils.cpp @@ -1815,12 +1815,8 @@ bool swift::calleesAreStaticallyKnowable(SILModule &module, SILDeclRef decl) { /// knowable based on the Decl and the compilation mode? bool swift::calleesAreStaticallyKnowable(SILModule &module, AbstractFunctionDecl *afd) { - const DeclContext *assocDC = module.getAssociatedContext(); - if (!assocDC) - return false; - // Only handle members defined within the SILModule's associated context. - if (!afd->isChildContextOf(assocDC)) + if (!afd->isChildContextOf(module.getAssociatedContext())) return false; if (afd->isDynamic()) { @@ -1859,12 +1855,8 @@ bool swift::calleesAreStaticallyKnowable(SILModule &module, // FIXME: Merge this with calleesAreStaticallyKnowable above bool swift::calleesAreStaticallyKnowable(SILModule &module, EnumElementDecl *eed) { - const DeclContext *assocDC = module.getAssociatedContext(); - if (!assocDC) - return false; - // Only handle members defined within the SILModule's associated context. 
- if (!eed->isChildContextOf(assocDC)) + if (!eed->isChildContextOf(module.getAssociatedContext())) return false; if (eed->isDynamic()) { diff --git a/lib/SILOptimizer/Utils/SILInliner.cpp b/lib/SILOptimizer/Utils/SILInliner.cpp index 29df76e4b416f..ded5b22a009b0 100644 --- a/lib/SILOptimizer/Utils/SILInliner.cpp +++ b/lib/SILOptimizer/Utils/SILInliner.cpp @@ -778,6 +778,7 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) { case SILInstructionKind::ThrowInst: case SILInstructionKind::UnwindInst: case SILInstructionKind::YieldInst: + case SILInstructionKind::EndCOWMutationInst: return InlineCost::Free; case SILInstructionKind::AbortApplyInst: @@ -869,6 +870,7 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) { case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::IsEscapingClosureInst: case SILInstructionKind::IsUniqueInst: + case SILInstructionKind::BeginCOWMutationInst: case SILInstructionKind::InitBlockStorageHeaderInst: case SILInstructionKind::SelectEnumAddrInst: case SILInstructionKind::SelectEnumInst: diff --git a/lib/Sema/CSApply.cpp b/lib/Sema/CSApply.cpp index 6bbc088b4bcbd..b6db0facd2475 100644 --- a/lib/Sema/CSApply.cpp +++ b/lib/Sema/CSApply.cpp @@ -8321,7 +8321,7 @@ ExprWalker::rewriteTarget(SolutionApplicationTarget target) { result.setExpr(resultExpr); auto &ctx = cs.getASTContext(); - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (cs.isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log << "---Type-checked expression---\n"; resultExpr->dump(log); diff --git a/lib/Sema/CSBindings.cpp b/lib/Sema/CSBindings.cpp index a73e1f60bb484..02e435e2c1fd3 100644 --- a/lib/Sema/CSBindings.cpp +++ b/lib/Sema/CSBindings.cpp @@ -107,7 +107,7 @@ ConstraintSystem::determineBestBindings() { inferTransitiveSupertypeBindings(cache, bindings); - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); bindings.dump(typeVar, log, solverState->depth * 2); } @@ -1087,27 +1087,27 @@ bool TypeVariableBinding::attempt(ConstraintSystem &cs) const { // resolved and had to be bound to a placeholder "hole" type. cs.increaseScore(SK_Hole); + ConstraintFix *fix = nullptr; if (auto *GP = TypeVar->getImpl().getGenericParameter()) { auto path = dstLocator->getPath(); // Drop `generic parameter` locator element so that all missing // generic parameters related to the same path can be coalesced later. 
- auto *fix = DefaultGenericArgument::create( + fix = DefaultGenericArgument::create( cs, GP, cs.getConstraintLocator(dstLocator->getAnchor(), path.drop_back())); - if (cs.recordFix(fix)) - return true; + } else if (TypeVar->getImpl().isClosureParameterType()) { + fix = SpecifyClosureParameterType::create(cs, dstLocator); } else if (TypeVar->getImpl().isClosureResultType()) { - auto *fix = SpecifyClosureReturnType::create( - cs, TypeVar->getImpl().getLocator()); - if (cs.recordFix(fix)) - return true; + fix = SpecifyClosureReturnType::create(cs, dstLocator); } else if (srcLocator->getAnchor() && isExpr(srcLocator->getAnchor())) { - auto *fix = SpecifyObjectLiteralTypeImport::create( - cs, TypeVar->getImpl().getLocator()); - if (cs.recordFix(fix)) - return true; + fix = SpecifyObjectLiteralTypeImport::create(cs, dstLocator); + } else if (srcLocator->isKeyPathRoot()) { + fix = SpecifyKeyPathRootType::create(cs, dstLocator); } + + if (fix && cs.recordFix(fix)) + return true; } } diff --git a/lib/Sema/CSDiagnostics.cpp b/lib/Sema/CSDiagnostics.cpp index b6cd1270eefc3..c5602e6b445f9 100644 --- a/lib/Sema/CSDiagnostics.cpp +++ b/lib/Sema/CSDiagnostics.cpp @@ -41,6 +41,12 @@ using namespace swift; using namespace constraints; +static bool hasFixFor(const Solution &solution, ConstraintLocator *locator) { + return llvm::any_of(solution.Fixes, [&locator](const ConstraintFix *fix) { + return fix->getLocator() == locator; + }); +} + FailureDiagnostic::~FailureDiagnostic() {} bool FailureDiagnostic::diagnose(bool asNote) { @@ -6127,6 +6133,75 @@ bool MissingContextualBaseInMemberRefFailure::diagnoseAsError() { return true; } +bool UnableToInferClosureParameterType::diagnoseAsError() { + auto *closure = castToExpr(getRawAnchor()); + + // Let's check whether this closure is an argument to + // a call which couldn't be properly resolved e.g. + // missing member or invalid contextual reference and + // if so let's not diagnose this problem because main + // issue here is inability to establish context for + // closure inference. + // + // TODO(diagnostics): Once we gain an ability to determine + // originating source of type holes this check could be + // significantly simplified. + { + auto &solution = getSolution(); + + // If there is a contextual mismatch associated with this + // closure, let's not diagnose any parameter type issues. + if (hasFixFor(solution, getConstraintLocator( + closure, LocatorPathElt::ContextualType()))) + return false; + + if (auto *parentExpr = findParentExpr(closure)) { + while (parentExpr && + (isa(parentExpr) || isa(parentExpr))) { + parentExpr = findParentExpr(parentExpr); + } + + if (parentExpr) { + // Missing or invalid member reference in call. + if (auto *AE = dyn_cast(parentExpr)) { + if (getType(AE->getFn())->isHole()) + return false; + } + + // Any fix anchored on parent expression makes it unnecessary + // to diagnose unability to infer parameter type because it's + // an indication that proper context couldn't be established to + // resolve the closure. 
+ ASTNode parentNode(parentExpr); + if (llvm::any_of(solution.Fixes, + [&parentNode](const ConstraintFix *fix) -> bool { + return fix->getAnchor() == parentNode; + })) + return false; + } + } + } + + auto paramIdx = getLocator() + ->castLastElementTo() + .getIndex(); + + auto *PD = closure->getParameters()->get(paramIdx); + + llvm::SmallString<16> id; + llvm::raw_svector_ostream OS(id); + + if (PD->isAnonClosureParam()) { + OS << "$" << paramIdx; + } else { + OS << "'" << PD->getParameterName() << "'"; + } + + auto loc = PD->isAnonClosureParam() ? getLoc() : PD->getLoc(); + emitDiagnosticAt(loc, diag::cannot_infer_closure_parameter_type, OS.str()); + return true; +} + bool UnableToInferClosureReturnType::diagnoseAsError() { auto *closure = castToExpr(getRawAnchor()); @@ -6273,3 +6348,25 @@ bool MultiArgFuncKeyPathFailure::diagnoseAsError() { resolveType(functionType)); return true; } + +bool UnableToInferKeyPathRootFailure::diagnoseAsError() { + assert(isExpr(getAnchor()) && "Expected key path expression"); + auto &ctx = getASTContext(); + auto contextualType = getContextualType(getAnchor()); + auto *keyPathExpr = castToExpr(getAnchor()); + + auto emitKeyPathDiagnostic = [&]() { + if (contextualType && + contextualType->getAnyNominal() == ctx.getAnyKeyPathDecl()) { + return emitDiagnostic( + diag::cannot_infer_keypath_root_anykeypath_context); + } + return emitDiagnostic( + diag::cannot_infer_contextual_keypath_type_specify_root); + }; + + emitKeyPathDiagnostic() + .highlight(keyPathExpr->getLoc()) + .fixItInsertAfter(keyPathExpr->getStartLoc(), "<#Root#>"); + return true; +} diff --git a/lib/Sema/CSDiagnostics.h b/lib/Sema/CSDiagnostics.h index 931899196999c..64428d227d763 100644 --- a/lib/Sema/CSDiagnostics.h +++ b/lib/Sema/CSDiagnostics.h @@ -1971,6 +1971,15 @@ class MissingContextualBaseInMemberRefFailure final : public FailureDiagnostic { bool diagnoseAsError(); }; +class UnableToInferClosureParameterType final : public FailureDiagnostic { +public: + UnableToInferClosureParameterType(const Solution &solution, + ConstraintLocator *locator) + : FailureDiagnostic(solution, locator) {} + + bool diagnoseAsError(); +}; + class UnableToInferClosureReturnType final : public FailureDiagnostic { public: UnableToInferClosureReturnType(const Solution &solution, @@ -2058,6 +2067,21 @@ class MultiArgFuncKeyPathFailure final : public FailureDiagnostic { bool diagnoseAsError() override; }; +/// Diagnose a failure to infer a KeyPath type by context. 
+/// +/// ```swift +/// _ = \.x +/// let _ : AnyKeyPath = \.x +/// ``` +class UnableToInferKeyPathRootFailure final : public FailureDiagnostic { +public: + UnableToInferKeyPathRootFailure(const Solution &solution, + ConstraintLocator *locator) + : FailureDiagnostic(solution, locator) {} + + bool diagnoseAsError() override; +}; + } // end namespace constraints } // end namespace swift diff --git a/lib/Sema/CSFix.cpp b/lib/Sema/CSFix.cpp index 5ba0f6a2e746f..29d3f6006c529 100644 --- a/lib/Sema/CSFix.cpp +++ b/lib/Sema/CSFix.cpp @@ -22,6 +22,7 @@ #include "ConstraintSystem.h" #include "OverloadChoice.h" #include "swift/AST/Expr.h" +#include "swift/AST/ParameterList.h" #include "swift/AST/Type.h" #include "swift/AST/Types.h" #include "swift/Basic/SourceManager.h" @@ -1229,6 +1230,38 @@ SpecifyBaseTypeForContextualMember *SpecifyBaseTypeForContextualMember::create( SpecifyBaseTypeForContextualMember(cs, member, locator); } +std::string SpecifyClosureParameterType::getName() const { + std::string name; + llvm::raw_string_ostream OS(name); + + auto *closure = castToExpr(getAnchor()); + auto paramLoc = + getLocator()->castLastElementTo(); + + auto *PD = closure->getParameters()->get(paramLoc.getIndex()); + + OS << "specify type for parameter "; + if (PD->isAnonClosureParam()) { + OS << "$" << paramLoc.getIndex(); + } else { + OS << "'" << PD->getParameterName() << "'"; + } + + return OS.str(); +} + +bool SpecifyClosureParameterType::diagnose(const Solution &solution, + bool asNote) const { + UnableToInferClosureParameterType failure(solution, getLocator()); + return failure.diagnose(asNote); +} + +SpecifyClosureParameterType * +SpecifyClosureParameterType::create(ConstraintSystem &cs, + ConstraintLocator *locator) { + return new (cs.getAllocator()) SpecifyClosureParameterType(cs, locator); +} + bool SpecifyClosureReturnType::diagnose(const Solution &solution, bool asNote) const { UnableToInferClosureReturnType failure(solution, getLocator()); @@ -1323,3 +1356,17 @@ AllowKeyPathRootTypeMismatch::create(ConstraintSystem &cs, Type lhs, Type rhs, return new (cs.getAllocator()) AllowKeyPathRootTypeMismatch(cs, lhs, rhs, locator); } + +SpecifyKeyPathRootType * +SpecifyKeyPathRootType::create(ConstraintSystem &cs, + ConstraintLocator *locator) { + return new (cs.getAllocator()) + SpecifyKeyPathRootType(cs, locator); +} + +bool SpecifyKeyPathRootType::diagnose(const Solution &solution, + bool asNote) const { + UnableToInferKeyPathRootFailure failure(solution, getLocator()); + + return failure.diagnose(asNote); +} diff --git a/lib/Sema/CSFix.h b/lib/Sema/CSFix.h index 73d1e788c8438..1decf4baa85d1 100644 --- a/lib/Sema/CSFix.h +++ b/lib/Sema/CSFix.h @@ -234,6 +234,10 @@ enum class FixKind : uint8_t { /// inferred and has to be specified explicitly. SpecifyBaseTypeForContextualMember, + /// Type of the closure parameter used in the body couldn't be inferred + /// and has to be specified explicitly. + SpecifyClosureParameterType, + /// Closure return type has to be explicitly specified because it can't be /// inferred in current context e.g. because it's a multi-statement closure. SpecifyClosureReturnType, @@ -253,13 +257,17 @@ enum class FixKind : uint8_t { /// A warning fix that allows a coercion to perform a force-cast. AllowCoercionToForceCast, - + /// Allow key path root type mismatch when applying a key path that has a /// root type not convertible to the type of the base instance. 
AllowKeyPathRootTypeMismatch, /// Allow key path to be bound to a function type with more than 1 argument - AllowMultiArgFuncKeyPathMismatch + AllowMultiArgFuncKeyPathMismatch, + + /// Specify key path root type when it cannot be infered from context. + SpecifyKeyPathRootType, + }; class ConstraintFix { @@ -1708,6 +1716,19 @@ class SpecifyBaseTypeForContextualMember final : public ConstraintFix { create(ConstraintSystem &cs, DeclNameRef member, ConstraintLocator *locator); }; +class SpecifyClosureParameterType final : public ConstraintFix { + SpecifyClosureParameterType(ConstraintSystem &cs, ConstraintLocator *locator) + : ConstraintFix(cs, FixKind::SpecifyClosureParameterType, locator) {} + +public: + std::string getName() const; + + bool diagnose(const Solution &solution, bool asNote = false) const; + + static SpecifyClosureParameterType *create(ConstraintSystem &cs, + ConstraintLocator *locator); +}; + class SpecifyClosureReturnType final : public ConstraintFix { SpecifyClosureReturnType(ConstraintSystem &cs, ConstraintLocator *locator) : ConstraintFix(cs, FixKind::SpecifyClosureReturnType, locator) {} @@ -1805,7 +1826,7 @@ class AllowCoercionToForceCast final : public ContextualMismatch { /// bar[keyPath: keyPath] /// } /// \endcode -class AllowKeyPathRootTypeMismatch : public ContextualMismatch { +class AllowKeyPathRootTypeMismatch final : public ContextualMismatch { protected: AllowKeyPathRootTypeMismatch(ConstraintSystem &cs, Type lhs, Type rhs, ConstraintLocator *locator) @@ -1823,6 +1844,21 @@ class AllowKeyPathRootTypeMismatch : public ContextualMismatch { create(ConstraintSystem &cs, Type lhs, Type rhs, ConstraintLocator *locator); }; +class SpecifyKeyPathRootType final : public ConstraintFix { + SpecifyKeyPathRootType(ConstraintSystem &cs, ConstraintLocator *locator) + : ConstraintFix(cs, FixKind::SpecifyKeyPathRootType, locator) {} + + public: + std::string getName() const { + return "specify key path root type"; + } + + bool diagnose(const Solution &solution, bool asNote = false) const; + + static SpecifyKeyPathRootType *create(ConstraintSystem &cs, + ConstraintLocator *locator); +}; + } // end namespace constraints } // end namespace swift diff --git a/lib/Sema/CSGen.cpp b/lib/Sema/CSGen.cpp index a4ea12163c394..a4ac62ba8be84 100644 --- a/lib/Sema/CSGen.cpp +++ b/lib/Sema/CSGen.cpp @@ -914,6 +914,35 @@ namespace { }; } // end anonymous namespace +namespace { +// Check if \p E is a call expression to curried thunk of "KeyPath as function". +// i.e. '{ `$kp$` in { $0[keyPath: $kp$] } }(keypath)' +static bool isKeyPathCurriedThunkCallExpr(Expr *E) { + auto CE = dyn_cast(E); + if (!CE) + return false; + auto thunk = dyn_cast(CE->getFn()); + if (!thunk) + return false; + if (thunk->getParameters()->size() != 1 || + thunk->getParameters()->get(0)->getParameterName().str() != "$kp$") + return false; + + auto PE = dyn_cast(CE->getArg()); + if (!PE) + return false; + return isa(PE->getSubExpr()); +} + +// Extract the keypath expression from the curried thunk expression. 
+static Expr *extractKeyPathFromCurryThunkCall(Expr *E) { + assert(isKeyPathCurriedThunkCallExpr(E)); + auto call = cast(E); + auto arg = cast(call->getArg()); + return arg->getSubExpr(); +} +} // end anonymous namespace + namespace { class ConstraintGenerator : public ExprVisitor { @@ -2162,8 +2191,12 @@ namespace { auto declaredTy = param->getType(); externalType = CS.openUnboundGenericType(declaredTy, paramLoc); } else { + // Let's allow parameters which haven't been explicitly typed + // to become holes by default, this helps in situations like + // `foo { a in }` where `foo` doesn't exist. externalType = CS.createTypeVariable( - paramLoc, TVO_CanBindToInOut | TVO_CanBindToNoEscape); + paramLoc, + TVO_CanBindToInOut | TVO_CanBindToNoEscape | TVO_CanBindToHole); } closureParams.push_back(param->toFunctionParam(externalType)); @@ -3398,7 +3431,8 @@ namespace { auto rootLocator = CS.getConstraintLocator(E, ConstraintLocator::KeyPathRoot); auto locator = CS.getConstraintLocator(E); - Type root = CS.createTypeVariable(rootLocator, TVO_CanBindToNoEscape); + Type root = CS.createTypeVariable(rootLocator, TVO_CanBindToNoEscape | + TVO_CanBindToHole); // If a root type was explicitly given, then resolve it now. if (auto rootRepr = E->getRootType()) { @@ -3540,7 +3574,8 @@ namespace { // path components. auto typeLoc = CS.getConstraintLocator(locator, ConstraintLocator::KeyPathType); - Type kpTy = CS.createTypeVariable(typeLoc, TVO_CanBindToNoEscape); + Type kpTy = CS.createTypeVariable(typeLoc, TVO_CanBindToNoEscape | + TVO_CanBindToHole); CS.addKeyPathConstraint(kpTy, root, rvalueBase, componentTypeVars, locator); return kpTy; @@ -3785,6 +3820,12 @@ namespace { continue; } + // Extract keypath from '{ `$kp$` in { $0[keyPath: $kp$] } }(keypath)' + if (isKeyPathCurriedThunkCallExpr(expr)) { + expr = extractKeyPathFromCurryThunkCall(expr); + continue; + } + // Restore '@autoclosure'd value. if (auto ACE = dyn_cast(expr)) { // This is only valid if the closure doesn't have parameters. @@ -3792,6 +3833,7 @@ namespace { expr = ACE->getSingleExpressionBody(); continue; } + llvm_unreachable("other AutoClosureExpr must be handled specially"); } // Remove any semantic expression injected by typechecking. 
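Two of the constraint-generation changes above are easiest to see from the source patterns they handle. Unannotated closure parameters may now bind to holes, so a call like `foo { a in }` against a nonexistent `foo` (the case mentioned in the comment) no longer derails solving, and a genuinely un-inferable parameter gets the new `SpecifyClosureParameterType` fix. Separately, a key path literal used as a function is modeled by Sema as a call to a curried thunk whose parameter is named `$kp$`, which `isKeyPathCurriedThunkCallExpr`/`extractKeyPathFromCurryThunkCall` unwrap back to the key path literal. An illustrative snippet (not from the patch):

```swift
struct Person { let name: String }
let people = [Person(name: "Ada"), Person(name: "Grace")]

// Key path used as a function: type-checked through the
// '{ $kp$ in { $0[keyPath: $kp$] } }(\.name)' thunk described above.
let names = people.map(\.name)

// No context to infer the parameter type: the type variable becomes a hole and
// the failure is reported on the parameter itself
// (diag::cannot_infer_closure_parameter_type).
// let f = { a in a }
```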
@@ -4362,7 +4404,7 @@ bool ConstraintSystem::generateConstraints( target = *resultTarget; } - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log << "---Initial constraints for the given expression---\n"; print(log, expr); diff --git a/lib/Sema/CSRanking.cpp b/lib/Sema/CSRanking.cpp index d42a49b6eefe3..86dafa1f24723 100644 --- a/lib/Sema/CSRanking.cpp +++ b/lib/Sema/CSRanking.cpp @@ -35,7 +35,7 @@ void ConstraintSystem::increaseScore(ScoreKind kind, unsigned value) { unsigned index = static_cast(kind); CurrentScore.Data[index] += value; - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver && value > 0) { + if (isDebugMode() && value > 0) { auto &log = getASTContext().TypeCheckerDebug->getStream(); if (solverState) log.indent(solverState->depth * 2); @@ -102,7 +102,7 @@ bool ConstraintSystem::worseThanBestSolution() const { CurrentScore <= *solverState->BestScore) return false; - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log.indent(solverState->depth * 2) << "(solution is worse than the best solution)\n"; @@ -386,7 +386,9 @@ bool CompareDeclSpecializationRequest::evaluate( Evaluator &eval, DeclContext *dc, ValueDecl *decl1, ValueDecl *decl2, bool isDynamicOverloadComparison) const { auto &C = decl1->getASTContext(); - if (C.TypeCheckerOpts.DebugConstraintSolver) { + // Construct a constraint system to compare the two declarations. + ConstraintSystem cs(dc, ConstraintSystemOptions()); + if (cs.isDebugMode()) { auto &log = C.TypeCheckerDebug->getStream(); log << "Comparing declarations\n"; decl1->print(log); @@ -397,8 +399,8 @@ bool CompareDeclSpecializationRequest::evaluate( log << ")\n"; } - auto completeResult = [&C](bool result) { - if (C.TypeCheckerOpts.DebugConstraintSolver) { + auto completeResult = [&C, &cs](bool result) { + if (cs.isDebugMode()) { auto &log = C.TypeCheckerDebug->getStream(); log << "comparison result: " << (result ? "better" : "not better") << "\n"; @@ -499,8 +501,6 @@ bool CompareDeclSpecializationRequest::evaluate( return cs.openType(type, replacements); }; - // Construct a constraint system to compare the two declarations. 
- ConstraintSystem cs(dc, ConstraintSystemOptions()); bool knownNonSubtype = false; auto *locator = cs.getConstraintLocator({}); @@ -737,7 +737,7 @@ static void addKeyPathDynamicMemberOverloads( SolutionCompareResult ConstraintSystem::compareSolutions( ConstraintSystem &cs, ArrayRef solutions, const SolutionDiff &diff, unsigned idx1, unsigned idx2) { - if (cs.getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (cs.isDebugMode()) { auto &log = cs.getASTContext().TypeCheckerDebug->getStream(); log.indent(cs.solverState->depth * 2) << "comparing solutions " << idx1 << " and " << idx2 <<"\n"; @@ -1261,7 +1261,7 @@ ConstraintSystem::findBestSolution(SmallVectorImpl &viable, if (viable.size() == 1) return 0; - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log.indent(solverState->depth * 2) << "Comparing " << viable.size() << " viable solutions\n"; diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index 0f1279a2a578a..a088fc1fbc233 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -5674,6 +5674,19 @@ static bool isForKeyPathSubscript(ConstraintSystem &cs, return false; } +static bool isForKeyPathSubscriptWithoutLabel(ConstraintSystem &cs, + ConstraintLocator *locator) { + if (!locator || !locator->getAnchor()) + return false; + + if (auto *SE = getAsExpr(locator->getAnchor())) { + auto *indexExpr = SE->getIndex(); + return isa(indexExpr) && + isa(indexExpr->getSemanticsProvidingExpr()); + } + return false; +} + /// Determine whether all of the given candidate overloads /// found through conditional conformances of a given base type. /// This is useful to figure out whether it makes sense to @@ -5801,7 +5814,13 @@ performMemberLookup(ConstraintKind constraintKind, DeclNameRef memberName, MemberLookupResult result; result.OverallResult = MemberLookupResult::HasResults; - if (isForKeyPathSubscript(*this, memberLocator)) { + // Add key path result. + // If we are including inaccessible members, check for the use of a keypath + // subscript without a `keyPath:` label. Add it to the result so that it + // can be caught by the missing argument label checking later. + if (isForKeyPathSubscript(*this, memberLocator) || + (isForKeyPathSubscriptWithoutLabel(*this, memberLocator) + && includeInaccessibleMembers)) { if (baseTy->isAnyObject()) { result.addUnviable( OverloadChoice(baseTy, OverloadChoiceKind::KeyPathApplication), @@ -7663,6 +7682,32 @@ ConstraintSystem::simplifyKeyPathConstraint( return true; }; + + // If we have a hole somewhere in the key path, the solver won't be able to + // infer the key path type. So let's just assume this is solved. + if (shouldAttemptFixes()) { + if (keyPathTy->isHole()) + return SolutionKind::Solved; + + // If the root type has been bound to a hole, we cannot infer it. + if (getFixedTypeRecursive(rootTy, /*wantRValue*/ true)->isHole()) + return SolutionKind::Solved; + + // If we have e.g a missing member somewhere, a component type variable + // will have been marked as a potential hole. + // FIXME: This relies on the fact that we only mark an overload type + // variable as a potential hole once we've added a corresponding fix. We + // can't use 'isHole' instead, as that doesn't handle cases where the + // overload type variable gets bound to another type from the context rather + // than a hole. We need to come up with a better way of handling the + // relationship between key paths and overloads. 
+ if (llvm::any_of(componentTypeVars, [&](TypeVariableType *tv) { + return tv->getImpl().getLocator()->isForKeyPathComponent() && + tv->getImpl().canBindToHole(); + })) { + return SolutionKind::Solved; + } + } // If we're fixed to a bound generic type, trying harvesting context from it. // However, we don't want a solution that fixes the expression type to @@ -7712,34 +7757,11 @@ ConstraintSystem::simplifyKeyPathConstraint( // to determine whether the result will be a function type vs BGT KeyPath // type, so continue through components to create new constraint at the // end. - if (!overload || anyComponentsUnresolved) { + if (!overload) { if (flags.contains(TMF_GenerateConstraints)) { anyComponentsUnresolved = true; continue; } - - if (shouldAttemptFixes()) { - auto typeVar = - llvm::find_if(componentTypeVars, [&](TypeVariableType *typeVar) { - auto *locator = typeVar->getImpl().getLocator(); - auto elt = locator->findLast(); - return elt && elt->getIndex() == i; - }); - - // If one of the components haven't been resolved, let's check - // whether it has been determined to be a "hole" and if so, - // let's allow component validation to contiunue. - // - // This helps to, for example, diagnose problems with missing - // members used as part of a key path. - if (typeVar != componentTypeVars.end() && - (*typeVar)->getImpl().canBindToHole()) { - anyComponentsUnresolved = true; - capability = ReadOnly; - continue; - } - } - return SolutionKind::Unsolved; } @@ -8123,7 +8145,7 @@ bool ConstraintSystem::simplifyAppliedOverloadsImpl( // If we have a common result type, bind the expected result type to it. if (commonResultType && !commonResultType->is()) { ASTContext &ctx = getASTContext(); - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log.indent(solverState ? solverState->depth * 2 : 0) << "(common result type for $T" << fnTypeVar->getID() << " is " @@ -9265,7 +9287,7 @@ static bool isAugmentingFix(ConstraintFix *fix) { bool ConstraintSystem::recordFix(ConstraintFix *fix, unsigned impact) { auto &ctx = getASTContext(); - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log.indent(solverState ? solverState->depth * 2 : 0) << "(attempting fix "; @@ -9482,7 +9504,8 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyFixConstraint( case FixKind::CoerceToCheckedCast: case FixKind::SpecifyObjectLiteralTypeImport: case FixKind::AllowKeyPathRootTypeMismatch: - case FixKind::AllowCoercionToForceCast: { + case FixKind::AllowCoercionToForceCast: + case FixKind::SpecifyKeyPathRootType: { return recordFix(fix) ? 
SolutionKind::Error : SolutionKind::Solved; } @@ -9583,6 +9606,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyFixConstraint( case FixKind::AllowTupleSplatForSingleParameter: case FixKind::AllowInvalidUseOfTrailingClosure: case FixKind::AllowNonClassTypeToConvertToAnyObject: + case FixKind::SpecifyClosureParameterType: case FixKind::SpecifyClosureReturnType: case FixKind::AddQualifierToAccessTopLevelName: llvm_unreachable("handled elsewhere"); } @@ -9693,14 +9717,21 @@ ConstraintSystem::addKeyPathApplicationRootConstraint(Type root, ConstraintLocat path[0].getKind() == ConstraintLocator::SubscriptMember) || (path.size() == 2 && path[1].getKind() == ConstraintLocator::KeyPathDynamicMember)); + auto indexTuple = dyn_cast(subscript->getIndex()); - if (!indexTuple || indexTuple->getNumElements() != 1) - return; - - auto keyPathExpr = dyn_cast(indexTuple->getElement(0)); + auto indexParen = dyn_cast(subscript->getIndex()); + // If a keypath subscript is used without the expected `keyPath:` label, + // continue with type-checking when attempting fixes so that it gets caught + // by the argument label checking. In such cases, the KeyPathExpr is contained + // in a ParenExpr, instead of a TupleExpr. + assert(((indexTuple && indexTuple->getNumElements() == 1) || indexParen) && + "Expected KeyPathExpr to be in either TupleExpr or ParenExpr"); + + auto keyPathExpr = dyn_cast( + indexTuple ? indexTuple->getElement(0) : indexParen->getSubExpr()); if (!keyPathExpr) return; - + auto typeVar = getType(keyPathExpr)->getAs(); if (!typeVar) return; diff --git a/lib/Sema/CSSolver.cpp b/lib/Sema/CSSolver.cpp index b010ef5ac298f..53ae160860a01 100644 --- a/lib/Sema/CSSolver.cpp +++ b/lib/Sema/CSSolver.cpp @@ -375,11 +375,10 @@ ConstraintSystem::SolverState::SolverState( // If we're supposed to debug a specific constraint solver attempt, // turn on debugging now. ASTContext &ctx = CS.getASTContext(); - auto &tyOpts = ctx.TypeCheckerOpts; - OldDebugConstraintSolver = tyOpts.DebugConstraintSolver; + const auto &tyOpts = ctx.TypeCheckerOpts; if (tyOpts.DebugConstraintSolverAttempt && tyOpts.DebugConstraintSolverAttempt == SolutionAttempt) { - tyOpts.DebugConstraintSolver = true; + CS.Options |= ConstraintSystemFlags::DebugConstraints; llvm::raw_ostream &dbgOut = ctx.TypeCheckerDebug->getStream(); dbgOut << "---Constraint system #" << SolutionAttempt << "---\n"; CS.print(dbgOut); @@ -420,9 +419,14 @@ ConstraintSystem::SolverState::~SolverState() { CS.activateConstraint(constraint); } - // Restore debugging state. - TypeCheckerOptions &tyOpts = CS.getASTContext().TypeCheckerOpts; - tyOpts.DebugConstraintSolver = OldDebugConstraintSolver; + // If global constraint debugging is off and we are finished logging the + // current solution attempt, switch debugging back off. + const auto &tyOpts = CS.getASTContext().TypeCheckerOpts; + if (!tyOpts.DebugConstraintSolver && + tyOpts.DebugConstraintSolverAttempt && + tyOpts.DebugConstraintSolverAttempt == SolutionAttempt) { + CS.Options -= ConstraintSystemFlags::DebugConstraints; + } // Write our local statistics back to the overall statistics. 
#define CS_STATISTIC(Name, Description) JOIN2(Overall,Name) += Name; @@ -627,8 +631,8 @@ bool ConstraintSystem::Candidate::solve( return false; auto &ctx = cs.getASTContext(); - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { - auto &log = cs.getASTContext().TypeCheckerDebug->getStream(); + if (cs.isDebugMode()) { + auto &log = ctx.TypeCheckerDebug->getStream(); log << "--- Solving candidate for shrinking at "; auto R = E->getSourceRange(); if (R.isValid()) { @@ -664,8 +668,8 @@ bool ConstraintSystem::Candidate::solve( cs.solveImpl(solutions); } - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { - auto &log = cs.getASTContext().TypeCheckerDebug->getStream(); + if (cs.isDebugMode()) { + auto &log = ctx.TypeCheckerDebug->getStream(); if (solutions.empty()) { log << "--- No Solutions ---\n"; } else { @@ -1130,14 +1134,15 @@ Optional> ConstraintSystem::solve( SolutionApplicationTarget &target, FreeTypeVariableBinding allowFreeTypeVariables ) { - llvm::SaveAndRestore debugForExpr( - getASTContext().TypeCheckerOpts.DebugConstraintSolver, - debugConstraintSolverForTarget(getASTContext(), target)); + llvm::SaveAndRestore debugForExpr(Options); + if (debugConstraintSolverForTarget(getASTContext(), target)) { + Options |= ConstraintSystemFlags::DebugConstraints; + } /// Dump solutions for debugging purposes. auto dumpSolutions = [&](const SolutionResult &result) { // Debug-print the set of solutions. - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); if (result.getKind() == SolutionResult::Success) { log << "---Solution---\n"; @@ -1224,7 +1229,7 @@ Optional> ConstraintSystem::solve( SolutionResult ConstraintSystem::solveImpl(SolutionApplicationTarget &target, FreeTypeVariableBinding allowFreeTypeVariables) { - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log << "---Constraint solving at "; auto R = target.getSourceRange(); @@ -1272,7 +1277,7 @@ bool ConstraintSystem::solve(SmallVectorImpl &solutions, // Solve the system. solveImpl(solutions); - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log << "---Solver statistics---\n"; log << "Total number of scopes explored: " << solverState->NumStatesExplored << "\n"; @@ -1409,7 +1414,7 @@ ConstraintSystem::filterDisjunction( continue; } - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log.indent(solverState ? solverState->depth * 2 : 0) << "(disabled disjunction term "; @@ -1470,7 +1475,7 @@ ConstraintSystem::filterDisjunction( recordDisjunctionChoice(disjunction->getLocator(), choiceIdx); } - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log.indent(solverState ? solverState->depth * 2 : 0) << "(introducing single enabled disjunction term "; diff --git a/lib/Sema/CSStep.cpp b/lib/Sema/CSStep.cpp index 95687f104cc06..8717fd15d469f 100644 --- a/lib/Sema/CSStep.cpp +++ b/lib/Sema/CSStep.cpp @@ -100,7 +100,7 @@ void SplitterStep::computeFollowupSteps( return; } - if (isDebugMode()) { + if (CS.isDebugMode()) { auto &log = getDebugLogger(); // Verify that the constraint graph is valid. CG.verify(); @@ -238,7 +238,7 @@ bool SplitterStep::mergePartialSolutions() const { // Finalize this solution. 
auto solution = CS.finalize(); solutionMemory += solution.getTotalMemory(); - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << "(composed solution " << CS.CurrentScore << ")\n"; // Save this solution. @@ -391,7 +391,7 @@ StepResult ComponentStep::take(bool prevFailed) { } auto solution = CS.finalize(); - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << "(found solution " << getCurrentScore() << ")\n"; Solutions.push_back(std::move(solution)); @@ -408,7 +408,7 @@ StepResult ComponentStep::finalize(bool isSuccess) { // Rewind all modifications done to constraint system. ComponentScope.reset(); - if (isDebugMode()) { + if (CS.isDebugMode()) { auto &log = getDebugLogger(); log << (isSuccess ? "finished" : "failed") << " component #" << Index << ")\n"; @@ -440,7 +440,7 @@ StepResult ComponentStep::finalize(bool isSuccess) { void TypeVariableStep::setup() { ++CS.solverState->NumTypeVariablesBound; - if (isDebugMode()) { + if (CS.isDebugMode()) { PrintOptions PO; PO.PrintTypesForDebugging = true; auto &log = getDebugLogger(); @@ -478,7 +478,7 @@ StepResult TypeVariableStep::resume(bool prevFailed) { // Rewind back all of the changes made to constraint system. ActiveChoice.reset(); - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << ")\n"; // Let's check if we should stop right before @@ -517,7 +517,7 @@ StepResult DisjunctionStep::resume(bool prevFailed) { // Rewind back the constraint system information. ActiveChoice.reset(); - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << ")\n"; // Attempt next disjunction choice (if any left). @@ -530,7 +530,7 @@ bool DisjunctionStep::shouldSkip(const DisjunctionChoice &choice) const { bool attemptFixes = CS.shouldAttemptFixes(); // Enable all disabled choices in "diagnostic" mode. if (!attemptFixes && choice.isDisabled()) { - if (isDebugMode()) { + if (CS.isDebugMode()) { auto &log = getDebugLogger(); log << "(skipping "; choice.print(log, &ctx.SourceMgr); diff --git a/lib/Sema/CSStep.h b/lib/Sema/CSStep.h index 8fd3b8bca027c..330981d7d07a2 100644 --- a/lib/Sema/CSStep.h +++ b/lib/Sema/CSStep.h @@ -231,12 +231,6 @@ class SolverStep { CS.filterSolutions(solutions, minimize); } - /// Check whether constraint solver is running in "debug" mode, - /// which should output diagnostic information. - bool isDebugMode() const { - return CS.getASTContext().TypeCheckerOpts.DebugConstraintSolver; - } - llvm::raw_ostream &getDebugLogger(bool indent = true) const { auto &log = CS.getASTContext().TypeCheckerDebug->getStream(); return indent ? 
log.indent(CS.solverState->depth * 2) : log; @@ -471,7 +465,7 @@ class ComponentStep final : public SolverStep { if (IsSingle) return; - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << "(solving component #" << Index << '\n'; ComponentScope = std::make_unique(*this); @@ -515,7 +509,7 @@ template class BindingStep : public SolverStep { if (shouldStopAt(*choice)) break; - if (isDebugMode()) { + if (CS.isDebugMode()) { auto &log = getDebugLogger(); log << "(attempting "; choice->print(log, &CS.getASTContext().SourceMgr); @@ -530,7 +524,7 @@ template class BindingStep : public SolverStep { } } - if (isDebugMode()) + if (CS.isDebugMode()) getDebugLogger() << ")\n"; // If this binding didn't match, let's check if we've attempted diff --git a/lib/Sema/ConstraintGraph.cpp b/lib/Sema/ConstraintGraph.cpp index 0288a9a40f323..97f9a88f3474b 100644 --- a/lib/Sema/ConstraintGraph.cpp +++ b/lib/Sema/ConstraintGraph.cpp @@ -872,7 +872,7 @@ namespace { contractedCycle = false; for (const auto &edge : cycleEdges) { if (unionSets(edge.first, edge.second)) { - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (cs.isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); if (cs.solverState) log.indent(cs.solverState->depth * 2); @@ -1126,6 +1126,10 @@ bool ConstraintGraph::contractEdges() { if (isParamBindingConstraint && tyvar1->getImpl().canBindToInOut()) { bool isNotContractable = true; if (auto bindings = CS.getPotentialBindings(tyvar1)) { + // Holes can't be contracted. + if (bindings.IsHole) + continue; + for (auto &binding : bindings.Bindings) { auto type = binding.BindingType; isNotContractable = type.findIf([&](Type nestedType) -> bool { @@ -1155,7 +1159,7 @@ bool ConstraintGraph::contractEdges() { rep2->getImpl().canBindToLValue()) || // Allow l-value contractions when binding parameter types. isParamBindingConstraint)) { - if (CS.getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (CS.isDebugMode()) { auto &log = CS.getASTContext().TypeCheckerDebug->getStream(); if (CS.solverState) log.indent(CS.solverState->depth * 2); diff --git a/lib/Sema/ConstraintSystem.cpp b/lib/Sema/ConstraintSystem.cpp index ad374c6911a5a..adb6d36538246 100644 --- a/lib/Sema/ConstraintSystem.cpp +++ b/lib/Sema/ConstraintSystem.cpp @@ -74,6 +74,12 @@ ConstraintSystem::ConstraintSystem(DeclContext *dc, CG(*new ConstraintGraph(*this)) { assert(DC && "context required"); + // Respect the global debugging flag, but turn off debugging while + // parsing and loading other modules. + if (Context.TypeCheckerOpts.DebugConstraintSolver && + DC->getParentModule()->isMainModule()) { + Options |= ConstraintSystemFlags::DebugConstraints; + } } ConstraintSystem::~ConstraintSystem() { @@ -216,9 +222,19 @@ getDynamicResultSignature(ValueDecl *decl) { } if (auto asd = dyn_cast(decl)) { + auto ty = asd->getInterfaceType(); + + // Strip off a generic signature if we have one. This matches the logic + // for methods, and ensures that we don't take a protocol's generic + // signature into account for a subscript requirement. + if (auto *genericFn = ty->getAs()) { + ty = FunctionType::get(genericFn->getParams(), genericFn->getResult(), + genericFn->getExtInfo()); + } + // Handle properties and subscripts, anchored by the getter's selector. 
return std::make_tuple(asd->isStatic(), asd->getObjCGetterSelector(), - asd->getInterfaceType()->getCanonicalType()); + ty->getCanonicalType()); } llvm_unreachable("Not a valid @objc member"); } @@ -2401,7 +2417,7 @@ void ConstraintSystem::resolveOverload(ConstraintLocator *locator, verifyThatArgumentIsHashable); } - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { PrintOptions PO; PO.PrintTypesForDebugging = true; auto &log = getASTContext().TypeCheckerDebug->getStream(); @@ -2564,7 +2580,7 @@ bool OverloadChoice::isImplicitlyUnwrappedValueOrReturnValue() const { SolutionResult ConstraintSystem::salvage() { auto &ctx = getASTContext(); - if (ctx.TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = ctx.TypeCheckerDebug->getStream(); log << "---Attempting to salvage and emit diagnostics---\n"; } @@ -2627,7 +2643,7 @@ SolutionResult ConstraintSystem::salvage() { } // SWIFT_ENABLE_TENSORFLOW - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log << "---Ambiguity error: " << viable.size() << " solutions found---\n"; @@ -4286,7 +4302,7 @@ SolutionApplicationTarget SolutionApplicationTarget::forInitialization( bool bindPatternVarsOneWay) { // Determine the contextual type for the initialization. TypeLoc contextualType; - if (!isa(pattern) && + if (!(isa(pattern) && !pattern->isImplicit()) && patternType && !patternType->isHole()) { contextualType = TypeLoc::withoutLoc(patternType); diff --git a/lib/Sema/ConstraintSystem.h b/lib/Sema/ConstraintSystem.h index a68185f4825f8..37a2bf9d627c8 100644 --- a/lib/Sema/ConstraintSystem.h +++ b/lib/Sema/ConstraintSystem.h @@ -301,6 +301,10 @@ class TypeVariableType::Implementation { /// Determine whether this type variable represents a closure type. bool isClosureType() const; + /// Determine whether this type variable represents one of the + /// parameter types associated with a closure. + bool isClosureParameterType() const; + /// Determine whether this type variable represents a closure result type. bool isClosureResultType() const; @@ -1089,6 +1093,16 @@ enum class ConstraintSystemFlags { /// If set, constraint system always reuses type of pre-typechecked /// expression, and doesn't dig into its subexpressions. ReusePrecheckedType = 0x08, + + /// If set, verbose output is enabled for this constraint system. + /// + /// Note that this flag is automatically applied to all constraint systems + /// when \c DebugConstraintSolver is set in \c TypeCheckerOptions. It can be + /// automatically enabled for select constraint solving attempts by setting + /// \c DebugConstraintSolverAttempt. Finally, it can be automatically enabled + /// for expressions on a pre-configured set of line numbers by setting + /// \c DebugConstraintSolverOnLines. + DebugConstraints = 0x10, }; /// Options that affect the constraint system as a whole. @@ -1434,7 +1448,8 @@ class SolutionApplicationTarget { bool isOptionalSomePatternInit() const { return kind == Kind::expression && expression.contextualPurpose == CTP_Initialization && - isa(expression.pattern); + isa(expression.pattern) && + !expression.pattern->isImplicit(); } /// Whether to bind the types of any variables within the pattern via @@ -1895,11 +1910,6 @@ class ConstraintSystem { FreeTypeVariableBinding AllowFreeTypeVariables; - /// Old value of DebugConstraintSolver. /// FIXME: Move the "debug constraint solver" bit into the constraint /// system itself. 
- bool OldDebugConstraintSolver; - /// Depth of the solution stack. unsigned depth = 0; @@ -2320,6 +2330,12 @@ class ConstraintSystem { /// variables. bool hasFreeTypeVariables(); + /// Check whether constraint solver is running in "debug" mode, + /// which should output diagnostic information. + bool isDebugMode() const { + return Options.contains(ConstraintSystemFlags::DebugConstraints); + } + private: /// Finalize this constraint system; we're done attempting to solve /// it. @@ -2946,7 +2962,7 @@ class ConstraintSystem { /// Whether we should record the failure of a constraint. bool shouldRecordFailedConstraint() const { // If we're debugging, always note a failure so we can print it out. - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) + if (isDebugMode()) return true; // Otherwise, only record it if we don't already have a failed constraint. @@ -2961,7 +2977,7 @@ class ConstraintSystem { if (!failedConstraint) failedConstraint = constraint; - if (getASTContext().TypeCheckerOpts.DebugConstraintSolver) { + if (isDebugMode()) { auto &log = getASTContext().TypeCheckerDebug->getStream(); log.indent(solverState ? solverState->depth * 2 : 0) << "(failed constraint "; diff --git a/lib/Sema/DerivedConformanceEquatableHashable.cpp b/lib/Sema/DerivedConformanceEquatableHashable.cpp index ab5066d57895d..eca9d63ff55e0 100644 --- a/lib/Sema/DerivedConformanceEquatableHashable.cpp +++ b/lib/Sema/DerivedConformanceEquatableHashable.cpp @@ -1024,11 +1024,11 @@ getHashValueRequirement(ASTContext &C) { } static ProtocolConformance * -getHashableConformance(Decl *parentDecl) { +getHashableConformance(const Decl *parentDecl) { ASTContext &C = parentDecl->getASTContext(); - auto DC = cast(parentDecl); + const auto IDC = cast(parentDecl); auto hashableProto = C.getProtocol(KnownProtocolKind::Hashable); - for (auto conformance: DC->getLocalConformances()) { + for (auto conformance: IDC->getLocalConformances()) { if (conformance->getProtocol() == hashableProto) { return conformance; } diff --git a/lib/Sema/ImportResolution.cpp b/lib/Sema/ImportResolution.cpp index 1f26edbc6edff..fe886389cd1a8 100644 --- a/lib/Sema/ImportResolution.cpp +++ b/lib/Sema/ImportResolution.cpp @@ -674,6 +674,87 @@ void UnboundImport::diagnoseInvalidAttr(DeclAttrKind attrKind, attr->setInvalid(); } +evaluator::SideEffect +CheckInconsistentImplementationOnlyImportsRequest::evaluate( + Evaluator &evaluator, ModuleDecl *mod) const { + bool hasAnyImplementationOnlyImports = + llvm::any_of(mod->getFiles(), [](const FileUnit *F) -> bool { + auto *SF = dyn_cast(F); + return SF && SF->hasImplementationOnlyImports(); + }); + if (!hasAnyImplementationOnlyImports) + return {}; + + auto diagnose = [mod](const ImportDecl *normalImport, + const ImportDecl *implementationOnlyImport) { + auto &diags = mod->getDiags(); + { + InFlightDiagnostic warning = + diags.diagnose(normalImport, diag::warn_implementation_only_conflict, + normalImport->getModule()->getName()); + if (normalImport->getAttrs().isEmpty()) { + // Only try to add a fix-it if there's no other annotations on the + // import to avoid creating things like + // `@_implementationOnly @_exported import Foo`. The developer can + // resolve those manually. 
+ warning.fixItInsert(normalImport->getStartLoc(), + "@_implementationOnly "); + } + } + diags.diagnose(implementationOnlyImport, + diag::implementation_only_conflict_here); + }; + + llvm::DenseMap> normalImports; + llvm::DenseMap implementationOnlyImports; + + for (const FileUnit *file : mod->getFiles()) { + auto *SF = dyn_cast(file); + if (!SF) + continue; + + for (auto *topLevelDecl : SF->getTopLevelDecls()) { + auto *nextImport = dyn_cast(topLevelDecl); + if (!nextImport) + continue; + + ModuleDecl *module = nextImport->getModule(); + if (!module) + continue; + + if (nextImport->getAttrs().hasAttribute()) { + // We saw an implementation-only import. + bool isNew = + implementationOnlyImports.insert({module, nextImport}).second; + if (!isNew) + continue; + + auto seenNormalImportPosition = normalImports.find(module); + if (seenNormalImportPosition != normalImports.end()) { + for (auto *seenNormalImport : seenNormalImportPosition->getSecond()) + diagnose(seenNormalImport, nextImport); + + // We're done with these; keep the map small if possible. + normalImports.erase(seenNormalImportPosition); + } + continue; + } + + // We saw a non-implementation-only import. Is that in conflict with what + // we've seen? + if (auto *seenImplementationOnlyImport = + implementationOnlyImports.lookup(module)) { + diagnose(nextImport, seenImplementationOnlyImport); + continue; + } + + // Otherwise, record it for later. + normalImports[module].push_back(nextImport); + } + } + return {}; +} + //===----------------------------------------------------------------------===// // MARK: Scoped imports //===----------------------------------------------------------------------===// diff --git a/lib/Sema/LookupVisibleDecls.cpp b/lib/Sema/LookupVisibleDecls.cpp index 6adddb84a0a03..1cb3e916a6659 100644 --- a/lib/Sema/LookupVisibleDecls.cpp +++ b/lib/Sema/LookupVisibleDecls.cpp @@ -539,7 +539,7 @@ static void // wrappers. static void synthesizePropertyWrapperStorageWrapperProperties(IterableDeclContext *IDC) { - auto SF = IDC->getDecl()->getDeclContext()->getParentSourceFile(); + auto SF = IDC->getAsGenericContext()->getParentSourceFile(); if (!SF || SF->Kind == SourceFileKind::Interface) return; diff --git a/lib/Sema/MiscDiagnostics.cpp b/lib/Sema/MiscDiagnostics.cpp index 2b3e1fbc908cd..6cc28b42d9cdf 100644 --- a/lib/Sema/MiscDiagnostics.cpp +++ b/lib/Sema/MiscDiagnostics.cpp @@ -137,6 +137,9 @@ static void diagSyntacticUseRestrictions(const Expr *E, const DeclContext *DC, if (auto *SE = dyn_cast(E)) CallArgs.insert(SE->getIndex()); + if (auto *DSE = dyn_cast(E)) + CallArgs.insert(DSE->getIndex()); + if (auto *KPE = dyn_cast(E)) { for (auto Comp : KPE->getComponents()) { if (auto *Arg = Comp.getIndexExpr()) @@ -3749,10 +3752,9 @@ checkImplicitPromotionsInCondition(const StmtConditionElement &cond, // checking for a type, which forced it to be promoted to a double optional // type. if (auto ooType = subExpr->getType()->getOptionalObjectType()) { - if (auto TP = dyn_cast(p)) + if (auto OSP = dyn_cast(p)) { // Check for 'if let' to produce a tuned diagnostic. 
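The request added above fires on the following kind of inconsistency (module and file names are hypothetical):

// FileA.swift
@_implementationOnly import SecretKit

// FileB.swift
import SecretKit   // warning: inconsistently imported as implementation-only;
                   // a fix-it inserting `@_implementationOnly ` is offered only
                   // when the import carries no other attributes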
- if (isa(TP->getSubPattern()) && - TP->getSubPattern()->isImplicit()) { + if (auto *TP = dyn_cast(OSP->getSubPattern())) { ctx.Diags.diagnose(cond.getIntroducerLoc(), diag::optional_check_promotion, subExpr->getType()) @@ -3761,6 +3763,7 @@ checkImplicitPromotionsInCondition(const StmtConditionElement &cond, ooType->getString()); return; } + } ctx.Diags.diagnose(cond.getIntroducerLoc(), diag::optional_pattern_match_promotion, subExpr->getType(), cond.getInitializer()->getType()) diff --git a/lib/Sema/SourceLoader.cpp b/lib/Sema/SourceLoader.cpp index 133553fdadfd8..b535a6986391d 100644 --- a/lib/Sema/SourceLoader.cpp +++ b/lib/Sema/SourceLoader.cpp @@ -104,10 +104,6 @@ ModuleDecl *SourceLoader::loadModule(SourceLoc importLoc, dependencyTracker->addDependency(inputFile->getBufferIdentifier(), /*isSystem=*/false); - // Turn off debugging while parsing other modules. - llvm::SaveAndRestore - turnOffDebug(Ctx.TypeCheckerOpts.DebugConstraintSolver, false); - unsigned bufferID; if (auto BufID = Ctx.SourceMgr.getIDForBufferIdentifier(inputFile->getBufferIdentifier())) diff --git a/lib/Sema/TypeCheckAccess.cpp b/lib/Sema/TypeCheckAccess.cpp index 3d7e372d05d61..c67093cd5c843 100644 --- a/lib/Sema/TypeCheckAccess.cpp +++ b/lib/Sema/TypeCheckAccess.cpp @@ -42,15 +42,6 @@ enum class DowngradeToWarning: bool { Yes }; -/// A uniquely-typed boolean to reduce the chances of accidentally inverting -/// a check. -/// -/// \see checkTypeAccessImpl -enum class FromSPI: bool { - No, - Yes -}; - /// Calls \p callback for each type in each requirement provided by /// \p source. static void forAllRequirementTypes( @@ -87,7 +78,7 @@ class AccessControlCheckerBase { void checkTypeAccessImpl( Type type, TypeRepr *typeRepr, AccessScope contextAccessScope, - const DeclContext *useDC, bool mayBeInferred, FromSPI fromSPI, + const DeclContext *useDC, bool mayBeInferred, llvm::function_ref diagnose); void checkTypeAccess( @@ -109,7 +100,7 @@ class AccessControlCheckerBase { llvm::function_ref diagnose) { forAllRequirementTypes(std::move(source), [&](Type type, TypeRepr *typeRepr) { checkTypeAccessImpl(type, typeRepr, accessScope, useDC, - /*mayBeInferred*/false, FromSPI::No, diagnose); + /*mayBeInferred*/false, diagnose); }); } @@ -196,12 +187,9 @@ class TypeAccessScopeDiagnoser : private ASTWalker { /// using `Array` to mean `Array` in an extension of Array.) If /// \p typeRepr is known to be absent, it's okay to pass \c false for /// \p mayBeInferred. -/// -/// If searching from an SPI context, pass \c FromSPI::YES for \p fromSPI. -/// In this mode, all types must be public and diagnostic messages are adapted. 
void AccessControlCheckerBase::checkTypeAccessImpl( Type type, TypeRepr *typeRepr, AccessScope contextAccessScope, - const DeclContext *useDC, bool mayBeInferred, FromSPI fromSPI, + const DeclContext *useDC, bool mayBeInferred, llvm::function_ref diagnose) { auto &Context = useDC->getASTContext(); @@ -310,9 +298,8 @@ void AccessControlCheckerBase::checkTypeAccess( context->getFormalAccessScope( context->getDeclContext(), checkUsableFromInline); - auto fromSPI = static_cast(context->isSPI()); checkTypeAccessImpl(type, typeRepr, contextAccessScope, DC, mayBeInferred, - fromSPI, diagnose); + diagnose); } /// Highlights the given TypeRepr, and adds a note pointing to the type's @@ -372,10 +359,6 @@ void AccessControlCheckerBase::checkGenericParamAccess( }; auto *DC = ownerDecl->getDeclContext(); - auto fromSPI = FromSPI::No; - if (auto ownerValueDecl = dyn_cast(ownerDecl)) { - fromSPI = static_cast(ownerValueDecl->isSPI()); - } for (auto param : *params) { if (param->getInherited().empty()) @@ -384,7 +367,7 @@ void AccessControlCheckerBase::checkGenericParamAccess( checkTypeAccessImpl(param->getInherited().front().getType(), param->getInherited().front().getTypeRepr(), accessScope, DC, /*mayBeInferred*/false, - fromSPI, callback); + callback); } callbackACEK = ACEK::Requirement; @@ -1483,11 +1466,14 @@ class UsableFromInlineChecker : public AccessControlCheckerBase, } }; +// Diagnose public APIs exposing types that are either imported as +// implementation-only or declared as SPI. class ExportabilityChecker : public DeclVisitor { class Diagnoser; void checkTypeImpl( Type type, const TypeRepr *typeRepr, const SourceFile &SF, + const Decl *context, const Diagnoser &diagnoser) { // Don't bother checking errors. if (type && type->hasError()) @@ -1496,18 +1482,19 @@ class ExportabilityChecker : public DeclVisitor { bool foundAnyIssues = false; // Check the TypeRepr first (if present), because that will give us a - // better diagonstic. + // better diagnostic. if (typeRepr) { const_cast(typeRepr)->walk(TypeReprIdentFinder( [&](const ComponentIdentTypeRepr *component) { - ModuleDecl *M = component->getBoundDecl()->getModuleContext(); - if (!SF.isImportedImplementationOnly(M) && - !SF.isImportedAsSPI(component->getBoundDecl())) - return true; - - diagnoser.diagnoseType(component->getBoundDecl(), component, - SF.isImportedImplementationOnly(M)); - foundAnyIssues = true; + TypeDecl *typeDecl = component->getBoundDecl(); + ModuleDecl *M = typeDecl->getModuleContext(); + bool isImplementationOnly = SF.isImportedImplementationOnly(M); + if (isImplementationOnly || + (SF.isImportedAsSPI(typeDecl) && !context->isSPI())) { + diagnoser.diagnoseType(typeDecl, component, isImplementationOnly); + foundAnyIssues = true; + } + // We still continue even in the diagnostic case to report multiple // violations. 
return true; @@ -1525,19 +1512,19 @@ class ExportabilityChecker : public DeclVisitor { class ProblematicTypeFinder : public TypeDeclFinder { const SourceFile &SF; + const Decl *context; const Diagnoser &diagnoser; public: - ProblematicTypeFinder(const SourceFile &SF, const Diagnoser &diagnoser) - : SF(SF), diagnoser(diagnoser) {} + ProblematicTypeFinder(const SourceFile &SF, const Decl *context, const Diagnoser &diagnoser) + : SF(SF), context(context), diagnoser(diagnoser) {} void visitTypeDecl(const TypeDecl *typeDecl) { ModuleDecl *M = typeDecl->getModuleContext(); - if (!SF.isImportedImplementationOnly(M) && - !SF.isImportedAsSPI(typeDecl)) - return; - - diagnoser.diagnoseType(typeDecl, /*typeRepr*/nullptr, - SF.isImportedImplementationOnly(M)); + bool isImplementationOnly = SF.isImportedImplementationOnly(M); + if (isImplementationOnly || + (SF.isImportedAsSPI(typeDecl) && !context->isSPI())) + diagnoser.diagnoseType(typeDecl, /*typeRepr*/nullptr, + isImplementationOnly); } void visitSubstitutionMap(SubstitutionMap subs) { @@ -1597,7 +1584,7 @@ class ExportabilityChecker : public DeclVisitor { } }; - type.walk(ProblematicTypeFinder(SF, diagnoser)); + type.walk(ProblematicTypeFinder(SF, context, diagnoser)); } void checkType( @@ -1605,7 +1592,7 @@ class ExportabilityChecker : public DeclVisitor { const Diagnoser &diagnoser) { auto *SF = context->getDeclContext()->getParentSourceFile(); assert(SF && "checking a non-source declaration?"); - return checkTypeImpl(type, typeRepr, *SF, diagnoser); + return checkTypeImpl(type, typeRepr, *SF, context, diagnoser); } void checkType( @@ -1702,7 +1689,7 @@ class ExportabilityChecker : public DeclVisitor { AccessScope accessScope = VD->getFormalAccessScope(nullptr, /*treatUsableFromInlineAsPublic*/true); - if (accessScope.isPublic() && !accessScope.isSPI()) + if (accessScope.isPublic()) return false; // Is this a stored property in a non-resilient struct or class? diff --git a/lib/Sema/TypeCheckAttr.cpp b/lib/Sema/TypeCheckAttr.cpp index b7f4239842819..e5e64d28ed56c 100644 --- a/lib/Sema/TypeCheckAttr.cpp +++ b/lib/Sema/TypeCheckAttr.cpp @@ -866,6 +866,7 @@ void AttributeChecker::visitSetterAccessAttr( void AttributeChecker::visitSPIAccessControlAttr(SPIAccessControlAttr *attr) { if (auto VD = dyn_cast(D)) { + // VD must be public or open to use an @_spi attribute. auto declAccess = VD->getFormalAccess(); if (declAccess < AccessLevel::Public) { diagnoseAndRemoveAttr(attr, @@ -873,6 +874,52 @@ void AttributeChecker::visitSPIAccessControlAttr(SPIAccessControlAttr *attr) { declAccess, D->getDescriptiveKind()); } + + // If VD is a public protocol requirement it can be SPI only if there's + // a default implementation. + if (auto protocol = dyn_cast(D->getDeclContext())) { + auto implementations = TypeChecker::lookupMember( + D->getDeclContext(), + protocol->getDeclaredType(), + VD->createNameRef(), + NameLookupFlags::ProtocolMembers); + bool hasDefaultImplementation = llvm::any_of(implementations, + [&](const LookupResultEntry &entry) { + auto entryDecl = entry.getValueDecl(); + auto DC = entryDecl->getDeclContext(); + auto extension = dyn_cast(DC); + + // The implementation must be defined in the same module in + // an unconstrained extension. + if (!extension || + extension->getParentModule() != protocol->getParentModule() || + extension->isConstrainedExtension()) + return false; + + // For computed properties and subscripts, check that the default + // implementation defines `set` if the protocol declares it. 
+ if (auto protoStorage = dyn_cast(VD)) + if (auto entryStorage = dyn_cast(entryDecl)) + if (protoStorage->getAccessor(AccessorKind::Set) && + !entryStorage->getAccessor(AccessorKind::Set)) + return false; + + return true; + }); + + if (!hasDefaultImplementation) + diagnoseAndRemoveAttr(attr, + diag::spi_attribute_on_protocol_requirement, + VD->getName()); + } + + // Forbid stored properties marked SPI in frozen types. + if (auto property = dyn_cast(VD)) + if (auto DC = dyn_cast(D->getDeclContext())) + if (property->hasStorage() && !DC->isFormallyResilient()) + diagnoseAndRemoveAttr(attr, + diag::spi_attribute_on_frozen_stored_properties, + VD->getName()); } } @@ -2706,7 +2753,7 @@ TypeEraserHasViableInitRequest::evaluate(Evaluator &evaluator, ParamDecl *param = *init->getParameters()->begin(); if (param->getArgumentName() != ctx.Id_erasing || !param->getInterfaceType()->isEqual(genericParamType) || - !genericSignature->conformsToProtocol(genericParamType, protocol)) + !genericSignature->requiresProtocol(genericParamType, protocol)) return false; // Allow other constraints as long as the init can be called with any @@ -3566,51 +3613,6 @@ static IndexSubset *computeDifferentiabilityParameters( return IndexSubset::get(ctx, parameterBits); } -// Checks if the given differentiability parameter indices are valid for the -// given original or derivative `AbstractFunctionDecl` and original function -// type in the given derivative generic environment and module context. Returns -// true on error. -// -// The parsed differentiability parameters and attribute location are used in -// diagnostics. -static bool checkDifferentiabilityParameters( - AbstractFunctionDecl *AFD, IndexSubset *diffParamIndices, - AnyFunctionType *functionType, GenericEnvironment *derivativeGenEnv, - ModuleDecl *module, ArrayRef parsedDiffParams, - SourceLoc attrLoc) { - auto &ctx = AFD->getASTContext(); - auto &diags = ctx.Diags; - - // Diagnose empty differentiability indices. No differentiability parameters - // were resolved or inferred. - if (diffParamIndices->isEmpty()) { - diags.diagnose(attrLoc, diag::diff_params_clause_no_inferred_parameters); - return true; - } - - // Check that differentiability parameters have allowed types. - SmallVector diffParams; - functionType->getSubsetParameters(diffParamIndices, diffParams); - for (unsigned i : range(diffParams.size())) { - SourceLoc loc = - parsedDiffParams.empty() ? attrLoc : parsedDiffParams[i].getLoc(); - auto diffParamType = diffParams[i].getPlainType(); - if (!diffParamType->hasTypeParameter()) - diffParamType = diffParamType->mapTypeOutOfContext(); - if (derivativeGenEnv) - diffParamType = derivativeGenEnv->mapTypeIntoContext(diffParamType); - else - diffParamType = AFD->mapTypeIntoContext(diffParamType); - // Parameter must conform to `Differentiable`. - if (!conformsToDifferentiable(diffParamType, AFD)) { - diags.diagnose(loc, diag::diff_params_clause_param_not_differentiable, - diffParamType); - return true; - } - } - return false; -} - // Returns the function declaration corresponding to the given function name and // lookup context. If the base type of the function is specified, member lookup // is performed. Otherwise, unqualified lookup is performed. @@ -4105,9 +4107,11 @@ bool resolveDifferentiableAttrDerivativeGenericSignature( /// `diffParamIndices`, and returns true. 
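Taken together with the ExportabilityChecker change above, the new @_spi checks cover roughly the following (module and declaration names are hypothetical):

// Library module:
@_spi(Private) public struct Secret {}

public protocol P {
  @_spi(Private) func requirement()     // allowed only because a default
                                        // implementation exists below
}
extension P {
  public func requirement() {}
}

@frozen public struct Layout {
  @_spi(Private) public var field = 0   // rejected: stored property of a
                                        // non-resilient type
}

// Client module:
@_spi(Private) import Library
@_spi(Private) public func spiEntry(_ s: Secret) {}   // OK: an SPI decl may expose SPI types
public func publicEntry(_ s: Secret) {}               // diagnosed: public API exposes an SPI type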
bool resolveDifferentiableAttrDifferentiabilityParameters( DifferentiableAttr *attr, AbstractFunctionDecl *original, - AnyFunctionType *derivativeFnTy, GenericEnvironment *derivativeGenEnv, + AnyFunctionType *originalFnRemappedTy, GenericEnvironment *derivativeGenEnv, IndexSubset *&diffParamIndices) { diffParamIndices = nullptr; + auto &ctx = original->getASTContext(); + auto &diags = ctx.Diags; // Get the parsed differentiability parameter indices, which have not yet been // resolved. Parsed differentiability parameter indices are defined only for @@ -4123,11 +4127,57 @@ bool resolveDifferentiableAttrDifferentiabilityParameters( } // Check if differentiability parameter indices are valid. - if (checkDifferentiabilityParameters(original, diffParamIndices, - derivativeFnTy, derivativeGenEnv, - original->getModuleContext(), - parsedDiffParams, attr->getLocation())) { + // Do this by computing the expected differential type and checking whether + // there is an error. + auto expectedLinearMapTypeOrError = + originalFnRemappedTy->getAutoDiffDerivativeFunctionLinearMapType( + diffParamIndices, AutoDiffLinearMapKind::Differential, + LookUpConformanceInModule(original->getModuleContext()), + /*makeSelfParamFirst*/ true); + + // Helper for diagnosing derivative function type errors. + auto errorHandler = [&](const DerivativeFunctionTypeError &error) { attr->setInvalid(); + switch (error.kind) { + case DerivativeFunctionTypeError::Kind::NoSemanticResults: + diags + .diagnose(attr->getLocation(), + diag::autodiff_attr_original_void_result, + original->getName()) + .highlight(original->getSourceRange()); + return; + case DerivativeFunctionTypeError::Kind::MultipleSemanticResults: + diags + .diagnose(attr->getLocation(), + diag::autodiff_attr_original_multiple_semantic_results) + .highlight(original->getSourceRange()); + return; + case DerivativeFunctionTypeError::Kind::NoDifferentiabilityParameters: + diags.diagnose(attr->getLocation(), + diag::diff_params_clause_no_inferred_parameters); + return; + case DerivativeFunctionTypeError::Kind:: + NonDifferentiableDifferentiabilityParameter: { + auto nonDiffParam = error.getNonDifferentiableTypeAndIndex(); + SourceLoc loc = parsedDiffParams.empty() + ? attr->getLocation() + : parsedDiffParams[nonDiffParam.second].getLoc(); + diags.diagnose(loc, diag::diff_params_clause_param_not_differentiable, + nonDiffParam.first); + return; + } + case DerivativeFunctionTypeError::Kind::NonDifferentiableResult: + auto nonDiffResult = error.getNonDifferentiableTypeAndIndex(); + diags.diagnose(attr->getLocation(), + diag::autodiff_attr_result_not_differentiable, + nonDiffResult.first); + return; + } + }; + // Diagnose any derivative function type errors. + if (!expectedLinearMapTypeOrError) { + auto error = expectedLinearMapTypeOrError.takeError(); + handleAllErrors(std::move(error), errorHandler); return true; } @@ -4224,52 +4274,19 @@ IndexSubset *DifferentiableAttributeTypeCheckRequest::evaluate( derivativeGenEnv = derivativeGenSig->getGenericEnvironment(); // Compute the derivative function type. - auto derivativeFnTy = originalFnTy; + auto originalFnRemappedTy = originalFnTy; if (derivativeGenEnv) - derivativeFnTy = derivativeGenEnv->mapTypeIntoContext(derivativeFnTy) - ->castTo(); + originalFnRemappedTy = + derivativeGenEnv->mapTypeIntoContext(originalFnRemappedTy) + ->castTo(); // Resolve and validate the differentiability parameters. 
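Assuming experimental differentiable programming is enabled and the `_Differentiation` module is available, the unified error handler above covers inputs such as:

import _Differentiation

@differentiable
func noSemanticResult(_ x: Float) {}          // original function has a Void result

@differentiable
func nonDiffParameter(_ x: Int) -> Float {    // Int is not Differentiable, so it cannot
  Float(x)                                    // be a differentiability parameter
}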
IndexSubset *resolvedDiffParamIndices = nullptr; if (resolveDifferentiableAttrDifferentiabilityParameters( - attr, original, derivativeFnTy, derivativeGenEnv, + attr, original, originalFnRemappedTy, derivativeGenEnv, resolvedDiffParamIndices)) return nullptr; - // Get the original semantic result type. - llvm::SmallVector originalResults; - autodiff::getFunctionSemanticResultTypes(originalFnTy, originalResults, - derivativeGenEnv); - // Check that original function has at least one semantic result, i.e. - // that the original semantic result type is not `Void`. - if (originalResults.empty()) { - diags - .diagnose(attr->getLocation(), diag::autodiff_attr_original_void_result, - original->getName()) - .highlight(original->getSourceRange()); - attr->setInvalid(); - return nullptr; - } - // Check that original function does not have multiple semantic results. - if (originalResults.size() > 1) { - diags - .diagnose(attr->getLocation(), - diag::autodiff_attr_original_multiple_semantic_results) - .highlight(original->getSourceRange()); - attr->setInvalid(); - return nullptr; - } - auto originalResult = originalResults.front(); - auto originalResultTy = originalResult.type; - // Check that the original semantic result conforms to `Differentiable`. - if (!conformsToDifferentiable(originalResultTy, original)) { - diags.diagnose(attr->getLocation(), - diag::differentiable_attr_result_not_differentiable, - originalResultTy); - attr->setInvalid(); - return nullptr; - } - if (auto *asd = dyn_cast(D)) { // Remove `@differentiable` attribute from storage declaration to prevent // duplicate attribute registration during SILGen. @@ -4338,8 +4355,6 @@ static bool typeCheckDerivativeAttr(ASTContext &Ctx, Decl *D, if (checkIfDifferentiableProgrammingEnabled(Ctx, attr, D->getDeclContext())) return true; auto *derivative = cast(D); - auto lookupConformance = - LookUpConformanceInModule(D->getDeclContext()->getParentModule()); auto originalName = attr->getOriginalFunctionName(); auto *derivativeInterfaceType = @@ -4572,61 +4587,54 @@ static bool typeCheckDerivativeAttr(ASTContext &Ctx, Decl *D, if (!resolvedDiffParamIndices) return true; - // Check if the differentiability parameter indices are valid. - if (checkDifferentiabilityParameters( - originalAFD, resolvedDiffParamIndices, originalFnType, - derivative->getGenericEnvironment(), derivative->getModuleContext(), - parsedDiffParams, attr->getLocation())) - return true; - // Set the resolved differentiability parameter indices in the attribute. + // Differentiability parameter indices verification is done by + // `AnyFunctionType::getAutoDiffDerivativeFunctionLinearMapType` below. attr->setParameterIndices(resolvedDiffParamIndices); // Compute the expected differential/pullback type. auto expectedLinearMapTypeOrError = originalFnType->getAutoDiffDerivativeFunctionLinearMapType( - resolvedDiffParamIndices, kind.getLinearMapKind(), lookupConformance, + resolvedDiffParamIndices, kind.getLinearMapKind(), + LookUpConformanceInModule(derivative->getModuleContext()), /*makeSelfParamFirst*/ true); // Helper for diagnosing derivative function type errors. 
auto errorHandler = [&](const DerivativeFunctionTypeError &error) { + attr->setInvalid(); switch (error.kind) { case DerivativeFunctionTypeError::Kind::NoSemanticResults: diags .diagnose(attr->getLocation(), - diag::autodiff_attr_original_multiple_semantic_results) + diag::autodiff_attr_original_void_result, + originalAFD->getName()) .highlight(attr->getOriginalFunctionName().Loc.getSourceRange()); - attr->setInvalid(); return; case DerivativeFunctionTypeError::Kind::MultipleSemanticResults: diags .diagnose(attr->getLocation(), diag::autodiff_attr_original_multiple_semantic_results) .highlight(attr->getOriginalFunctionName().Loc.getSourceRange()); - attr->setInvalid(); return; - case DerivativeFunctionTypeError::Kind::NonDifferentiableParameters: { - auto *nonDiffParamIndices = error.getNonDifferentiableParameterIndices(); - SmallVector diffParams; - error.functionType->getSubsetParameters(resolvedDiffParamIndices, - diffParams); - for (unsigned i : range(diffParams.size())) { - if (!nonDiffParamIndices->contains(i)) - continue; - SourceLoc loc = parsedDiffParams.empty() ? attr->getLocation() - : parsedDiffParams[i].getLoc(); - auto diffParamType = diffParams[i].getPlainType(); - diags.diagnose(loc, diag::diff_params_clause_param_not_differentiable, - diffParamType); - } + case DerivativeFunctionTypeError::Kind::NoDifferentiabilityParameters: + diags.diagnose(attr->getLocation(), + diag::diff_params_clause_no_inferred_parameters); + return; + case DerivativeFunctionTypeError::Kind:: + NonDifferentiableDifferentiabilityParameter: { + auto nonDiffParam = error.getNonDifferentiableTypeAndIndex(); + SourceLoc loc = parsedDiffParams.empty() + ? attr->getLocation() + : parsedDiffParams[nonDiffParam.second].getLoc(); + diags.diagnose(loc, diag::diff_params_clause_param_not_differentiable, + nonDiffParam.first); return; } case DerivativeFunctionTypeError::Kind::NonDifferentiableResult: - auto originalResultType = error.getNonDifferentiableResultType(); + auto nonDiffResult = error.getNonDifferentiableTypeAndIndex(); diags.diagnose(attr->getLocation(), - diag::differentiable_attr_result_not_differentiable, - originalResultType); - attr->setInvalid(); + diag::autodiff_attr_result_not_differentiable, + nonDiffResult.first); return; } }; diff --git a/lib/Sema/TypeCheckConstraints.cpp b/lib/Sema/TypeCheckConstraints.cpp index 05a11aea80e7a..ebb57af2e5ae6 100644 --- a/lib/Sema/TypeCheckConstraints.cpp +++ b/lib/Sema/TypeCheckConstraints.cpp @@ -90,6 +90,14 @@ bool TypeVariableType::Implementation::isClosureType() const { return isExpr(locator->getAnchor()) && locator->getPath().empty(); } +bool TypeVariableType::Implementation::isClosureParameterType() const { + if (!(locator && locator->getAnchor())) + return false; + + return isExpr(locator->getAnchor()) && + locator->isLastElement(); +} + bool TypeVariableType::Implementation::isClosureResultType() const { if (!(locator && locator->getAnchor())) return false; @@ -2214,7 +2222,7 @@ getTypeOfCompletionOperatorImpl(DeclContext *DC, Expr *expr, if (!expr) return nullptr; - if (Context.TypeCheckerOpts.DebugConstraintSolver) { + if (CS.isDebugMode()) { auto &log = Context.TypeCheckerDebug->getStream(); log << "---Initial constraints for the given expression---\n"; expr->dump(log); @@ -2228,7 +2236,7 @@ getTypeOfCompletionOperatorImpl(DeclContext *DC, Expr *expr, return nullptr; auto &solution = viable[0]; - if (Context.TypeCheckerOpts.DebugConstraintSolver) { + if (CS.isDebugMode()) { auto &log = Context.TypeCheckerDebug->getStream(); log << 
"---Solution---\n"; solution.dump(log); diff --git a/lib/Sema/TypeCheckDeclObjC.cpp b/lib/Sema/TypeCheckDeclObjC.cpp index efffa42cb726e..21002610485e9 100644 --- a/lib/Sema/TypeCheckDeclObjC.cpp +++ b/lib/Sema/TypeCheckDeclObjC.cpp @@ -304,16 +304,17 @@ static bool isParamListRepresentableInObjC(const AbstractFunctionDecl *AFD, /// Check whether the given declaration contains its own generic parameters, /// and therefore is not representable in Objective-C. -static bool checkObjCWithGenericParams(const AbstractFunctionDecl *AFD, - ObjCReason Reason) { - bool Diagnose = shouldDiagnoseObjCReason(Reason, AFD->getASTContext()); +static bool checkObjCWithGenericParams(const ValueDecl *VD, ObjCReason Reason) { + bool Diagnose = shouldDiagnoseObjCReason(Reason, VD->getASTContext()); - if (AFD->getGenericParams()) { + auto *GC = VD->getAsGenericContext(); + assert(GC); + if (GC->getGenericParams()) { // Diagnose this problem, if asked to. if (Diagnose) { - AFD->diagnose(diag::objc_invalid_with_generic_params, - getObjCDiagnosticAttrKind(Reason)); - describeObjCReason(AFD, Reason); + VD->diagnose(diag::objc_invalid_with_generic_params, + VD->getDescriptiveKind(), getObjCDiagnosticAttrKind(Reason)); + describeObjCReason(VD, Reason); } return true; @@ -855,6 +856,8 @@ bool swift::isRepresentableInObjC(const SubscriptDecl *SD, ObjCReason Reason) { if (checkObjCInForeignClassContext(SD, Reason)) return false; + if (checkObjCWithGenericParams(SD, Reason)) + return false; // ObjC doesn't support class subscripts. if (!SD->isInstanceMember()) { diff --git a/lib/Sema/TypeCheckExpr.cpp b/lib/Sema/TypeCheckExpr.cpp index c68113bb49531..ae84ed824b78a 100644 --- a/lib/Sema/TypeCheckExpr.cpp +++ b/lib/Sema/TypeCheckExpr.cpp @@ -789,7 +789,7 @@ Expr *CallerSideDefaultArgExprRequest::evaluate( // Re-create the default argument using the location info of the call site. auto *initExpr = - synthesizeCallerSideDefault(param, defaultExpr->getArgumentListLoc()); + synthesizeCallerSideDefault(param, defaultExpr->getLoc()); auto *dc = defaultExpr->ContextOrCallerSideExpr.get(); assert(dc && "Expected a DeclContext before type-checking caller-side arg"); diff --git a/lib/Sema/TypeCheckPattern.cpp b/lib/Sema/TypeCheckPattern.cpp index 3cd1cf5a743cb..d74a4841258bf 100644 --- a/lib/Sema/TypeCheckPattern.cpp +++ b/lib/Sema/TypeCheckPattern.cpp @@ -655,12 +655,8 @@ Pattern *TypeChecker::resolvePattern(Pattern *P, DeclContext *DC, // "if let" implicitly looks inside of an optional, so wrap it in an // OptionalSome pattern. - InnerP = new (Context) OptionalSomePattern(InnerP, InnerP->getEndLoc()); - InnerP->setImplicit(); - if (auto *TP = dyn_cast(P)) - TP->setSubPattern(InnerP); - else - P = InnerP; + P = new (Context) OptionalSomePattern(P, P->getEndLoc()); + P->setImplicit(); } return P; @@ -811,9 +807,24 @@ Type PatternTypeRequest::evaluate(Evaluator &evaluator, // // Refutable patterns occur when checking the PatternBindingDecls in if/let, // while/let, and let/else conditions. + case PatternKind::OptionalSome: { + // Annotated if-let patterns are rewritten by TypeChecker::resolvePattern + // to have an enclosing implicit (...)? pattern. If we can resolve the inner + // typed pattern, the resulting pattern must have optional type. 
+ auto somePat = cast(P); + if (somePat->isImplicit() && isa(somePat->getSubPattern())) { + auto resolution = TypeResolution::forContextual(dc, options); + TypedPattern *TP = cast(somePat->getSubPattern()); + auto type = validateTypedPattern(resolution, TP, options); + if (type && !type->hasError()) { + return OptionalType::get(type); + } + } + LLVM_FALLTHROUGH; + } + case PatternKind::Is: case PatternKind::EnumElement: - case PatternKind::OptionalSome: case PatternKind::Bool: case PatternKind::Expr: // In a let/else, these always require an initial value to match against. diff --git a/lib/Sema/TypeCheckProtocol.cpp b/lib/Sema/TypeCheckProtocol.cpp index 1387c641ae003..6eb4752922ca5 100644 --- a/lib/Sema/TypeCheckProtocol.cpp +++ b/lib/Sema/TypeCheckProtocol.cpp @@ -3683,23 +3683,30 @@ ResolveWitnessResult ConformanceChecker::resolveWitnessViaDefault( # pragma mark Type witness resolution -CheckTypeWitnessResult swift::checkTypeWitness(DeclContext *dc, - ProtocolDecl *proto, +CheckTypeWitnessResult swift::checkTypeWitness(Type type, AssociatedTypeDecl *assocType, - Type type) { - auto genericSig = proto->getGenericSignature(); - auto *depTy = DependentMemberType::get(proto->getSelfInterfaceType(), - assocType); - + NormalProtocolConformance *Conf) { if (type->hasError()) - return ErrorType::get(proto->getASTContext()); + return ErrorType::get(assocType->getASTContext()); + + const auto proto = Conf->getProtocol(); + const auto dc = Conf->getDeclContext(); + const auto genericSig = proto->getGenericSignature(); + const auto depTy = DependentMemberType::get(proto->getSelfInterfaceType(), + assocType); Type contextType = type->hasTypeParameter() ? dc->mapTypeIntoContext(type) : type; - // FIXME: This is incorrect; depTy is written in terms of the protocol's - // associated types, and we need to substitute in known type witnesses. if (auto superclass = genericSig->getSuperclassBound(depTy)) { + // If the superclass has a type parameter, substitute in known type + // witnesses. + if (superclass->hasTypeParameter()) { + const auto subMap = SubstitutionMap::getProtocolSubstitutions( + proto, Conf->getType(), ProtocolConformanceRef(Conf)); + + superclass = superclass.subst(subMap); + } if (!superclass->isExactSuperclassOf(contextType)) return superclass; } @@ -3707,7 +3714,7 @@ CheckTypeWitnessResult swift::checkTypeWitness(DeclContext *dc, auto *module = dc->getParentModule(); // Check protocol conformances. - for (auto reqProto : genericSig->getConformsTo(depTy)) { + for (const auto reqProto : genericSig->getRequiredProtocols(depTy)) { if (module->lookupConformance(contextType, reqProto) .isInvalid()) return CheckTypeWitnessResult(reqProto->getDeclaredType()); @@ -3796,7 +3803,7 @@ ResolveWitnessResult ConformanceChecker::resolveTypeWitnessViaLookup( // Check this type against the protocol requirements. 
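A quick sketch of the annotated `if let` shapes affected by the pattern changes above:

func test(_ value: Int?) {
  // resolvePattern wraps `other: Int` in an implicit (...)? pattern, and the
  // typed sub-pattern is resolved up front, giving the pattern type Int?.
  if let other: Int = value {
    _ = other
  }

  // Annotating with an optional type forces promotion of the initializer to
  // Int?? and takes the tuned warning path updated in MiscDiagnostics above.
  if let wrapped: Int? = value {
    _ = wrapped
  }
}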
if (auto checkResult = - checkTypeWitness(DC, Proto, assocType, candidate.MemberType)) { + checkTypeWitness(candidate.MemberType, assocType, Conformance)) { nonViable.push_back({candidate.Member, checkResult}); } else { viable.push_back(candidate); @@ -5023,8 +5030,8 @@ diagnoseMissingAppendInterpolationMethod(NominalTypeDecl *typeDecl) { SmallVector LookupAllConformancesInContextRequest::evaluate( - Evaluator &eval, const DeclContext *DC) const { - return DC->getLocalConformances(ConformanceLookupKind::All); + Evaluator &eval, const IterableDeclContext *IDC) const { + return IDC->getLocalConformances(ConformanceLookupKind::All); } void TypeChecker::checkConformancesInContext(DeclContext *dc, @@ -5050,7 +5057,7 @@ void TypeChecker::checkConformancesInContext(DeclContext *dc, // Check each of the conformances associated with this context. auto conformances = evaluateOrDefault(dc->getASTContext().evaluator, - LookupAllConformancesInContextRequest{dc}, {}); + LookupAllConformancesInContextRequest{idc}, {}); // The conformance checker bundle that checks all conformances in the context. auto &Context = dc->getASTContext(); @@ -5106,7 +5113,7 @@ void TypeChecker::checkConformancesInContext(DeclContext *dc, groupChecker.getUnsatisfiedRequirements().end()); // Diagnose any conflicts attributed to this declaration context. - for (const auto &diag : dc->takeConformanceDiagnostics()) { + for (const auto &diag : idc->takeConformanceDiagnostics()) { // Figure out the declaration of the existing conformance. Decl *existingDecl = dyn_cast(diag.ExistingDC); if (!existingDecl) diff --git a/lib/Sema/TypeCheckProtocol.h b/lib/Sema/TypeCheckProtocol.h index 40a01a5aaa2b1..5149fbf6c5a8f 100644 --- a/lib/Sema/TypeCheckProtocol.h +++ b/lib/Sema/TypeCheckProtocol.h @@ -91,12 +91,12 @@ class CheckTypeWitnessResult { }; /// Check whether the given type witness can be used for the given -/// associated type. +/// associated type in the given conformance. /// /// \returns an empty result on success, or a description of the error. -CheckTypeWitnessResult checkTypeWitness(DeclContext *dc, ProtocolDecl *proto, +CheckTypeWitnessResult checkTypeWitness(Type type, AssociatedTypeDecl *assocType, - Type type); + NormalProtocolConformance *Conf); /// The set of associated types that have been inferred by matching /// the given value witness to its corresponding requirement. diff --git a/lib/Sema/TypeCheckProtocolInference.cpp b/lib/Sema/TypeCheckProtocolInference.cpp index 99ded4102488b..9e4a214187f22 100644 --- a/lib/Sema/TypeCheckProtocolInference.cpp +++ b/lib/Sema/TypeCheckProtocolInference.cpp @@ -171,25 +171,17 @@ AssociatedTypeInference::inferTypeWitnessesViaValueWitnesses( InferredAssociatedTypesByWitnesses result; - auto isExtensionUsableForInference = [&](ExtensionDecl *extension) -> bool { - - // The extension where the conformance being checked is declared. - auto conformanceExtension = checker.Conformance-> - getDeclContext()->getAsDecl(); - if (extension == conformanceExtension) + auto isExtensionUsableForInference = [&](const ExtensionDecl *extension) { + // The context the conformance being checked is declared on. + const auto conformanceCtx = checker.Conformance->getDeclContext(); + if (extension == conformanceCtx) return true; - auto *extendedNominal = extension->getExtendedNominal(); - // Invalid case. + const auto extendedNominal = extension->getExtendedNominal(); if (extendedNominal == nullptr) return true; - - // Assume unconstrained concrete extensions we found witnesses in are - // always viable. 
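A hypothetical conformance of the shape the new substitution handles, where the superclass bound of one associated type is written in terms of another:

class Base<T> {}

protocol P {
  associatedtype B
  associatedtype A: Base<B>
}

struct S: P {
  typealias B = Int
  typealias A = Base<Int>   // checked against Base<Int> via the conformance's
                            // substitutions rather than the unsubstituted bound
}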
- if (!isa(extendedNominal)) - return !extension->isConstrainedExtension(); - + // FIXME: The extension may not have a generic signature set up yet as // resolving signatures may trigger associated type inference. This cycle // is now detectable and we should look into untangling it @@ -197,9 +189,21 @@ AssociatedTypeInference::inferTypeWitnessesViaValueWitnesses( if (!extension->hasComputedGenericSignature()) return true; - // Build a generic signature. - auto extensionSig = extension->getGenericSignature(); - + // Retrieve the generic signature of the extension. + const auto extensionSig = extension->getGenericSignature(); + + // If the extension is bound to the nominal the conformance is + // declared on, it is viable for inference when its conditional + // requirements are satisfied by those of the conformance context. + if (!isa(extendedNominal)) { + // Extensions of non-generic nominals are always viable for inference. + if (!extensionSig) + return true; + + return extensionSig->requirementsNotSatisfiedBy( + conformanceCtx->getGenericSignatureOfContext()).empty(); + } + // The condition here is a bit more fickle than // `isExtensionApplied`. That check would prematurely reject // extensions like `P where AssocType == T` if we're relying on a @@ -376,7 +380,7 @@ AssociatedTypeInference::inferTypeWitnessesViaValueWitnesses( // Check that the type witness meets the // requirements on the associated type. if (auto failed = - checkTypeWitness(dc, proto, result.first, result.second)) { + checkTypeWitness(result.second, result.first, conformance)) { witnessResult.NonViable.push_back( std::make_tuple(result.first,result.second,failed)); LLVM_DEBUG(llvm::dbgs() << "-- doesn't fulfill requirements\n"); @@ -413,9 +417,7 @@ AssociatedTypeInference::inferTypeWitnessesViaValueWitnesses( InferredAssociatedTypes result; for (auto member : proto->getMembers()) { auto req = dyn_cast(member); - if (!req) - continue; - if (!req->isProtocolRequirement()) + if (!req || !req->isProtocolRequirement()) continue; // Infer type witnesses for associated types. @@ -449,19 +451,14 @@ AssociatedTypeInference::inferTypeWitnessesViaValueWitnesses( // Check whether any of the associated types we care about are // referenced in this value requirement. - bool anyAssocTypeMatches = false; - for (auto assocType : checker.getReferencedAssociatedTypes(req)) { - if (assocTypes.count(assocType) > 0) { - anyAssocTypeMatches = true; - break; - } + { + const auto referenced = checker.getReferencedAssociatedTypes(req); + if (llvm::find_if(referenced, [&](AssociatedTypeDecl *const assocType) { + return assocTypes.count(assocType); + }) == referenced.end()) + continue; } - // We cannot deduce anything from the witnesses of this - // requirement; skip it. - if (!anyAssocTypeMatches) - continue; - // Infer associated types from the potential value witnesses for // this requirement. auto reqInferred = @@ -857,7 +854,7 @@ Type AssociatedTypeInference::computeDefaultTypeWitness( if (defaultType->hasError()) return Type(); - if (auto failed = checkTypeWitness(dc, proto, assocType, defaultType)) { + if (auto failed = checkTypeWitness(defaultType, assocType, conformance)) { // Record the failure, if we haven't seen one already. if (!failedDefaultedAssocType && !failed.isError()) { failedDefaultedAssocType = defaultedAssocType; @@ -889,7 +886,7 @@ Type AssociatedTypeInference::computeDerivedTypeWitness( return Type(); // Make sure that the derived type is sane. 
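The membership test rewritten earlier in this hunk replaces a hand-rolled flag-and-break loop with llvm::find_if, LLVM's range wrapper around std::find_if: the requirement is skipped only when none of the associated types it references is one we still need to infer. A self-contained equivalent using the standard algorithm (names are illustrative only):

    #include <algorithm>
    #include <cassert>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      // Associated types referenced by one value requirement, and the set of
      // associated types we are still trying to infer.
      std::vector<std::string> referenced = {"Element", "Index"};
      std::set<std::string> assocTypes = {"Index", "Iterator"};

      bool anyMatch =
          std::find_if(referenced.begin(), referenced.end(),
                       [&](const std::string &name) {
                         return assocTypes.count(name) > 0;
                       }) != referenced.end();

      // Only when nothing matches does the caller 'continue' past the requirement.
      assert(anyMatch);
      return 0;
    }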
- if (checkTypeWitness(dc, proto, assocType, derivedType)) { + if (checkTypeWitness(derivedType, assocType, conformance)) { /// FIXME: Diagnose based on this. failedDerivedAssocType = assocType; failedDerivedWitness = derivedType; diff --git a/lib/Sema/TypeCheckSwitchStmt.cpp b/lib/Sema/TypeCheckSwitchStmt.cpp index 7e24182b48257..dd5e04c6a590d 100644 --- a/lib/Sema/TypeCheckSwitchStmt.cpp +++ b/lib/Sema/TypeCheckSwitchStmt.cpp @@ -212,12 +212,6 @@ namespace { } return Space(T, H, SP); } - static Space forConstructor(Type T, DeclName H, - std::forward_list SP) { - // No need to filter SP here; this is only used to copy other - // Constructor spaces. - return Space(T, H, SP); - } static Space forBool(bool C) { return Space(C); } @@ -1441,7 +1435,7 @@ namespace { if (subSpace.getKind() == SpaceKind::Constructor && subSpace.getHead().getBaseIdentifier().empty()) { return Space::forConstructor(item->getType(), name, - std::move(subSpace.getSpaces())); + {subSpace}); } return Space::forConstructor(item->getType(), name, subSpace); } diff --git a/lib/Sema/TypeChecker.cpp b/lib/Sema/TypeChecker.cpp index edbd80a4027ec..44e964fad2b53 100644 --- a/lib/Sema/TypeChecker.cpp +++ b/lib/Sema/TypeChecker.cpp @@ -332,13 +332,6 @@ TypeCheckSourceFileRequest::evaluate(Evaluator &eval, SourceFile *SF) const { FrontendStatsTracer tracer(Ctx.Stats, "Type checking and Semantic analysis"); - if (Ctx.TypeCheckerOpts.SkipNonInlinableFunctionBodies) - // Disable this optimization if we're compiling SwiftOnoneSupport, because - // we _definitely_ need to look inside every declaration to figure out - // what gets prespecialized. - if (SF->getParentModule()->isOnoneSupportModule()) - Ctx.TypeCheckerOpts.SkipNonInlinableFunctionBodies = false; - if (!Ctx.LangOpts.DisableAvailabilityChecking) { // Build the type refinement hierarchy for the primary // file before type checking. @@ -358,10 +351,11 @@ TypeCheckSourceFileRequest::evaluate(Evaluator &eval, SourceFile *SF) const { typeCheckDelayedFunctions(*SF); } - // Checking that benefits from having the whole module available. - if (!Ctx.TypeCheckerOpts.DelayWholeModuleChecking) { - performWholeModuleTypeChecking(*SF); - } + // Check to see if there's any inconsistent @_implementationOnly imports. + evaluateOrDefault( + Ctx.evaluator, + CheckInconsistentImplementationOnlyImportsRequest{SF->getParentModule()}, + {}); // Perform various AST transforms we've been asked to perform. if (!Ctx.hadError() && Ctx.LangOpts.DebuggerTestingTransform) @@ -413,84 +407,6 @@ bool swift::isAdditiveArithmeticConformanceDerivationEnabled(SourceFile &SF) { return isDifferentiableProgrammingEnabled(SF); } -void swift::checkInconsistentImplementationOnlyImports(ModuleDecl *MainModule) { - bool hasAnyImplementationOnlyImports = - llvm::any_of(MainModule->getFiles(), [](const FileUnit *F) -> bool { - auto *SF = dyn_cast(F); - return SF && SF->hasImplementationOnlyImports(); - }); - if (!hasAnyImplementationOnlyImports) - return; - - auto diagnose = [MainModule](const ImportDecl *normalImport, - const ImportDecl *implementationOnlyImport) { - auto &diags = MainModule->getDiags(); - { - InFlightDiagnostic warning = - diags.diagnose(normalImport, diag::warn_implementation_only_conflict, - normalImport->getModule()->getName()); - if (normalImport->getAttrs().isEmpty()) { - // Only try to add a fix-it if there's no other annotations on the - // import to avoid creating things like - // `@_implementationOnly @_exported import Foo`. The developer can - // resolve those manually. 
- warning.fixItInsert(normalImport->getStartLoc(), - "@_implementationOnly "); - } - } - diags.diagnose(implementationOnlyImport, - diag::implementation_only_conflict_here); - }; - - llvm::DenseMap> normalImports; - llvm::DenseMap implementationOnlyImports; - - for (const FileUnit *file : MainModule->getFiles()) { - auto *SF = dyn_cast(file); - if (!SF) - continue; - - for (auto *topLevelDecl : SF->getTopLevelDecls()) { - auto *nextImport = dyn_cast(topLevelDecl); - if (!nextImport) - continue; - - ModuleDecl *module = nextImport->getModule(); - if (!module) - continue; - - if (nextImport->getAttrs().hasAttribute()) { - // We saw an implementation-only import. - bool isNew = - implementationOnlyImports.insert({module, nextImport}).second; - if (!isNew) - continue; - - auto seenNormalImportPosition = normalImports.find(module); - if (seenNormalImportPosition != normalImports.end()) { - for (auto *seenNormalImport : seenNormalImportPosition->getSecond()) - diagnose(seenNormalImport, nextImport); - - // We're done with these; keep the map small if possible. - normalImports.erase(seenNormalImportPosition); - } - continue; - } - - // We saw a non-implementation-only import. Is that in conflict with what - // we've seen? - if (auto *seenImplementationOnlyImport = - implementationOnlyImports.lookup(module)) { - diagnose(nextImport, seenImplementationOnlyImport); - continue; - } - - // Otherwise, record it for later. - normalImports[module].push_back(nextImport); - } - } -} - bool swift::performTypeLocChecking(ASTContext &Ctx, TypeLoc &T, DeclContext *DC, bool ProduceDiagnostics) { diff --git a/lib/Serialization/DeserializeSIL.cpp b/lib/Serialization/DeserializeSIL.cpp index 7a650c6eaf1e1..5fbd28c55672b 100644 --- a/lib/Serialization/DeserializeSIL.cpp +++ b/lib/Serialization/DeserializeSIL.cpp @@ -1837,6 +1837,28 @@ bool SILDeserializer::readSILInstruction(SILFunction *Fn, SILBasicBlock *BB, break; } + case SILInstructionKind::BeginCOWMutationInst: { + assert(RecordKind == SIL_ONE_OPERAND && "Layout should be OneOperand."); + unsigned isNative = Attr; + ResultVal = Builder.createBeginCOWMutation( + Loc, + getLocalValue(ValID, + getSILType(MF->getType(TyID), (SILValueCategory)TyCategory, Fn)), + isNative != 0); + break; + } + + case SILInstructionKind::EndCOWMutationInst: { + assert(RecordKind == SIL_ONE_OPERAND && "Layout should be OneOperand."); + unsigned keepUnique = Attr; + ResultVal = Builder.createEndCOWMutation( + Loc, + getLocalValue(ValID, getSILType(MF->getType(TyID), + (SILValueCategory)TyCategory, Fn)), + keepUnique != 0); + break; + } + case SILInstructionKind::DestructureTupleInst: { assert(RecordKind == SIL_ONE_OPERAND && "Layout should be OneOperand."); SILValue Operand = getLocalValue( @@ -2298,19 +2320,19 @@ bool SILDeserializer::readSILInstruction(SILFunction *Fn, SILBasicBlock *BB, auto ResultTy = Val->getType().getFieldType( Field, SILMod, Builder.getTypeExpansionContext()); ResultVal = Builder.createRefElementAddr(Loc, Val, Field, - ResultTy); + ResultTy, /*Immutable*/ Attr & 0x1); break; } case SILInstructionKind::RefTailAddrInst: { assert(RecordKind == SIL_ONE_TYPE_ONE_OPERAND && "Layout should be OneTypeOneOperand."); - assert(Attr == 0); assert((SILValueCategory)TyCategory == SILValueCategory::Address); ResultVal = Builder.createRefTailAddr( Loc, getLocalValue(ValID, getSILType(MF->getType(TyID2), (SILValueCategory)TyCategory2, Fn)), - getSILType(MF->getType(TyID), SILValueCategory::Address, Fn)); + getSILType(MF->getType(TyID), SILValueCategory::Address, Fn), + 
/*Immutable*/ Attr & 0x1); break; } case SILInstructionKind::ClassMethodInst: diff --git a/lib/Serialization/ModuleDependencyScanner.cpp b/lib/Serialization/ModuleDependencyScanner.cpp index 2b3c5896233fc..12669f8bf5a22 100644 --- a/lib/Serialization/ModuleDependencyScanner.cpp +++ b/lib/Serialization/ModuleDependencyScanner.cpp @@ -33,12 +33,13 @@ class ModuleDependencyScanner : public SerializedModuleLoaderBase { ErrorOr scanInterfaceFile( Twine moduleInterfacePath); - SubASTContextDelegate &astDelegate; + InterfaceSubContextDelegate &astDelegate; public: Optional dependencies; ModuleDependencyScanner(ASTContext &ctx, ModuleLoadingMode LoadMode, - Identifier moduleName, SubASTContextDelegate &astDelegate) + Identifier moduleName, + InterfaceSubContextDelegate &astDelegate) : SerializedModuleLoaderBase(ctx, nullptr, LoadMode, /*IgnoreSwiftSourceInfoFile=*/true), moduleName(moduleName), astDelegate(astDelegate) { } @@ -101,12 +102,17 @@ ErrorOr ModuleDependencyScanner::scanInterfaceFile( auto newExt = file_types::getExtension(file_types::TY_SwiftModuleFile); llvm::SmallString<32> modulePath = moduleName.str(); llvm::sys::path::replace_extension(modulePath, newExt); - ModuleDependencies Result = ModuleDependencies::forSwiftInterface( - modulePath.str().str(), moduleInterfacePath.str()); + Optional Result; std::error_code code; - auto hasError = astDelegate.runInSubContext(Ctx, - moduleInterfacePath.str(), - [&](ASTContext &Ctx) { + auto hasError = astDelegate.runInSubContext(moduleName.str(), + moduleInterfacePath.str(), + StringRef(), + SourceLoc(), + [&](ASTContext &Ctx, ArrayRef Args, StringRef Hash) { + Result = ModuleDependencies::forSwiftInterface(modulePath.str().str(), + moduleInterfacePath.str(), + Args, + Hash); // Open the interface file. auto &fs = *Ctx.SourceMgr.getFileSystem(); auto interfaceBuf = fs.getBufferForFile(moduleInterfacePath); @@ -123,19 +129,19 @@ ErrorOr ModuleDependencyScanner::scanInterfaceFile( // Walk the source file to find the import declarations. llvm::StringSet<> alreadyAddedModules; - Result.addModuleDependencies(*sourceFile, alreadyAddedModules); + Result->addModuleDependencies(*sourceFile, alreadyAddedModules); return false; }); if (hasError) { return code; } - return Result; + return *Result; } Optional SerializedModuleLoaderBase::getModuleDependencies( StringRef moduleName, ModuleDependenciesCache &cache, - SubASTContextDelegate &delegate) { + InterfaceSubContextDelegate &delegate) { // Check whether we've cached this result. if (auto found = cache.findDependencies( moduleName, ModuleDependenciesKind::Swift)) diff --git a/lib/Serialization/ModuleFormat.h b/lib/Serialization/ModuleFormat.h index 5d00fbc3d4d5c..f338cde1562f1 100644 --- a/lib/Serialization/ModuleFormat.h +++ b/lib/Serialization/ModuleFormat.h @@ -55,7 +55,7 @@ const uint16_t SWIFTMODULE_VERSION_MAJOR = 0; /// describe what change you made. The content of this comment isn't important; /// it just ensures a conflict if two people change the module format. /// Don't worry about adhering to the 80-column limit for this line. -const uint16_t SWIFTMODULE_VERSION_MINOR = 556; // dont serialize Pattern::isImplicit +const uint16_t SWIFTMODULE_VERSION_MINOR = 557; // COW instructions /// A standard hash seed used for all string hashes in a serialized module. 
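In the dependency-scanner hunk above, the ModuleDependencies value can only be constructed inside the runInSubContext callback, because the sub-invocation's argument list and hash are handed to the closure; hence the switch to an Optional that the closure populates. A small standalone sketch of that pattern, with std::optional standing in for llvm::Optional and all other names invented for the demo:

    #include <cassert>
    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    struct Dependencies {
      std::string interfacePath;
      std::vector<std::string> buildArgs;
    };

    // Stand-in for runInSubContext: the extra arguments exist only inside the call.
    static bool runInSubContext(
        const std::string &interfacePath,
        const std::function<bool(const std::vector<std::string> &)> &body) {
      std::vector<std::string> subInvocationArgs = {"-swift-version", "5"};
      (void)interfacePath;
      return body(subInvocationArgs);  // false means "no error", as in the patch
    }

    int main() {
      std::optional<Dependencies> result;
      bool hadError = runInSubContext("Foo.swiftinterface",
                                      [&](const std::vector<std::string> &args) {
        // Construct the result inside the callback, where the args are visible.
        result = Dependencies{"Foo.swiftinterface", args};
        return false;
      });
      assert(!hadError && result.has_value());
      return 0;
    }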
/// diff --git a/lib/Serialization/SerializeSIL.cpp b/lib/Serialization/SerializeSIL.cpp index 3a6b090eba568..0ba068798906f 100644 --- a/lib/Serialization/SerializeSIL.cpp +++ b/lib/Serialization/SerializeSIL.cpp @@ -1288,6 +1288,8 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { case SILInstructionKind::StrongReleaseInst: case SILInstructionKind::StrongRetainInst: case SILInstructionKind::IsUniqueInst: + case SILInstructionKind::BeginCOWMutationInst: + case SILInstructionKind::EndCOWMutationInst: case SILInstructionKind::AbortApplyInst: case SILInstructionKind::EndApplyInst: case SILInstructionKind::ReturnInst: @@ -1309,6 +1311,10 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { Attr = unsigned(SILValue(UOCI).getOwnershipKind()); } else if (auto *IEC = dyn_cast(&SI)) { Attr = IEC->getVerificationType(); + } else if (auto *BCMI = dyn_cast(&SI)) { + Attr = BCMI->isNative(); + } else if (auto *ECMI = dyn_cast(&SI)) { + Attr = ECMI->doKeepUnique(); } writeOneOperandLayout(SI.getKind(), Attr, SI.getOperand(0)); break; @@ -1794,11 +1800,13 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { // where the field decl is streamed as a ValueID. SILValue operand; Decl *tDecl; + unsigned attr = 0; switch (SI.getKind()) { default: llvm_unreachable("Out of sync with parent switch"); case SILInstructionKind::RefElementAddrInst: operand = cast(&SI)->getOperand(); tDecl = cast(&SI)->getField(); + attr = unsigned(cast(&SI)->isImmutable()); break; case SILInstructionKind::StructElementAddrInst: operand = cast(&SI)->getOperand(); @@ -1827,7 +1835,7 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { } SILOneValueOneOperandLayout::emitRecord(Out, ScratchRecord, SILAbbrCodes[SILOneValueOneOperandLayout::Code], - (unsigned)SI.getKind(), 0, S.addDeclRef(tDecl), + (unsigned)SI.getKind(), attr, S.addDeclRef(tDecl), S.addTypeRef(operand->getType().getASTType()), (unsigned)operand->getType().getCategory(), addValueRef(operand)); @@ -1835,7 +1843,7 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { } case SILInstructionKind::RefTailAddrInst: { auto *RTAI = cast(&SI); - writeOneTypeOneOperandLayout(RTAI->getKind(), 0, + writeOneTypeOneOperandLayout(RTAI->getKind(), unsigned(RTAI->isImmutable()), RTAI->getType(), RTAI->getOperand()); break; @@ -2645,9 +2653,6 @@ void SILSerializer::writeSILBlock(const SILModule *SILMod) { // Go through all SILVTables in SILMod and write them if we should // serialize everything. // FIXME: Resilience: could write out vtable for fragile classes. 
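The serializer hunks above, and the matching deserializer hunks earlier, rely on the one-operand and one-type-one-operand record layouts having a spare unsigned Attr slot: a single boolean (isNative, doKeepUnique, isImmutable) is written as 0/1 and recovered on the reading side with Attr & 0x1 or a non-zero test. A trivial round-trip sketch of that encoding:

    #include <cassert>

    // Encode one boolean flag into the record's spare Attr field ...
    static unsigned encodeAttr(bool flag) { return flag ? 1u : 0u; }

    // ... and recover it when deserializing, as the hunks above do.
    static bool decodeAttr(unsigned attr) { return (attr & 0x1) != 0; }

    int main() {
      assert(decodeAttr(encodeAttr(true)));    // e.g. begin_cow_mutation [native]
      assert(!decodeAttr(encodeAttr(false)));  // e.g. a mutable ref_element_addr
      return 0;
    }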
- const DeclContext *assocDC = SILMod->getAssociatedContext(); - assert(assocDC && "cannot serialize SIL without an associated DeclContext"); - (void)assocDC; for (const SILVTable &vt : SILMod->getVTables()) { if ((ShouldSerializeAll || vt.isSerialized()) && SILMod->shouldSerializeEntitiesAssociatedWithDeclContext(vt.getClass())) diff --git a/lib/Serialization/SerializedModuleLoader.cpp b/lib/Serialization/SerializedModuleLoader.cpp index 8aca189d454b5..93288b423e063 100644 --- a/lib/Serialization/SerializedModuleLoader.cpp +++ b/lib/Serialization/SerializedModuleLoader.cpp @@ -196,18 +196,20 @@ void SerializedModuleLoaderBase::collectVisibleTopLevelModuleNamesImpl( bool requireTargetSpecificModule = Ctx.LangOpts.Target.isOSDarwin(); forEachDirectoryEntryPath(searchPath, [&](StringRef path) { auto pathExt = llvm::sys::path::extension(path); - if (requireTargetSpecificModule) { - if (pathExt != moduleSuffix) - return; - if (!checkTargetFiles(path)) + + if (pathExt != moduleSuffix) + if (requireTargetSpecificModule || pathExt != suffix) return; - } else { - if (suffix != pathExt) + + if (!checkTargetFiles(path)) { + if (requireTargetSpecificModule) return; + auto stat = fs.status(path); if (!stat || stat->isDirectory()) return; } + // Extract module name. auto name = llvm::sys::path::filename(path).drop_back(pathExt.size()); names.push_back(Ctx.getIdentifier(name)); diff --git a/lib/SymbolGraphGen/Edge.cpp b/lib/SymbolGraphGen/Edge.cpp index 3184d5382edc5..49c78fc20e6d4 100644 --- a/lib/SymbolGraphGen/Edge.cpp +++ b/lib/SymbolGraphGen/Edge.cpp @@ -42,20 +42,19 @@ void Edge::serialize(llvm::json::OStream &OS) const { } if (ConformanceExtension) { - if (const auto *Generics = ConformanceExtension->getAsGenericContext()) { - SmallVector FilteredRequirements; - filterGenericRequirements(Generics->getGenericRequirements(), - ConformanceExtension->getExtendedNominal() - ->getDeclContext()->getSelfNominalTypeDecl(), - FilteredRequirements); - if (!FilteredRequirements.empty()) { - OS.attributeArray("swiftConstraints", [&](){ - for (const auto &Req : - ConformanceExtension->getGenericRequirements()) { - ::serialize(Req, OS); - } - }); - } + SmallVector FilteredRequirements; + filterGenericRequirements( + ConformanceExtension->getGenericRequirements(), + ConformanceExtension->getExtendedNominal() + ->getDeclContext()->getSelfNominalTypeDecl(), + FilteredRequirements); + if (!FilteredRequirements.empty()) { + OS.attributeArray("swiftConstraints", [&](){ + for (const auto &Req : + ConformanceExtension->getGenericRequirements()) { + ::serialize(Req, OS); + } + }); } } }); diff --git a/lib/SymbolGraphGen/JSON.cpp b/lib/SymbolGraphGen/JSON.cpp index 9aa3625c76c82..2517b08d96cd2 100644 --- a/lib/SymbolGraphGen/JSON.cpp +++ b/lib/SymbolGraphGen/JSON.cpp @@ -66,21 +66,20 @@ void swift::symbolgraphgen::serialize(const ExtensionDecl *Extension, OS.attribute("extendedModule", ExtendedModule->getNameStr()); } } - if (const auto Generics = Extension->getAsGenericContext()) { - SmallVector FilteredRequirements; - - filterGenericRequirements(Generics->getGenericRequirements(), - Extension->getExtendedNominal() - ->getDeclContext()->getSelfNominalTypeDecl(), - FilteredRequirements); - - if (!FilteredRequirements.empty()) { - OS.attributeArray("constraints", [&](){ - for (const auto &Requirement : FilteredRequirements) { - serialize(Requirement, OS); - } - }); // end constraints: - } + + SmallVector FilteredRequirements; + + filterGenericRequirements(Extension->getGenericRequirements(), + 
Extension->getExtendedNominal() + ->getDeclContext()->getSelfNominalTypeDecl(), + FilteredRequirements); + + if (!FilteredRequirements.empty()) { + OS.attributeArray("constraints", [&](){ + for (const auto &Requirement : FilteredRequirements) { + serialize(Requirement, OS); + } + }); // end constraints: } }); // end swiftExtension: } diff --git a/lib/TBDGen/TBDGen.cpp b/lib/TBDGen/TBDGen.cpp index 97400c9dd7fb6..9301f9281c888 100644 --- a/lib/TBDGen/TBDGen.cpp +++ b/lib/TBDGen/TBDGen.cpp @@ -444,8 +444,8 @@ void TBDGenVisitor::addBaseConformanceDescriptor( addSymbol(entity); } -void TBDGenVisitor::addConformances(DeclContext *DC) { - for (auto conformance : DC->getLocalConformances( +void TBDGenVisitor::addConformances(const IterableDeclContext *IDC) { + for (auto conformance : IDC->getLocalConformances( ConformanceLookupKind::NonInherited)) { auto protocol = conformance->getProtocol(); auto needsWTable = @@ -462,8 +462,9 @@ void TBDGenVisitor::addConformances(DeclContext *DC) { // We cannot emit the witness table symbol if the protocol is imported from // another module and it's resilient, because initialization of that protocol // is necessary in this case - if (!rootConformance->getProtocol()->isResilient(DC->getParentModule(), - ResilienceExpansion::Maximal)) + if (!rootConformance->getProtocol()->isResilient( + IDC->getAsGenericContext()->getParentModule(), + ResilienceExpansion::Maximal)) addSymbol(LinkEntity::forProtocolWitnessTable(rootConformance)); addSymbol(LinkEntity::forProtocolConformanceDescriptor(rootConformance)); diff --git a/lib/TBDGen/TBDGenVisitor.h b/lib/TBDGen/TBDGenVisitor.h index ac09ddec94bca..002f0f8f02414 100644 --- a/lib/TBDGen/TBDGenVisitor.h +++ b/lib/TBDGen/TBDGenVisitor.h @@ -95,7 +95,7 @@ class TBDGenVisitor : public ASTVisitor { void addSymbol(LinkEntity entity); - void addConformances(DeclContext *DC); + void addConformances(const IterableDeclContext *IDC); void addDispatchThunk(SILDeclRef declRef); diff --git a/stdlib/cmake/modules/AddSwiftStdlib.cmake b/stdlib/cmake/modules/AddSwiftStdlib.cmake index d332e7b42e1a4..f5177e9de5499 100644 --- a/stdlib/cmake/modules/AddSwiftStdlib.cmake +++ b/stdlib/cmake/modules/AddSwiftStdlib.cmake @@ -849,6 +849,10 @@ function(_add_swift_target_library_single target name) ${SWIFTLIB_SINGLE_EXTERNAL_SOURCES} ${INCORPORATED_OBJECT_LIBRARIES_EXPRESSIONS} ${SWIFTLIB_SINGLE_XCODE_WORKAROUND_SOURCES}) + # NOTE: always inject the LLVMSupport directory before anything else. We want + # to ensure that the runtime is built with our local copy of LLVMSupport + target_include_directories(${target} BEFORE PRIVATE + ${SWIFT_SOURCE_DIR}/stdlib/include) if(("${SWIFT_SDK_${SWIFTLIB_SINGLE_SDK}_OBJECT_FORMAT}" STREQUAL "ELF" OR "${SWIFT_SDK_${SWIFTLIB_SINGLE_SDK}_OBJECT_FORMAT}" STREQUAL "COFF") AND SWIFTLIB_SINGLE_TARGET_LIBRARY) diff --git a/stdlib/include/llvm-c/DataTypes.h b/stdlib/include/llvm-c/DataTypes.h new file mode 100644 index 0000000000000..0f27ba81865e0 --- /dev/null +++ b/stdlib/include/llvm-c/DataTypes.h @@ -0,0 +1,84 @@ +/*===-- include/llvm-c/DataTypes.h - Define fixed size types ------*- C -*-===*\ +|* *| +|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *| +|* Exceptions. *| +|* See https://llvm.org/LICENSE.txt for license information. 
*| +|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This file contains definitions to figure out the size of _HOST_ data types.*| +|* This file is important because different host OS's define different macros,*| +|* which makes portability tough. This file exports the following *| +|* definitions: *| +|* *| +|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*| +|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *| +|* *| +|* No library is required when using these functions. *| +|* *| +|*===----------------------------------------------------------------------===*/ + +/* Please leave this file C-compatible. */ + +#ifndef LLVM_C_DATATYPES_H +#define LLVM_C_DATATYPES_H + +#include +#include + +#ifndef _MSC_VER + +#if !defined(UINT32_MAX) +# error "The standard header is not C++11 compliant. Must #define "\ + "__STDC_LIMIT_MACROS before #including llvm-c/DataTypes.h" +#endif + +#if !defined(UINT32_C) +# error "The standard header is not C++11 compliant. Must #define "\ + "__STDC_CONSTANT_MACROS before #including llvm-c/DataTypes.h" +#endif + +/* Note that includes , if this is a C99 system. */ +#include + +#ifdef _AIX +// GCC is strict about defining large constants: they must have LL modifier. +#undef INT64_MAX +#undef INT64_MIN +#endif + +#else /* _MSC_VER */ +#ifdef __cplusplus +#include +#include +#else +#include +#include +#endif +#include + +#if defined(_WIN64) +typedef signed __int64 ssize_t; +#else +typedef signed int ssize_t; +#endif /* _WIN64 */ + +#endif /* _MSC_VER */ + +/* Set defaults for constants which we cannot find. */ +#if !defined(INT64_MAX) +# define INT64_MAX 9223372036854775807LL +#endif +#if !defined(INT64_MIN) +# define INT64_MIN ((-INT64_MAX)-1) +#endif +#if !defined(UINT64_MAX) +# define UINT64_MAX 0xffffffffffffffffULL +#endif + +#ifndef HUGE_VALF +#define HUGE_VALF (float)HUGE_VAL +#endif + +#endif /* LLVM_C_DATATYPES_H */ diff --git a/stdlib/include/llvm/ADT/ArrayRef.h b/stdlib/include/llvm/ADT/ArrayRef.h new file mode 100644 index 0000000000000..f5af01936095e --- /dev/null +++ b/stdlib/include/llvm/ADT/ArrayRef.h @@ -0,0 +1,560 @@ +//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_ARRAYREF_H +#define LLVM_ADT_ARRAYREF_H + +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Compiler.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + + /// ArrayRef - Represent a constant reference to an array (0 or more elements + /// consecutively in memory), i.e. a start pointer and a length. It allows + /// various APIs to take consecutive elements easily and conveniently. + /// + /// This class does not own the underlying data, it is expected to be used in + /// situations where the data resides in some other buffer, whose lifetime + /// extends past that of the ArrayRef. For this reason, it is not in general + /// safe to store an ArrayRef. 
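Since this is a verbatim vendored copy of LLVM's ArrayRef, it is used exactly like the upstream class. A small usage sketch, assuming stdlib/include is on the include path (as arranged by the AddSwiftStdlib.cmake hunk above) and that the other vendored headers ArrayRef.h includes are added elsewhere in this patch:

    #include "llvm/ADT/ArrayRef.h"

    #include <numeric>
    #include <vector>

    // One signature accepts vectors, C arrays, initializer lists, ...
    static int sum(llvm::ArrayRef<int> values) {
      return std::accumulate(values.begin(), values.end(), 0);
    }

    int main() {
      std::vector<int> v{1, 2, 3};
      int raw[] = {4, 5, 6};
      int total = sum(v) + sum(raw) + sum({7, 8});
      // ArrayRef does not own its buffer: the temporaries above are only safe
      // because sum() never stores the ArrayRef past the call.
      return total == 36 ? 0 : 1;
    }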
+ /// + /// This is intended to be trivially copyable, so it should be passed by + /// value. + template + class LLVM_GSL_POINTER LLVM_NODISCARD ArrayRef { + public: + using iterator = const T *; + using const_iterator = const T *; + using size_type = size_t; + using reverse_iterator = std::reverse_iterator; + + private: + /// The start of the array, in an external buffer. + const T *Data = nullptr; + + /// The number of elements. + size_type Length = 0; + + public: + /// @name Constructors + /// @{ + + /// Construct an empty ArrayRef. + /*implicit*/ ArrayRef() = default; + + /// Construct an empty ArrayRef from None. + /*implicit*/ ArrayRef(NoneType) {} + + /// Construct an ArrayRef from a single element. + /*implicit*/ ArrayRef(const T &OneElt) + : Data(&OneElt), Length(1) {} + + /// Construct an ArrayRef from a pointer and length. + /*implicit*/ ArrayRef(const T *data, size_t length) + : Data(data), Length(length) {} + + /// Construct an ArrayRef from a range. + ArrayRef(const T *begin, const T *end) + : Data(begin), Length(end - begin) {} + + /// Construct an ArrayRef from a SmallVector. This is templated in order to + /// avoid instantiating SmallVectorTemplateCommon whenever we + /// copy-construct an ArrayRef. + template + /*implicit*/ ArrayRef(const SmallVectorTemplateCommon &Vec) + : Data(Vec.data()), Length(Vec.size()) { + } + + /// Construct an ArrayRef from a std::vector. + template + /*implicit*/ ArrayRef(const std::vector &Vec) + : Data(Vec.data()), Length(Vec.size()) {} + + /// Construct an ArrayRef from a std::array + template + /*implicit*/ constexpr ArrayRef(const std::array &Arr) + : Data(Arr.data()), Length(N) {} + + /// Construct an ArrayRef from a C array. + template + /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {} + + /// Construct an ArrayRef from a std::initializer_list. +#if LLVM_GNUC_PREREQ(9, 0, 0) +// Disable gcc's warning in this constructor as it generates an enormous amount +// of messages. Anyone using ArrayRef should already be aware of the fact that +// it does not do lifetime extension. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winit-list-lifetime" +#endif + /*implicit*/ ArrayRef(const std::initializer_list &Vec) + : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()), + Length(Vec.size()) {} +#if LLVM_GNUC_PREREQ(9, 0, 0) +#pragma GCC diagnostic pop +#endif + + /// Construct an ArrayRef from ArrayRef. This uses SFINAE to + /// ensure that only ArrayRefs of pointers can be converted. + template + ArrayRef(const ArrayRef &A, + std::enable_if_t::value> + * = nullptr) + : Data(A.data()), Length(A.size()) {} + + /// Construct an ArrayRef from a SmallVector. This is + /// templated in order to avoid instantiating SmallVectorTemplateCommon + /// whenever we copy-construct an ArrayRef. + template + /*implicit*/ ArrayRef( + const SmallVectorTemplateCommon &Vec, + std::enable_if_t::value> * = + nullptr) + : Data(Vec.data()), Length(Vec.size()) {} + + /// Construct an ArrayRef from std::vector. This uses SFINAE + /// to ensure that only vectors of pointers can be converted. 
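The -Winit-list-lifetime pragma above exists because the std::initializer_list constructor does not extend the lifetime of the list's backing array. A short illustration of the safe and the dangling patterns, under the same include-path assumptions as the previous sketch:

    #include "llvm/ADT/ArrayRef.h"

    int main() {
      // Safe: the temporary initializer list lives until the end of the full
      // expression, so reading through the ArrayRef here is fine.
      int first = llvm::ArrayRef<int>({10, 20, 30}).front();

      // Dangling (what the warning is about): the backing array dies at the end
      // of this declaration, so 'bad' would point at dead storage afterwards.
      // llvm::ArrayRef<int> bad = {10, 20, 30};   // do not do this

      return first == 10 ? 0 : 1;
    }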
+ template + ArrayRef(const std::vector &Vec, + std::enable_if_t::value> + * = 0) + : Data(Vec.data()), Length(Vec.size()) {} + + /// @} + /// @name Simple Operations + /// @{ + + iterator begin() const { return Data; } + iterator end() const { return Data + Length; } + + reverse_iterator rbegin() const { return reverse_iterator(end()); } + reverse_iterator rend() const { return reverse_iterator(begin()); } + + /// empty - Check if the array is empty. + bool empty() const { return Length == 0; } + + const T *data() const { return Data; } + + /// size - Get the array size. + size_t size() const { return Length; } + + /// front - Get the first element. + const T &front() const { + assert(!empty()); + return Data[0]; + } + + /// back - Get the last element. + const T &back() const { + assert(!empty()); + return Data[Length-1]; + } + + // copy - Allocate copy in Allocator and return ArrayRef to it. + template ArrayRef copy(Allocator &A) { + T *Buff = A.template Allocate(Length); + std::uninitialized_copy(begin(), end(), Buff); + return ArrayRef(Buff, Length); + } + + /// equals - Check for element-wise equality. + bool equals(ArrayRef RHS) const { + if (Length != RHS.Length) + return false; + return std::equal(begin(), end(), RHS.begin()); + } + + /// slice(n, m) - Chop off the first N elements of the array, and keep M + /// elements in the array. + ArrayRef slice(size_t N, size_t M) const { + assert(N+M <= size() && "Invalid specifier"); + return ArrayRef(data()+N, M); + } + + /// slice(n) - Chop off the first N elements of the array. + ArrayRef slice(size_t N) const { return slice(N, size() - N); } + + /// Drop the first \p N elements of the array. + ArrayRef drop_front(size_t N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return slice(N, size() - N); + } + + /// Drop the last \p N elements of the array. + ArrayRef drop_back(size_t N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return slice(0, size() - N); + } + + /// Return a copy of *this with the first N elements satisfying the + /// given predicate removed. + template ArrayRef drop_while(PredicateT Pred) const { + return ArrayRef(find_if_not(*this, Pred), end()); + } + + /// Return a copy of *this with the first N elements not satisfying + /// the given predicate removed. + template ArrayRef drop_until(PredicateT Pred) const { + return ArrayRef(find_if(*this, Pred), end()); + } + + /// Return a copy of *this with only the first \p N elements. + ArrayRef take_front(size_t N = 1) const { + if (N >= size()) + return *this; + return drop_back(size() - N); + } + + /// Return a copy of *this with only the last \p N elements. + ArrayRef take_back(size_t N = 1) const { + if (N >= size()) + return *this; + return drop_front(size() - N); + } + + /// Return the first N elements of this Array that satisfy the given + /// predicate. + template ArrayRef take_while(PredicateT Pred) const { + return ArrayRef(begin(), find_if_not(*this, Pred)); + } + + /// Return the first N elements of this Array that don't satisfy the + /// given predicate. + template ArrayRef take_until(PredicateT Pred) const { + return ArrayRef(begin(), find_if(*this, Pred)); + } + + /// @} + /// @name Operator Overloads + /// @{ + const T &operator[](size_t Index) const { + assert(Index < Length && "Invalid index!"); + return Data[Index]; + } + + /// Disallow accidental assignment from a temporary. 
+ /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t::value, ArrayRef> & + operator=(U &&Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t::value, ArrayRef> & + operator=(std::initializer_list) = delete; + + /// @} + /// @name Expensive Operations + /// @{ + std::vector vec() const { + return std::vector(Data, Data+Length); + } + + /// @} + /// @name Conversion operators + /// @{ + operator std::vector() const { + return std::vector(Data, Data+Length); + } + + /// @} + }; + + /// MutableArrayRef - Represent a mutable reference to an array (0 or more + /// elements consecutively in memory), i.e. a start pointer and a length. It + /// allows various APIs to take and modify consecutive elements easily and + /// conveniently. + /// + /// This class does not own the underlying data, it is expected to be used in + /// situations where the data resides in some other buffer, whose lifetime + /// extends past that of the MutableArrayRef. For this reason, it is not in + /// general safe to store a MutableArrayRef. + /// + /// This is intended to be trivially copyable, so it should be passed by + /// value. + template + class LLVM_NODISCARD MutableArrayRef : public ArrayRef { + public: + using iterator = T *; + using reverse_iterator = std::reverse_iterator; + + /// Construct an empty MutableArrayRef. + /*implicit*/ MutableArrayRef() = default; + + /// Construct an empty MutableArrayRef from None. + /*implicit*/ MutableArrayRef(NoneType) : ArrayRef() {} + + /// Construct a MutableArrayRef from a single element. + /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef(OneElt) {} + + /// Construct a MutableArrayRef from a pointer and length. + /*implicit*/ MutableArrayRef(T *data, size_t length) + : ArrayRef(data, length) {} + + /// Construct a MutableArrayRef from a range. + MutableArrayRef(T *begin, T *end) : ArrayRef(begin, end) {} + + /// Construct a MutableArrayRef from a SmallVector. + /*implicit*/ MutableArrayRef(SmallVectorImpl &Vec) + : ArrayRef(Vec) {} + + /// Construct a MutableArrayRef from a std::vector. + /*implicit*/ MutableArrayRef(std::vector &Vec) + : ArrayRef(Vec) {} + + /// Construct a MutableArrayRef from a std::array + template + /*implicit*/ constexpr MutableArrayRef(std::array &Arr) + : ArrayRef(Arr) {} + + /// Construct a MutableArrayRef from a C array. + template + /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef(Arr) {} + + T *data() const { return const_cast(ArrayRef::data()); } + + iterator begin() const { return data(); } + iterator end() const { return data() + this->size(); } + + reverse_iterator rbegin() const { return reverse_iterator(end()); } + reverse_iterator rend() const { return reverse_iterator(begin()); } + + /// front - Get the first element. + T &front() const { + assert(!this->empty()); + return data()[0]; + } + + /// back - Get the last element. + T &back() const { + assert(!this->empty()); + return data()[this->size()-1]; + } + + /// slice(n, m) - Chop off the first N elements of the array, and keep M + /// elements in the array. + MutableArrayRef slice(size_t N, size_t M) const { + assert(N + M <= this->size() && "Invalid specifier"); + return MutableArrayRef(this->data() + N, M); + } + + /// slice(n) - Chop off the first N elements of the array. 
+ MutableArrayRef slice(size_t N) const { + return slice(N, this->size() - N); + } + + /// Drop the first \p N elements of the array. + MutableArrayRef drop_front(size_t N = 1) const { + assert(this->size() >= N && "Dropping more elements than exist"); + return slice(N, this->size() - N); + } + + MutableArrayRef drop_back(size_t N = 1) const { + assert(this->size() >= N && "Dropping more elements than exist"); + return slice(0, this->size() - N); + } + + /// Return a copy of *this with the first N elements satisfying the + /// given predicate removed. + template + MutableArrayRef drop_while(PredicateT Pred) const { + return MutableArrayRef(find_if_not(*this, Pred), end()); + } + + /// Return a copy of *this with the first N elements not satisfying + /// the given predicate removed. + template + MutableArrayRef drop_until(PredicateT Pred) const { + return MutableArrayRef(find_if(*this, Pred), end()); + } + + /// Return a copy of *this with only the first \p N elements. + MutableArrayRef take_front(size_t N = 1) const { + if (N >= this->size()) + return *this; + return drop_back(this->size() - N); + } + + /// Return a copy of *this with only the last \p N elements. + MutableArrayRef take_back(size_t N = 1) const { + if (N >= this->size()) + return *this; + return drop_front(this->size() - N); + } + + /// Return the first N elements of this Array that satisfy the given + /// predicate. + template + MutableArrayRef take_while(PredicateT Pred) const { + return MutableArrayRef(begin(), find_if_not(*this, Pred)); + } + + /// Return the first N elements of this Array that don't satisfy the + /// given predicate. + template + MutableArrayRef take_until(PredicateT Pred) const { + return MutableArrayRef(begin(), find_if(*this, Pred)); + } + + /// @} + /// @name Operator Overloads + /// @{ + T &operator[](size_t Index) const { + assert(Index < this->size() && "Invalid index!"); + return data()[Index]; + } + }; + + /// This is a MutableArrayRef that owns its array. + template class OwningArrayRef : public MutableArrayRef { + public: + OwningArrayRef() = default; + OwningArrayRef(size_t Size) : MutableArrayRef(new T[Size], Size) {} + + OwningArrayRef(ArrayRef Data) + : MutableArrayRef(new T[Data.size()], Data.size()) { + std::copy(Data.begin(), Data.end(), this->begin()); + } + + OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); } + + OwningArrayRef &operator=(OwningArrayRef &&Other) { + delete[] this->data(); + this->MutableArrayRef::operator=(Other); + Other.MutableArrayRef::operator=(MutableArrayRef()); + return *this; + } + + ~OwningArrayRef() { delete[] this->data(); } + }; + + /// @name ArrayRef Convenience constructors + /// @{ + + /// Construct an ArrayRef from a single element. + template + ArrayRef makeArrayRef(const T &OneElt) { + return OneElt; + } + + /// Construct an ArrayRef from a pointer and length. + template + ArrayRef makeArrayRef(const T *data, size_t length) { + return ArrayRef(data, length); + } + + /// Construct an ArrayRef from a range. + template + ArrayRef makeArrayRef(const T *begin, const T *end) { + return ArrayRef(begin, end); + } + + /// Construct an ArrayRef from a SmallVector. + template + ArrayRef makeArrayRef(const SmallVectorImpl &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a SmallVector. + template + ArrayRef makeArrayRef(const SmallVector &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a std::vector. 
+ template + ArrayRef makeArrayRef(const std::vector &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a std::array. + template + ArrayRef makeArrayRef(const std::array &Arr) { + return Arr; + } + + /// Construct an ArrayRef from an ArrayRef (no-op) (const) + template ArrayRef makeArrayRef(const ArrayRef &Vec) { + return Vec; + } + + /// Construct an ArrayRef from an ArrayRef (no-op) + template ArrayRef &makeArrayRef(ArrayRef &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a C array. + template + ArrayRef makeArrayRef(const T (&Arr)[N]) { + return ArrayRef(Arr); + } + + /// Construct a MutableArrayRef from a single element. + template + MutableArrayRef makeMutableArrayRef(T &OneElt) { + return OneElt; + } + + /// Construct a MutableArrayRef from a pointer and length. + template + MutableArrayRef makeMutableArrayRef(T *data, size_t length) { + return MutableArrayRef(data, length); + } + + /// @} + /// @name ArrayRef Comparison Operators + /// @{ + + template + inline bool operator==(ArrayRef LHS, ArrayRef RHS) { + return LHS.equals(RHS); + } + + template + inline bool operator==(SmallVectorImpl &LHS, ArrayRef RHS) { + return ArrayRef(LHS).equals(RHS); + } + + template + inline bool operator!=(ArrayRef LHS, ArrayRef RHS) { + return !(LHS == RHS); + } + + template + inline bool operator!=(SmallVectorImpl &LHS, ArrayRef RHS) { + return !(LHS == RHS); + } + + /// @} + + template hash_code hash_value(ArrayRef S) { + return hash_combine_range(S.begin(), S.end()); + } + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_ARRAYREF_H diff --git a/stdlib/include/llvm/ADT/DenseMap.h b/stdlib/include/llvm/ADT/DenseMap.h new file mode 100644 index 0000000000000..d09a747bf8a40 --- /dev/null +++ b/stdlib/include/llvm/ADT/DenseMap.h @@ -0,0 +1,1221 @@ +//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseMap class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSEMAP_H +#define LLVM_ADT_DENSEMAP_H + +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/Support/AlignOf.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/MemAlloc.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +namespace detail { + +// We extend a pair to allow users to override the bucket type with their own +// implementation without requiring two members. 
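Both vendored headers wrap their contents in inline namespace __swift { inline namespace __runtime { ... }}, presumably so the runtime's private copies cannot collide at link time with the compiler's own LLVM while existing code keeps spelling the types llvm::Whatever. A minimal illustration of how that wrapping behaves; the struct name here is made up for the demo:

    #include <cassert>

    // Mimic the wrapping used by the vendored headers above.
    inline namespace __swift { inline namespace __runtime {
    namespace llvm {
    struct Marker { int value = 7; };
    } // namespace llvm
    }} // namespace __swift::__runtime

    int main() {
      // Inline namespaces are transparent to qualified lookup, so callers can
      // still write llvm::Marker ...
      llvm::Marker a;
      // ... while the fully qualified (and mangled) name carries
      // __swift::__runtime, keeping it distinct from a real LLVM symbol.
      __swift::__runtime::llvm::Marker b;
      assert(a.value == b.value);
      return 0;
    }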
+template +struct DenseMapPair : public std::pair { + using std::pair::pair; + + KeyT &getFirst() { return std::pair::first; } + const KeyT &getFirst() const { return std::pair::first; } + ValueT &getSecond() { return std::pair::second; } + const ValueT &getSecond() const { return std::pair::second; } +}; + +} // end namespace detail + +template , + typename Bucket = llvm::detail::DenseMapPair, + bool IsConst = false> +class DenseMapIterator; + +template +class DenseMapBase { + template + using const_arg_type_t = typename const_pointer_or_const_ref::type; + +public: + using size_type = unsigned; + using key_type = KeyT; + using mapped_type = ValueT; + using value_type = BucketT; + + using iterator = DenseMapIterator; + using const_iterator = + DenseMapIterator; + + inline iterator begin() { + // When the map is empty, avoid the overhead of advancing/retreating past + // empty buckets. + if (empty()) + return end(); + return makeIterator(getBuckets(), getBucketsEnd()); + } + inline iterator end() { + return makeIterator(getBucketsEnd(), getBucketsEnd(), true); + } + inline const_iterator begin() const { + if (empty()) + return end(); + return makeConstIterator(getBuckets(), getBucketsEnd()); + } + inline const_iterator end() const { + return makeConstIterator(getBucketsEnd(), getBucketsEnd(), true); + } + + LLVM_NODISCARD bool empty() const { + return getNumEntries() == 0; + } + unsigned size() const { return getNumEntries(); } + + /// Grow the densemap so that it can contain at least \p NumEntries items + /// before resizing again. + void reserve(size_type NumEntries) { + auto NumBuckets = getMinBucketToReserveForEntries(NumEntries); + if (NumBuckets > getNumBuckets()) + grow(NumBuckets); + } + + void clear() { + if (getNumEntries() == 0 && getNumTombstones() == 0) return; + + // If the capacity of the array is huge, and the # elements used is small, + // shrink the array. + if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { + shrink_and_clear(); + return; + } + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + if (std::is_trivially_destructible::value) { + // Use a simpler loop when values don't need destruction. + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) + P->getFirst() = EmptyKey; + } else { + unsigned NumEntries = getNumEntries(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { + if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + P->getSecond().~ValueT(); + --NumEntries; + } + P->getFirst() = EmptyKey; + } + } + assert(NumEntries == 0 && "Node count imbalance!"); + } + setNumEntries(0); + setNumTombstones(0); + } + + /// Return 1 if the specified key is in the map, 0 otherwise. + size_type count(const_arg_type_t Val) const { + const BucketT *TheBucket; + return LookupBucketFor(Val, TheBucket) ? 1 : 0; + } + + iterator find(const_arg_type_t Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + const_iterator find(const_arg_type_t Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeConstIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. 
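A short usage sketch of the container being defined here, exercising operations from this class (including try_emplace and lookup, which appear a little further below); it assumes stdlib/include is on the include path and that the sibling Support headers DenseMap.h pulls in are part of the same patch:

    #include "llvm/ADT/DenseMap.h"

    #include <cassert>
    #include <string>

    int main() {
      llvm::DenseMap<int, std::string> names;

      // try_emplace only constructs the value when the key is missing; the
      // second call is a no-op and reports that nothing was inserted.
      names.try_emplace(1, "one");
      auto result = names.try_emplace(1, "uno");
      assert(!result.second && result.first->second == "one");

      // find()/count() never insert; lookup() returns a default-constructed
      // value for a missing key.
      assert(names.count(2) == 0);
      assert(names.find(2) == names.end());
      assert(names.lookup(2).empty());
      return 0;
    }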
+ template + iterator find_as(const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + template + const_iterator find_as(const LookupKeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeConstIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueT lookup(const_arg_type_t Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return TheBucket->getSecond(); + return ValueT(); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + std::pair insert(const std::pair &KV) { + return try_emplace(KV.first, KV.second); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + std::pair insert(std::pair &&KV) { + return try_emplace(std::move(KV.first), std::move(KV.second)); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + std::pair try_emplace(KeyT &&Key, Ts &&... Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = + InsertIntoBucket(TheBucket, std::move(Key), std::forward(Args)...); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), true); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + std::pair try_emplace(const KeyT &Key, Ts &&... Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(TheBucket, Key, std::forward(Args)...); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), true); + } + + /// Alternate version of insert() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + std::pair insert_as(std::pair &&KV, + const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), *this, true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first), + std::move(KV.second), Val); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), *this, true), + true); + } + + /// insert - Range insertion of pairs. + template + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } + + bool erase(const KeyT &Val) { + BucketT *TheBucket; + if (!LookupBucketFor(Val, TheBucket)) + return false; // not in map. 
+ + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + return true; + } + void erase(iterator I) { + BucketT *TheBucket = &*I; + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + } + + value_type& FindAndConstruct(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, Key); + } + + ValueT &operator[](const KeyT &Key) { + return FindAndConstruct(Key).second; + } + + value_type& FindAndConstruct(KeyT &&Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, std::move(Key)); + } + + ValueT &operator[](KeyT &&Key) { + return FindAndConstruct(std::move(Key)).second; + } + + /// isPointerIntoBucketsArray - Return true if the specified pointer points + /// somewhere into the DenseMap's array of buckets (i.e. either to a key or + /// value in the DenseMap). + bool isPointerIntoBucketsArray(const void *Ptr) const { + return Ptr >= getBuckets() && Ptr < getBucketsEnd(); + } + + /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets + /// array. In conjunction with the previous method, this can be used to + /// determine whether an insertion caused the DenseMap to reallocate. + const void *getPointerIntoBucketsArray() const { return getBuckets(); } + +protected: + DenseMapBase() = default; + + void destroyAll() { + if (getNumBuckets() == 0) // Nothing to do. + return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) + P->getSecond().~ValueT(); + P->getFirst().~KeyT(); + } + } + + void initEmpty() { + setNumEntries(0); + setNumTombstones(0); + + assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && + "# initial buckets must be a power of two!"); + const KeyT EmptyKey = getEmptyKey(); + for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) + ::new (&B->getFirst()) KeyT(EmptyKey); + } + + /// Returns the number of buckets to allocate to ensure that the DenseMap can + /// accommodate \p NumEntries without need to grow(). + unsigned getMinBucketToReserveForEntries(unsigned NumEntries) { + // Ensure that "NumEntries * 4 < NumBuckets * 3" + if (NumEntries == 0) + return 0; + // +1 is required because of the strict equality. + // For example if NumEntries is 48, we need to return 401. + return NextPowerOf2(NumEntries * 4 / 3 + 1); + } + + void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { + initEmpty(); + + // Insert all the old elements. + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { + if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); + (void)FoundVal; // silence warning. + assert(!FoundVal && "Key already in new map?"); + DestBucket->getFirst() = std::move(B->getFirst()); + ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond())); + incrementNumEntries(); + + // Free the value. 
+ B->getSecond().~ValueT(); + } + B->getFirst().~KeyT(); + } + } + + template + void copyFrom( + const DenseMapBase &other) { + assert(&other != this); + assert(getNumBuckets() == other.getNumBuckets()); + + setNumEntries(other.getNumEntries()); + setNumTombstones(other.getNumTombstones()); + + if (is_trivially_copyable::value && + is_trivially_copyable::value) + memcpy(reinterpret_cast(getBuckets()), other.getBuckets(), + getNumBuckets() * sizeof(BucketT)); + else + for (size_t i = 0; i < getNumBuckets(); ++i) { + ::new (&getBuckets()[i].getFirst()) + KeyT(other.getBuckets()[i].getFirst()); + if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && + !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) + ::new (&getBuckets()[i].getSecond()) + ValueT(other.getBuckets()[i].getSecond()); + } + } + + static unsigned getHashValue(const KeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + template + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + static const KeyT getEmptyKey() { + static_assert(std::is_base_of::value, + "Must pass the derived type to this template!"); + return KeyInfoT::getEmptyKey(); + } + + static const KeyT getTombstoneKey() { + return KeyInfoT::getTombstoneKey(); + } + +private: + iterator makeIterator(BucketT *P, BucketT *E, bool NoAdvance=false) { + return iterator(P, E, NoAdvance); + } + + const_iterator makeConstIterator(const BucketT *P, const BucketT *E, + const bool NoAdvance=false) const { + return const_iterator(P, E, NoAdvance); + } + + unsigned getNumEntries() const { + return static_cast(this)->getNumEntries(); + } + + void setNumEntries(unsigned Num) { + static_cast(this)->setNumEntries(Num); + } + + void incrementNumEntries() { + setNumEntries(getNumEntries() + 1); + } + + void decrementNumEntries() { + setNumEntries(getNumEntries() - 1); + } + + unsigned getNumTombstones() const { + return static_cast(this)->getNumTombstones(); + } + + void setNumTombstones(unsigned Num) { + static_cast(this)->setNumTombstones(Num); + } + + void incrementNumTombstones() { + setNumTombstones(getNumTombstones() + 1); + } + + void decrementNumTombstones() { + setNumTombstones(getNumTombstones() - 1); + } + + const BucketT *getBuckets() const { + return static_cast(this)->getBuckets(); + } + + BucketT *getBuckets() { + return static_cast(this)->getBuckets(); + } + + unsigned getNumBuckets() const { + return static_cast(this)->getNumBuckets(); + } + + BucketT *getBucketsEnd() { + return getBuckets() + getNumBuckets(); + } + + const BucketT *getBucketsEnd() const { + return getBuckets() + getNumBuckets(); + } + + void grow(unsigned AtLeast) { + static_cast(this)->grow(AtLeast); + } + + void shrink_and_clear() { + static_cast(this)->shrink_and_clear(); + } + + template + BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key, + ValueArgs &&... 
Values) { + TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket); + + TheBucket->getFirst() = std::forward(Key); + ::new (&TheBucket->getSecond()) ValueT(std::forward(Values)...); + return TheBucket; + } + + template + BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key, + ValueT &&Value, LookupKeyT &Lookup) { + TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket); + + TheBucket->getFirst() = std::move(Key); + ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); + return TheBucket; + } + + template + BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup, + BucketT *TheBucket) { + // If the load of the hash table is more than 3/4, or if fewer than 1/8 of + // the buckets are empty (meaning that many are filled with tombstones), + // grow the table. + // + // The later case is tricky. For example, if we had one empty bucket with + // tons of tombstones, failing lookups (e.g. for insertion) would have to + // probe almost the entire table until it found the empty bucket. If the + // table completely filled with tombstones, no lookup would ever succeed, + // causing infinite loops in lookup. + unsigned NewNumEntries = getNumEntries() + 1; + unsigned NumBuckets = getNumBuckets(); + if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) { + this->grow(NumBuckets * 2); + LookupBucketFor(Lookup, TheBucket); + NumBuckets = getNumBuckets(); + } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <= + NumBuckets/8)) { + this->grow(NumBuckets); + LookupBucketFor(Lookup, TheBucket); + } + assert(TheBucket); + + // Only update the state after we've grown our bucket space appropriately + // so that when growing buckets we have self-consistent entry count. + incrementNumEntries(); + + // If we are writing over a tombstone, remember this. + const KeyT EmptyKey = getEmptyKey(); + if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey)) + decrementNumTombstones(); + + return TheBucket; + } + + /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in + /// FoundBucket. If the bucket contains the key and a value, this returns + /// true, otherwise it returns a bucket with an empty marker or tombstone and + /// returns false. + template + bool LookupBucketFor(const LookupKeyT &Val, + const BucketT *&FoundBucket) const { + const BucketT *BucketsPtr = getBuckets(); + const unsigned NumBuckets = getNumBuckets(); + + if (NumBuckets == 0) { + FoundBucket = nullptr; + return false; + } + + // FoundTombstone - Keep track of whether we find a tombstone while probing. + const BucketT *FoundTombstone = nullptr; + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + assert(!KeyInfoT::isEqual(Val, EmptyKey) && + !KeyInfoT::isEqual(Val, TombstoneKey) && + "Empty/Tombstone value shouldn't be inserted into map!"); + + unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); + unsigned ProbeAmt = 1; + while (true) { + const BucketT *ThisBucket = BucketsPtr + BucketNo; + // Found Val's bucket? If so, return it. + if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) { + FoundBucket = ThisBucket; + return true; + } + + // If we found an empty bucket, the key doesn't exist in the set. + // Insert it and return the default value. + if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) { + // If we've already seen a tombstone while probing, fill it in instead + // of the empty bucket we eventually probed to. + FoundBucket = FoundTombstone ? 
FoundTombstone : ThisBucket; + return false; + } + + // If this is a tombstone, remember it. If Val ends up not in the map, we + // prefer to return it than something that would require more probing. + if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) && + !FoundTombstone) + FoundTombstone = ThisBucket; // Remember the first tombstone found. + + // Otherwise, it's a hash collision or a tombstone, continue quadratic + // probing. + BucketNo += ProbeAmt++; + BucketNo &= (NumBuckets-1); + } + } + + template + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { + const BucketT *ConstFoundBucket; + bool Result = const_cast(this) + ->LookupBucketFor(Val, ConstFoundBucket); + FoundBucket = const_cast(ConstFoundBucket); + return Result; + } + +public: + /// Return the approximate size (in bytes) of the actual map. + /// This is just the raw memory used by DenseMap. + /// If entries are pointers to objects, the size of the referenced objects + /// are not included. + size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } +}; + +/// Equality comparison for DenseMap. +/// +/// Iterates over elements of LHS confirming that each (key, value) pair in LHS +/// is also in RHS, and that no additional pairs are in RHS. +/// Equivalent to N calls to RHS.find and N value comparisons. Amortized +/// complexity is linear, worst case is O(N^2) (if every hash collides). +template +bool operator==( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + if (LHS.size() != RHS.size()) + return false; + + for (auto &KV : LHS) { + auto I = RHS.find(KV.first); + if (I == RHS.end() || I->second != KV.second) + return false; + } + + return true; +} + +/// Inequality comparison for DenseMap. +/// +/// Equivalent to !(LHS == RHS). See operator== for performance notes. +template +bool operator!=( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + return !(LHS == RHS); +} + +template , + typename BucketT = llvm::detail::DenseMapPair> +class DenseMap : public DenseMapBase, + KeyT, ValueT, KeyInfoT, BucketT> { + friend class DenseMapBase; + + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. 
+ using BaseT = DenseMapBase; + + BucketT *Buckets; + unsigned NumEntries; + unsigned NumTombstones; + unsigned NumBuckets; + +public: + /// Create a DenseMap wth an optional \p InitialReserve that guarantee that + /// this number of elements can be inserted in the map without grow() + explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); } + + DenseMap(const DenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + DenseMap(DenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + DenseMap(const InputIt &I, const InputIt &E) { + init(std::distance(I, E)); + this->insert(I, E); + } + + DenseMap(std::initializer_list Vals) { + init(Vals.size()); + this->insert(Vals.begin(), Vals.end()); + } + + ~DenseMap() { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + } + + void swap(DenseMap& RHS) { + std::swap(Buckets, RHS.Buckets); + std::swap(NumEntries, RHS.NumEntries); + std::swap(NumTombstones, RHS.NumTombstones); + std::swap(NumBuckets, RHS.NumBuckets); + } + + DenseMap& operator=(const DenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + DenseMap& operator=(DenseMap &&other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + init(0); + swap(other); + return *this; + } + + void copyFrom(const DenseMap& other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + if (allocateBuckets(other.NumBuckets)) { + this->BaseT::copyFrom(other); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void init(unsigned InitNumEntries) { + auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); + if (allocateBuckets(InitBuckets)) { + this->BaseT::initEmpty(); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void grow(unsigned AtLeast) { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + allocateBuckets(std::max(64, static_cast(NextPowerOf2(AtLeast-1)))); + assert(Buckets); + if (!OldBuckets) { + this->BaseT::initEmpty(); + return; + } + + this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); + + // Free the old table. + deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets, + alignof(BucketT)); + } + + void shrink_and_clear() { + unsigned OldNumBuckets = NumBuckets; + unsigned OldNumEntries = NumEntries; + this->destroyAll(); + + // Reduce the number of buckets. 
+ unsigned NewNumBuckets = 0; + if (OldNumEntries) + NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); + if (NewNumBuckets == NumBuckets) { + this->BaseT::initEmpty(); + return; + } + + deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets, + alignof(BucketT)); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + + void setNumEntries(unsigned Num) { + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + BucketT *getBuckets() const { + return Buckets; + } + + unsigned getNumBuckets() const { + return NumBuckets; + } + + bool allocateBuckets(unsigned Num) { + NumBuckets = Num; + if (NumBuckets == 0) { + Buckets = nullptr; + return false; + } + + Buckets = static_cast( + allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT))); + return true; + } +}; + +template , + typename BucketT = llvm::detail::DenseMapPair> +class SmallDenseMap + : public DenseMapBase< + SmallDenseMap, KeyT, + ValueT, KeyInfoT, BucketT> { + friend class DenseMapBase; + + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + using BaseT = DenseMapBase; + + static_assert(isPowerOf2_64(InlineBuckets), + "InlineBuckets must be a power of 2."); + + unsigned Small : 1; + unsigned NumEntries : 31; + unsigned NumTombstones; + + struct LargeRep { + BucketT *Buckets; + unsigned NumBuckets; + }; + + /// A "union" of an inline bucket array and the struct representing + /// a large bucket. This union will be discriminated by the 'Small' bit. + AlignedCharArrayUnion storage; + +public: + explicit SmallDenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + SmallDenseMap(const SmallDenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + SmallDenseMap(SmallDenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + SmallDenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~SmallDenseMap() { + this->destroyAll(); + deallocateBuckets(); + } + + void swap(SmallDenseMap& RHS) { + unsigned TmpNumEntries = RHS.NumEntries; + RHS.NumEntries = NumEntries; + NumEntries = TmpNumEntries; + std::swap(NumTombstones, RHS.NumTombstones); + + const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + if (Small && RHS.Small) { + // If we're swapping inline bucket arrays, we have to cope with some of + // the tricky bits of DenseMap's storage system: the buckets are not + // fully initialized. Thus we swap every key, but we may have + // a one-directional move of the value. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *LHSB = &getInlineBuckets()[i], + *RHSB = &RHS.getInlineBuckets()[i]; + bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey)); + bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey)); + if (hasLHSValue && hasRHSValue) { + // Swap together if we can... + std::swap(*LHSB, *RHSB); + continue; + } + // Swap separately and handle any assymetry. 
+ std::swap(LHSB->getFirst(), RHSB->getFirst()); + if (hasLHSValue) { + ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond())); + LHSB->getSecond().~ValueT(); + } else if (hasRHSValue) { + ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond())); + RHSB->getSecond().~ValueT(); + } + } + return; + } + if (!Small && !RHS.Small) { + std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); + std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); + return; + } + + SmallDenseMap &SmallSide = Small ? *this : RHS; + SmallDenseMap &LargeSide = Small ? RHS : *this; + + // First stash the large side's rep and move the small side across. + LargeRep TmpRep = std::move(*LargeSide.getLargeRep()); + LargeSide.getLargeRep()->~LargeRep(); + LargeSide.Small = true; + // This is similar to the standard move-from-old-buckets, but the bucket + // count hasn't actually rotated in this case. So we have to carefully + // move construct the keys and values into their new locations, but there + // is no need to re-hash things. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *NewB = &LargeSide.getInlineBuckets()[i], + *OldB = &SmallSide.getInlineBuckets()[i]; + ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst())); + OldB->getFirst().~KeyT(); + if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) { + ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond())); + OldB->getSecond().~ValueT(); + } + } + + // The hard part of moving the small buckets across is done, just move + // the TmpRep into its new home. + SmallSide.Small = false; + new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep)); + } + + SmallDenseMap& operator=(const SmallDenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + SmallDenseMap& operator=(SmallDenseMap &&other) { + this->destroyAll(); + deallocateBuckets(); + init(0); + swap(other); + return *this; + } + + void copyFrom(const SmallDenseMap& other) { + this->destroyAll(); + deallocateBuckets(); + Small = true; + if (other.getNumBuckets() > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); + } + this->BaseT::copyFrom(other); + } + + void init(unsigned InitBuckets) { + Small = true; + if (InitBuckets > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); + } + this->BaseT::initEmpty(); + } + + void grow(unsigned AtLeast) { + if (AtLeast > InlineBuckets) + AtLeast = std::max(64, NextPowerOf2(AtLeast-1)); + + if (Small) { + // First move the inline buckets into a temporary storage. + AlignedCharArrayUnion TmpStorage; + BucketT *TmpBegin = reinterpret_cast(TmpStorage.buffer); + BucketT *TmpEnd = TmpBegin; + + // Loop over the buckets, moving non-empty, non-tombstones into the + // temporary storage. Have the loop move the TmpEnd forward as it goes. 
+ const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && + "Too many inline buckets!"); + ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst())); + ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond())); + ++TmpEnd; + P->getSecond().~ValueT(); + } + P->getFirst().~KeyT(); + } + + // AtLeast == InlineBuckets can happen if there are many tombstones, + // and grow() is used to remove them. Usually we always switch to the + // large rep here. + if (AtLeast > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + this->moveFromOldBuckets(TmpBegin, TmpEnd); + return; + } + + LargeRep OldRep = std::move(*getLargeRep()); + getLargeRep()->~LargeRep(); + if (AtLeast <= InlineBuckets) { + Small = true; + } else { + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + + this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); + + // Free the old table. + deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets, + alignof(BucketT)); + } + + void shrink_and_clear() { + unsigned OldSize = this->size(); + this->destroyAll(); + + // Reduce the number of buckets. + unsigned NewNumBuckets = 0; + if (OldSize) { + NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); + if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) + NewNumBuckets = 64; + } + if ((Small && NewNumBuckets <= InlineBuckets) || + (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { + this->BaseT::initEmpty(); + return; + } + + deallocateBuckets(); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + + void setNumEntries(unsigned Num) { + // NumEntries is hardcoded to be 31 bits wide. + assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries"); + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + const BucketT *getInlineBuckets() const { + assert(Small); + // Note that this cast does not violate aliasing rules as we assert that + // the memory's dynamic type is the small, inline bucket buffer, and the + // 'storage.buffer' static type is 'char *'. + return reinterpret_cast(storage.buffer); + } + + BucketT *getInlineBuckets() { + return const_cast( + const_cast(this)->getInlineBuckets()); + } + + const LargeRep *getLargeRep() const { + assert(!Small); + // Note, same rule about aliasing as with getInlineBuckets. + return reinterpret_cast(storage.buffer); + } + + LargeRep *getLargeRep() { + return const_cast( + const_cast(this)->getLargeRep()); + } + + const BucketT *getBuckets() const { + return Small ? getInlineBuckets() : getLargeRep()->Buckets; + } + + BucketT *getBuckets() { + return const_cast( + const_cast(this)->getBuckets()); + } + + unsigned getNumBuckets() const { + return Small ? 
InlineBuckets : getLargeRep()->NumBuckets; + } + + void deallocateBuckets() { + if (Small) + return; + + deallocate_buffer(getLargeRep()->Buckets, + sizeof(BucketT) * getLargeRep()->NumBuckets, + alignof(BucketT)); + getLargeRep()->~LargeRep(); + } + + LargeRep allocateBuckets(unsigned Num) { + assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); + LargeRep Rep = {static_cast(allocate_buffer( + sizeof(BucketT) * Num, alignof(BucketT))), + Num}; + return Rep; + } +}; + +template +class DenseMapIterator { + friend class DenseMapIterator; + friend class DenseMapIterator; + + using ConstIterator = DenseMapIterator; + +public: + using difference_type = ptrdiff_t; + using value_type = + typename std::conditional::type; + using pointer = value_type *; + using reference = value_type &; + using iterator_category = std::forward_iterator_tag; + +private: + pointer Ptr = nullptr; + pointer End = nullptr; + +public: + DenseMapIterator() = default; + + DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) + : Ptr(Pos), End(E) { + if (NoAdvance) return; + AdvancePastEmptyBuckets(); + } + + // Converting ctor from non-const iterators to const iterators. SFINAE'd out + // for const iterator destinations so it doesn't end up as a user defined copy + // constructor. + template > + DenseMapIterator( + const DenseMapIterator &I) + : Ptr(I.Ptr), End(I.End) {} + + reference operator*() const { + assert(Ptr != End && "dereferencing end() iterator"); + return *Ptr; + } + pointer operator->() const { + assert(Ptr != End && "dereferencing end() iterator"); + return Ptr; + } + + bool operator==(const ConstIterator &RHS) const { + return Ptr == RHS.Ptr; + } + bool operator!=(const ConstIterator &RHS) const { + return Ptr != RHS.Ptr; + } + + inline DenseMapIterator& operator++() { // Preincrement + assert(Ptr != End && "incrementing end() iterator"); + ++Ptr; + AdvancePastEmptyBuckets(); + return *this; + } + DenseMapIterator operator++(int) { // Postincrement + DenseMapIterator tmp = *this; ++*this; return tmp; + } + +private: + void AdvancePastEmptyBuckets() { + assert(Ptr <= End); + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) || + KeyInfoT::isEqual(Ptr->getFirst(), Tombstone))) + ++Ptr; + } + + void RetreatPastEmptyBuckets() { + assert(Ptr >= End); + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) || + KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone))) + --Ptr; + } +}; + +template +inline size_t capacity_in_bytes(const DenseMap &X) { + return X.getMemorySize(); +} + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_DENSEMAP_H diff --git a/stdlib/include/llvm/ADT/DenseMapInfo.h b/stdlib/include/llvm/ADT/DenseMapInfo.h new file mode 100644 index 0000000000000..f648fb3d5d6ae --- /dev/null +++ b/stdlib/include/llvm/ADT/DenseMapInfo.h @@ -0,0 +1,354 @@ +//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines DenseMapInfo traits for DenseMap. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+namespace detail {
+
+/// Simplistic combination of 32-bit hash values into 32-bit hash values.
+static inline unsigned combineHashValue(unsigned a, unsigned b) {
+  uint64_t key = (uint64_t)a << 32 | (uint64_t)b;
+  key += ~(key << 32);
+  key ^= (key >> 22);
+  key += ~(key << 13);
+  key ^= (key >> 8);
+  key += (key << 3);
+  key ^= (key >> 15);
+  key += ~(key << 27);
+  key ^= (key >> 31);
+  return (unsigned)key;
+}
+
+} // end namespace detail
+
+template <typename T>
+struct DenseMapInfo {
+  //static inline T getEmptyKey();
+  //static inline T getTombstoneKey();
+  //static unsigned getHashValue(const T &Val);
+  //static bool isEqual(const T &LHS, const T &RHS);
+};
+
+// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
+// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
+// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
+// declared key types. Assume that no pointer key type requires more than 4096
+// bytes of alignment.
+template <typename T>
+struct DenseMapInfo<T *> {
+  // The following should hold, but it would require T to be complete:
+  // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+  //               "DenseMap does not support pointer keys requiring more than "
+  //               "Log2MaxAlign bits of alignment");
+  static constexpr uintptr_t Log2MaxAlign = 12;
+
+  static inline T* getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= Log2MaxAlign;
+    return reinterpret_cast<T *>(Val);
+  }
+
+  static inline T* getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= Log2MaxAlign;
+    return reinterpret_cast<T *>(Val);
+  }
+
+  static unsigned getHashValue(const T *PtrVal) {
+    return (unsigned((uintptr_t)PtrVal) >> 4) ^
+           (unsigned((uintptr_t)PtrVal) >> 9);
+  }
+
+  static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+  static inline char getEmptyKey() { return ~0; }
+  static inline char getTombstoneKey() { return ~0 - 1; }
+  static unsigned getHashValue(const char& Val) { return Val * 37U; }
+
+  static bool isEqual(const char &LHS, const char &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned chars.
+template <> struct DenseMapInfo<unsigned char> {
+  static inline unsigned char getEmptyKey() { return ~0; }
+  static inline unsigned char getTombstoneKey() { return ~0 - 1; }
+  static unsigned getHashValue(const unsigned char &Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned char &LHS, const unsigned char &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <> struct DenseMapInfo<unsigned short> {
+  static inline unsigned short getEmptyKey() { return 0xFFFF; }
+  static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+  static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+  static inline unsigned getEmptyKey() { return ~0U; }
+  static inline unsigned getTombstoneKey() { return ~0U - 1; }
+  static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+
+  static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+  static inline unsigned long getEmptyKey() { return ~0UL; }
+  static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+  static unsigned getHashValue(const unsigned long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+  static inline unsigned long long getEmptyKey() { return ~0ULL; }
+  static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+  static unsigned getHashValue(const unsigned long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const unsigned long long& LHS,
+                      const unsigned long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for shorts.
+template <> struct DenseMapInfo<short> {
+  static inline short getEmptyKey() { return 0x7FFF; }
+  static inline short getTombstoneKey() { return -0x7FFF - 1; }
+  static unsigned getHashValue(const short &Val) { return Val * 37U; }
+  static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+  static inline int getEmptyKey() { return 0x7fffffff; }
+  static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+  static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+
+  static bool isEqual(const int& LHS, const int& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+  static inline long getEmptyKey() {
+    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+  }
+
+  static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+  static unsigned getHashValue(const long& Val) {
+    return (unsigned)(Val * 37UL);
+  }
+
+  static bool isEqual(const long& LHS, const long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+  static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+  static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+
+  static unsigned getHashValue(const long long& Val) {
+    return (unsigned)(Val * 37ULL);
+  }
+
+  static bool isEqual(const long long& LHS,
+                      const long long& RHS) {
+    return LHS == RHS;
+  }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
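Before the pair, tuple, StringRef, and ArrayRef specializations that follow, here is an illustrative sketch (not part of the vendored header) of how client code can extend this trait for its own key type. The Coord struct and its fields are invented for the example; the sentinel keys simply reuse the int sentinels defined above, and the hash mixes the members with detail::combineHashValue, the same way the pair specialization below does.

#include "llvm/ADT/DenseMapInfo.h"

// Hypothetical user-defined key type (illustration only).
struct Coord {
  int X;
  int Y;
};

inline namespace __swift { inline namespace __runtime {
namespace llvm {

template <> struct DenseMapInfo<Coord> {
  // Sentinel keys must be values that never occur as real keys; here they
  // reuse the int sentinels for the first member.
  static inline Coord getEmptyKey() {
    return {DenseMapInfo<int>::getEmptyKey(), 0};
  }
  static inline Coord getTombstoneKey() {
    return {DenseMapInfo<int>::getTombstoneKey(), 0};
  }

  static unsigned getHashValue(const Coord &Val) {
    return detail::combineHashValue(DenseMapInfo<int>::getHashValue(Val.X),
                                    DenseMapInfo<int>::getHashValue(Val.Y));
  }

  static bool isEqual(const Coord &LHS, const Coord &RHS) {
    return LHS.X == RHS.X && LHS.Y == RHS.Y;
  }
};

} // namespace llvm
}} // namespace swift::runtime

With such a specialization visible, llvm::DenseMap<Coord, unsigned> and the DenseSet wrapper added later in this patch accept Coord keys directly.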
+template +struct DenseMapInfo> { + using Pair = std::pair; + using FirstInfo = DenseMapInfo; + using SecondInfo = DenseMapInfo; + + static inline Pair getEmptyKey() { + return std::make_pair(FirstInfo::getEmptyKey(), + SecondInfo::getEmptyKey()); + } + + static inline Pair getTombstoneKey() { + return std::make_pair(FirstInfo::getTombstoneKey(), + SecondInfo::getTombstoneKey()); + } + + static unsigned getHashValue(const Pair& PairVal) { + return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first), + SecondInfo::getHashValue(PairVal.second)); + } + + static bool isEqual(const Pair &LHS, const Pair &RHS) { + return FirstInfo::isEqual(LHS.first, RHS.first) && + SecondInfo::isEqual(LHS.second, RHS.second); + } +}; + +// Provide DenseMapInfo for all tuples whose members have info. +template struct DenseMapInfo> { + using Tuple = std::tuple; + + static inline Tuple getEmptyKey() { + return Tuple(DenseMapInfo::getEmptyKey()...); + } + + static inline Tuple getTombstoneKey() { + return Tuple(DenseMapInfo::getTombstoneKey()...); + } + + template + static unsigned getHashValueImpl(const Tuple &values, std::false_type) { + using EltType = typename std::tuple_element::type; + std::integral_constant atEnd; + return detail::combineHashValue( + DenseMapInfo::getHashValue(std::get(values)), + getHashValueImpl(values, atEnd)); + } + + template + static unsigned getHashValueImpl(const Tuple &values, std::true_type) { + return 0; + } + + static unsigned getHashValue(const std::tuple &values) { + std::integral_constant atEnd; + return getHashValueImpl<0>(values, atEnd); + } + + template + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) { + using EltType = typename std::tuple_element::type; + std::integral_constant atEnd; + return DenseMapInfo::isEqual(std::get(lhs), std::get(rhs)) && + isEqualImpl(lhs, rhs, atEnd); + } + + template + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::true_type) { + return true; + } + + static bool isEqual(const Tuple &lhs, const Tuple &rhs) { + std::integral_constant atEnd; + return isEqualImpl<0>(lhs, rhs, atEnd); + } +}; + +// Provide DenseMapInfo for StringRefs. +template <> struct DenseMapInfo { + static inline StringRef getEmptyKey() { + return StringRef(reinterpret_cast(~static_cast(0)), + 0); + } + + static inline StringRef getTombstoneKey() { + return StringRef(reinterpret_cast(~static_cast(1)), + 0); + } + + static unsigned getHashValue(StringRef Val) { + assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!"); + assert(Val.data() != getTombstoneKey().data() && + "Cannot hash the tombstone key!"); + return (unsigned)(hash_value(Val)); + } + + static bool isEqual(StringRef LHS, StringRef RHS) { + if (RHS.data() == getEmptyKey().data()) + return LHS.data() == getEmptyKey().data(); + if (RHS.data() == getTombstoneKey().data()) + return LHS.data() == getTombstoneKey().data(); + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for ArrayRefs. 
+template struct DenseMapInfo> { + static inline ArrayRef getEmptyKey() { + return ArrayRef(reinterpret_cast(~static_cast(0)), + size_t(0)); + } + + static inline ArrayRef getTombstoneKey() { + return ArrayRef(reinterpret_cast(~static_cast(1)), + size_t(0)); + } + + static unsigned getHashValue(ArrayRef Val) { + assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!"); + assert(Val.data() != getTombstoneKey().data() && + "Cannot hash the tombstone key!"); + return (unsigned)(hash_value(Val)); + } + + static bool isEqual(ArrayRef LHS, ArrayRef RHS) { + if (RHS.data() == getEmptyKey().data()) + return LHS.data() == getEmptyKey().data(); + if (RHS.data() == getTombstoneKey().data()) + return LHS.data() == getTombstoneKey().data(); + return LHS == RHS; + } +}; + +template <> struct DenseMapInfo { + static inline hash_code getEmptyKey() { return hash_code(-1); } + static inline hash_code getTombstoneKey() { return hash_code(-2); } + static unsigned getHashValue(hash_code val) { return val; } + static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; } +}; + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_DENSEMAPINFO_H diff --git a/stdlib/include/llvm/ADT/DenseSet.h b/stdlib/include/llvm/ADT/DenseSet.h new file mode 100644 index 0000000000000..ae8109f3c4736 --- /dev/null +++ b/stdlib/include/llvm/ADT/DenseSet.h @@ -0,0 +1,291 @@ +//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseSet and SmallDenseSet classes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSESET_H +#define LLVM_ADT_DENSESET_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +namespace detail { + +struct DenseSetEmpty {}; + +// Use the empty base class trick so we can create a DenseMap where the buckets +// contain only a single item. +template class DenseSetPair : public DenseSetEmpty { + KeyT key; + +public: + KeyT &getFirst() { return key; } + const KeyT &getFirst() const { return key; } + DenseSetEmpty &getSecond() { return *this; } + const DenseSetEmpty &getSecond() const { return *this; } +}; + +/// Base class for DenseSet and DenseSmallSet. +/// +/// MapTy should be either +/// +/// DenseMap> +/// +/// or the equivalent SmallDenseMap type. ValueInfoT must implement the +/// DenseMapInfo "concept". 
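The DenseSetImpl implementation follows. For orientation, here is a minimal usage sketch (illustrative only, not part of the header) of the public DenseSet wrapper defined at the end of this file; the keys are arbitrary and the assertions only exercise the API surface shown below (insert, count, find, erase).

#include "llvm/ADT/DenseSet.h"
#include <cassert>

void denseSetExample() {
  llvm::DenseSet<unsigned> Seen;

  // insert() returns a (iterator, inserted) pair, mirroring std::set.
  bool Inserted = Seen.insert(7).second;
  assert(Inserted);
  Inserted = Seen.insert(7).second;   // inserting the same key again is a no-op
  assert(!Inserted);
  (void)Inserted;

  assert(Seen.count(7) == 1);            // membership query
  assert(Seen.find(8) == Seen.end());    // absent key

  Seen.erase(7);
  assert(Seen.empty());
}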
+template +class DenseSetImpl { + static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT), + "DenseMap buckets unexpectedly large!"); + MapTy TheMap; + + template + using const_arg_type_t = typename const_pointer_or_const_ref::type; + +public: + using key_type = ValueT; + using value_type = ValueT; + using size_type = unsigned; + + explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {} + + template + DenseSetImpl(const InputIt &I, const InputIt &E) + : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) { + insert(I, E); + } + + DenseSetImpl(std::initializer_list Elems) + : DenseSetImpl(PowerOf2Ceil(Elems.size())) { + insert(Elems.begin(), Elems.end()); + } + + bool empty() const { return TheMap.empty(); } + size_type size() const { return TheMap.size(); } + size_t getMemorySize() const { return TheMap.getMemorySize(); } + + /// Grow the DenseSet so that it has at least Size buckets. Will not shrink + /// the Size of the set. + void resize(size_t Size) { TheMap.resize(Size); } + + /// Grow the DenseSet so that it can contain at least \p NumEntries items + /// before resizing again. + void reserve(size_t Size) { TheMap.reserve(Size); } + + void clear() { + TheMap.clear(); + } + + /// Return 1 if the specified key is in the set, 0 otherwise. + size_type count(const_arg_type_t V) const { + return TheMap.count(V); + } + + bool erase(const ValueT &V) { + return TheMap.erase(V); + } + + void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); } + + // Iterators. + + class ConstIterator; + + class Iterator { + typename MapTy::iterator I; + friend class DenseSetImpl; + friend class ConstIterator; + + public: + using difference_type = typename MapTy::iterator::difference_type; + using value_type = ValueT; + using pointer = value_type *; + using reference = value_type &; + using iterator_category = std::forward_iterator_tag; + + Iterator() = default; + Iterator(const typename MapTy::iterator &i) : I(i) {} + + ValueT &operator*() { return I->getFirst(); } + const ValueT &operator*() const { return I->getFirst(); } + ValueT *operator->() { return &I->getFirst(); } + const ValueT *operator->() const { return &I->getFirst(); } + + Iterator& operator++() { ++I; return *this; } + Iterator operator++(int) { auto T = *this; ++I; return T; } + bool operator==(const ConstIterator& X) const { return I == X.I; } + bool operator!=(const ConstIterator& X) const { return I != X.I; } + }; + + class ConstIterator { + typename MapTy::const_iterator I; + friend class DenseSetImpl; + friend class Iterator; + + public: + using difference_type = typename MapTy::const_iterator::difference_type; + using value_type = ValueT; + using pointer = const value_type *; + using reference = const value_type &; + using iterator_category = std::forward_iterator_tag; + + ConstIterator() = default; + ConstIterator(const Iterator &B) : I(B.I) {} + ConstIterator(const typename MapTy::const_iterator &i) : I(i) {} + + const ValueT &operator*() const { return I->getFirst(); } + const ValueT *operator->() const { return &I->getFirst(); } + + ConstIterator& operator++() { ++I; return *this; } + ConstIterator operator++(int) { auto T = *this; ++I; return T; } + bool operator==(const ConstIterator& X) const { return I == X.I; } + bool operator!=(const ConstIterator& X) const { return I != X.I; } + }; + + using iterator = Iterator; + using const_iterator = ConstIterator; + + iterator begin() { return Iterator(TheMap.begin()); } + iterator end() { return Iterator(TheMap.end()); } + + const_iterator begin() const { 
return ConstIterator(TheMap.begin()); } + const_iterator end() const { return ConstIterator(TheMap.end()); } + + iterator find(const_arg_type_t V) { return Iterator(TheMap.find(V)); } + const_iterator find(const_arg_type_t V) const { + return ConstIterator(TheMap.find(V)); + } + + /// Alternative version of find() which allows a different, and possibly less + /// expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type + /// used. + template + iterator find_as(const LookupKeyT &Val) { + return Iterator(TheMap.find_as(Val)); + } + template + const_iterator find_as(const LookupKeyT &Val) const { + return ConstIterator(TheMap.find_as(Val)); + } + + void erase(Iterator I) { return TheMap.erase(I.I); } + void erase(ConstIterator CI) { return TheMap.erase(CI.I); } + + std::pair insert(const ValueT &V) { + detail::DenseSetEmpty Empty; + return TheMap.try_emplace(V, Empty); + } + + std::pair insert(ValueT &&V) { + detail::DenseSetEmpty Empty; + return TheMap.try_emplace(std::move(V), Empty); + } + + /// Alternative version of insert that uses a different (and possibly less + /// expensive) key type. + template + std::pair insert_as(const ValueT &V, + const LookupKeyT &LookupKey) { + return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey); + } + template + std::pair insert_as(ValueT &&V, const LookupKeyT &LookupKey) { + return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey); + } + + // Range insertion of values. + template + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } +}; + +/// Equality comparison for DenseSet. +/// +/// Iterates over elements of LHS confirming that each element is also a member +/// of RHS, and that RHS contains no additional values. +/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst +/// case is O(N^2) (if every hash collides). +template +bool operator==(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { + if (LHS.size() != RHS.size()) + return false; + + for (auto &E : LHS) + if (!RHS.count(E)) + return false; + + return true; +} + +/// Inequality comparison for DenseSet. +/// +/// Equivalent to !(LHS == RHS). See operator== for performance notes. +template +bool operator!=(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { + return !(LHS == RHS); +} + +} // end namespace detail + +/// Implements a dense probed hash-table based set. +template > +class DenseSet : public detail::DenseSetImpl< + ValueT, DenseMap>, + ValueInfoT> { + using BaseT = + detail::DenseSetImpl>, + ValueInfoT>; + +public: + using BaseT::BaseT; +}; + +/// Implements a dense probed hash-table based set with some number of buckets +/// stored inline. +template > +class SmallDenseSet + : public detail::DenseSetImpl< + ValueT, SmallDenseMap>, + ValueInfoT> { + using BaseT = detail::DenseSetImpl< + ValueT, SmallDenseMap>, + ValueInfoT>; + +public: + using BaseT::BaseT; +}; + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_DENSESET_H diff --git a/stdlib/include/llvm/ADT/Hashing.h b/stdlib/include/llvm/ADT/Hashing.h new file mode 100644 index 0000000000000..3cc288bc73b91 --- /dev/null +++ b/stdlib/include/llvm/ADT/Hashing.h @@ -0,0 +1,659 @@ +//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the newly proposed standard C++ interfaces for hashing +// arbitrary data and building hash functions for user-defined types. This +// interface was originally proposed in N3333[1] and is currently under review +// for inclusion in a future TR and/or standard. +// +// The primary interfaces provide are comprised of one type and three functions: +// +// -- 'hash_code' class is an opaque type representing the hash code for some +// data. It is the intended product of hashing, and can be used to implement +// hash tables, checksumming, and other common uses of hashes. It is not an +// integer type (although it can be converted to one) because it is risky +// to assume much about the internals of a hash_code. In particular, each +// execution of the program has a high probability of producing a different +// hash_code for a given input. Thus their values are not stable to save or +// persist, and should only be used during the execution for the +// construction of hashing datastructures. +// +// -- 'hash_value' is a function designed to be overloaded for each +// user-defined type which wishes to be used within a hashing context. It +// should be overloaded within the user-defined type's namespace and found +// via ADL. Overloads for primitive types are provided by this library. +// +// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid +// programmers in easily and intuitively combining a set of data into +// a single hash_code for their object. They should only logically be used +// within the implementation of a 'hash_value' routine or similar context. +// +// Note that 'hash_combine_range' contains very special logic for hashing +// a contiguous array of integers or pointers. This logic is *extremely* fast, +// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were +// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys +// under 32-bytes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_HASHING_H +#define LLVM_ADT_HASHING_H + +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SwapByteOrder.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +/// An opaque object representing a hash code. +/// +/// This object represents the result of hashing some entity. It is intended to +/// be used to implement hashtables or other hashing-based data structures. +/// While it wraps and exposes a numeric value, this value should not be +/// trusted to be stable or predictable across processes or executions. +/// +/// In order to obtain the hash_code for an object 'x': +/// \code +/// using llvm::hash_value; +/// llvm::hash_code code = hash_value(x); +/// \endcode +class hash_code { + size_t value; + +public: + /// Default construct a hash_code. + /// Note that this leaves the value uninitialized. + hash_code() = default; + + /// Form a hash code directly from a numerical value. + hash_code(size_t value) : value(value) {} + + /// Convert the hash code to its numerical value for use. 
+ /*explicit*/ operator size_t() const { return value; } + + friend bool operator==(const hash_code &lhs, const hash_code &rhs) { + return lhs.value == rhs.value; + } + friend bool operator!=(const hash_code &lhs, const hash_code &rhs) { + return lhs.value != rhs.value; + } + + /// Allow a hash_code to be directly run through hash_value. + friend size_t hash_value(const hash_code &code) { return code.value; } +}; + +/// Compute a hash_code for any integer value. +/// +/// Note that this function is intended to compute the same hash_code for +/// a particular value without regard to the pre-promotion type. This is in +/// contrast to hash_combine which may produce different hash_codes for +/// differing argument types even if they would implicit promote to a common +/// type without changing the value. +template +std::enable_if_t::value, hash_code> hash_value(T value); + +/// Compute a hash_code for a pointer's address. +/// +/// N.B.: This hashes the *address*. Not the value and not the type. +template hash_code hash_value(const T *ptr); + +/// Compute a hash_code for a pair of objects. +template +hash_code hash_value(const std::pair &arg); + +/// Compute a hash_code for a standard string. +template +hash_code hash_value(const std::basic_string &arg); + + +/// Override the execution seed with a fixed value. +/// +/// This hashing library uses a per-execution seed designed to change on each +/// run with high probability in order to ensure that the hash codes are not +/// attackable and to ensure that output which is intended to be stable does +/// not rely on the particulars of the hash codes produced. +/// +/// That said, there are use cases where it is important to be able to +/// reproduce *exactly* a specific behavior. To that end, we provide a function +/// which will forcibly set the seed to a fixed value. This must be done at the +/// start of the program, before any hashes are computed. Also, it cannot be +/// undone. This makes it thread-hostile and very hard to use outside of +/// immediately on start of a simple program designed for reproducible +/// behavior. +void set_fixed_execution_hash_seed(uint64_t fixed_value); + + +// All of the implementation details of actually computing the various hash +// code values are held within this namespace. These routines are included in +// the header file mainly to allow inlining and constant propagation. +namespace hashing { +namespace detail { + +inline uint64_t fetch64(const char *p) { + uint64_t result; + memcpy(&result, p, sizeof(result)); + if (sys::IsBigEndianHost) + sys::swapByteOrder(result); + return result; +} + +inline uint32_t fetch32(const char *p) { + uint32_t result; + memcpy(&result, p, sizeof(result)); + if (sys::IsBigEndianHost) + sys::swapByteOrder(result); + return result; +} + +/// Some primes between 2^63 and 2^64 for various uses. +static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL; +static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL; +static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL; +static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL; + +/// Bitwise right rotate. +/// Normally this will compile to a single instruction, especially if the +/// shift is a manifest constant. +inline uint64_t rotate(uint64_t val, size_t shift) { + // Avoid shifting by 64: doing so yields an undefined result. + return shift == 0 ? 
val : ((val >> shift) | (val << (64 - shift))); +} + +inline uint64_t shift_mix(uint64_t val) { + return val ^ (val >> 47); +} + +inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) { + // Murmur-inspired hashing. + const uint64_t kMul = 0x9ddfea08eb382d69ULL; + uint64_t a = (low ^ high) * kMul; + a ^= (a >> 47); + uint64_t b = (high ^ a) * kMul; + b ^= (b >> 47); + b *= kMul; + return b; +} + +inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) { + uint8_t a = s[0]; + uint8_t b = s[len >> 1]; + uint8_t c = s[len - 1]; + uint32_t y = static_cast(a) + (static_cast(b) << 8); + uint32_t z = static_cast(len) + (static_cast(c) << 2); + return shift_mix(y * k2 ^ z * k3 ^ seed) * k2; +} + +inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch32(s); + return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4)); +} + +inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch64(s); + uint64_t b = fetch64(s + len - 8); + return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b; +} + +inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch64(s) * k1; + uint64_t b = fetch64(s + 8); + uint64_t c = fetch64(s + len - 8) * k2; + uint64_t d = fetch64(s + len - 16) * k0; + return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d, + a + rotate(b ^ k3, 20) - c + len + seed); +} + +inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t z = fetch64(s + 24); + uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0; + uint64_t b = rotate(a + z, 52); + uint64_t c = rotate(a, 37); + a += fetch64(s + 8); + c += rotate(a, 7); + a += fetch64(s + 16); + uint64_t vf = a + z; + uint64_t vs = b + rotate(a, 31) + c; + a = fetch64(s + 16) + fetch64(s + len - 32); + z = fetch64(s + len - 8); + b = rotate(a + z, 52); + c = rotate(a, 37); + a += fetch64(s + len - 24); + c += rotate(a, 7); + a += fetch64(s + len - 16); + uint64_t wf = a + z; + uint64_t ws = b + rotate(a, 31) + c; + uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0); + return shift_mix((seed ^ (r * k0)) + vs) * k2; +} + +inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) { + if (length >= 4 && length <= 8) + return hash_4to8_bytes(s, length, seed); + if (length > 8 && length <= 16) + return hash_9to16_bytes(s, length, seed); + if (length > 16 && length <= 32) + return hash_17to32_bytes(s, length, seed); + if (length > 32) + return hash_33to64_bytes(s, length, seed); + if (length != 0) + return hash_1to3_bytes(s, length, seed); + + return k2 ^ seed; +} + +/// The intermediate state used during hashing. +/// Currently, the algorithm for computing hash codes is based on CityHash and +/// keeps 56 bytes of arbitrary state. +struct hash_state { + uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0; + + /// Create a new hash_state structure and initialize it based on the + /// seed and the first 64-byte chunk. + /// This effectively performs the initial mix. + static hash_state create(const char *s, uint64_t seed) { + hash_state state = { + 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49), + seed * k1, shift_mix(seed), 0 }; + state.h6 = hash_16_bytes(state.h4, state.h5); + state.mix(s); + return state; + } + + /// Mix 32-bytes from the input sequence into the 16-bytes of 'a' + /// and 'b', including whatever is already in 'a' and 'b'. 
+ static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) { + a += fetch64(s); + uint64_t c = fetch64(s + 24); + b = rotate(b + a + c, 21); + uint64_t d = a; + a += fetch64(s + 8) + fetch64(s + 16); + b += rotate(a, 44) + d; + a += c; + } + + /// Mix in a 64-byte buffer of data. + /// We mix all 64 bytes even when the chunk length is smaller, but we + /// record the actual length. + void mix(const char *s) { + h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1; + h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1; + h0 ^= h6; + h1 += h3 + fetch64(s + 40); + h2 = rotate(h2 + h5, 33) * k1; + h3 = h4 * k1; + h4 = h0 + h5; + mix_32_bytes(s, h3, h4); + h5 = h2 + h6; + h6 = h1 + fetch64(s + 16); + mix_32_bytes(s + 32, h5, h6); + std::swap(h2, h0); + } + + /// Compute the final 64-bit hash code value based on the current + /// state and the length of bytes hashed. + uint64_t finalize(size_t length) { + return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2, + hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0); + } +}; + + +/// A global, fixed seed-override variable. +/// +/// This variable can be set using the \see llvm::set_fixed_execution_seed +/// function. See that function for details. Do not, under any circumstances, +/// set or read this variable. +extern uint64_t fixed_seed_override; + +inline uint64_t get_execution_seed() { + // FIXME: This needs to be a per-execution seed. This is just a placeholder + // implementation. Switching to a per-execution seed is likely to flush out + // instability bugs and so will happen as its own commit. + // + // However, if there is a fixed seed override set the first time this is + // called, return that instead of the per-execution seed. + const uint64_t seed_prime = 0xff51afd7ed558ccdULL; + static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime; + return seed; +} + + +/// Trait to indicate whether a type's bits can be hashed directly. +/// +/// A type trait which is true if we want to combine values for hashing by +/// reading the underlying data. It is false if values of this type must +/// first be passed to hash_value, and the resulting hash_codes combined. +// +// FIXME: We want to replace is_integral_or_enum and is_pointer here with +// a predicate which asserts that comparing the underlying storage of two +// values of the type for equality is equivalent to comparing the two values +// for equality. For all the platforms we care about, this holds for integers +// and pointers, but there are platforms where it doesn't and we would like to +// support user-defined types which happen to satisfy this property. +template struct is_hashable_data + : std::integral_constant::value || + std::is_pointer::value) && + 64 % sizeof(T) == 0)> {}; + +// Special case std::pair to detect when both types are viable and when there +// is no alignment-derived padding in the pair. This is a bit of a lie because +// std::pair isn't truly POD, but it's close enough in all reasonable +// implementations for our use case of hashing the underlying data. +template struct is_hashable_data > + : std::integral_constant::value && + is_hashable_data::value && + (sizeof(T) + sizeof(U)) == + sizeof(std::pair))> {}; + +/// Helper to get the hashable data representation for a type. +/// This variant is enabled when the type itself can be used. +template +std::enable_if_t::value, T> +get_hashable_data(const T &value) { + return value; +} +/// Helper to get the hashable data representation for a type. 
+/// This variant is enabled when we must first call hash_value and use the +/// result as our data. +template +std::enable_if_t::value, size_t> +get_hashable_data(const T &value) { + using ::llvm::hash_value; + return hash_value(value); +} + +/// Helper to store data from a value into a buffer and advance the +/// pointer into that buffer. +/// +/// This routine first checks whether there is enough space in the provided +/// buffer, and if not immediately returns false. If there is space, it +/// copies the underlying bytes of value into the buffer, advances the +/// buffer_ptr past the copied bytes, and returns true. +template +bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value, + size_t offset = 0) { + size_t store_size = sizeof(value) - offset; + if (buffer_ptr + store_size > buffer_end) + return false; + const char *value_data = reinterpret_cast(&value); + memcpy(buffer_ptr, value_data + offset, store_size); + buffer_ptr += store_size; + return true; +} + +/// Implement the combining of integral values into a hash_code. +/// +/// This overload is selected when the value type of the iterator is +/// integral. Rather than computing a hash_code for each object and then +/// combining them, this (as an optimization) directly combines the integers. +template +hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) { + const uint64_t seed = get_execution_seed(); + char buffer[64], *buffer_ptr = buffer; + char *const buffer_end = std::end(buffer); + while (first != last && store_and_advance(buffer_ptr, buffer_end, + get_hashable_data(*first))) + ++first; + if (first == last) + return hash_short(buffer, buffer_ptr - buffer, seed); + assert(buffer_ptr == buffer_end); + + hash_state state = state.create(buffer, seed); + size_t length = 64; + while (first != last) { + // Fill up the buffer. We don't clear it, which re-mixes the last round + // when only a partial 64-byte chunk is left. + buffer_ptr = buffer; + while (first != last && store_and_advance(buffer_ptr, buffer_end, + get_hashable_data(*first))) + ++first; + + // Rotate the buffer if we did a partial fill in order to simulate doing + // a mix of the last 64-bytes. That is how the algorithm works when we + // have a contiguous byte sequence, and we want to emulate that here. + std::rotate(buffer, buffer_ptr, buffer_end); + + // Mix this chunk into the current state. + state.mix(buffer); + length += buffer_ptr - buffer; + }; + + return state.finalize(length); +} + +/// Implement the combining of integral values into a hash_code. +/// +/// This overload is selected when the value type of the iterator is integral +/// and when the input iterator is actually a pointer. Rather than computing +/// a hash_code for each object and then combining them, this (as an +/// optimization) directly combines the integers. Also, because the integers +/// are stored in contiguous memory, this routine avoids copying each value +/// and directly reads from the underlying memory. 
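The pointer-specialized overload implementing this fast path follows. As a small illustration (hashBuffer is invented for the example), a contiguous buffer of integers is typically hashed through the public hash_combine_range entry point, which dispatches to this overload:

#include "llvm/ADT/Hashing.h"
#include <cstddef>

// Hash N contiguous unsigned values; contiguous integer storage selects the
// fast pointer-based implementation described above.
llvm::hash_code hashBuffer(const unsigned *Data, size_t N) {
  return llvm::hash_combine_range(Data, Data + N);
}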
+template +std::enable_if_t::value, hash_code> +hash_combine_range_impl(ValueT *first, ValueT *last) { + const uint64_t seed = get_execution_seed(); + const char *s_begin = reinterpret_cast(first); + const char *s_end = reinterpret_cast(last); + const size_t length = std::distance(s_begin, s_end); + if (length <= 64) + return hash_short(s_begin, length, seed); + + const char *s_aligned_end = s_begin + (length & ~63); + hash_state state = state.create(s_begin, seed); + s_begin += 64; + while (s_begin != s_aligned_end) { + state.mix(s_begin); + s_begin += 64; + } + if (length & 63) + state.mix(s_end - 64); + + return state.finalize(length); +} + +} // namespace detail +} // namespace hashing + + +/// Compute a hash_code for a sequence of values. +/// +/// This hashes a sequence of values. It produces the same hash_code as +/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences +/// and is significantly faster given pointers and types which can be hashed as +/// a sequence of bytes. +template +hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) { + return ::llvm::hashing::detail::hash_combine_range_impl(first, last); +} + + +// Implementation details for hash_combine. +namespace hashing { +namespace detail { + +/// Helper class to manage the recursive combining of hash_combine +/// arguments. +/// +/// This class exists to manage the state and various calls involved in the +/// recursive combining of arguments used in hash_combine. It is particularly +/// useful at minimizing the code in the recursive calls to ease the pain +/// caused by a lack of variadic functions. +struct hash_combine_recursive_helper { + char buffer[64] = {}; + hash_state state; + const uint64_t seed; + +public: + /// Construct a recursive hash combining helper. + /// + /// This sets up the state for a recursive hash combine, including getting + /// the seed and buffer setup. + hash_combine_recursive_helper() + : seed(get_execution_seed()) {} + + /// Combine one chunk of data into the current in-flight hash. + /// + /// This merges one chunk of data into the hash. First it tries to buffer + /// the data. If the buffer is full, it hashes the buffer into its + /// hash_state, empties it, and then merges the new chunk in. This also + /// handles cases where the data straddles the end of the buffer. + template + char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) { + if (!store_and_advance(buffer_ptr, buffer_end, data)) { + // Check for skew which prevents the buffer from being packed, and do + // a partial store into the buffer to fill it. This is only a concern + // with the variadic combine because that formation can have varying + // argument types. + size_t partial_store_size = buffer_end - buffer_ptr; + memcpy(buffer_ptr, &data, partial_store_size); + + // If the store fails, our buffer is full and ready to hash. We have to + // either initialize the hash state (on the first full buffer) or mix + // this buffer into the existing hash state. Length tracks the *hashed* + // length, not the buffered length. + if (length == 0) { + state = state.create(buffer, seed); + length = 64; + } else { + // Mix this chunk into the current state and bump length up by 64. + state.mix(buffer); + length += 64; + } + // Reset the buffer_ptr to the head of the buffer for the next chunk of + // data. + buffer_ptr = buffer; + + // Try again to store into the buffer -- this cannot fail as we only + // store types smaller than the buffer. 
+ if (!store_and_advance(buffer_ptr, buffer_end, data, + partial_store_size)) + llvm_unreachable("buffer smaller than stored type"); + } + return buffer_ptr; + } + + /// Recursive, variadic combining method. + /// + /// This function recurses through each argument, combining that argument + /// into a single hash. + template + hash_code combine(size_t length, char *buffer_ptr, char *buffer_end, + const T &arg, const Ts &...args) { + buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg)); + + // Recurse to the next argument. + return combine(length, buffer_ptr, buffer_end, args...); + } + + /// Base case for recursive, variadic combining. + /// + /// The base case when combining arguments recursively is reached when all + /// arguments have been handled. It flushes the remaining buffer and + /// constructs a hash_code. + hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) { + // Check whether the entire set of values fit in the buffer. If so, we'll + // use the optimized short hashing routine and skip state entirely. + if (length == 0) + return hash_short(buffer, buffer_ptr - buffer, seed); + + // Mix the final buffer, rotating it if we did a partial fill in order to + // simulate doing a mix of the last 64-bytes. That is how the algorithm + // works when we have a contiguous byte sequence, and we want to emulate + // that here. + std::rotate(buffer, buffer_ptr, buffer_end); + + // Mix this chunk into the current state. + state.mix(buffer); + length += buffer_ptr - buffer; + + return state.finalize(length); + } +}; + +} // namespace detail +} // namespace hashing + +/// Combine values into a single hash_code. +/// +/// This routine accepts a varying number of arguments of any type. It will +/// attempt to combine them into a single hash_code. For user-defined types it +/// attempts to call a \see hash_value overload (via ADL) for the type. For +/// integer and pointer types it directly combines their data into the +/// resulting hash_code. +/// +/// The result is suitable for returning from a user's hash_value +/// *implementation* for their user-defined type. Consumers of a type should +/// *not* call this routine, they should instead call 'hash_value'. +template hash_code hash_combine(const Ts &...args) { + // Recursively hash each argument using a helper class. + ::llvm::hashing::detail::hash_combine_recursive_helper helper; + return helper.combine(0, helper.buffer, helper.buffer + 64, args...); +} + +// Implementation details for implementations of hash_value overloads provided +// here. +namespace hashing { +namespace detail { + +/// Helper to hash the value of a single integer. +/// +/// Overloads for smaller integer types are not provided to ensure consistent +/// behavior in the presence of integral promotions. Essentially, +/// "hash_value('4')" and "hash_value('0' + 4)" should be the same. +inline hash_code hash_integer_value(uint64_t value) { + // Similar to hash_4to8_bytes but using a seed instead of length. + const uint64_t seed = get_execution_seed(); + const char *s = reinterpret_cast(&value); + const uint64_t a = fetch32(s); + return hash_16_bytes(seed + (a << 3), fetch32(s + 4)); +} + +} // namespace detail +} // namespace hashing + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. 
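[Editor's note] To make the guidance above concrete ("suitable for returning from a user's hash_value implementation"), here is a minimal, hypothetical client type; demo::Symbol and its fields are invented for illustration. The integral hash_value overload referenced by the trailing comment above follows right after this aside.

// Illustrative only: how a client type is expected to plug into hash_combine.
#include "llvm/ADT/Hashing.h"

#include <string>

namespace demo {
struct Symbol {
  std::string name;
  unsigned arity;
  bool isOperator;
};

// Found by ADL from get_hashable_data's unqualified hash_value call.
inline llvm::hash_code hash_value(const Symbol &s) {
  return llvm::hash_combine(s.name, s.arity, s.isOperator);
}
} // namespace demo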
+template +std::enable_if_t::value, hash_code> hash_value(T value) { + return ::llvm::hashing::detail::hash_integer_value( + static_cast(value)); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template hash_code hash_value(const T *ptr) { + return ::llvm::hashing::detail::hash_integer_value( + reinterpret_cast(ptr)); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template +hash_code hash_value(const std::pair &arg) { + return hash_combine(arg.first, arg.second); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template +hash_code hash_value(const std::basic_string &arg) { + return hash_combine_range(arg.begin(), arg.end()); +} + +} // namespace llvm +}} // namespace swift::runtime + +#endif diff --git a/stdlib/include/llvm/ADT/None.h b/stdlib/include/llvm/ADT/None.h new file mode 100644 index 0000000000000..a3ca67951cd22 --- /dev/null +++ b/stdlib/include/llvm/ADT/None.h @@ -0,0 +1,28 @@ +//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides None, an enumerator for use in implicit constructors +// of various (usually templated) types to make such construction more +// terse. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_NONE_H +#define LLVM_ADT_NONE_H + +inline namespace __swift { inline namespace __runtime { +namespace llvm { +/// A simple null object to allow implicit construction of Optional +/// and similar types without having to spell out the specialization's name. +// (constant value 1 in an attempt to workaround MSVC build issue... ) +enum class NoneType { None = 1 }; +const NoneType None = NoneType::None; +} +}} // swift::runtime + +#endif diff --git a/stdlib/include/llvm/ADT/Optional.h b/stdlib/include/llvm/ADT/Optional.h new file mode 100644 index 0000000000000..729e1bab901cf --- /dev/null +++ b/stdlib/include/llvm/ADT/Optional.h @@ -0,0 +1,447 @@ +//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides Optional, a template class modeled in the spirit of +// OCaml's 'opt' variant. The idea is to strongly type whether or not +// a value can be optional. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_OPTIONAL_H +#define LLVM_ADT_OPTIONAL_H + +#include "llvm/ADT/None.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +class raw_ostream; + +namespace optional_detail { + +struct in_place_t {}; + +/// Storage for any type. 
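[Editor's note] A hedged illustration of the in_place_t tag and the emplace path that the storage class below implements; the "Storage for any type" primary template follows this aside. Widget and its members are hypothetical, and the include path is assumed from this patch.

// Illustrative only; Optional's public interface is defined further down in
// this file.
#include "llvm/ADT/Optional.h"

#include <string>
#include <utility>

struct Widget {                        // hypothetical payload type
  std::string label;
  int id;
  Widget(std::string l, int i) : label(std::move(l)), id(i) {}
};

void emplaceExample() {
  llvm::Optional<Widget> slot;         // starts disengaged
  slot.emplace("knob", 7);             // constructs Widget in place via
                                       // OptionalStorage::emplace (below)
  if (slot)
    slot->id += 1;
  slot.reset();                        // destroys the Widget; disengaged again
}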
+template ::value> +class OptionalStorage { + union { + char empty; + T value; + }; + bool hasVal; + +public: + ~OptionalStorage() { reset(); } + + OptionalStorage() noexcept : empty(), hasVal(false) {} + + OptionalStorage(OptionalStorage const &other) : OptionalStorage() { + if (other.hasValue()) { + emplace(other.value); + } + } + OptionalStorage(OptionalStorage &&other) : OptionalStorage() { + if (other.hasValue()) { + emplace(std::move(other.value)); + } + } + + template + explicit OptionalStorage(in_place_t, Args &&... args) + : value(std::forward(args)...), hasVal(true) {} + + void reset() noexcept { + if (hasVal) { + value.~T(); + hasVal = false; + } + } + + bool hasValue() const noexcept { return hasVal; } + + T &getValue() LLVM_LVALUE_FUNCTION noexcept { + assert(hasVal); + return value; + } + T const &getValue() const LLVM_LVALUE_FUNCTION noexcept { + assert(hasVal); + return value; + } +#if LLVM_HAS_RVALUE_REFERENCE_THIS + T &&getValue() && noexcept { + assert(hasVal); + return std::move(value); + } +#endif + + template void emplace(Args &&... args) { + reset(); + ::new ((void *)std::addressof(value)) T(std::forward(args)...); + hasVal = true; + } + + OptionalStorage &operator=(T const &y) { + if (hasValue()) { + value = y; + } else { + ::new ((void *)std::addressof(value)) T(y); + hasVal = true; + } + return *this; + } + OptionalStorage &operator=(T &&y) { + if (hasValue()) { + value = std::move(y); + } else { + ::new ((void *)std::addressof(value)) T(std::move(y)); + hasVal = true; + } + return *this; + } + + OptionalStorage &operator=(OptionalStorage const &other) { + if (other.hasValue()) { + if (hasValue()) { + value = other.value; + } else { + ::new ((void *)std::addressof(value)) T(other.value); + hasVal = true; + } + } else { + reset(); + } + return *this; + } + + OptionalStorage &operator=(OptionalStorage &&other) { + if (other.hasValue()) { + if (hasValue()) { + value = std::move(other.value); + } else { + ::new ((void *)std::addressof(value)) T(std::move(other.value)); + hasVal = true; + } + } else { + reset(); + } + return *this; + } +}; + +template class OptionalStorage { + union { + char empty; + T value; + }; + bool hasVal = false; + +public: + ~OptionalStorage() = default; + + OptionalStorage() noexcept : empty{} {} + + OptionalStorage(OptionalStorage const &other) = default; + OptionalStorage(OptionalStorage &&other) = default; + + OptionalStorage &operator=(OptionalStorage const &other) = default; + OptionalStorage &operator=(OptionalStorage &&other) = default; + + template + explicit OptionalStorage(in_place_t, Args &&... args) + : value(std::forward(args)...), hasVal(true) {} + + void reset() noexcept { + if (hasVal) { + value.~T(); + hasVal = false; + } + } + + bool hasValue() const noexcept { return hasVal; } + + T &getValue() LLVM_LVALUE_FUNCTION noexcept { + assert(hasVal); + return value; + } + T const &getValue() const LLVM_LVALUE_FUNCTION noexcept { + assert(hasVal); + return value; + } +#if LLVM_HAS_RVALUE_REFERENCE_THIS + T &&getValue() && noexcept { + assert(hasVal); + return std::move(value); + } +#endif + + template void emplace(Args &&... 
args) { + reset(); + ::new ((void *)std::addressof(value)) T(std::forward(args)...); + hasVal = true; + } + + OptionalStorage &operator=(T const &y) { + if (hasValue()) { + value = y; + } else { + ::new ((void *)std::addressof(value)) T(y); + hasVal = true; + } + return *this; + } + OptionalStorage &operator=(T &&y) { + if (hasValue()) { + value = std::move(y); + } else { + ::new ((void *)std::addressof(value)) T(std::move(y)); + hasVal = true; + } + return *this; + } +}; + +} // namespace optional_detail + +template class Optional { + optional_detail::OptionalStorage Storage; + +public: + using value_type = T; + + constexpr Optional() {} + constexpr Optional(NoneType) {} + + Optional(const T &y) : Storage(optional_detail::in_place_t{}, y) {} + Optional(const Optional &O) = default; + + Optional(T &&y) : Storage(optional_detail::in_place_t{}, std::move(y)) {} + Optional(Optional &&O) = default; + + Optional &operator=(T &&y) { + Storage = std::move(y); + return *this; + } + Optional &operator=(Optional &&O) = default; + + /// Create a new object by constructing it in place with the given arguments. + template void emplace(ArgTypes &&... Args) { + Storage.emplace(std::forward(Args)...); + } + + static inline Optional create(const T *y) { + return y ? Optional(*y) : Optional(); + } + + Optional &operator=(const T &y) { + Storage = y; + return *this; + } + Optional &operator=(const Optional &O) = default; + + void reset() { Storage.reset(); } + + const T *getPointer() const { return &Storage.getValue(); } + T *getPointer() { return &Storage.getValue(); } + const T &getValue() const LLVM_LVALUE_FUNCTION { return Storage.getValue(); } + T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); } + + explicit operator bool() const { return hasValue(); } + bool hasValue() const { return Storage.hasValue(); } + const T *operator->() const { return getPointer(); } + T *operator->() { return getPointer(); } + const T &operator*() const LLVM_LVALUE_FUNCTION { return getValue(); } + T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); } + + template + constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION { + return hasValue() ? getValue() : std::forward(value); + } + + /// Apply a function to the value if present; otherwise return None. + template + auto map(const Function &F) const LLVM_LVALUE_FUNCTION + -> Optional { + if (*this) return F(getValue()); + return None; + } + +#if LLVM_HAS_RVALUE_REFERENCE_THIS + T &&getValue() && { return std::move(Storage.getValue()); } + T &&operator*() && { return std::move(Storage.getValue()); } + + template + T getValueOr(U &&value) && { + return hasValue() ? std::move(getValue()) : std::forward(value); + } + + /// Apply a function to the value if present; otherwise return None. 
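[Editor's note] A small usage sketch of the accessors above (map and getValueOr); the rvalue-qualified map overload continues immediately after this aside. parseRadix, isPow2Radix, and the radix strings are invented for the example.

// Illustrative only; assumes the include layout used by this patch.
#include "llvm/ADT/Optional.h"

#include <string>

llvm::Optional<int> parseRadix(const std::string &s) {
  if (s == "hex") return 16;
  if (s == "dec") return 10;
  return llvm::None;                   // disengaged result
}

bool isPow2Radix(const std::string &s) {
  // map() applies the lambda only when a value is present and wraps the
  // result in another Optional; getValueOr supplies the fallback.
  return parseRadix(s)
      .map([](int r) { return (r & (r - 1)) == 0; })
      .getValueOr(false);
}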
+ template + auto map(const Function &F) && + -> Optional { + if (*this) return F(std::move(*this).getValue()); + return None; + } +#endif +}; + +template +bool operator==(const Optional &X, const Optional &Y) { + if (X && Y) + return *X == *Y; + return X.hasValue() == Y.hasValue(); +} + +template +bool operator!=(const Optional &X, const Optional &Y) { + return !(X == Y); +} + +template +bool operator<(const Optional &X, const Optional &Y) { + if (X && Y) + return *X < *Y; + return X.hasValue() < Y.hasValue(); +} + +template +bool operator<=(const Optional &X, const Optional &Y) { + return !(Y < X); +} + +template +bool operator>(const Optional &X, const Optional &Y) { + return Y < X; +} + +template +bool operator>=(const Optional &X, const Optional &Y) { + return !(X < Y); +} + +template +bool operator==(const Optional &X, NoneType) { + return !X; +} + +template +bool operator==(NoneType, const Optional &X) { + return X == None; +} + +template +bool operator!=(const Optional &X, NoneType) { + return !(X == None); +} + +template +bool operator!=(NoneType, const Optional &X) { + return X != None; +} + +template bool operator<(const Optional &X, NoneType) { + return false; +} + +template bool operator<(NoneType, const Optional &X) { + return X.hasValue(); +} + +template bool operator<=(const Optional &X, NoneType) { + return !(None < X); +} + +template bool operator<=(NoneType, const Optional &X) { + return !(X < None); +} + +template bool operator>(const Optional &X, NoneType) { + return None < X; +} + +template bool operator>(NoneType, const Optional &X) { + return X < None; +} + +template bool operator>=(const Optional &X, NoneType) { + return None <= X; +} + +template bool operator>=(NoneType, const Optional &X) { + return X <= None; +} + +template bool operator==(const Optional &X, const T &Y) { + return X && *X == Y; +} + +template bool operator==(const T &X, const Optional &Y) { + return Y && X == *Y; +} + +template bool operator!=(const Optional &X, const T &Y) { + return !(X == Y); +} + +template bool operator!=(const T &X, const Optional &Y) { + return !(X == Y); +} + +template bool operator<(const Optional &X, const T &Y) { + return !X || *X < Y; +} + +template bool operator<(const T &X, const Optional &Y) { + return Y && X < *Y; +} + +template bool operator<=(const Optional &X, const T &Y) { + return !(Y < X); +} + +template bool operator<=(const T &X, const Optional &Y) { + return !(Y < X); +} + +template bool operator>(const Optional &X, const T &Y) { + return Y < X; +} + +template bool operator>(const T &X, const Optional &Y) { + return Y < X; +} + +template bool operator>=(const Optional &X, const T &Y) { + return !(X < Y); +} + +template bool operator>=(const T &X, const Optional &Y) { + return !(X < Y); +} + +raw_ostream &operator<<(raw_ostream &OS, NoneType); + +template () + << std::declval())> +raw_ostream &operator<<(raw_ostream &OS, const Optional &O) { + if (O) + OS << *O; + else + OS << None; + return OS; +} + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_OPTIONAL_H diff --git a/stdlib/include/llvm/ADT/PointerIntPair.h b/stdlib/include/llvm/ADT/PointerIntPair.h new file mode 100644 index 0000000000000..9d5c9de9f6f83 --- /dev/null +++ b/stdlib/include/llvm/ADT/PointerIntPair.h @@ -0,0 +1,246 @@ +//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the PointerIntPair class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_POINTERINTPAIR_H +#define LLVM_ADT_POINTERINTPAIR_H + +#include "llvm/Support/Compiler.h" +#include "llvm/Support/PointerLikeTypeTraits.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +template struct DenseMapInfo; +template +struct PointerIntPairInfo; + +/// PointerIntPair - This class implements a pair of a pointer and small +/// integer. It is designed to represent this in the space required by one +/// pointer by bitmangling the integer into the low part of the pointer. This +/// can only be done for small integers: typically up to 3 bits, but it depends +/// on the number of bits available according to PointerLikeTypeTraits for the +/// type. +/// +/// Note that PointerIntPair always puts the IntVal part in the highest bits +/// possible. For example, PointerIntPair will put the bit for +/// the bool into bit #2, not bit #0, which allows the low two bits to be used +/// for something else. For example, this allows: +/// PointerIntPair, 1, bool> +/// ... and the two bools will land in different bits. +template , + typename Info = PointerIntPairInfo> +class PointerIntPair { + // Used by MSVC visualizer and generally helpful for debugging/visualizing. + using InfoTy = Info; + intptr_t Value = 0; + +public: + constexpr PointerIntPair() = default; + + PointerIntPair(PointerTy PtrVal, IntType IntVal) { + setPointerAndInt(PtrVal, IntVal); + } + + explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); } + + PointerTy getPointer() const { return Info::getPointer(Value); } + + IntType getInt() const { return (IntType)Info::getInt(Value); } + + void setPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION { + Value = Info::updatePointer(Value, PtrVal); + } + + void setInt(IntType IntVal) LLVM_LVALUE_FUNCTION { + Value = Info::updateInt(Value, static_cast(IntVal)); + } + + void initWithPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION { + Value = Info::updatePointer(0, PtrVal); + } + + void setPointerAndInt(PointerTy PtrVal, IntType IntVal) LLVM_LVALUE_FUNCTION { + Value = Info::updateInt(Info::updatePointer(0, PtrVal), + static_cast(IntVal)); + } + + PointerTy const *getAddrOfPointer() const { + return const_cast(this)->getAddrOfPointer(); + } + + PointerTy *getAddrOfPointer() { + assert(Value == reinterpret_cast(getPointer()) && + "Can only return the address if IntBits is cleared and " + "PtrTraits doesn't change the pointer"); + return reinterpret_cast(&Value); + } + + void *getOpaqueValue() const { return reinterpret_cast(Value); } + + void setFromOpaqueValue(void *Val) LLVM_LVALUE_FUNCTION { + Value = reinterpret_cast(Val); + } + + static PointerIntPair getFromOpaqueValue(void *V) { + PointerIntPair P; + P.setFromOpaqueValue(V); + return P; + } + + // Allow PointerIntPairs to be created from const void * if and only if the + // pointer type could be created from a const void *. 
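[Editor's note] To ground the class comment above, a hedged usage sketch; Node is a hypothetical 8-byte-aligned type, and the const void * factory described by the comment just above follows this aside.

// Illustrative only; relies on the default PointerLikeTypeTraits deriving
// three free low bits from Node's 8-byte alignment.
#include "llvm/ADT/PointerIntPair.h"

struct alignas(8) Node { int payload; };

void tagPointer(Node *n) {
  // One word stores both the pointer and a 2-bit kind tag; the tag lives in
  // the otherwise-zero low bits of the aligned pointer.
  llvm::PointerIntPair<Node *, 2> tagged(n, /*IntVal=*/3);

  Node *ptr = tagged.getPointer();     // original pointer back, tag bits masked off
  unsigned kind = tagged.getInt();     // == 3
  tagged.setInt(1);                    // only the tag bits change
  (void)ptr; (void)kind;
}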
+ static PointerIntPair getFromOpaqueValue(const void *V) { + (void)PtrTraits::getFromVoidPointer(V); + return getFromOpaqueValue(const_cast(V)); + } + + bool operator==(const PointerIntPair &RHS) const { + return Value == RHS.Value; + } + + bool operator!=(const PointerIntPair &RHS) const { + return Value != RHS.Value; + } + + bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; } + bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; } + + bool operator<=(const PointerIntPair &RHS) const { + return Value <= RHS.Value; + } + + bool operator>=(const PointerIntPair &RHS) const { + return Value >= RHS.Value; + } +}; + +// Specialize is_trivially_copyable to avoid limitation of llvm::is_trivially_copyable +// when compiled with gcc 4.9. +template +struct is_trivially_copyable> : std::true_type { +#ifdef HAVE_STD_IS_TRIVIALLY_COPYABLE + static_assert(std::is_trivially_copyable>::value, + "inconsistent behavior between llvm:: and std:: implementation of is_trivially_copyable"); +#endif +}; + + +template +struct PointerIntPairInfo { + static_assert(PtrTraits::NumLowBitsAvailable < + std::numeric_limits::digits, + "cannot use a pointer type that has all bits free"); + static_assert(IntBits <= PtrTraits::NumLowBitsAvailable, + "PointerIntPair with integer size too large for pointer"); + enum MaskAndShiftConstants : uintptr_t { + /// PointerBitMask - The bits that come from the pointer. + PointerBitMask = + ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1), + + /// IntShift - The number of low bits that we reserve for other uses, and + /// keep zero. + IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits, + + /// IntMask - This is the unshifted mask for valid bits of the int type. + IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1), + + // ShiftedIntMask - This is the bits for the integer shifted in place. + ShiftedIntMask = (uintptr_t)(IntMask << IntShift) + }; + + static PointerT getPointer(intptr_t Value) { + return PtrTraits::getFromVoidPointer( + reinterpret_cast(Value & PointerBitMask)); + } + + static intptr_t getInt(intptr_t Value) { + return (Value >> IntShift) & IntMask; + } + + static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) { + intptr_t PtrWord = + reinterpret_cast(PtrTraits::getAsVoidPointer(Ptr)); + assert((PtrWord & ~PointerBitMask) == 0 && + "Pointer is not sufficiently aligned"); + // Preserve all low bits, just update the pointer. + return PtrWord | (OrigValue & ~PointerBitMask); + } + + static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) { + intptr_t IntWord = static_cast(Int); + assert((IntWord & ~IntMask) == 0 && "Integer too large for field"); + + // Preserve all bits other than the ones we are updating. + return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift; + } +}; + +// Provide specialization of DenseMapInfo for PointerIntPair. 
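[Editor's note] A worked instance of the mask and shift constants above, assuming a pointer type with three free low bits (8-byte alignment) and IntBits = 2; the demo namespace and concrete values are illustrative only. The DenseMapInfo specialization announced by the trailing comment follows this aside.

// Spells out the packing used by updatePointer/updateInt above.
#include <cstdint>

namespace demo {
constexpr unsigned NumLowBitsAvailable = 3; // from PointerLikeTypeTraits (assumed)
constexpr unsigned IntBits = 2;

constexpr std::uintptr_t PointerBitMask =
    ~(std::uintptr_t)(((std::intptr_t)1 << NumLowBitsAvailable) - 1); // ...111000
constexpr std::uintptr_t IntShift = NumLowBitsAvailable - IntBits;     // 1
constexpr std::uintptr_t IntMask  = ((std::intptr_t)1 << IntBits) - 1; // 0b11
constexpr std::uintptr_t ShiftedIntMask = IntMask << IntShift;         // 0b110

// A 2-bit value of 3 therefore occupies bits #1 and #2, leaving bit #0 free,
// which is what lets PointerIntPair nest as in the header comment above.
static_assert(((std::uintptr_t)3 << IntShift) & PointerBitMask ? false : true,
              "the int field stays below the pointer bits");
} // namespace demo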
+template +struct DenseMapInfo> { + using Ty = PointerIntPair; + + static Ty getEmptyKey() { + uintptr_t Val = static_cast(-1); + Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; + return Ty::getFromOpaqueValue(reinterpret_cast(Val)); + } + + static Ty getTombstoneKey() { + uintptr_t Val = static_cast(-2); + Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; + return Ty::getFromOpaqueValue(reinterpret_cast(Val)); + } + + static unsigned getHashValue(Ty V) { + uintptr_t IV = reinterpret_cast(V.getOpaqueValue()); + return unsigned(IV) ^ unsigned(IV >> 9); + } + + static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; } +}; + +// Teach SmallPtrSet that PointerIntPair is "basically a pointer". +template +struct PointerLikeTypeTraits< + PointerIntPair> { + static inline void * + getAsVoidPointer(const PointerIntPair &P) { + return P.getOpaqueValue(); + } + + static inline PointerIntPair + getFromVoidPointer(void *P) { + return PointerIntPair::getFromOpaqueValue(P); + } + + static inline PointerIntPair + getFromVoidPointer(const void *P) { + return PointerIntPair::getFromOpaqueValue(P); + } + + static constexpr int NumLowBitsAvailable = + PtrTraits::NumLowBitsAvailable - IntBits; +}; + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_POINTERINTPAIR_H diff --git a/stdlib/include/llvm/ADT/PointerUnion.h b/stdlib/include/llvm/ADT/PointerUnion.h new file mode 100644 index 0000000000000..1aeeaba303e10 --- /dev/null +++ b/stdlib/include/llvm/ADT/PointerUnion.h @@ -0,0 +1,301 @@ +//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the PointerUnion class, which is a discriminated union of +// pointer types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_POINTERUNION_H +#define LLVM_ADT_POINTERUNION_H + +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/ADT/PointerIntPair.h" +#include "llvm/Support/PointerLikeTypeTraits.h" +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +template struct PointerUnionTypeSelectorReturn { + using Return = T; +}; + +/// Get a type based on whether two types are the same or not. +/// +/// For: +/// +/// \code +/// using Ret = typename PointerUnionTypeSelector::Return; +/// \endcode +/// +/// Ret will be EQ type if T1 is same as T2 or NE type otherwise. +template +struct PointerUnionTypeSelector { + using Return = typename PointerUnionTypeSelectorReturn::Return; +}; + +template +struct PointerUnionTypeSelector { + using Return = typename PointerUnionTypeSelectorReturn::Return; +}; + +template +struct PointerUnionTypeSelectorReturn< + PointerUnionTypeSelector> { + using Return = + typename PointerUnionTypeSelector::Return; +}; + +namespace pointer_union_detail { + /// Determine the number of bits required to store integers with values < n. + /// This is ceil(log2(n)). + constexpr int bitsRequired(unsigned n) { + return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0; + } + + template constexpr int lowBitsAvailable() { + return std::min({PointerLikeTypeTraits::NumLowBitsAvailable...}); + } + + /// Find the index of a type in a list of types. 
TypeIndex::Index + /// is the index of T in Us, or sizeof...(Us) if T does not appear in the + /// list. + template struct TypeIndex; + template struct TypeIndex { + static constexpr int Index = 0; + }; + template + struct TypeIndex { + static constexpr int Index = 1 + TypeIndex::Index; + }; + template struct TypeIndex { + static constexpr int Index = 0; + }; + + /// Find the first type in a list of types. + template struct GetFirstType { + using type = T; + }; + + /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion + /// for the template arguments. + template class PointerUnionUIntTraits { + public: + static inline void *getAsVoidPointer(void *P) { return P; } + static inline void *getFromVoidPointer(void *P) { return P; } + static constexpr int NumLowBitsAvailable = lowBitsAvailable(); + }; + + /// Implement assignment in terms of construction. + template struct AssignableFrom { + Derived &operator=(T t) { + return static_cast(*this) = Derived(t); + } + }; + + template + class PointerUnionMembers; + + template + class PointerUnionMembers { + protected: + ValTy Val; + PointerUnionMembers() = default; + PointerUnionMembers(ValTy Val) : Val(Val) {} + + friend struct PointerLikeTypeTraits; + }; + + template + class PointerUnionMembers + : public PointerUnionMembers { + using Base = PointerUnionMembers; + public: + using Base::Base; + PointerUnionMembers() = default; + PointerUnionMembers(Type V) + : Base(ValTy(const_cast( + PointerLikeTypeTraits::getAsVoidPointer(V)), + I)) {} + + using Base::operator=; + Derived &operator=(Type V) { + this->Val = ValTy( + const_cast(PointerLikeTypeTraits::getAsVoidPointer(V)), + I); + return static_cast(*this); + }; + }; +} + +/// A discriminated union of two or more pointer types, with the discriminator +/// in the low bit of the pointer. +/// +/// This implementation is extremely efficient in space due to leveraging the +/// low bits of the pointer, while exposing a natural and type-safe API. +/// +/// Common use patterns would be something like this: +/// PointerUnion P; +/// P = (int*)0; +/// printf("%d %d", P.is(), P.is()); // prints "1 0" +/// X = P.get(); // ok. +/// Y = P.get(); // runtime assertion failure. +/// Z = P.get(); // compile time failure. +/// P = (float*)0; +/// Y = P.get(); // ok. +/// X = P.get(); // runtime assertion failure. +template +class PointerUnion + : public pointer_union_detail::PointerUnionMembers< + PointerUnion, + PointerIntPair< + void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int, + pointer_union_detail::PointerUnionUIntTraits>, + 0, PTs...> { + // The first type is special because we want to directly cast a pointer to a + // default-initialized union to a pointer to the first type. But we don't + // want PointerUnion to be a 'template ' + // because it's much more convenient to have a name for the whole pack. So + // split off the first type here. + using First = typename pointer_union_detail::GetFirstType::type; + using Base = typename PointerUnion::PointerUnionMembers; + +public: + PointerUnion() = default; + + PointerUnion(std::nullptr_t) : PointerUnion() {} + using Base::Base; + + /// Test if the pointer held in the union is null, regardless of + /// which type it is. + bool isNull() const { return !this->Val.getPointer(); } + + explicit operator bool() const { return !isNull(); } + + /// Test if the Union currently holds the type matching T. 
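[Editor's note] A hedged check of the discriminator math above (bitsRequired and the shared low-bit budget); the is<T>() accessor documented by the comment just above follows this aside. The alignment assumptions and the sizeof check hold on typical targets but are not guaranteed by the patch, and reaching into pointer_union_detail is done here only to illustrate the arithmetic.

// Illustrative compile-time checks only.
#include "llvm/ADT/PointerUnion.h"

namespace demo {
using llvm::pointer_union_detail::bitsRequired;

static_assert(bitsRequired(2) == 1, "two members need a 1-bit tag");
static_assert(bitsRequired(3) == 2, "three or four members need 2 bits");
static_assert(bitsRequired(4) == 2, "three or four members need 2 bits");

// With int*/float* members (both at least 4-byte aligned on typical targets),
// a 1-bit tag fits in the shared low bits, so the union packs into one word.
using IntOrFloat = llvm::PointerUnion<int *, float *>;
static_assert(sizeof(IntOrFloat) == sizeof(void *),
              "the tag is stored in the low bits of the pointer");
} // namespace demo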
+ template bool is() const { + constexpr int Index = pointer_union_detail::TypeIndex::Index; + static_assert(Index < sizeof...(PTs), + "PointerUnion::is given type not in the union"); + return this->Val.getInt() == Index; + } + + /// Returns the value of the specified pointer type. + /// + /// If the specified pointer type is incorrect, assert. + template T get() const { + assert(is() && "Invalid accessor called"); + return PointerLikeTypeTraits::getFromVoidPointer(this->Val.getPointer()); + } + + /// Returns the current pointer if it is of the specified pointer type, + /// otherwises returns null. + template T dyn_cast() const { + if (is()) + return get(); + return T(); + } + + /// If the union is set to the first pointer type get an address pointing to + /// it. + First const *getAddrOfPtr1() const { + return const_cast(this)->getAddrOfPtr1(); + } + + /// If the union is set to the first pointer type get an address pointing to + /// it. + First *getAddrOfPtr1() { + assert(is() && "Val is not the first pointer"); + assert( + PointerLikeTypeTraits::getAsVoidPointer(get()) == + this->Val.getPointer() && + "Can't get the address because PointerLikeTypeTraits changes the ptr"); + return const_cast( + reinterpret_cast(this->Val.getAddrOfPointer())); + } + + /// Assignment from nullptr which just clears the union. + const PointerUnion &operator=(std::nullptr_t) { + this->Val.initWithPointer(nullptr); + return *this; + } + + /// Assignment from elements of the union. + using Base::operator=; + + void *getOpaqueValue() const { return this->Val.getOpaqueValue(); } + static inline PointerUnion getFromOpaqueValue(void *VP) { + PointerUnion V; + V.Val = decltype(V.Val)::getFromOpaqueValue(VP); + return V; + } +}; + +template +bool operator==(PointerUnion lhs, PointerUnion rhs) { + return lhs.getOpaqueValue() == rhs.getOpaqueValue(); +} + +template +bool operator!=(PointerUnion lhs, PointerUnion rhs) { + return lhs.getOpaqueValue() != rhs.getOpaqueValue(); +} + +template +bool operator<(PointerUnion lhs, PointerUnion rhs) { + return lhs.getOpaqueValue() < rhs.getOpaqueValue(); +} + +// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has +// # low bits available = min(PT1bits,PT2bits)-1. +template +struct PointerLikeTypeTraits> { + static inline void *getAsVoidPointer(const PointerUnion &P) { + return P.getOpaqueValue(); + } + + static inline PointerUnion getFromVoidPointer(void *P) { + return PointerUnion::getFromOpaqueValue(P); + } + + // The number of bits available are the min of the pointer types minus the + // bits needed for the discriminator. + static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits::Val)>::NumLowBitsAvailable; +}; + +// Teach DenseMap how to use PointerUnions as keys. 
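[Editor's note] A short usage sketch mirroring the class comment earlier in this file; the DenseMapInfo specialization announced just above follows this aside. The function and variable names are invented for the example.

// Illustrative only.
#include "llvm/ADT/PointerUnion.h"

void unionExample(int *ip, float *fp) {
  llvm::PointerUnion<int *, float *> P;  // default: null, tagged as int *
  if (P.isNull())
    P = ip;                              // assign an int *

  bool holdsInt = P.is<int *>();         // true
  int *same = P.get<int *>();            // asserts it really holds an int *
  float *none = P.dyn_cast<float *>();   // wrong member: yields nullptr instead

  P = fp;                                // re-tag with the float * member
  (void)holdsInt; (void)same; (void)none;
}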
+template struct DenseMapInfo> { + using Union = PointerUnion; + using FirstInfo = + DenseMapInfo::type>; + + static inline Union getEmptyKey() { return Union(FirstInfo::getEmptyKey()); } + + static inline Union getTombstoneKey() { + return Union(FirstInfo::getTombstoneKey()); + } + + static unsigned getHashValue(const Union &UnionVal) { + intptr_t key = (intptr_t)UnionVal.getOpaqueValue(); + return DenseMapInfo::getHashValue(key); + } + + static bool isEqual(const Union &LHS, const Union &RHS) { + return LHS == RHS; + } +}; + +} // end namespace llvm +}} // swift::runtime + +#endif // LLVM_ADT_POINTERUNION_H diff --git a/stdlib/include/llvm/ADT/STLExtras.h b/stdlib/include/llvm/ADT/STLExtras.h new file mode 100644 index 0000000000000..8beab3f85aafe --- /dev/null +++ b/stdlib/include/llvm/ADT/STLExtras.h @@ -0,0 +1,1945 @@ +//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL at all. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STLEXTRAS_H +#define LLVM_ADT_STLEXTRAS_H + +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/iterator_range.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef EXPENSIVE_CHECKS +#include // for std::mt19937 +#endif + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +// Only used by compiler if both template types are the same. Useful when +// using SFINAE to test for the existence of member functions. +template struct SameType; + +namespace detail { + +template +using IterOfRange = decltype(std::begin(std::declval())); + +template +using ValueOfRange = typename std::remove_reference()))>::type; + +} // end namespace detail + +//===----------------------------------------------------------------------===// +// Extra additions to +//===----------------------------------------------------------------------===// + +template +struct negation : std::integral_constant {}; + +template struct conjunction : std::true_type {}; +template struct conjunction : B1 {}; +template +struct conjunction + : std::conditional, B1>::type {}; + +template struct make_const_ptr { + using type = + typename std::add_pointer::type>::type; +}; + +template struct make_const_ref { + using type = typename std::add_lvalue_reference< + typename std::add_const::type>::type; +}; + +/// Utilities for detecting if a given trait holds for some set of arguments +/// 'Args'. For example, the given trait could be used to detect if a given type +/// has a copy assignment operator: +/// template +/// using has_copy_assign_t = decltype(std::declval() +/// = std::declval()); +/// bool fooHasCopyAssign = is_detected::value; +namespace detail { +template using void_t = void; +template class Op, class... Args> struct detector { + using value_t = std::false_type; +}; +template