
SPEC-6276 Move ci_build and ci_build_metrics to scripts\build

Esteban Papp 4 years ago
parent
commit
4a3d24f189
100 changed files with 0 additions and 14148 deletions
  1. 0 193
      Tools/build/JenkinsScripts/build/PackageEnv.py
  2. 0 81
      Tools/build/JenkinsScripts/build/Params.py
  3. 0 358
      Tools/build/JenkinsScripts/build/cmake_package.py
  4. 0 106
      Tools/build/JenkinsScripts/build/cmake_package_env.json
  5. 0 80
      Tools/build/JenkinsScripts/build/download_latest_package_from_bucket.py
  6. 0 55
      Tools/build/JenkinsScripts/build/download_packages.py
  7. 0 57
      Tools/build/JenkinsScripts/build/jenkins_scm_metrics.py
  8. 0 12
      Tools/build/JenkinsScripts/build/utils/__init__.py
  9. 0 51
      Tools/build/JenkinsScripts/build/utils/copy_LAD_3rdParty.xml
  10. 0 102
      Tools/build/JenkinsScripts/build/utils/download_from_s3.py
  11. 0 129
      Tools/build/JenkinsScripts/build/utils/email_to_lionbridge.py
  12. 0 662
      Tools/build/JenkinsScripts/build/utils/incremental_build_util.py
  13. 0 57
      Tools/build/JenkinsScripts/build/utils/jenkins_scm_metrics.py
  14. 0 12
      Tools/build/JenkinsScripts/build/utils/lib/__init__.py
  15. 0 174
      Tools/build/JenkinsScripts/build/utils/lib/glob3.py
  16. 0 75
      Tools/build/JenkinsScripts/build/utils/packaging_version.py
  17. 0 212
      Tools/build/JenkinsScripts/build/utils/scrubbing_test.py
  18. 0 82
      Tools/build/JenkinsScripts/build/utils/update_bootstrap_cfg.py
  19. 0 174
      Tools/build/JenkinsScripts/build/utils/upload_benchmarks.py
  20. 0 183
      Tools/build/JenkinsScripts/build/utils/upload_metrics_to_kinesis.py
  21. 0 107
      Tools/build/JenkinsScripts/build/utils/upload_to_s3.py
  22. 0 65
      Tools/build/JenkinsScripts/build/utils/util.py
  23. 0 64
      Tools/build/JenkinsScripts/distribution/AWS_PyTools/LyChecksum.py
  24. 0 76
      Tools/build/JenkinsScripts/distribution/AWS_PyTools/LyCloudfrontOps.py
  25. 0 12
      Tools/build/JenkinsScripts/distribution/AWS_PyTools/__init__.py
  26. 0 303
      Tools/build/JenkinsScripts/distribution/AWS_WAF_Updater/update_internal_whitelist.py
  27. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/BootstrapperLogo.png
  28. 0 289
      Tools/build/JenkinsScripts/distribution/Installer/BuildInstaller.py
  29. 0 206
      Tools/build/JenkinsScripts/distribution/Installer/BuildInstallerUtils.py
  30. 0 85
      Tools/build/JenkinsScripts/distribution/Installer/BuildInstallerWixUtils.py
  31. 0 145
      Tools/build/JenkinsScripts/distribution/Installer/Candle.py
  32. 0 257
      Tools/build/JenkinsScripts/distribution/Installer/Heat.py
  33. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/HeatDevPackageBase.wxs
  34. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/HeatPackageBase.wxs
  35. 0 67
      Tools/build/JenkinsScripts/distribution/Installer/Insignia.py
  36. 0 115
      Tools/build/JenkinsScripts/distribution/Installer/InstallerArgs.py
  37. 0 273
      Tools/build/JenkinsScripts/distribution/Installer/InstallerAutomation.py
  38. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/InstallerIcon.ico
  39. 0 225
      Tools/build/JenkinsScripts/distribution/Installer/InstallerPackaging.py
  40. 0 93
      Tools/build/JenkinsScripts/distribution/Installer/InstallerParams.py
  41. 0 57
      Tools/build/JenkinsScripts/distribution/Installer/Light.py
  42. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/LumberyardBootstrapper.wxs
  43. 0 14
      Tools/build/JenkinsScripts/distribution/Installer/LumberyardDevCertSetup.bat
  44. 0 61
      Tools/build/JenkinsScripts/distribution/Installer/LumberyardThemeGDC.wxl
  45. 0 85
      Tools/build/JenkinsScripts/distribution/Installer/LumberyardThemeGDC.xml
  46. 0 29
      Tools/build/JenkinsScripts/distribution/Installer/PackageExeSigning.py
  47. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/Redistributables.wxs
  48. 0 212
      Tools/build/JenkinsScripts/distribution/Installer/SignTool.py
  49. 0 186
      Tools/build/JenkinsScripts/distribution/Installer/TestInstaller.py
  50. 0 10
      Tools/build/JenkinsScripts/distribution/Installer/__init__.py
  51. 0 23
      Tools/build/JenkinsScripts/distribution/Installer/allowed_empty_folders.json
  52. 0 50
      Tools/build/JenkinsScripts/distribution/Installer/dir_filelist.json
  53. 0 3
      Tools/build/JenkinsScripts/distribution/Installer/editor_icon_setup.ico
  54. BIN
      Tools/build/JenkinsScripts/distribution/Installer/license.rtf
  55. 0 66
      Tools/build/JenkinsScripts/distribution/Metrics/GameTemplates/BuildGameTemplateWhitelist.py
  56. 0 30
      Tools/build/JenkinsScripts/distribution/Metrics/GameTemplates/BuildGameTemplateWhitelistArgs.py
  57. 0 309
      Tools/build/JenkinsScripts/distribution/ThirdParty/BuildThirdPartyPackages.py
  58. 0 29
      Tools/build/JenkinsScripts/distribution/ThirdParty/BuildThirdPartyUtils.py
  59. 0 207
      Tools/build/JenkinsScripts/distribution/ThirdParty/SDKPackager.py
  60. 0 76
      Tools/build/JenkinsScripts/distribution/ThirdParty/ThirdPartySDKAWS.py
  61. 0 12
      Tools/build/JenkinsScripts/distribution/__init__.py
  62. 0 12
      Tools/build/JenkinsScripts/distribution/copyright.txt
  63. 0 83
      Tools/build/JenkinsScripts/distribution/copyright_prepender.py
  64. 0 16
      Tools/build/JenkinsScripts/distribution/copyright_removal/Categorizer.py
  65. 0 26
      Tools/build/JenkinsScripts/distribution/copyright_removal/CommentCategory.py
  66. 0 20
      Tools/build/JenkinsScripts/distribution/copyright_removal/SlashComment.py
  67. 0 20
      Tools/build/JenkinsScripts/distribution/copyright_removal/StarComment.py
  68. 0 580
      Tools/build/JenkinsScripts/distribution/copyright_removal/copyright_header_manual_tool.py
  69. 0 813
      Tools/build/JenkinsScripts/distribution/copyright_removal/copyright_update.py
  70. 0 473
      Tools/build/JenkinsScripts/distribution/copyright_removal/copyrighttool.py
  71. 0 0
      Tools/build/JenkinsScripts/distribution/copyright_removal/crytek_3.8.1_source.txt
  72. 0 126
      Tools/build/JenkinsScripts/distribution/copyright_removal/replace_crytek_copyright.py
  73. 0 20
      Tools/build/JenkinsScripts/distribution/get_changelist_number.py
  74. 0 143
      Tools/build/JenkinsScripts/distribution/git_release/GitDailyValidation.py
  75. 0 23
      Tools/build/JenkinsScripts/distribution/git_release/GitHashList.json
  76. 0 203
      Tools/build/JenkinsScripts/distribution/git_release/GitIntegrityChecker.py
  77. 0 47
      Tools/build/JenkinsScripts/distribution/git_release/GitIntegrityCheckerTester.py
  78. 0 331
      Tools/build/JenkinsScripts/distribution/git_release/GitMoveDetection.py
  79. 0 75
      Tools/build/JenkinsScripts/distribution/git_release/GitOpsCodeCommit.py
  80. 0 24
      Tools/build/JenkinsScripts/distribution/git_release/GitOpsCommon.py
  81. 0 49
      Tools/build/JenkinsScripts/distribution/git_release/GitOpsGitHub.py
  82. 0 417
      Tools/build/JenkinsScripts/distribution/git_release/GitPromotion.py
  83. 0 217
      Tools/build/JenkinsScripts/distribution/git_release/GitRelease.py
  84. 0 825
      Tools/build/JenkinsScripts/distribution/git_release/GitStaging.py
  85. 0 146
      Tools/build/JenkinsScripts/distribution/git_release/build.xml
  86. 0 1136
      Tools/build/JenkinsScripts/distribution/git_release/git_bootstrap.py
  87. 0 208
      Tools/build/JenkinsScripts/distribution/git_release/git_bootstrap_test.py
  88. 0 26
      Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/bug_report.md
  89. 0 17
      Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/feature_request.md
  90. 0 20
      Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/question.md
  91. 0 45
      Tools/build/JenkinsScripts/distribution/git_release/inject/CONTRIBUTING.md
  92. 0 57
      Tools/build/JenkinsScripts/distribution/git_release/inject/README.md
  93. 0 167
      Tools/build/JenkinsScripts/distribution/inject_signed_binaries.py
  94. 0 59
      Tools/build/JenkinsScripts/distribution/ly_dep_version_tool.py
  95. 0 24
      Tools/build/JenkinsScripts/distribution/modify_lylauncherconfig.py
  96. 0 44
      Tools/build/JenkinsScripts/distribution/package_source_assets.bat
  97. 0 47
      Tools/build/JenkinsScripts/distribution/release_automation_tool.py
  98. 0 379
      Tools/build/JenkinsScripts/distribution/s3multiput.py
  99. 0 450
      Tools/build/JenkinsScripts/distribution/s3put.py
  100. 0 66
      Tools/build/JenkinsScripts/distribution/update_version_strings.py

+ 0 - 193
Tools/build/JenkinsScripts/build/PackageEnv.py

@@ -1,193 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-from Params import Params
-from utils.util import *
-
-
-class PackageEnv(Params):
-    def __init__(self, target_platform, json_file):
-        super(PackageEnv, self).__init__()
-        self.__cur_dir = os.path.dirname(os.path.abspath(__file__))
-        with open(json_file, 'r') as source:
-            self.__data = json.load(source)
-            self.__platforms = self.__data.get('platforms')
-            if target_platform not in self.__platforms:
-                ly_build_error('Target platform {} is not supported'.format(target_platform))
-            self.__target_platform = target_platform
-
-            # visited_platform tracks the platform reference chain, to avoid a reference cycle.
-            visited_platform = [target_platform]
-            platform_env = self.__platforms.get(target_platform)
-            # If platform_env starts with @, it means that platform_env references another platform
-            while isinstance(platform_env, str) and platform_env.startswith('@'):
-                referenced_platform = platform_env.lstrip('@')
-                if referenced_platform in visited_platform:
-                    ly_build_error('Found reference chain cycle started from {}.\nSee {}'.format(referenced_platform, json_file))
-                visited_platform.append(referenced_platform)
-                platform_env = self.__platforms.get(referenced_platform)
-
-            self.__platform_env = platform_env
-            self.__global_env = self.__data.get('global')
-
-    def get_target_platform(self):
-        return self.__target_platform
-
-    def get_global_env(self):
-        return self.__global_env
-
-    def get_platform_env(self):
-        return self.__platform_env
-
-    def __get_global_value(self, key):
-        key = key.upper()
-        value = self.__global_env.get(key)
-        if value is None:
-            ly_build_error('{} is not defined in global env'.format(key))
-        return value
-
-    def __get_platform_value(self, key):
-        key = key.upper()
-        value = self.__platform_env.get(key)
-        if value is None:
-            ly_build_error('{} is not defined in platform env for {}'.format(key, self.__target_platform))
-        return value
-
-    def __evaluate_boolean(self, v):
-        return str(v).lower() in ['1', 'true']
-
-    def __get_engine_root(self):
-        def validate_engine_root(engine_root):
-            if not os.path.isdir(engine_root):
-                return False
-            return os.path.exists(os.path.join(engine_root, 'engine.json'))
-
-        # Jenkins only
-        workspace = os.getenv('WORKSPACE')
-        if workspace is not None:
-            print('Environment variable WORKSPACE={} detected'.format(workspace))
-            if validate_engine_root(workspace):
-                print('Setting ENGINE_ROOT to {}'.format(workspace))
-                return workspace
-            engine_root = os.path.join(workspace, 'dev')
-            if validate_engine_root(engine_root):
-                print('Setting ENGINE_ROOT to {}'.format(engine_root))
-                return engine_root
-            print('Cannot locate ENGINE_ROOT with Environment variable WORKSPACE')
-            # End Jenkins only
-            
-        engine_root = os.getenv('ENGINE_ROOT', '')
-        if validate_engine_root(engine_root):
-            return engine_root
-
-        print('Environment variable ENGINE_ROOT is not set or invalid, checking ENGINE_ROOT in env json file')
-        engine_root = self.__global_env.get('ENGINE_ROOT')
-        if validate_engine_root(engine_root):
-            return engine_root
-
-        # Set engine_root based on script location
-        engine_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(self.__cur_dir))))
-        print('ENGINE_ROOT from env json file is invalid, defaulting to {}'.format(engine_root))
-        if validate_engine_root(engine_root):
-            return engine_root
-        else:
-            error('Cannot Locate ENGINE_ROOT')
-
-    def __get_thirdparty_home(self):
-        third_party_home = os.getenv('ENV_3RDPARTY_PATH', '')
-        if os.path.exists(third_party_home):
-            print('ENV_3RDPARTY_PATH found, using {} as 3rdParty path.'.format(third_party_home))
-            return third_party_home
-        third_party_home = self.__get_global_value('THIRDPARTY_HOME')
-        if os.path.isdir(third_party_home):
-            return third_party_home
-
-        # THIRDPARTY_HOME from the env json file is not valid; search upward from the engine root
-        print('THIRDPARTY_HOME is not valid, looking for THIRD_PARTY_HOME')
-
-        # Finding THIRD_PARTY_HOME
-        cur_dir = self.__get_engine_root()
-        last_dir = None
-        while last_dir != cur_dir:
-            third_party_home = os.path.join(cur_dir, '3rdParty')
-            print('Checking THIRDPARTY_HOME {}'.format(third_party_home))
-            if os.path.exists(os.path.join(third_party_home, '3rdParty.txt')):
-                print('Setting THIRDPARTY_HOME to {}'.format(third_party_home))
-                return third_party_home
-            last_dir = cur_dir
-            cur_dir = os.path.dirname(cur_dir)
-        error('Cannot locate THIRDPARTY_HOME')
-
-    def __get_package_name_pattern(self):
-        package_name_pattern = self.__get_global_value('PACKAGE_NAME_PATTERN')
-        if os.getenv('PACKAGE_NAME_PATTERN') is not None:
-            package_name_pattern = os.getenv('PACKAGE_NAME_PATTERN')
-        return package_name_pattern
-
-    def __get_build_number(self):
-        build_number = self.__get_global_value('BUILD_NUMBER')
-        if os.getenv('BUILD_NUMBER') is not None:
-            build_number = os.getenv('BUILD_NUMBER')
-        return build_number
-
-    def __get_p4_changelist(self):
-        p4_changelist = self.__get_global_value('P4_CHANGELIST')
-        if os.getenv('P4_CHANGELIST') is not None:
-            p4_changelist = os.getenv('P4_CHANGELIST')
-        return p4_changelist
-
-    def __get_major_version(self):
-        major_version = self.__get_global_value('MAJOR_VERSION')
-        if os.getenv('MAJOR_VERSION') is not None:
-            major_version = os.getenv('MAJOR_VERSION')
-        return major_version
-
-    def __get_minor_version(self):
-        minor_version = self.__get_global_value('MINOR_VERSION')
-        if os.getenv('MINOR_VERSION') is not None:
-            minor_version = os.getenv('MINOR_VERSION')
-        return minor_version
-
-    def __get_scrub_params(self):
-        return self.__get_platform_value('SCRUB_PARAMS')
-
-    def __get_validator_platforms(self):
-        return self.__get_platform_value('VALIDATOR_PLATFORMS')
-
-    def __get_package_targets(self):
-        return self.__get_platform_value('PACKAGE_TARGETS')
-
-    def __get_build_targets(self):
-        return self.__get_platform_value('BUILD_TARGETS')
-
-    def __get_asset_processor_path(self):
-        return self.__get_platform_value('ASSET_PROCESSOR_PATH')
-
-    def __get_asset_game_folders(self):
-        return self.__get_platform_value('ASSET_GAME_FOLDERS')
-
-    def __get_asset_platform(self):
-        return self.__get_platform_value('ASSET_PLATFORM')
-
-    def __get_bootstrap_cfg_game_folder(self):
-        return self.__get_platform_value('BOOTSTRAP_CFG_GAME_FOLDER')
-
-    def __get_run_launcher_unit_test(self):
-        run_launcher_unit_test = os.getenv('RUN_LAUNCHER_UNIT_TEST')
-        if run_launcher_unit_test is None:
-            run_launcher_unit_test = self.__platform_env.get('RUN_LAUNCHER_UNIT_TEST')
-        return self.__evaluate_boolean(run_launcher_unit_test)
-
-    def __get_skip_build(self):
-        skip_build = os.getenv('SKIP_BUILD')
-        if skip_build is None:
-            skip_build = self.__platform_env.get('SKIP_BUILD')
-        return self.__evaluate_boolean(skip_build)
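
A minimal sketch of the '@' platform-reference mechanism that the PackageEnv constructor above resolves; the platform names below are hypothetical (the cmake_package_env.json in this commit defines every platform inline, without references):

platforms = {
    # Full settings live on "consoles".
    "consoles": {"SKIP_BUILD": 1, "PACKAGE_TARGETS": []},
    # "provo" points at "consoles"; PackageEnv follows the chain until it
    # reaches a dict, refusing to revisit a platform it has already seen.
    "provo": "@consoles",
}

target = "provo"
visited = [target]
env = platforms[target]
while isinstance(env, str) and env.startswith('@'):
    referenced = env.lstrip('@')
    if referenced in visited:
        raise ValueError('reference chain cycle starting at {}'.format(referenced))
    visited.append(referenced)
    env = platforms[referenced]
print(env)  # {'SKIP_BUILD': 1, 'PACKAGE_TARGETS': []}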

+ 0 - 81
Tools/build/JenkinsScripts/build/Params.py

@@ -1,81 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-from utils.util import *
-
-
-class Params(object):
-    def __init__(self):
-        # Cache params
-        self.__params = {}
-
-    def get(self, param_name):
-        param_value = self.__params.get(param_name)
-        if param_value is not None:
-            return param_value
-        # Call __get_${param_name} function
-        func = getattr(self, '_{}__get_{}'.format(self.__class__.__name__, param_name.lower()), None)
-        if func is not None:
-            param_value = func()
-            # Replace all ${env} in value
-            if isinstance(param_value, str):
-                param_value = self.__process_string(param_name, param_value)
-            elif isinstance(param_value, list):
-                param_value = self.__process_list(param_name, param_value)
-            elif isinstance(param_value, dict):
-                param_value = self.__process_dict(param_name, param_value)
-            # Cache param
-            self.__params[param_name] = param_value
-            return param_value
-        ly_build_error('method __get_{} is not defined in class {}'.format(param_name.lower(), self.__class__.__name__))
-
-    def set(self, param_name, param_value):
-        self.__params[param_name] = param_value
-
-    def exists(self, param_name):
-        try:
-            self.get(param_name)
-        except LyBuildError:
-            return False
-        return True
-
-    def __process_string(self, param_name, param_value):
-        # Find all param with format ${param}
-        params = re.findall('\${(\w+)}', param_value)
-        # Avoid using the same param name in value, like 'WORKSPACE': '${WORKSPACE} some string'
-        if param_name in params:
-            ly_build_error('The use of same parameter name({}) in value is not allowed'.format(param_name))
-        # Replace ${param} with actual value
-        for param in params:
-            param_value = param_value.replace('${' + param + '}', self.get(param))
-        return param_value
-
-    def __process_list(self, param_name, param_value):
-        processed_list = []
-        for entry in param_value:
-            if isinstance(entry, str):
-                entry = self.__process_string(param_name, entry)
-            elif isinstance(entry, list):
-                entry = self.__process_list(param_name, entry)
-            elif isinstance(entry, dict):
-                entry = self.__process_dict(param_name, entry)
-            processed_list.append(entry)
-        return processed_list
-
-    def __process_dict(self, param_name, param_value):
-        for key in param_value:
-            if isinstance(param_value[key], str):
-                param_value[key] = self.__process_string(param_name, param_value[key])
-            elif isinstance(param_value[key], list):
-                param_value[key] = self.__process_list(param_name, param_value[key])
-            elif isinstance(param_value[key], dict):
-                param_value[key] = self.__process_dict(param_name, param_value[key])
-        return param_value
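
A minimal sketch of how Params is meant to be subclassed, assuming the script directory is on the import path (PackageEnv above uses it the same way): get('NAME') dispatches to the name-mangled __get_name method of the subclass, and ${OTHER} placeholders in the returned value are expanded through recursive get() calls.

from Params import Params

class DemoParams(Params):
    # get('WORKSPACE') resolves to _DemoParams__get_workspace via getattr.
    def __get_workspace(self):
        return '/tmp/jenkins'

    # The ${WORKSPACE} placeholder is expanded by __process_string,
    # which calls self.get('WORKSPACE') and substitutes the result.
    def __get_build_dir(self):
        return '${WORKSPACE}/build/123'

params = DemoParams()
print(params.get('BUILD_DIR'))   # /tmp/jenkins/build/123
print(params.exists('MISSING'))  # False, assuming ly_build_error raises LyBuildError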

+ 0 - 358
Tools/build/JenkinsScripts/build/cmake_package.py

@@ -1,358 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import sys
-import glob_to_regex
-import zipfile
-import timeit
-import stat
-from optparse import OptionParser
-from PackageEnv import PackageEnv
-from ci_build import build
-from utils.util import *
-from utils.lib.glob3 import glob
-
-
-def package(options):
-    package_platform = options.package_platform
-    package_env = PackageEnv(package_platform, options.package_env)
-    engine_root = package_env.get('ENGINE_ROOT')
-
-    # Ask the validator code to tell us which files need to be removed from the package
-    prohibited_file_mask = get_prohibited_file_mask(package_platform, engine_root)
-
-    # Scrub files. This is destructive, but is necessary to allow the current file existence checks to work properly. Better to copy and then build, or to
-    # mask on sync, but this is what we have for now.
-    # No need to run the scrubbing script since all restricted platform code has been moved to the dev/restricted folder.
-    #scrub_files(package_env, prohibited_file_mask)
-
-    # validate files
-    validate_restricted_files(package_platform, package_env)
-
-    # Override values in bootstrap.cfg for PC package
-    override_bootstrap_cfg(package_env)
-
-    # Generate GameTemplates whitelist information for metrics reporting
-    template_whitelist_script = os.path.join(engine_root, 'Tools/build/JenkinsScripts/distribution/Metrics/GameTemplates/buildGameTemplateWhitelist.py')
-    if os.path.exists(template_whitelist_script):
-        if sys.platform == 'win32':
-            python = os.path.join(engine_root, 'Tools', 'Python', 'python.cmd')
-        else:
-            python = os.path.join(engine_root, 'Tools', 'Python', 'python.sh')
-        project_templates_folder = os.path.join(engine_root, 'ProjectTemplates')
-        args = [python, template_whitelist_script, '--projectTemplatesFolder', project_templates_folder]
-        #execute_system_call(args)
-
-    if not package_env.get('SKIP_BUILD'):
-        print('SKIP_BUILD is False, running CMake build...')
-        cmake_build(package_env)
-
-    # TODO Compile Assets
-    #if package_env.exists('ASSET_PROCESSOR_PATH'):
-    #    compile_assets(package_env)
-
-    #create packages
-    create_packages(package_env)
-
-
-def override_bootstrap_cfg(package_env):
-    print('Override values in bootstrap.cfg')
-    engine_root = package_env.get('ENGINE_ROOT')
-    bootstrap_path = os.path.join(engine_root, 'bootstrap.cfg')
-    replace_values = {'project_path':'{}'.format(package_env.get('BOOTSTRAP_CFG_GAME_FOLDER'))}
-    try:
-        with open(bootstrap_path, 'r') as bootstrap_cfg:
-            content = bootstrap_cfg.read()
-    except:
-        error('Cannot read file {}'.format(bootstrap_path))
-    content = content.split('\n')
-    new_content = []
-    for line in content:
-        if not line.startswith('--'):
-            strs = line.split('=')
-            if len(strs):
-                key = strs[0].strip(' ')
-                if key in replace_values:
-                    line = '{}={}'.format(key, replace_values[key])
-        new_content.append(line)
-    try:
-        with open(bootstrap_path, 'w') as out:
-            out.write('\n'.join(new_content))
-    except:
-        error('Cannot write to file {}'.format(bootstrap_path))
-    print('{} updated with value {}'.format(bootstrap_path, replace_values))
-
-
-def get_prohibited_file_mask(package_platform, engine_root):
-    sys.path.append(os.path.join(engine_root, 'Tools', 'build', 'JenkinsScripts', 'distribution', 'scrubbing'))
-    from validator_data_LEGAL_REVIEW_REQUIRED import get_prohibited_platforms_for_package
-
-    # The list of prohibited platforms is controlled by the validator on a per-package basis
-    prohibited_platforms = get_prohibited_platforms_for_package(package_platform)
-    prohibited_platforms.append('all')
-    excludes_list = []
-    for p in prohibited_platforms:
-        platform_excludes = glob_to_regex.generate_excludes_for_platform(engine_root, p)
-        excludes_list.extend(platform_excludes)
-    prohibited_file_mask = re.compile('|'.join(excludes_list), re.IGNORECASE)
-    return prohibited_file_mask
-
-
-def scrub_files(package_env, prohibited_file_mask):
-    print('Perform the Code Scrubbing')
-    engine_root = package_env.get('ENGINE_ROOT')
-
-    success = True
-    for dirname, subFolders, files in os.walk(engine_root):
-        for filename in files:
-            full_path = os.path.join(dirname, filename)
-            if prohibited_file_mask.match(full_path):
-                try:
-                    print('Deleting: {}'.format(full_path))
-                    os.chmod(full_path, stat.S_IWRITE)
-                    os.unlink(full_path)
-                except:
-                    e = sys.exc_info()[0]
-                    sys.stderr.write('Error: could not delete {} ... aborting.\n'.format(full_path))
-                    sys.stderr.write('{}\n'.format(str(e)))
-                    success = False
-    if not success:
-        sys.stderr.write('ERROR: scrub_files failed\n')
-        sys.exit(1)
-
-
-def validate_restricted_files(package, package_env):
-    print('Perform the Code Scrubbing')
-    engine_root = package_env.get('ENGINE_ROOT')
-
-    # Run validator
-    success = True
-    validator_path = os.path.join(engine_root, 'Tools/build/JenkinsScripts/distribution/scrubbing/validator.py')
-    if sys.platform == 'win32':
-        python = os.path.join(engine_root, 'Tools', 'Python', 'python3.cmd')
-    else:
-        python = os.path.join(engine_root, 'Tools', 'Python', 'python3.sh')
-    args = [python, validator_path, '--package', package, engine_root]
-    return_code = safe_execute_system_call(args)
-    if return_code != 0:
-        success = False
-    if not success:
-        error('Restricted file validator failed.')
-    print('Restricted file validator completed successfully.')
-
-
-def cmake_build(package_env):
-    build_targets = package_env.get('BUILD_TARGETS')
-    for build_target in build_targets:
-        build(build_target['BUILD_CONFIG_FILENAME'], build_target['PLATFORM'], build_target['TYPE'])
-
-
-def create_packages(package_env):
-    package_targets = package_env.get('PACKAGE_TARGETS')
-    for package_target in package_targets:
-        print('Creating zipfile for package target {}'.format(package_target))
-        cur_dir = os.path.dirname(os.path.abspath(__file__))
-        filelist = os.path.join(cur_dir, 'package_filelists', '{}.json'.format(package_target['TYPE']))
-        with open(filelist, 'r') as source:
-            data = json.load(source)
-        lyengine = os.path.dirname(package_env.get('ENGINE_ROOT'))
-        print('Calculating filelists...')
-        files = {}
-        # We have to include 3rdParty in Mac/Console packages until LAD is available for those platforms
-        # Remove this when LAD is available for those platforms.
-        if package_target['TYPE'] in ['cmake_consoles', 'consoles']:
-            files.update(get_3rdparty_filelist(package_env, 'common'))
-            files.update(get_3rdparty_filelist(package_env, 'vc141'))
-            files.update(get_3rdparty_filelist(package_env, 'vc142'))
-            files.update(get_3rdparty_filelist(package_env, 'provo'))
-        elif package_target['TYPE'] in ['cmake_atom_pc']:
-            files.update(get_3rdparty_filelist(package_env, 'common'))
-            files.update(get_3rdparty_filelist(package_env, 'vc141'))
-            files.update(get_3rdparty_filelist(package_env, 'vc142'))
-        elif package_target['TYPE'] in ['cmake_all']:
-            if package_env.get_target_platform() == 'mac':
-                files.update(get_3rdparty_filelist(package_env, 'common'))
-                files.update(get_3rdparty_filelist(package_env, 'mac'))
-            elif package_env.get_target_platform() == 'consoles':
-                files.update(get_3rdparty_filelist(package_env, 'common'))
-                files.update(get_3rdparty_filelist(package_env, 'vc141'))
-                files.update(get_3rdparty_filelist(package_env, 'vc142'))
-                files.update(get_3rdparty_filelist(package_env, 'provo'))
-
-        if '@lyengine' in data:
-            if '@engine_root' in data['@lyengine']:
-                engine_root_basename = os.path.basename(package_env.get('ENGINE_ROOT'))
-                data['@lyengine'][engine_root_basename] = data['@lyengine']['@engine_root']
-                data['@lyengine'].pop('@engine_root')
-            files.update(filter_files(data['@lyengine'], lyengine))
-        if '@3rdParty' in data:
-            files.update(filter_files(data['@3rdParty'], package_env.get('THIRDPARTY_HOME')))
-        package_path = os.path.join(lyengine, package_target['PACKAGE_NAME'])
-        print('Creating zipfile at {}'.format(package_path))
-        start = timeit.default_timer()
-
-        with zipfile.ZipFile(package_path, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as myzip:
-            for f in files:
-                if os.path.islink(f):
-                    zipInfo = zipfile.ZipInfo(files[f])
-                    zipInfo.create_system = 3
-                    # long type of hex val of '0xA1ED0000L',
-                    # say, symlink attr magic...
-                    #zipInfo.external_attr = 0xA1ED0000L
-                    zipInfo.external_attr |= 0xA0000000
-                    myzip.writestr(zipInfo, os.readlink(f))
-                else:
-                    myzip.write(f, files[f])
-
-        stop = timeit.default_timer()
-        total_time = int(stop - start)
-        print('{} is created. Total time: {} seconds.'.format(package_path, total_time))
-
-        def get_MD5(file_path):
-            from hashlib import md5
-            chunk_size = 200 * 1024
-            h = md5()
-            with open(file_path, 'rb') as f:
-                while True:
-                    chunk = f.read(chunk_size)
-                    if len(chunk):
-                        h.update(chunk)
-                    else:
-                        break
-            return h.hexdigest()
-
-        md5_file = '{}.MD5'.format(package_path)
-        print('Creating MD5 file at {}'.format(md5_file))
-        start = timeit.default_timer()
-        with open(md5_file, 'w') as output:
-            output.write(get_MD5(package_path))
-        stop = timeit.default_timer()
-        total_time = int(stop - start)
-        print('{} is created. Total time: {} seconds.'.format(md5_file, total_time))
-
-
-def filter_files(data, base, prefix='', support_symlinks=True):
-    includes = {}
-    excludes = set()
-    for key, value in data.items():
-        pattern = os.path.join(base, prefix, key)
-        if not isinstance(value, dict):
-            pattern = os.path.normpath(pattern)
-            result = glob(pattern, recursive=True)
-            files = [x for x in result if os.path.isfile(x) or (support_symlinks and os.path.islink(x))]
-            if value == "#exclude":
-                excludes.update(files)
-            elif value == "#include":
-                for file in files:
-                    includes[file] = os.path.relpath(file, base)
-            else:
-                if value.startswith('#move:'):
-                    for file in files:
-                        file_name = os.path.relpath(file, os.path.join(base, prefix))
-                        dst_dir = value.replace('#move:', '').strip(' ')
-                        includes[file] = os.path.join(dst_dir, file_name)
-                elif value.startswith('#rename:'):
-                    for file in files:
-                        dst_file = value.replace('#rename:', '').strip(' ')
-                        includes[file] = dst_file
-                else:
-                    warn('Unknown directive {} for pattern {}'.format(value, pattern))
-        else:
-            includes.update(filter_files(value, base, os.path.join(prefix, key), support_symlinks))
-
-    for exclude in excludes:
-        try:
-            includes.pop(exclude)
-        except KeyError:
-            pass
-    return includes
-
-
-def get_3rdparty_filelist(package_env, platform, support_symlinks=True):
-    engine_root = package_env.get('ENGINE_ROOT')
-    include_pattern_file = 'include_pattern_file'
-    if os.path.isfile(include_pattern_file):
-        os.remove(include_pattern_file)
-    exclude_pattern_file = 'exclude_pattern_file'
-    if os.path.isfile(exclude_pattern_file):
-        os.remove(exclude_pattern_file)
-    versions_file = 'versions_file'
-    if os.path.isfile(versions_file):
-        os.remove(versions_file)
-
-    # Generate 3rdParty version file
-    ly_dep_version_tool = os.path.join(engine_root, 'Tools/build/JenkinsScripts/distribution/ly_dep_version_tool.py')
-    setup_assistant_config = os.path.join(engine_root, 'SetupAssistantConfig.json')
-    if sys.platform == 'win32':
-        python = os.path.join(engine_root, 'Tools', 'Python', 'python.cmd')
-    else:
-        python = os.path.join(engine_root, 'Tools', 'Python', 'python.sh')
-    args = [python, ly_dep_version_tool, '-o', versions_file, '-s', setup_assistant_config]
-    execute_system_call(args)
-
-    # Generate 3rdParty include pattern and exclude pattern
-    generate_external_3rdparty_file_list = os.path.join(engine_root, 'Tools/build/JenkinsScripts/distribution/ThirdParty/generate_external_3rdparty_file_list.py')
-    package_config = os.path.join(engine_root, 'Tools/build/JenkinsScripts/distribution/ThirdParty/CMakePackageConfig.json')
-    args = [python, generate_external_3rdparty_file_list, '-s', versions_file, '-c', package_config, '-p', platform, '-i', include_pattern_file, '-e', exclude_pattern_file]
-    execute_system_call(args)
-
-    # Calculate filelist using include pattern and exclude pattern
-    thirdparty_home = package_env.get('THIRDPARTY_HOME')
-    filelist = {}
-    with open(include_pattern_file, 'r') as source:
-        include_patterns = source.readlines()
-    for include_pattern in include_patterns:
-        pattern = os.path.join(thirdparty_home, include_pattern.strip('\n'))
-        pattern = os.path.normpath(pattern)
-        result = glob(pattern, recursive=True)
-        files = [x for x in result if os.path.isfile(x) or (support_symlinks and os.path.islink(x))]
-        for file in files:
-            filelist[file] = os.path.join('3rdParty', os.path.relpath(file, thirdparty_home))
-
-    with open(exclude_pattern_file, 'r') as source:
-        exclude_patterns = source.readlines()
-    for exclude_pattern in exclude_patterns:
-        pattern = os.path.join(thirdparty_home, exclude_pattern.strip('\n'))
-        pattern = os.path.normpath(pattern)
-        result = glob(pattern, recursive=True)
-        files = [x for x in result if os.path.isfile(x) or (support_symlinks and os.path.islink(x))]
-        for file in files:
-            try:
-                filelist.pop(file)
-            except KeyError:
-                pass
-    return filelist
-
-
-def parse_args():
-    cur_dir = os.path.dirname(os.path.abspath(__file__))
-    parser = OptionParser()
-    parser.add_option("--release", dest="release", default=False, action='store_true', help="Release build")
-    parser.add_option("--package_platform", dest="package_platform", default='consoles', help="Target platform to package")
-    parser.add_option("--package_env", dest="package_env", default=os.path.join(cur_dir, "cmake_package_env.json"),
-                      help="JSON file that defines package environment variables")
-    parser.add_option("--package_build_configurations_json", dest="package_build_configurations_json",
-                      default=os.path.join(cur_dir, "package_build_configurations.json"),
-                      help="JSON file that defines build parameters")
-    (options, args) = parser.parse_args()
-
-    if options.package_platform is None:
-        error('No package platform specified')
-    return options, args
-
-
-if __name__ == "__main__":
-    (options, args) = parse_args()
-    package(options)
-
-
-
-
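
The package_filelists/<TYPE>.json files consumed by create_packages() are not part of this diff; the fragment below is a hypothetical excerpt, written as a Python dict, illustrating the directives filter_files() understands (keys are glob patterns relative to the enclosing folder):

filelist_fragment = {
    "@lyengine": {                  # rooted at the parent folder of ENGINE_ROOT
        "@engine_root": {           # rewritten to the engine-root basename at load time
            "engine.json": "#include",             # keep at its relative path
            "Cache/**": "#exclude",                # drop even if matched by an include
            "bootstrap.cfg": "#move: config",      # keep, relocated under config/
            "LICENSE.txt": "#rename: NOTICE.txt",  # keep under a new name
        }
    },
    "@3rdParty": {                  # rooted at THIRDPARTY_HOME
        "Lua/**": "#include",       # hypothetical third-party entry
    },
}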

+ 0 - 106
Tools/build/JenkinsScripts/build/cmake_package_env.json

@@ -1,106 +0,0 @@
-{
-    "global":{
-        "ENGINE_ROOT":"",
-        "THIRDPARTY_HOME":"",
-        "PACKAGE_NAME_PATTERN":"lumberyard-${MAJOR_VERSION}.${MINOR_VERSION}-${P4_CHANGELIST}",
-        "BUILD_NUMBER":"0",
-        "P4_CHANGELIST":"0",
-        "MAJOR_VERSION":"0",
-        "MINOR_VERSION":"0",
-        "LAD_PACKAGE_STORAGE_URL":"https://d7qxx8qkrwa8l.cloudfront.net"
-    },
-    "platforms":{
-        "consoles":{
-            "PACKAGE_TARGETS":[
-                {
-                    "TYPE": "cmake_all",
-                    "PACKAGE_NAME": "${PACKAGE_NAME_PATTERN}-consoles-${BUILD_NUMBER}.zip"
-                },
-                {
-                    "TYPE": "symbols",
-                    "PACKAGE_NAME": "${PACKAGE_NAME_PATTERN}-consoles-symbols-${BUILD_NUMBER}.zip"
-                }
-            ],
-            "BOOTSTRAP_CFG_GAME_FOLDER":"AutomatedTesting",
-            "SKIP_BUILD": 1,
-            "BUILD_TARGETS":[
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "Windows",
-                    "TYPE": "profile_vs2017"
-                },
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "Windows",
-                    "TYPE": "profile_vs2019"
-                },
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "Provo",
-                    "TYPE": "profile"
-                }
-            ]
-        },
-        "cmake_atom_pc":{
-            "PACKAGE_TARGETS":[
-                {
-                    "TYPE": "cmake_atom_pc",
-                    "PACKAGE_NAME": "${PACKAGE_NAME_PATTERN}-cmake_atom_pc-${BUILD_NUMBER}.zip"
-                }
-            ],
-            "BOOTSTRAP_CFG_GAME_FOLDER":"AtomSampleViewer;AtomTest",
-            "SKIP_BUILD": 1,
-            "BUILD_TARGETS":[
-                {
-                    "BUILD_CONFIG_FILENAME": "package_build_config.json",
-                    "PLATFORM": "Windows",
-                    "TYPE": "profile_vs2017_atom"
-                },
-                {
-                    "BUILD_CONFIG_FILENAME": "package_build_config.json",
-                    "PLATFORM": "Windows",
-                    "TYPE": "profile_vs2019_atom"
-                }
-            ]
-        },
-        "mac":{
-            "PACKAGE_TARGETS":[
-                {
-                    "TYPE": "cmake_all",
-                    "PACKAGE_NAME": "${PACKAGE_NAME_PATTERN}-cmake_mac_all-${BUILD_NUMBER}.zip"
-                }
-            ],
-            "BOOTSTRAP_CFG_GAME_FOLDER":"AutomatedTesting",
-            "SKIP_BUILD": 1,
-            "BUILD_TARGETS":[
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "Mac",
-                    "TYPE": "profile"
-                },
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "iOS",
-                    "TYPE": "profile"
-                }
-            ]
-        },
-        "linux":{
-            "PACKAGE_TARGETS":[
-                {
-                    "TYPE": "cmake_all",
-                    "PACKAGE_NAME": "${PACKAGE_NAME_PATTERN}-cmake_linux_all-${BUILD_NUMBER}.zip"
-                }
-            ],
-            "BOOTSTRAP_CFG_GAME_FOLDER":"AutomatedTesting",
-            "SKIP_BUILD": 1,
-            "BUILD_TARGETS":[
-                {
-                    "BUILD_CONFIG_FILENAME": "build_config.json",
-                    "PLATFORM": "Linux",
-                    "TYPE": "profile"
-                }
-            ]
-        }
-    }
-}

+ 0 - 80
Tools/build/JenkinsScripts/build/download_latest_package_from_bucket.py

@@ -1,80 +0,0 @@
-"""
-All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-its licensors.
-
-For complete copyright and license terms please see the LICENSE at the root of this
-distribution (the "License"). All use of this software is governed by the License,
-or, if provided, by the license below or the license accompanying this file. Do not
-remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-
-Downloads the latest package from an S3 bucket and unzips it to a desired location.
-"""
-import argparse
-import boto3
-import os
-import re
-import zipfile
-
-
-def download_and_unzip_package(bucket_name, package_regex, build_number_regex, folder_path, destination_path):
-    """
-    Downloads a given package from an S3 bucket and unzips it.
-    :param bucket_name: S3 bucket
-    :param package_regex: Regex to find the desired package
-    :param build_number_regex: Regex to find the build number from the package name
-    :param folder_path: Folder path to the package
-    :param destination_path: Where to download the package to
-    :return:
-    """
-    # Make sure the directory exists
-    if not os.path.isdir(destination_path):
-        os.makedirs(destination_path)
-
-    # Sorting function for latest package
-    def get_build_number(file_name_to_parse):
-        return re.search(build_number_regex, file_name_to_parse).group(0)[:-4]  # [:-4] removes the .zip extension
-
-    s3 = boto3.resource('s3')
-    bucket = s3.Bucket(bucket_name)
-    largest_build_number = -1
-    latest_file = 'No file found!'
-    # Find the latest package
-    print('Reading files from bucket...')
-    for bucket_file in bucket.objects.filter(Prefix=folder_path):
-        file_name = bucket_file.key
-        if re.search(package_regex, file_name) and get_build_number(file_name) > largest_build_number:
-            largest_build_number = get_build_number(file_name)
-            latest_file = file_name
-
-    package_name = latest_file.split('/')[-1]
-
-    # Download the package
-    print('Downloading package: {0} from bucket {1} to {2}'.format(latest_file, bucket_name, destination_path))
-    s3.Bucket(bucket_name).download_file(latest_file, os.path.join(destination_path, package_name))
-
-    # Unzip the package
-    with zipfile.ZipFile(os.path.join(destination_path, package_name), 'r') as zip_ref:
-        print('Unzipping package: {0} to {1}'.format(package_name, destination_path))
-        zip_ref.extractall(destination_path)
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-b', '--bucket_name', required=True, help='Bucket that holds the package.')
-    parser.add_argument('-p', '--package_regex', required=True,
-                        help='Regex to identify a package.  Such as: lumberyard-0.0-[\d]{6,7}-pc-[\d]{4}.zip\s to find '
-                             'the main pc package.')
-    parser.add_argument('-n', '--build_number_regex', required=True,
-                        help='Regex to identify the build number. Such as [\d]{4,5}.zip$ to find the build number from '
-                             'the name of the main pc package')
-    parser.add_argument('-d', '--destination_path', required=True, help='Destination for the contents of the packages.')
-    parser.add_argument('-f', '--folder_path', help='Folder that contains the package, must include /.')
-
-    args = parser.parse_args()
-    download_and_unzip_package(args.bucket_name, args.package_regex, args.build_number_regex, args.folder_path,
-                               args.destination_path)
-
-
-if __name__ == "__main__":
-    main()
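
A self-contained sketch of how the two regexes above cooperate to pick the newest package; the object keys are hypothetical, and unlike the script (which compares the build numbers as strings) the sketch converts them to int for a numeric comparison:

import re

names = [
    'nightly/lumberyard-0.0-1234567-pc-1001.zip',
    'nightly/lumberyard-0.0-1234567-pc-1002.zip',
]
package_regex = r'lumberyard-0\.0-[\d]{6,7}-pc-[\d]{4}\.zip'
build_number_regex = r'[\d]{4,5}\.zip$'

def build_number(name):
    # Same idea as get_build_number() above: the trailing digits before ".zip".
    return int(re.search(build_number_regex, name).group(0)[:-4])

latest = max((n for n in names if re.search(package_regex, n)), key=build_number)
print(latest)  # nightly/lumberyard-0.0-1234567-pc-1002.zip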

+ 0 - 55
Tools/build/JenkinsScripts/build/download_packages.py

@@ -1,55 +0,0 @@
-"""
-All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-its licensors.
-
-For complete copyright and license terms please see the LICENSE at the root of this
-distribution (the "License"). All use of this software is governed by the License,
-or, if provided, by the license below or the license accompanying this file. Do not
-remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-
-Downloads packages and unzips them.
-"""
-import argparse
-import boto3
-import os
-import zipfile
-
-
-def download_and_unzip_packages(bucket_name, package_key, folder_path, destination_path):
-    """
-    Downloads a given package from an S3 bucket and unzips it.
-    :param bucket_name: S3 bucket
-    :param package_key: Key for the package
-    :param folder_path: Folder path to the package
-    :param destination_path: Where to download the package to
-    :return:
-    """
-    # Make sure the directory exists
-    if not os.path.isdir(destination_path):
-        os.makedirs(destination_path)
-
-    # Download the package
-    s3 = boto3.resource('s3')
-    print('Downloading package: {0} from bucket {1} to {2}'.format(package_key, bucket_name, destination_path))
-    s3.Bucket(bucket_name).download_file(folder_path + package_key, os.path.join(destination_path, package_key))
-
-    # Unzip the package
-    with zipfile.ZipFile(os.path.join(destination_path, package_key), 'r') as zip_ref:
-        print('Unzipping package: {0} to {1}'.format(package_key, destination_path))
-        zip_ref.extractall(destination_path)
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-b', '--bucket_name', required=True, help='Bucket that holds the package.')
-    parser.add_argument('-p', '--package_key', required=True, help='Desired package\'s key.')
-    parser.add_argument('-d', '--destination_path', required=True, help='Destination for the contents of the packages.')
-    parser.add_argument('-f', '--folder_path', help='Folder that contains the package, must include /.')
-
-    args = parser.parse_args()
-    download_and_unzip_packages(args.bucket_name, args.package_key, args.folder_path, args.destination_path)
-
-
-if __name__ == "__main__":
-    main()

+ 0 - 57
Tools/build/JenkinsScripts/build/jenkins_scm_metrics.py

@@ -1,57 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-'''
-All this script does is read a file that was written previously, marking the start of the Perforce sync,
-and then get the current time to find out how long we spent in Perforce.
-'''
-
-import time
-
-from utils.util import *
-
-
-def write_metrics():
-    enable_build_metrics = os.environ.get('ENABLE_BUILD_METRICS')
-    metrics_namespace = os.environ.get('METRICS_NAMESPACE')
-    if enable_build_metrics == 'true':
-        scm_end = int(time.time())
-        workspace = os.environ.get('WORKSPACE')
-        metrics_file_name = 'scm_start.txt'
-        if workspace is None:
-            safe_exit_with_error('{} must be run in Jenkins job.'.format(os.path.basename(__file__)))
-        try:
-            with open(os.path.join(workspace, metrics_file_name), 'r') as f:
-                scm_start = int(f.readline())
-        except:
-            safe_exit_with_error('Failed to read from {}'.format(metrics_file_name))
-
-        scm_total = scm_end - scm_start
-
-        script_path = os.path.join(workspace, 'dev/Tools/build/waf-1.7.13/build_metrics/write_build_metric.py')
-
-        build_tag = os.environ.get('BUILD_TAG')
-        p4_changelist = os.environ.get('P4_CHANGELIST')
-
-        if build_tag is not None and p4_changelist is not None:
-            os.environ['BUILD_ID'] = '{0}.{1}'.format(build_tag, p4_changelist)
-
-        cwd = os.getcwd()
-        os.chdir(os.path.join(workspace, 'dev'))
-        cmd = 'python {} SCMTime {} Seconds --enable-build-metrics {} --metrics-namespace {} --project-spec None'.format(script_path, scm_total, True, metrics_namespace)
-        # metrics call shouldn't fail the job
-        safe_execute_system_call(cmd, shell=True)
-        os.chdir(cwd)
-
-
-if __name__ == "__main__":
-    write_metrics()

+ 0 - 12
Tools/build/JenkinsScripts/build/utils/__init__.py

@@ -1,12 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-

+ 0 - 51
Tools/build/JenkinsScripts/build/utils/copy_LAD_3rdParty.xml

@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- Copyright (c) Amazon.com, Inc.
--->
-<project name="CopyLadThirdParty" default="CopyLadThirdParty" basedir="../../../">
-    <fail message="Error: 3rdParty.home is not set">
-        <condition>
-            <not>
-                <isset property="3rdParty.home"/>
-            </not>
-        </condition>
-    </fail>
-    <fail message="Error: 3rdParty.destination is not set">
-        <condition>
-            <not>
-                <isset property="3rdParty.destination"/>
-            </not>
-        </condition>
-    </fail>
-    <fail message="Error: platform is not set">
-        <condition>
-            <not>
-                <isset property="platform"/>
-            </not>
-        </condition>
-    </fail>
-    
-    <include file="../../distribution/package/3rdParty.xml" optional="false" />
-    <target name="CopyLadThirdParty">
-        <ThirdPartySDKsGeneratePlatformPatternSet platform="${platform}"/>
-        <copy todir="${3rdParty.destination}">
-            <fileset dir="${3rdParty.home}">
-                <patternset refid="include-3rdparty-patternset-common" />
-            </fileset>
-        </copy>
-        <copy todir="${3rdParty.destination}">
-            <fileset dir="${3rdParty.home}">
-                <patternset refid="include-3rdparty-patternset-${platform}" />
-            </fileset>
-        </copy>
-        <copy todir="${3rdParty.destination}">
-            <fileset dir="${3rdParty.home}">
-                <patternset id="include-3rdparty-patternset-non-shipped">
-                     <include name="FbxSdk/**"/>
-                </patternset>
-            </fileset>
-        </copy>
-        
-        
-    </target>
-</project>

+ 0 - 102
Tools/build/JenkinsScripts/build/utils/download_from_s3.py

@@ -1,102 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-'''
-Usage:
-Use EC2 role to download files to %WORKSPACE% folder from bucket bucket_name:
-python download_from_s3.py --base_dir %WORKSPACE% --files_to_download "file1,file2" --bucket bucket_name
-
-Use profile to download files to %WORKSPACE% folder from bucket bucket_name:
-python download_from_s3.py --base_dir %WORKSPACE% --profile profile --files_to_download "file1,file2" --bucket bucket_name
-'''
-
-
-import os
-import json
-import boto3
-from optparse import OptionParser
-from util import error
-
-
-def parse_args():
-    parser = OptionParser()
-    parser.add_option("--base_dir", dest="base_dir", default=os.getcwd(), help="Base directory to download files, If not given, then current directory is used.")
-    parser.add_option("--files_to_download", dest="files_to_download", default=None, help="Files to download, separated by comma.")
-    parser.add_option("--profile", dest="profile", default=None, help="The name of a profile to use. If not given, then the default profile is used.")
-    parser.add_option("--bucket", dest="bucket", default=None, help="S3 bucket the files are downloaded from.")
-    parser.add_option("--key_prefix", dest="key_prefix", default='', help="Object key prefix.")
-    '''
-    ExtraArgs used to call s3.download_file(), should be in json format. extra_args key must be one of: ACL, CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentType, Expires,
-    GrantFullControl, GrantRead, GrantReadACP, GrantWriteACP, Metadata, RequestPayer, ServerSideEncryption, StorageClass,
-    SSECustomerAlgorithm, SSECustomerKey, SSECustomerKeyMD5, SSEKMSKeyId, WebsiteRedirectLocation
-    '''
-    parser.add_option("--extra_args", dest="extra_args", default=None, help="Additional parameters used to download file.")
-    parser.add_option("--max_retry", dest="max_retry", default=1, help="Maximum retry times to download file.")
-    (options, args) = parser.parse_args()
-    if not os.path.isdir(options.base_dir):
-        error('{} is not a valid directory'.format(options.base_dir))
-    if not options.files_to_download:
-        error('Use --files_to_download to specify files to download, separated by comma.')
-    if not options.bucket:
-        error('Use --bucket to specify bucket that the files are downloaded from.')
-    return options
-
-
-def get_client(service_name, profile_name=None):
-    session = boto3.session.Session(profile_name=profile_name)
-    client = session.client(service_name)
-    return client
-
-
-def s3_download_file(client, base_dir, file, bucket, key_prefix=None, extra_args=None, max_retry=1):
-    print 'Downloading file {} from bucket {}.'.format(file, bucket)
-    key = file if key_prefix is None else '{}/{}'.format(key_prefix, file)
-    for x in range(max_retry):
-        try:
-            client.download_file(
-                bucket, key, os.path.join(base_dir, file),
-                ExtraArgs=extra_args
-            )
-            print 'Download succeeded'
-            return True
-        except:
-            print 'Retrying download...'
-    print 'Download failed'
-    return False
-
-
-def download_files(base_dir, files_to_download, bucket, key_prefix=None, profile=None, extra_args=None, max_retry=1):
-    client = get_client('s3', profile)
-    files_to_download = files_to_download.split(',')
-    extra_args = json.loads(extra_args) if extra_args else None
-
-    print 'Downloading {} files from bucket {}.'.format(len(files_to_download), bucket)
-    failure = []
-    success = []
-    for file in files_to_download:
-        if not s3_download_file(client, base_dir, file, bucket, key_prefix, extra_args, max_retry):
-            failure.append(file)
-        else:
-            success.append(file)
-    print '{} files are downloaded successfully:'.format(len(success))
-    print '\n'.join(success)
-    print '{} files failed to download:'.format(len(failure))
-    print '\n'.join(failure)
-    # Return False if any file failed to download
-    if len(failure) > 0:
-        return False
-    return True
-
-
-if __name__ == "__main__":
-    options = parse_args()
-    download_files(options.base_dir, options.files_to_download, options.bucket, options.key_prefix, options.profile, options.extra_args, options.max_retry)
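
Note: the --extra_args value above is parsed with json.loads() and passed straight through as boto3's ExtraArgs. A minimal sketch of the equivalent direct call (bucket name, key, and the RequestPayer value are placeholders, not taken from a real job):

    import boto3
    s3 = boto3.session.Session(profile_name='profile').client('s3')
    # ExtraArgs mirrors what --extra_args '{"RequestPayer": "requester"}' would produce
    s3.download_file('bucket_name', 'key_prefix/file1', 'file1',
                     ExtraArgs={'RequestPayer': 'requester'})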

+ 0 - 129
Tools/build/JenkinsScripts/build/utils/email_to_lionbridge.py

@@ -1,129 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-"""
-This script will be used in https://jenkins.agscollab.com/view/%7ESandbox/job/PACKAGE_COPY_S3/
-PACKAGE_COPY_S3 is a downstream job of nightly packaging job, it copies the nightly packages from Infra S3 bucket to Lionbridge S3 bucket based on the INCLUDE_FILTER passed from packaging job
-"""
-import os
-import re
-import json
-import requests
-from requests.auth import HTTPBasicAuth
-import boto3
-from util import error, warn
-
-
-# Write EMAIL_TEMPLATE to a file and inject it into the email sent to Lionbridge
-EMAIL_TEMPLATE = '''Packages are uploaded to S3 bucket {}
-Package List:
-{}
-
-
-Changelists:
-{}
-'''
-
-
-def get_jenkins_env(key):
-    try:
-        return os.environ[key]
-    except KeyError:
-        print 'Error: Jenkins parameter {} is not set.'.format(key)
-    return None
-
-
-JENKINS_USERNAME = get_jenkins_env('JENKINS_USERNAME')
-JENKINS_API_TOKEN = get_jenkins_env('JENKINS_API_TOKEN')
-JENKINS_URL = get_jenkins_env('JENKINS_URL')
-WORKSPACE = get_jenkins_env('WORKSPACE')
-S3_TARGET = get_jenkins_env('S3_TARGET')
-INCLUDE_FILTER = get_jenkins_env('INCLUDE_FILTER')
-EMAIL_TEMPLATE_FILE = get_jenkins_env('EMAIL_TEMPLATE_FILE')
-if None in [JENKINS_USERNAME, JENKINS_API_TOKEN, JENKINS_URL, WORKSPACE, S3_TARGET, INCLUDE_FILTER, EMAIL_TEMPLATE_FILE]:
-    error('Please make sure all Jenkins parameters are set correctly.')
-
-
-def parse_include_filter(include_filter):
-    try:
-        res = re.search('^(\w*)-*lumberyard-(\d+)\.(\d+)-(\d+)-(\w+).*\*(\d+)\.\*', include_filter)
-        branch = res.group(1)
-        major_version = int(res.group(2))
-        minor_version = int(res.group(3))
-        changelist_number = res.group(4)
-        platform = res.group(5)
-        build_number = res.group(6)
-        return branch, major_version, minor_version, changelist_number, platform, build_number
-    except (AttributeError, IndexError):
-        error('Unable to parse INCLUDE_FILTER, please make sure the INCLUDE_FILTER is set correctly')
-
-
-# Get the changelists that trigger the build
-def get_changelists(job_name, build_number):
-    changelists = []
-    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
-    try:
-        res = requests.get('{}/job/{}/{}/api/json'.format(JENKINS_URL, job_name, build_number),
-                           auth=HTTPBasicAuth(JENKINS_USERNAME, JENKINS_API_TOKEN), headers=headers, verify=False)
-        res = json.loads(res.content)
-        changelists = res.get('changeSet').get('items')
-        return changelists
-    except:
-        warn('Error: Failed to get changes from build {} in job {}'.format(build_number, job_name))
-        return []
-
-
-def get_packaging_job_name(branch, major_version, minor_version, platform):
-    if branch == '':
-        branch = 'ML' if major_version + minor_version == 0 else 'v{}_{}'.format(major_version, minor_version)
-    job_name = 'PKG_{}_{}'.format(branch, platform.capitalize())
-    return job_name
-
-
-# Get package names by looking up S3 bucket
-def get_package_names(branch, major_version, minor_version, include_filter, build_number):
-    package_names = []
-    prefix = include_filter[:include_filter.find('*')]
-    pattern = '.*{}.*{}..*'.format(prefix, build_number)
-    if branch == '':
-        bucket_name = 'ly-packages-mainline' if major_version + minor_version == 0 else 'ly-packages-release-candidate'
-        folder = 'lumberyard-packages'
-    else:
-        bucket_name = 'ly-packages-feature-branches'
-        folder = 'lumberyard-packages/{}'.format(branch)
-    s3 = boto3.resource('s3')
-    bucket = s3.Bucket(bucket_name)
-    for obj in bucket.objects.filter(Prefix='{}/{}'.format(folder, prefix)):
-        package_name = obj.key
-        if re.match(pattern, package_name):
-            package_names.append(package_name.replace('{}/'.format(folder), ''))
-    return package_names
-
-
-if __name__ == "__main__":
-    branch, major_version, minor_version, changelist_number, platform, build_number = parse_include_filter(INCLUDE_FILTER)
-    packaging_job_name = get_packaging_job_name(branch, major_version, minor_version, platform)
-    changelists = get_changelists(packaging_job_name, build_number)
-    package_names = get_package_names(branch, major_version, minor_version, INCLUDE_FILTER, build_number)
-    with open(os.path.join(WORKSPACE, EMAIL_TEMPLATE_FILE), 'w+') as output:
-        if len(package_names) > 0:
-            package_list_str = '\n'.join(package_names)
-            changelists_str = ''
-            for item in changelists:
-                changelists_str += '---------------------------------------------------------------------------------------------\n'
-                try:
-                    changelists_str += 'CL{} by {} on {}\n{}\n'.format(item['changeNumber'], item['author']['fullName'], item['changeTime'], item['msg'].encode('utf-8', 'ignore'))
-                except KeyError:
-                    error('Internal error, check the output of Jenkins API.')
-            output.write(EMAIL_TEMPLATE.format(S3_TARGET, package_list_str, changelists_str))
-
-
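
Note: parse_include_filter() above expects INCLUDE_FILTER to look like the packaging job's artifact filter. A hedged illustration of what the regex extracts (the filter strings are made up for this example, not taken from a real job):

    # 'lumberyard-1.28-1234567-pc-*45.*'
    #   branch=''          major=1  minor=28  changelist='1234567'  platform='pc'   build_number='45'
    # 'mybranch-lumberyard-0.0-1234567-mac-*12.*'
    #   branch='mybranch'  major=0  minor=0   changelist='1234567'  platform='mac'  build_number='12'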

+ 0 - 662
Tools/build/JenkinsScripts/build/utils/incremental_build_util.py

@@ -1,662 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import ast
-import boto3
-import datetime
-import urllib2
-import os
-import time
-import subprocess
-import sys
-import tempfile
-import traceback
-import shutil
-import platform
-import stat
-
-IAM_ROLE_NAME = 'ec2-jenkins-node'
-
-if os.name == 'nt':
-    import ctypes
-    import win32api
-    import collections
-    import locale
-
-    locale.setlocale(locale.LC_ALL, '')  # set locale to default to get thousands separators
-
-    PULARGE_INTEGER = ctypes.POINTER(ctypes.c_ulonglong)  # Pointer to large unsigned integer
-    kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
-    kernel32.GetDiskFreeSpaceExW.argtypes = (ctypes.c_wchar_p,) + (PULARGE_INTEGER,) * 3
-
-    class UsageTuple(collections.namedtuple('UsageTuple', 'total, used, free')):
-        def __str__(self):
-            # Add thousands separator to numbers displayed
-            return self.__class__.__name__ + '(total={:n}, used={:n}, free={:n})'.format(*self)
-
-    def is_dir_symlink(path):
-        FILE_ATTRIBUTE_REPARSE_POINT = 0x0400
-        return os.path.isdir(path) and (ctypes.windll.kernel32.GetFileAttributesW(unicode(path)) & FILE_ATTRIBUTE_REPARSE_POINT)
-
-    def get_free_space_mb(path):
-        if sys.version_info < (3,):  # Python 2?
-            saved_conversion_mode = ctypes.set_conversion_mode('mbcs', 'strict')
-        else:
-            try:
-                path = os.fsdecode(path)  # allows str or bytes (or os.PathLike in Python 3.6+)
-            except AttributeError:  # fsdecode() not added until Python 3.2
-                pass
-
-        # Define variables to receive results when passed as "by reference" arguments
-        _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
-
-        success = kernel32.GetDiskFreeSpaceExW(
-            path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
-        if not success:
-            error_code = ctypes.get_last_error()
-
-        if sys.version_info < (3,):  # Python 2?
-            ctypes.set_conversion_mode(*saved_conversion_mode)  # restore conversion mode
-
-        if not success:
-            windows_error_message = ctypes.FormatError(error_code)
-            raise ctypes.WinError(error_code, '{} {!r}'.format(windows_error_message, path))
-
-        used = total.value - free.value
-
-        return free.value / 1024 / 1024  # for now
-else:
-    def get_free_space_mb(dirname):
-        st = os.statvfs(dirname)
-        return st.f_bavail * st.f_frsize / 1024 / 1024
-
-
-def get_iam_role_credentials(role_name):
-    security_metadata = None
-    try:
-        response = urllib2.urlopen(
-            'http://169.254.169.254/latest/meta-data/iam/security-credentials/{0}'.format(role_name)).read()
-        security_metadata = ast.literal_eval(response)
-    except:
-        print 'Unable to get iam role credentials'
-        print traceback.print_exc()
-
-    return security_metadata
-
-
-def create_volume(ec2_client, availability_zone, project_name, volume_counter):
-    response = ec2_client.create_volume(
-        AvailabilityZone=availability_zone,
-        Size=300,
-        VolumeType='gp2',
-        TagSpecifications=
-        [
-            {
-                'ResourceType': 'volume',
-                'Tags':
-                    [
-                        {
-                            'Key': 'Name',
-                            'Value': '{0}'.format(project_name)
-                        },
-                        {
-                            'Key': 'VolumeCounter',
-                            'Value': str(volume_counter)
-                        }
-                    ]
-            }
-        ]
-    )
-    print response
-    volume_id = response['VolumeId']
-
-    # give some time for the creation call to complete
-    time.sleep(1)
-
-    response = ec2_client.describe_volumes(VolumeIds=[volume_id, ])
-    while (response['Volumes'][0]['State'] != 'available'):
-        time.sleep(1)
-        response = ec2_client.describe_volumes(VolumeIds=[volume_id, ])
-
-    return volume_id
-
-
-def delete_volume(ec2_client, volume_id):
-    response = ec2_client.delete_volume(VolumeId=volume_id)
-
-
-def unmount_build_volume_from_node():
-    if os.name == 'nt':
-        f = tempfile.NamedTemporaryFile(delete=False)
-        f.write("""
-          select disk 1
-          offline disk
-          """)
-        f.close()
-
-        subprocess.call('diskpart /s %s' % f.name)
-
-        os.unlink(f.name)
-    else:
-        subprocess.call(['umount', '/data'])
-
-
-def detach_volume_from_node(ec2_client, volume, instance_id, force):
-    ec2_client.delete_tags(Resources=[volume.volume_id],
-                           Tags=[
-                               {
-                                   'Key': 'jenkins_attachment_node',
-                               },
-                               {
-                                   'Key': 'jenkins_attachment_time',
-                               },
-                               {
-                                   'Key': 'jenkins_attachment_build'
-                               }
-                           ])
-
-    incremental_keys = ['jenkins_attachment_node', 'jenkins_attachment_time', 'jenkins_attachment_build']
-
-    volume.load()
-
-    print 'searching for keys added during incremental build: {}'.format(incremental_keys)
-
-    while len(incremental_keys):
-        tag_keys = set()
-        for tag in volume.tags:
-            tag_keys.add(tag['Key'])
-
-        print 'found tags on instance {}'.format(tag_keys)
-
-        for incremental_key in list(incremental_keys):
-            if incremental_key not in tag_keys:
-                print 'incremental key {} has been successfully removed'.format(incremental_key)
-                incremental_keys.remove(incremental_key)
-
-        volume.load()
-
-    volume.detach_from_instance(Device='xvdf',
-                                Force=force,
-                                InstanceId=instance_id,
-                                VolumeId=volume.volume_id)
-
-    while (len(volume.attachments) and volume.attachments[0]['State'] != 'detached'):
-        time.sleep(1)
-        volume.load()
-
-    volume.load()
-
-    if (len(volume.attachments)):
-        print 'Volume still has attachments'
-        for attachment in volume.attachments:
-            print 'Volume {} {} to instance {}'.format(attachment['VolumeId'], attachment['State'], attachment['InstanceId'])
-
-
-def cleanup_node(workspace_name):
-    if os.name == 'nt':
-        jenkins_base = os.getenv('BASE')
-        dev_path = '{}\\workspace\\{}\\dev'.format(jenkins_base, workspace_name)
-    else:
-        dev_path = '/home/lybuilder/ly/workspace/{}/dev'.format(workspace_name)
-
-    if os.path.exists(dev_path):
-        if os.name == 'nt':
-            if is_dir_symlink(dev_path):
-                print "removing symlink path {}".format(dev_path)
-                os.rmdir(dev_path)
-            else:
-                # this shouldn't happen, but is here for sanity's sake; if we sync to the build node erroneously, we want to clean it up if we can
-                print "given symlink path was not a symlink, deleting the full tree to prevent future build failures"
-                retcode = os.system('rmdir /S /Q {}'.format(dev_path))
-                if retcode != 0:
-                    raise Exception("rmdir failed to remove directory: {}".format(dev_path))
-                return True
-        else:
-            if os.path.islink(dev_path):
-                print "unlinking symlink path {}".format(dev_path)
-                os.unlink(dev_path)
-            else:
-                print "given symlink path was not a symlink, deleting the full tree to prevent future build failures"
-                os.chmod(dev_path, stat.S_IWUSR)
-                shutil.rmtree(dev_path, ignore_errors=True)
-                return True
-        # check to make sure the directory was actually deleted
-        if os.path.exists(dev_path):
-            raise Exception("Failed to remove directory: {}".format(dev_path))
-    return False
-
-
-def setup_volume(workspace_name, created):
-    if os.name == 'nt':
-        f = tempfile.NamedTemporaryFile(delete=False)
-        f.write("""
-      select disk 1
-      online disk 
-      attribute disk clear readonly
-      """) # assume disk # for now
-
-        if created:
-            f.write("""create partition primary
-          select partition 1
-          format quick fs=ntfs
-          assign
-          active
-          """)
-
-        f.close()
-
-        subprocess.call(['diskpart', '/s', f.name])
-
-        time.sleep(2)
-
-        drives_after = win32api.GetLogicalDriveStrings()
-        drives_after = drives_after.split('\000')[:-1]
-
-        print drives_after
-
-        #drive_letter = next(item for item in drives_after if item not in drives_before)
-        drive_letter = 'D:\\'
-
-        os.unlink(f.name)
-
-        time.sleep(1)
-
-        dev_path = '{}ly\\workspace\\{}\\dev'.format(drive_letter, workspace_name)
-
-    else:
-        subprocess.call(['file', '-s', '/dev/xvdf'])
-        if created:
-            subprocess.call(['mkfs', '-t', 'ext4', '/dev/xvdf'])
-        subprocess.call(['mount', '/dev/xvdf', '/data'])
-
-        dev_path = '/data/ly/workspace/{}/dev'.format(workspace_name)
-
-    return dev_path
-
-
-def attach_volume_to_instance(volume, volume_id, instance_id, instance_name):
-    volume.attach_to_instance(Device='xvdf',
-                              InstanceId=instance_id,
-                              VolumeId=volume_id)
-    # give a little bit of time for the aws call to process
-    time.sleep(2)
-
-    # reload the volume just in case
-    volume.load()
-
-    while (len(volume.attachments) and volume.attachments[0]['State'] != 'attached'):
-        time.sleep(1)
-        volume.load()
-
-    volume.create_tags(Tags=[
-        {
-            'Key':'last_attachment_time',
-            'Value':datetime.datetime.utcnow().isoformat()
-        }
-    ])
-
-    volume.create_tags(Tags=[
-        {
-            'Key':'jenkins_attachment_node',
-            'Value':instance_name,
-        },
-        {
-            'Key':'jenkins_attachment_time',
-            'Value':datetime.datetime.utcnow().isoformat()
-        },
-        {
-            'Key':'jenkins_attachment_build',
-            'Value':os.getenv('BUILD_TAG')
-        }
-    ])
-
-
-def prepare_incremental_build(workspace_name):
-    job_name = os.getenv('JOB_NAME', None)
-    clean_build = os.getenv('CLEAN_BUILD', 'false').lower() == 'true'
-
-    android_home = os.getenv('ANDROID_HOME', None)
-    if android_home is not None:
-        path = os.getenv('PATH').split(';')
-        print path
-
-        java_home = os.getenv('JAVA_HOME', None)
-        print java_home
-
-        os.environ['LY_NDK_PATH'] = 'C:\\ly\\3rdParty\\android-ndk\\r12'
-        print os.getenv('LY_NDK_PATH')
-
-        path = [x for x in path if not (java_home in x or android_home in x)]
-        print path
-
-        path.append(java_home)
-        path.append(android_home)
-        path.append(os.getenv('LY_NDK_PATH'))
-        print path
-
-        os.environ['PATH'] = ';'.join(path)
-
-    credentials = get_iam_role_credentials(IAM_ROLE_NAME)
-
-    aws_access_key_id = None
-    aws_secret_access_key = None
-    aws_session_token = None
-
-    if credentials is not None:
-        keys = ['AccessKeyId', 'SecretAccessKey', 'Token']
-        for key in keys:
-            if key not in credentials:
-                print 'Unable to find {0} in get_iam_role_credentials response {1}'.format(key, credentials)
-                return
-
-        aws_access_key_id = credentials['AccessKeyId']
-        aws_secret_access_key = credentials['SecretAccessKey']
-        aws_session_token = credentials['Token']
-
-    session = boto3.session.Session()
-    region = session.region_name
-
-    try:
-        instance_id = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read()
-    except:
-        # this likely means we're not an ec2 instance
-        raise Exception('No EC2 metadata!')
-
-
-    try:
-        availability_zone = urllib2.urlopen(
-            'http://169.254.169.254/latest/meta-data/placement/availability-zone').read()
-    except:
-        # also likely means we're not an ec2 instance
-        raise Exception('No EC2 metadata')
-
-
-    if region is None:
-        region = 'us-west-2'
-
-    client = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
-                          aws_secret_access_key=aws_secret_access_key,
-                          aws_session_token=aws_session_token)
-
-    project_name = job_name
-
-    ec2_resource = boto3.resource('ec2', region_name=region)
-    instance = ec2_resource.Instance(instance_id)
-
-    volume_counter = 0
-
-    for volume in instance.volumes.all():
-        for attachment in volume.attachments:
-            print 'attachment device: {}'.format(attachment['Device'])
-            if 'xvdf' in attachment['Device'] and attachment['State'] != 'detached':
-                print 'A device is already attached to xvdf. This likely means a previous build failed to detach its ' \
-                      'build volume. This volume is considered orphaned and will be force detached from this instance.'
-                unmount_build_volume_from_node()
-                detach_volume_from_node(client, volume, instance_id, True)
-
-    if cleanup_node(workspace_name):
-        clean_build = True
-
-    response = client.describe_volumes(Filters=
-    [
-        {
-            'Name': 'tag:Name',
-            'Values':
-                [
-                    '{0}'.format(project_name)
-                ]
-        }
-    ])
-
-    created = False
-
-    if 'Volumes' in response and not len(response['Volumes']):
-        print 'Volume for {0} doesn\'t exist creating it...'.format(project_name)
-        # volume doesn't exist, create it
-        volume_id = create_volume(client, availability_zone, project_name, volume_counter)
-        created = True
-
-    elif len(response['Volumes']) > 1:
-        latest_volume = None
-        max_counter = 0
-
-        for volume in response['Volumes']:
-            for tag in volume['Tags']:
-                if tag['Key'] == 'VolumeCounter':
-                    if int(tag['Value']) > max_counter:
-                        max_counter = int(tag['Value'])
-                        latest_volume = volume
-
-        volume_counter = max_counter
-        volume_id = latest_volume['VolumeId']
-    else:
-        volume = response['Volumes'][0]
-        if len(volume['Attachments']):
-            # this is bad we shouldn't be attached, we should have detached at the end of a build
-            attachment = volume['Attachments'][0]
-            print ('Volume already has attachment {}'.format(attachment))
-            print 'Creating new volume for {} and orphaning previous volume'.format(project_name)
-
-            for tag in volume['Tags']:
-                if tag['Key'] == 'VolumeCounter':
-                    volume_counter = int(tag['Value']) + 1
-                    break
-
-            volume_id = create_volume(client, availability_zone, project_name, volume_counter)
-            created = True
-        else:
-            volume_id = volume['VolumeId']
-
-    if clean_build and not created:
-        print 'CLEAN_BUILD option was set, deleting volume {0}'.format(volume_id)
-        revert_workspace(job_name)
-        delete_volume(client, volume_id)
-        volume_id = create_volume(client, availability_zone, project_name, volume_counter)
-        created = True
-
-    print 'attaching volume {} to instance {}'.format(volume_id, instance_id)
-    volume = ec2_resource.Volume(volume_id)
-
-    instance_name = next(tag['Value'] for tag in instance.tags if tag['Key'] == 'Name')
-
-    if os.name == 'nt':
-        drives_before = win32api.GetLogicalDriveStrings()
-        drives_before = drives_before.split('\000')[:-1]
-
-        print drives_before
-
-    attach_volume_to_instance(volume, volume_id, instance_id, instance_name)
-
-    dev_path = setup_volume(workspace_name, created)
-
-    dev_existed = True
-
-    if os.name == 'nt':
-        free_space_path =  'D:\\'
-    else:
-        free_space_path = '/data/'
-
-    if get_free_space_mb(free_space_path) < 1024:
-        print 'Volume is running low on disk space. Recreating volume and running clean build.'
-        unmount_build_volume_from_node()
-        detach_volume_from_node(client, volume, instance_id, False)
-        delete_volume(client, volume_id)
-
-        volume_id = create_volume(client, availability_zone, project_name, volume_counter)
-        volume = ec2_resource.Volume(volume_id)
-        attach_volume_to_instance(volume, volume_id, instance_id, instance_name)
-        setup_volume(workspace_name, True)
-
-    if not os.path.exists(dev_path):
-        print 'creating directory structure for {}'.format(dev_path)
-        os.makedirs(dev_path)
-        if os.name != 'nt':
-            print 'taking ownership of {}'.format(dev_path)
-            subprocess.call(['chown', '-R', 'lybuilder:root', dev_path])
-        dev_existed = False
-
-    if os.name == 'nt':
-        jenkins_base = os.getenv('BASE')
-        try:
-            symlink_path = '{}\\workspace\\{}\\dev'.format(jenkins_base, workspace_name)
-            print 'creating symlink to path: {}'.format(symlink_path)
-            subprocess.call(['cmd', '/c', 'mklink', '/J', symlink_path, dev_path])
-            #subprocess.call(['cmd', '/c', 'mklink', '/J', '{}\\3rdParty'.format(jenkins_base), 'E:\\3rdParty'])
-        except Exception as e:
-            print e
-    else:
-        subprocess.call(['ln', '-s', '-f', dev_path, '/home/lybuilder/ly/workspace/{}'.format(workspace_name)])
-        subprocess.call(['ln', '-s', '-f', '/home/lybuilder/ly/workspace/3rdParty', '/data/ly/workspace'])
-
-    if not dev_existed:
-        print 'flushing perforce #have revision'
-        subprocess.call(['p4', 'trust'])
-        subprocess.call(['p4', 'flush', '-f', '//ly_jenkins_{}/dev/...#none'.format(job_name)])
-        #subprocess.call(['p4', 'sync', '-f', '//ly_jenkins_{}/dev/...'.format(job_name)])
-
-
-def revert_workspace(job_name):
-    try:
-        # Workaround for LY-86789: Revert bootstrap.cfg checkout.
-        print "REVERTING workspace {}".format(job_name)
-        subprocess.check_call(['p4', 'revert', '//ly_jenkins_{}/dev/...'.format(job_name)])
-    except subprocess.CalledProcessError as e:
-        print e.output
-        raise e
-    except Exception as e:
-        print e
-        raise e
-
-
-def teardown_incremental_build(workspace_name):
-    job_name = os.getenv('JOB_NAME', None)
-
-    if os.path.isfile('envinject.properties'):
-        os.remove('envinject.properties')
-
-    credentials = get_iam_role_credentials(IAM_ROLE_NAME)
-
-    aws_access_key_id = None
-    aws_secret_access_key = None
-    aws_session_token = None
-
-    if credentials is not None:
-        keys = ['AccessKeyId', 'SecretAccessKey', 'Token']
-        for key in keys:
-            if key not in credentials:
-                raise Exception('Unable to find {0} in get_iam_role_credentials response {1}'.format(key, credentials))
-
-        aws_access_key_id = credentials['AccessKeyId']
-        aws_secret_access_key = credentials['SecretAccessKey']
-        aws_session_token = credentials['Token']
-
-    session = boto3.session.Session()
-    region = session.region_name
-
-    try:
-        instance_id = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read()
-    except:
-        # this likely means we're not an ec2 instance
-        raise Exception('No EC2 metadata!')
-
-    if region is None:
-        region = 'us-west-2'
-
-    client = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
-                          aws_secret_access_key=aws_secret_access_key,
-                          aws_session_token=aws_session_token)
-
-    project_name = job_name
-    response = client.describe_volumes(Filters=
-    [
-        {
-            'Name': 'tag:Name',
-            'Values':
-                [
-                    '{0}'.format(project_name)
-                ]
-        }
-    ])
-
-    ec2_resource = boto3.resource('ec2', region_name=region)
-    instance = ec2_resource.Instance(instance_id)
-
-    volume = None
-
-    for attached_volume in instance.volumes.all():
-        for attachment in attached_volume.attachments:
-            print 'attachment device: {}'.format(attachment['Device'])
-            if attachment['Device'] == 'xvdf':
-                volume = attached_volume
-
-    if volume is None:
-        # volume doesn't exist, do nothing
-        print 'Volume for {} does not exist or is not attached to the current instance. This probably isn\'t an issue but should be reported.'.format(project_name)
-        return
-    else:
-        revert_workspace(job_name)
-
-        unmount_build_volume_from_node()
-
-        detach_volume_from_node(client, volume, instance_id, False)
-
-        cleanup_node(workspace_name)
-
-
-def prepare_incremental_build_mac(workspace_name):
-    job_name = os.getenv('JOB_NAME', None)
-    clean_build = os.getenv('CLEAN_BUILD', 'false').lower() == 'true'
-
-    subprocess.call(['mount', '-t', 'smbfs', '//lybuilder:[email protected]/inc-build/ly', '/data/ly'])
-
-    dev_path = '/data/ly/workspace/{}/dev'.format(workspace_name)
-
-    dev_existed = True
-
-    if clean_build:
-        print 'cleaning {}'.format(dev_path)
-        subprocess.call(['rm', '-rf', dev_path])
-    if not os.path.exists(dev_path):
-        print 'creating directory structure for {}'.format(dev_path)
-        os.makedirs(dev_path)
-        dev_existed = False
-
-    #subprocess.call(['ln', '-s', '-f', '/data/ly/workspace', '/Users/lybuilder'])
-
-    #subprocess.call(['ln', '-s', '-f', '/Users/lybuilder/workspace/3rdParty', '/data/ly/workspace'])
-
-    if not dev_existed:
-        print 'flushing perforce #have revision'
-        subprocess.call(['p4', 'trust'])
-        subprocess.call(['p4', 'flush', '-f', '//ly_jenkins_{}/dev/...#none'.format(job_name)])
-
-
-def main():
-    action = sys.argv[1]
-    workspace_name = sys.argv[2]
-
-    if action.lower() == 'prepare':
-        if platform.system().lower() == 'darwin':
-            prepare_incremental_build_mac(workspace_name)
-        else:
-            prepare_incremental_build(workspace_name)
-    elif action.lower() == 'teardown':
-        if platform.system().lower() == 'darwin':
-            pass
-        else:
-            teardown_incremental_build(workspace_name)
-    else:
-        print 'Invalid command. Valid actions are either "prepare" or "teardown".'
-
-
-if __name__ == '__main__':
-    main()
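
Note: the attach/detach helpers above wait by polling volume.attachments in a loop. boto3 also ships EC2 waiters that express the same wait; a short sketch of that alternative (region and volume id are placeholders):

    import boto3
    ec2 = boto3.client('ec2', region_name='us-west-2')
    # block until the volume reports 'available' after a detach call
    ec2.get_waiter('volume_available').wait(VolumeIds=['vol-0123456789abcdef0'])
    # or until it reports 'in-use' after an attach call
    ec2.get_waiter('volume_in_use').wait(VolumeIds=['vol-0123456789abcdef0'])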

+ 0 - 57
Tools/build/JenkinsScripts/build/utils/jenkins_scm_metrics.py

@@ -1,57 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-'''
-All this script does is read a file that was written previously to mark when the Perforce sync started,
-then take the current time to find out how long we spent in Perforce.
-'''
-
-import time
-
-from util import *
-
-
-def write_metrics():
-    enable_build_metrics = os.environ.get('ENABLE_BUILD_METRICS')
-    metrics_namespace = os.environ.get('METRICS_NAMESPACE')
-    if enable_build_metrics == 'true':
-        scm_end = int(time.time())
-        workspace = os.environ.get('WORKSPACE')
-        metrics_file_name = 'scm_start.txt'
-        if workspace is None:
-            safe_exit_with_error('{} must be run in Jenkins job.'.format(os.path.basename(__file__)))
-        try:
-            with open(os.path.join(workspace, metrics_file_name), 'r') as f:
-                scm_start = int(f.readline())
-        except:
-            safe_exit_with_error('Failed to read from {}'.format(metrics_file_name))
-
-        scm_total = scm_end - scm_start
-
-        script_path = os.path.join(workspace, 'dev/Tools/build/waf-1.7.13/build_metrics/write_build_metric.py')
-
-        build_tag = os.environ.get('BUILD_TAG')
-        p4_changelist = os.environ.get('P4_CHANGELIST')
-
-        if build_tag is not None and p4_changelist is not None:
-            os.environ['BUILD_ID'] = '{0}.{1}'.format(build_tag, p4_changelist)
-
-        cwd = os.getcwd()
-        os.chdir(os.path.join(workspace, 'dev'))
-        cmd = 'python {} SCMTime {} Seconds --enable-build-metrics {} --metrics-namespace {} --project-spec None'.format(script_path, scm_total, True, metrics_namespace)
-        # metrics call shouldn't fail the job
-        safe_execute_system_call(cmd, shell=True)
-        os.chdir(cwd)
-
-
-if __name__ == "__main__":
-    write_metrics()
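
Note: write_metrics() above assumes an earlier build step dropped scm_start.txt into the workspace before the Perforce sync ran. A minimal sketch of that counterpart step (the file name comes from the script; everything else is assumed):

    import os
    import time
    workspace = os.environ['WORKSPACE']
    # record the SCM start timestamp for jenkins_scm_metrics.py to read later
    with open(os.path.join(workspace, 'scm_start.txt'), 'w') as f:
        f.write(str(int(time.time())))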

+ 0 - 12
Tools/build/JenkinsScripts/build/utils/lib/__init__.py

@@ -1,12 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-

+ 0 - 174
Tools/build/JenkinsScripts/build/utils/lib/glob3.py

@@ -1,174 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-"""
-Filename globbing utility.
-Modified using https://github.com/python/cpython/blob/3.7/Lib/glob.py to be compatible with Python2
-Original file Copyright Python Software Foundation, used under license.
-Modifications copyright Amazon.com, Inc. or its affiliates.
-"""
-
-import os
-import re
-import fnmatch
-
-__all__ = ["glob", "iglob", "escape"]
-
-def glob(pathname, recursive=False):
-    """Return a list of paths matching a pathname pattern.
-    The pattern may contain simple shell-style wildcards a la
-    fnmatch. However, unlike fnmatch, filenames starting with a
-    dot are special cases that are not matched by '*' and '?'
-    patterns.
-    If recursive is true, the pattern '**' will match any files and
-    zero or more directories and subdirectories.
-    """
-    return list(iglob(pathname, recursive=recursive))
-
-def iglob(pathname, recursive=False):
-    """Return an iterator which yields the paths matching a pathname pattern.
-    The pattern may contain simple shell-style wildcards a la
-    fnmatch. However, unlike fnmatch, filenames starting with a
-    dot are special cases that are not matched by '*' and '?'
-    patterns.
-    If recursive is true, the pattern '**' will match any files and
-    zero or more directories and subdirectories.
-    """
-    it = _iglob(pathname, recursive, False)
-    if recursive and _isrecursive(pathname):
-        s = next(it)  # skip empty string
-        assert not s
-    return it
-
-def _iglob(pathname, recursive, dironly):
-    dirname, basename = os.path.split(pathname)
-    if not has_magic(pathname):
-        assert not dironly
-        if basename:
-            if os.path.lexists(pathname):
-                yield pathname
-        else:
-            # Patterns ending with a slash should match only directories
-            if os.path.isdir(dirname):
-                yield pathname
-        return
-    if not dirname:
-        if recursive and _isrecursive(basename):
-            # Python 2 has no 'yield from'; yield each name individually
-            for name in _glob2(dirname, basename, dironly):
-                yield name
-        else:
-            for name in _glob1(dirname, basename, dironly):
-                yield name
-        return
-    # `os.path.split()` returns the argument itself as a dirname if it is a
-    # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
-    # contains magic characters (i.e. r'\\?\C:').
-    if dirname != pathname and has_magic(dirname):
-        dirs = _iglob(dirname, recursive, True)
-    else:
-        dirs = [dirname]
-    if has_magic(basename):
-        if recursive and _isrecursive(basename):
-            glob_in_dir = _glob2
-        else:
-            glob_in_dir = _glob1
-    else:
-        glob_in_dir = _glob0
-    for dirname in dirs:
-        for name in glob_in_dir(dirname, basename, dironly):
-            yield os.path.join(dirname, name)
-
-# These 2 helper functions non-recursively glob inside a literal directory.
-# They return a list of basenames.  _glob1 accepts a pattern while _glob0
-# takes a literal basename (so it only has to check for its existence).
-
-def _glob1(dirname, pattern, dironly):
-    names = list(_iterdir(dirname, dironly))
-    return fnmatch.filter(names, pattern)
-
-def _glob0(dirname, basename, dironly):
-    if not basename:
-        # `os.path.split()` returns an empty basename for paths ending with a
-        # directory separator.  'q*x/' should match only directories.
-        if os.path.isdir(dirname):
-            return [basename]
-    else:
-        if os.path.lexists(os.path.join(dirname, basename)):
-            return [basename]
-    return []
-
-# Following functions are not public but can be used by third-party code.
-
-def glob0(dirname, pattern):
-    return _glob0(dirname, pattern, False)
-
-def glob1(dirname, pattern):
-    return _glob1(dirname, pattern, False)
-
-# This helper function recursively yields relative pathnames inside a literal
-# directory.
-
-def _glob2(dirname, pattern, dironly):
-    assert _isrecursive(pattern)
-    return [pattern[:0]] + list(_rlistdir(dirname, dironly))
-
-# If dironly is false, yields all file names inside a directory.
-# If dironly is true, yields only directory names.
-def _iterdir(dirname, dironly):
-    if not dirname:
-        if isinstance(dirname, bytes):
-            dirname = bytes(os.curdir, 'ASCII')
-        else:
-            dirname = os.curdir
-    try:
-        for entry in os.listdir(dirname):
-            yield entry
-    except OSError:
-        return
-
-# Recursively yields relative pathnames inside a literal directory.
-def _rlistdir(dirname, dironly):
-    if not os.path.islink(dirname):
-        names = list(_iterdir(dirname, dironly))
-        for x in names:
-            yield x
-            path = os.path.join(dirname, x) if dirname else x
-            for y in _rlistdir(path, dironly):
-                yield os.path.join(x, y)
-magic_check = re.compile('([*?[])')
-magic_check_bytes = re.compile(b'([*?[])')
-
-def has_magic(s):
-    if isinstance(s, bytes):
-        match = magic_check_bytes.search(s)
-    else:
-        match = magic_check.search(s)
-    return match is not None
-
-def _ishidden(path):
-    return path[0] in ('.', b'.'[0])
-
-def _isrecursive(pattern):
-    if isinstance(pattern, bytes):
-        return pattern == b'**'
-    else:
-        return pattern == '**'
-
-def escape(pathname):
-    """Escape all special characters.
-    """
-    # Escaping is done by wrapping any of "*?[" between square brackets.
-    # Metacharacters do not work in the drive part and shouldn't be escaped.
-    drive, pathname = os.path.splitdrive(pathname)
-    if isinstance(pathname, bytes):
-        pathname = magic_check_bytes.sub(br'[\1]', pathname)
-    else:
-        pathname = magic_check.sub(r'[\1]', pathname)
-    return drive + pathname
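
Note: the module above mirrors the Python 3 glob API for Python 2 callers. A small usage sketch (the import path and the patterns are illustrative and depend on where the caller runs from):

    from lib.glob3 import glob
    # non-recursive: one directory level of wscript files
    print glob('dev/*/wscript')
    # recursive: '**' walks subdirectories when recursive=True
    print glob('dev/Code/**/*.cpp', recursive=True)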

+ 0 - 75
Tools/build/JenkinsScripts/build/utils/packaging_version.py

@@ -1,75 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import os
-import sys
-from validate_ly_version import _read_version_from_branch_spec
-from argparse import ArgumentParser
-
-
-def main(args):
-    waf_branch_spec_file_directory = os.path.join(os.environ['WORKSPACE'], 'dev')
-    waf_branch_spec_file_name = 'waf_branch_spec.py'
-    
-    if not os.path.exists(os.path.join(waf_branch_spec_file_directory, waf_branch_spec_file_name)):
-        raise Exception("Invalid workspace directory: {}".format(waf_branch_spec_file_directory))
-
-    waf_branch_spec_version = _read_version_from_branch_spec(waf_branch_spec_file_directory, waf_branch_spec_file_name)
-    if waf_branch_spec_version is None:
-        raise Exception("Unable to read branch spec version from {}.".format(waf_branch_spec_file_name))
-
-    if args.version:
-        waf_branch_spec_version = args.version
-        
-    if waf_branch_spec_version=='0.0.0.0' and not args.allow_unversioned:
-        raise Exception('Version "{}" is invalid. Please specify a valid, non-zero LUMBERYARD_VERSION in {}.'.format(
-            waf_branch_spec_version,
-            os.path.join(waf_branch_spec_file_directory, waf_branch_spec_file_name)
-        ))
-
-    versions = waf_branch_spec_version.split('.')
-    if len(versions) != 4:
-        raise Exception("Invalid branch spec version '{}'. Must use format 'X.X.X.X'".format(waf_branch_spec_version))
-        
-    major_version = versions[0]
-    minor_version = versions[1]
-     
-    env_inject_file_path = os.path.join(os.environ['WORKSPACE'], os.environ['ENV_INJECT_FILE'])
-
-    print major_version
-    print minor_version
-     
-    with open(env_inject_file_path, 'w') as env_inject_file:
-        env_inject_file.write('MAJOR_VERSION={}\n'.format(major_version))
-        env_inject_file.write('MINOR_VERSION={}\n'.format(minor_version))
-
-    
-def check_env(*vars):
-    missing = []
-    for var in vars:
-        if var not in os.environ:
-            missing += (var,)
-    if missing:
-        raise Exception("Missing one or more environment variables: {}".format(", ".join(missing)))
-
-
-if __name__ == "__main__":
-    parser = ArgumentParser()
-    parser.add_argument('--allow-unversioned', default=False, action='store_true',
-                        help="Allow version '0.0.0.0'. Default is to fail if an invalid version is found")
-    parser.add_argument('--version', type=str,
-                        help="Manually specify version to use, instead of scanning dev root.")
-    args = parser.parse_args()
-    
-    check_env("WORKSPACE", "ENV_INJECT_FILE")
-    
-    main(args)
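
Note: the script above just splits a four-part LUMBERYARD_VERSION string and injects the first two parts into the Jenkins env-inject file. A tiny sketch of the transformation (the version string is an example, not a real release number):

    version = '1.28.0.1'             # value read from waf_branch_spec.py
    major, minor, _, _ = version.split('.')
    # resulting contents of the ENV_INJECT_FILE:
    #   MAJOR_VERSION=1
    #   MINOR_VERSION=28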

+ 0 - 212
Tools/build/JenkinsScripts/build/utils/scrubbing_test.py

@@ -1,212 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import requests
-from requests.auth import HTTPBasicAuth
-from P4 import P4, P4Exception
-from util import *
-from zipfile import ZipFile
-from download_from_s3 import s3_download_file, get_client
-from botocore.exceptions import ClientError
-from upload_to_s3 import s3_upload_file
-import os
-import shutil
-import json
-import urllib
-
-PACKAGE_NAME_REGEX = r'^lumberyard-\d\.\d-\d+-\w+-\d+\.(zip|tgz)$'
-P4_USER = 'lybuilder'
-BUCKET = 'ly-scrubbing-test'
-
-# Scrubber will fail if any of these files is missing; copy these files to the scrubbing workspace before the test
-SCRUBBING_REQUIRED_FILES = [
-]
-
-try:
-    JENKINS_USERNAME = os.environ['JENKINS_USERNAME']
-    JENKINS_API_TOKEN = os.environ['JENKINS_API_TOKEN']
-    JENKINS_SERVER = os.environ['JENKINS_URL']
-    P4_PORT = os.environ['ENV_P4_PORT']
-    JOB_NAME = os.environ['JOB_NAME']
-    BUILD_NUMBER = int(os.environ['BUILD_NUMBER'])
-    WORKSPACE = os.environ['WORKSPACE']
-    SCRUBBING_WORKSPACE = os.environ['SCRUBBING_WORKSPACE']
-except KeyError:
-    error('This script has to run on Jenkins')
-
-
-class File:
-    def __init__(self, path, action):
-        self.path = path
-        self.action = action
-
-
-# Get the changelist numbers that trigger the build
-def get_changelist_numbers():
-    changelist_numbers = []
-    changeset = []
-    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
-    try:
-        res = requests.get('{}/job/{}/{}/api/json'.format(JENKINS_SERVER, JOB_NAME, BUILD_NUMBER),
-                           auth=HTTPBasicAuth(JENKINS_USERNAME, JENKINS_API_TOKEN), headers=headers, verify=False)
-        res = json.loads(res.content)
-        changeset = res.get('changeSet').get('items')
-    except:
-        print 'Error: Failed to get changes from build {} in job {}'.format(BUILD_NUMBER, JOB_NAME)
-    for item in changeset:
-        changelist_numbers.append(item.get('changeNumber'))
-    return changelist_numbers
-
-
-# Get file list and actions that trigger the Jenkins job
-def get_files():
-    p4 = P4()
-    p4.port = P4_PORT
-    p4.user = P4_USER
-    p4.connect()
-
-    files = []
-    changelist_numbers = get_changelist_numbers()
-    for changelist_number in changelist_numbers:
-        cmd = ['describe', '-s', changelist_number]
-        try:
-            res = p4.run(cmd)[0]
-            file_list = res.get('depotFile')
-            actions = res.get('action')
-            for action, file_path in zip(actions, file_list):
-                # P4 returns file paths that are url encoded
-                file_path = urllib.unquote(file_path).decode("utf8")
-                # Ignore files which are not in dev
-                p = file_path.find('dev')
-                if p != -1:
-                    files.append(File(file_path[p:], action))
-        except P4Exception:
-            error('Internal error, please contact Build System')
-    return files
-
-
-def copy_file(src, dst, overwrite=False):
-    if os.path.exists(dst) and not overwrite:
-        return
-    print 'Copying file from {} to {}'.format(src, dst)
-    dest_file_dir = os.path.dirname(dst)
-    if not os.path.exists(dest_file_dir):
-        os.makedirs(dest_file_dir)
-    shutil.copyfile(src, dst)
-
-
-# Run scrubbing scripts
-def scrub():
-    print 'Perform the Code Scrubbing'
-    scrubber_path = os.path.join(WORKSPACE, 'dev/Tools/build/JenkinsScripts/distribution/scrubbing/scrub_all.py')
-    scrub_params = ["-p", "-d", "-o"]
-    # Scrub code
-    args = ['python', scrubber_path, '-p', '-d', '-o', os.path.join(SCRUBBING_WORKSPACE, 'dev/Code'), os.path.join(SCRUBBING_WORKSPACE, 'dev')]
-    return_code = safe_execute_system_call(args)
-    if return_code != 0:
-        print 'ERROR: Code scrubbing failed.'
-        return False
-    print 'Code scrubbing completed successfully.'
-    return True
-
-
-# Run scrubbing validator
-def validate():
-    # Run validator
-    print 'Running validator'
-    validator_platforms = ["provo", "salem", "jasper"]
-    success = True
-    for validator_platform in validator_platforms:
-        validator_path = os.path.join(WORKSPACE, 'dev/Tools/build/JenkinsScripts/distribution/scrubbing/validator.py')
-        args = ['python', validator_path, '-p', validator_platform, os.path.join(SCRUBBING_WORKSPACE, 'dev')]
-        if safe_execute_system_call(args):
-            success = False
-    if not success:
-        print 'ERROR: Scrubbing validator failed.'
-        return False
-    print 'Scrubbing validator completed successfully.'
-    return True
-
-
-def scrubbing_test():
-    if os.path.exists(SCRUBBING_WORKSPACE):
-        os.system('rmdir /s /q \"{}\"'.format(SCRUBBING_WORKSPACE))
-    os.mkdir(SCRUBBING_WORKSPACE)
-
-    client = get_client('s3')
-    zip_name = '{}.zip'.format(JOB_NAME)
-    # Check if zipfile exists in S3 bucket
-    try:
-        client.head_object(Bucket=BUCKET, Key=zip_name)
-    except ClientError as e:
-        if e.response['Error']['Code'] == '404':
-            print 'No previous zipfile found in S3 bucket {}'.format(BUCKET)
-        else:
-            raise
-    else:
-        # Download the zipfile from S3 bucket if the zipfile exists
-        if not s3_download_file(client, SCRUBBING_WORKSPACE, zip_name, BUCKET, max_retry=3):
-            warn('Failed to download {} from S3 bucket {}'.format(zip_name, BUCKET))
-
-    # Unzip the zipfile to SCRUBBING_WORKSPACE
-    zip_path = os.path.join(SCRUBBING_WORKSPACE, zip_name)
-    if os.path.exists(zip_path):
-        zip_file = ZipFile(os.path.join(SCRUBBING_WORKSPACE, zip_name), 'r')
-        zip_file.extractall(SCRUBBING_WORKSPACE)
-        zip_file.close()
-
-    # Copy scrubbing required files to SCRUBBING_WORKSPACE, no overwrite
-    for file in SCRUBBING_REQUIRED_FILES:
-        src_file = os.path.join(WORKSPACE, file)
-        dst_file = os.path.join(SCRUBBING_WORKSPACE, file)
-        copy_file(src_file, dst_file)
-
-    # Get file list and actions that trigger the Jenkins job
-    files = get_files()
-
-    # Copy or delete each file in SCRUBBING_WORKSPACE
-    for f in files:
-        dst_file = os.path.join(SCRUBBING_WORKSPACE, f.path)
-        if 'delete' in f.action:
-            if os.path.exists(dst_file):
-                print 'Deleting {}'.format(dst_file)
-                os.remove(dst_file)
-        else:
-            src_file = os.path.join(WORKSPACE, f.path)
-            copy_file(src_file, dst_file, overwrite=True)
-
-    # Backup the unmodified files and run scrubber and validator
-    backup_path = os.path.join(SCRUBBING_WORKSPACE, 'backup')
-    scrubbing_dev = os.path.join(SCRUBBING_WORKSPACE, 'dev')
-    success = True
-    if os.path.exists(scrubbing_dev):
-        shutil.copytree(scrubbing_dev, os.path.join(backup_path, 'dev'))
-        success = scrub() and validate()
-
-    if success:
-        # Delete zipfile from S3 if validator run successfully
-        try:
-            print 'Deleting {} from bucket {}'.format(zip_name, BUCKET)
-            client.delete_object(Bucket=BUCKET, Key=zip_name)
-        except:
-            warn('Failed to delete {} from bucket {}'.format(zip_name, BUCKET))
-    else:
-        # Upload backup files to S3 bucket
-        if os.path.exists(backup_path):
-            zip_path = os.path.join(SCRUBBING_WORKSPACE, JOB_NAME)
-            shutil.make_archive(zip_path, 'zip', backup_path)
-            if not s3_upload_file(client, SCRUBBING_WORKSPACE, zip_name, BUCKET, max_retry=3):
-                error('Failed to upload {} to S3 bucket {}'.format(zip_name, BUCKET))
-        exit(1)
-
-
-if __name__ == "__main__":
-    scrubbing_test()

+ 0 - 82
Tools/build/JenkinsScripts/build/utils/update_bootstrap_cfg.py

@@ -1,82 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-'''
-This script updates the configuration in dev/bootstrap.cfg
-Usage: python update_bootstrap_cfg.py --bootstrap_cfg file_path --replace key1=value1,key2=value2
-'''
-
-from optparse import OptionParser
-import os
-import stat
-
-
-def update_bootstrap_cfg(file, replace_values):
-    try:
-        with open(file, 'r') as bootstrap_cfg:
-            content = bootstrap_cfg.read()
-    except:
-        error('Cannot read file {}'.format(file))
-    content = content.split('\n')
-    new_content = []
-    for line in content:
-        if not line.startswith('--'):
-            strs = line.split('=')
-            if len(strs):
-                key = strs[0].strip(' ')
-                if key in replace_values:
-                    line = '{}={}'.format(key, replace_values[key])
-        new_content.append(line)
-
-    try:
-        with open(file, 'w') as out:
-            out.write('\n'.join(new_content))
-    except:
-        error('Cannot write to file {}'.format(file))
-    print '{} updated with value {}'.format(file, replace_values)
-
-
-def error(msg):
-    print msg
-    exit(1)
-
-
-def parse_args():
-    parser = OptionParser()
-    parser.add_option("--bootstrap_cfg", dest="bootstrap_cfg", default=None, help="File path of bootstrap.cfg to be updated.")
-    parser.add_option("--replace", dest="replace", default=None, help="Comma-separated key=value pairs to replace in bootstrap.cfg.")
-    (options, args) = parser.parse_args()
-    bootstrap_cfg = options.bootstrap_cfg
-    replace = options.replace
-
-    if not bootstrap_cfg:
-        error('bootstrap.cfg is not specified.')
-    if not os.path.isfile(bootstrap_cfg):
-        error('File {} not found.'.format(bootstrap_cfg))
-    replace_values = {}
-    if replace:
-        try:
-            replace = replace.split(',')
-            for r in replace:
-                r = r.split('=')
-                key = r[0].strip(' ')
-                value = r[1].strip(' ')
-                replace_values[key] = value
-        except IndexError:
-            error('Please check the format of argument --replace.')
-
-    return bootstrap_cfg, replace_values
-
-
-if __name__ == "__main__":
-    (file, replace_values) = parse_args()
-    update_bootstrap_cfg(file, replace_values)
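
Note: update_bootstrap_cfg() above rewrites matching 'key=value' lines in place. A minimal sketch of calling it directly (the key and value are illustrative only):

    from update_bootstrap_cfg import update_bootstrap_cfg
    # equivalent to: python update_bootstrap_cfg.py --bootstrap_cfg dev/bootstrap.cfg --replace sys_game_folder=SamplesProject
    update_bootstrap_cfg('dev/bootstrap.cfg', {'sys_game_folder': 'SamplesProject'})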

+ 0 - 174
Tools/build/JenkinsScripts/build/utils/upload_benchmarks.py

@@ -1,174 +0,0 @@
-#!/usr/bin/env python
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-from datetime import datetime, timezone
-import hashlib
-import os
-import pathlib
-import platform
-import re
-import sys
-import subprocess
-import zipfile
-
-'''
-Creates a zip file of the <BuildDir>/BenchmarkResults folder and sends the zip via email to the team email
-'''
-
-def compute_sha256(input_filepath):
-    '''
-    Computes a SHA-2 hash using a digest that is 256 bits
-
-    Args:
-        input_filepath: File whose content will be hashed using the SHA-2 hash function
-
-    Returns:
-        bytes: byte array containing hash digest in hex
-    '''
-    hasher = hashlib.sha256()
-    hash_result = None
-
-    CHUNK_SIZE = 128 * (1 << 10) # Chunk Size for Sha256 hashing reads file in chunks of 128 KiB
-    with open(input_filepath, 'rb') as hash_file:
-        buf = hash_file.read(CHUNK_SIZE)
-        hasher.update(buf)
-        hash_result = hasher.hexdigest()
-
-    return hash_result
-
-def create_sha256sums_file(input_filepath):
-    '''
-    Create a sha256sums file from the contents of the input_filepath
-    The sha256sums file will be named using the input_filepath path with an added
-    extension of .sha256sums
-
-    Args:
-        input_filepath: File whose content will be hashed using the SHA-2 hash function
-
-    Returns:
-        string: Path to sha256sums file
-    '''
-    sha256_hash = compute_sha256(input_filepath)
-    if not sha256_hash:
-        print(f'Unable to compute sha256 hash for file {input_filepath}')
-        return None
-
-    hash_filepath = f'{input_filepath}.sha256sums'
-    with open(hash_filepath, "wb") as archive_hash_file:
-        new_hash_contents = f'{sha256_hash} *{os.path.basename(input_filepath)}\n'
-        archive_hash_file.write(new_hash_contents.encode("utf8"))
-
-    return hash_filepath
-
-def get_files_to_archive(base_dir, regex):
-    '''
-    Gathers list of filepaths to add to archive file
-    Files are looked up with the base directory and cross checked against the supplied regular expression
-    which acts as an inclusion filter
-
-    Args:
-        base_dir: Directory to scan for files
-        regex: Regular expression that is matched against each filename to determine if the file should be
-               added to the archive
-    '''
-    # Get all file names in base directory
-    with os.scandir(base_dir) as dir_entry:
-        filepaths = [pathlib.PurePath(entry.path) for entry in dir_entry if entry.is_file()]
-        # Get all file names matching the regular expression, those file will be added to zip archive
-        archive_files = [str(filepath) for filepath in filepaths if re.match(regex, filepath.as_posix())]
-        return archive_files
-    return None
-
-
-def create_archive_file(archive_file_prefix, input_filepaths, base_dir):
-    '''
-    Creates a zip file using the supplied input files
-    LZMA compression is used by default for the zip file compression
-
-    Args:
-        archive_file_prefix: Prefix to use as the name of the zip file that should be created
-        input_filepaths: List of input file paths that will be added to zip file
-        base_dir: Directory which is used to create relative paths for each input file path from
-    '''
-    try:
-        zipfile_name = '{}-{:%Y%m%d_%H%M%S}.zip'.format(archive_file_prefix,datetime.now(timezone.utc))
-        # The lzma shared library isn't installed by default on Mac.
-        compression_type = zipfile.ZIP_LZMA if platform.system() != 'Darwin' else zipfile.ZIP_BZIP2
-        with zipfile.ZipFile(zipfile_name, mode='w', compression=compression_type) as benchmark_archive:
-            zipfile_name = benchmark_archive.filename
-            for input_filepath in input_filepaths:
-                # Make input files relative to base_dir when storing them as archived names
-                input_filepath_relpath = os.path.relpath(input_filepath, start=base_dir)
-                benchmark_archive.write(input_filepath, input_filepath_relpath)
-    except OSError as err:
-        print(f'Failed to write benchmark files to zip archive with error {err}')
-        sys.exit(1)
-    except RuntimeError as zip_err:
-        print(f'Runtime Error in zipfile module {zip_err}')
-        sys.exit(1)
-    return zipfile_name
-
-def upload_to_s3(upload_script_path, base_dir, path_regex, bucket, key_prefix):
-    '''
-    Uploads files located within the base directory using the upload_to_s3.py script
-
-    Args:
-        base_dir: The directory to pass as the --base_dir value to the upload_to_s3.py script
-        path_regex: The regular expression to pass to the upload_to_s3.py script --file_regex parameter
-        bucket: The S3 bucket to use for the --bucket argument for upload_to_s3.py
-        key_prefix: The prefix under which to store the uploaded files within the S3 bucket.
-                    It is passed as the --key_prefix argument to upload_to_s3.py
-    '''
-    try:
-        subprocess.run(['python', upload_script_path, '--base_dir',
-            base_dir, '--file_regex', path_regex,
-            '--bucket', bucket, '--key_prefix', key_prefix],
-            check=True)
-    except subprocess.CalledProcessError as err:
-        print(f'{upload_script_path} failed with error {err}')
-        sys.exit(1)
-
-def upload_benchmarks(args):
-    '''
-    Main function responsible for determining which files to add to the output zip file and uploading
-    the results to s3
-
-    Args:
-        args: Parse argument list of python command line parameters using the argparse module
-    '''
-    files_to_archive = get_files_to_archive(args.base_dir, args.file_regex)
-    archive_zip_path = create_archive_file(args.output_prefix, files_to_archive, args.base_dir)
-    # Create Sha256sum hash file of zip
-    create_sha256sums_file(archive_zip_path)
-    
-    upload_dir = str(pathlib.Path(archive_zip_path).parent)
-    upload_regex = fr'{pathlib.Path(archive_zip_path).name}.*'
-    upload_to_s3(args.upload_to_s3_script_path, upload_dir, upload_regex, args.bucket, args.key_prefix)
-
-def parse_args():
-    cur_dir = os.path.dirname(os.path.abspath(__file__))
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--base_dir", default=os.getcwd(), help="Base directory containing the files to archive. If not given, the current directory is used.")
-    parser.add_argument("--upload-to-s3-script-path", default=os.path.join(cur_dir, 'upload_to_s3.py'), help="Path to upload_to_s3.py script. Script is used for uploading benchmarks to s3")
-    parser.add_argument("--file_regex", default=r'.*BenchmarkResults/.+\.json', help="Regular expression used to match file names to archive.")
-    parser.add_argument("-o", "--output-prefix", default='benchmarks_results', help="Prefix used to construct the name of the zip file into which the benchmark results are zipped."
-        " A timestamp will be appended to the filename")
-    parser.add_argument("--bucket", dest="bucket", default='ly-jenkins-cmake-benchmarks', help="S3 bucket the files are uploaded to.")
-    parser.add_argument("-k", "--key_prefix", default='user_build', dest="key_prefix", help="Object key prefix.")
-    args = parser.parse_args()
-
-    return args
-
-if __name__ == '__main__':
-    args = parse_args()
-    upload_benchmarks(args)
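
Side note: the .sha256sums files produced by create_sha256sums_file above use the "<hex digest> *<file name>" convention, so they can be checked with a few lines of standard-library Python. A minimal verification sketch (the helper name and the archive name in the comment are hypothetical):

import hashlib

def verify_sha256sums(sums_path):
    # Each line has the form "<hex digest> *<file name>"
    with open(sums_path, 'r', encoding='utf8') as sums_file:
        digest, _, name = sums_file.readline().strip().partition(' *')
    hasher = hashlib.sha256()
    with open(name, 'rb') as data_file:
        for chunk in iter(lambda: data_file.read(128 * 1024), b''):
            hasher.update(chunk)
    return hasher.hexdigest() == digest

# e.g. verify_sha256sums('benchmarks_results-20210101_000000.zip.sha256sums')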

+ 0 - 183
Tools/build/JenkinsScripts/build/utils/upload_metrics_to_kinesis.py

@@ -1,183 +0,0 @@
-########################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-#
-# Original file Copyright Crytek GMBH or its affiliates, used under license.
-#
-########################################################################################
-import ast
-import boto3
-from botocore.exceptions import ClientError
-from datetime import datetime
-import logging
-import os
-import shutil
-import sys
-import time
-import traceback
-import urllib2
-import uuid
-
-KINESIS_STREAM_NAME = 'lumberyard-metrics-stream'
-KINESIS_MAX_RECORD_SIZE = 1048576  # 1 MB
-S3_BACKUP_BUCKET = 'infrastructure-build-metrics-backup'
-IAM_ROLE_NAME = 'ec2-jenkins-node'
-LOG_FILE_NAME = 'kinesis_upload.log'
-
-MAX_RECORD_SIZE = KINESIS_MAX_RECORD_SIZE - 4  # to account for version header
-MAX_RETRIES = 5
-RETRY_EXCEPTIONS = ('ProvisionedThroughputExceededException',
-                    'ThrottlingException')
-
-# truncate the log file, eventually we need to send the logs to cloudwatch logs
-with open(LOG_FILE_NAME, 'w'):
-    pass
-
-logger = logging.getLogger('KinesisUploader')
-
-fileHdlr = logging.FileHandler(LOG_FILE_NAME)
-# uncomment the three streamHdlr lines below to have logs go to stdout for debugging purposes
-#streamHdlr = logging.StreamHandler(sys.stdout)
-
-formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-fileHdlr.setFormatter(formatter)
-#streamHdlr.setFormatter(formatter)
-
-logger.addHandler(fileHdlr)
-#logger.addHandler(streamHdlr)
-logger.setLevel(logging.DEBUG)
-
-def backup_file_to_s3(s3_client, bucket_name, file_location, s3_file_name):
-    try:
-        s3_client.meta.client.upload_file(file_location, bucket_name, s3_file_name)
-        os.remove(file_location)
-    except:
-        logger.error('Failed to upload backup file to S3. This is non-fatal!')
-        # logger.error(traceback.print_exc())
-
-
-def get_iam_role_credentials(role_name):
-    security_metadata = None
-    try:
-        response = urllib2.urlopen(
-            'http://169.254.169.254/latest/meta-data/iam/security-credentials/{0}'.format(role_name)).read()
-        security_metadata = ast.literal_eval(response)
-    except:
-        logger.error('Unable to get iam role credentials')
-        logger.error(traceback.print_exc())
-
-    return security_metadata
-
-
-def splitFileByRecord(stream, maxSize):
-    version = 1
-
-    # currently using random GUID for partition key, but in the future we may want to partition by some build id
-    # or by build host
-    partition_key = uuid.uuid4()
-
-    entry_size = 0
-    put_entries = []
-
-    current_entry = ''
-    for line in stream:
-        line_size_in_bytes = len(line.encode('utf-8'))
-        entry_size = entry_size + line_size_in_bytes
-
-        if entry_size > maxSize:
-            put_entries.append({
-                'Data': str(version) + '\n' + str(current_entry),
-                'PartitionKey': str(partition_key)
-            })
-
-            current_entry = line
-            entry_size = line_size_in_bytes
-        else:
-            current_entry = current_entry + line
-
-    if current_entry:
-        put_entries.append({
-            'Data': str(version) + '\n' + str(current_entry),
-            'PartitionKey': str(partition_key)
-        })
-
-    return put_entries
-
-
-def main():
-    credentials = get_iam_role_credentials(IAM_ROLE_NAME)
-
-    aws_access_key_id = None
-    aws_secret_access_key = None
-    aws_session_token = None
-
-    if credentials is not None:
-        keys = ['AccessKeyId', 'SecretAccessKey', 'Token']
-        for key in keys:
-            if key not in credentials:
-                logger.error('Unable to find {0} in get_iam_role_credentials response {1}'.format(key, credentials))
-                return
-
-        aws_access_key_id = credentials['AccessKeyId']
-        aws_secret_access_key = credentials['SecretAccessKey']
-        aws_session_token = credentials['Token']
-
-    kinesis_client = boto3.client('kinesis', region_name='us-west-2', aws_access_key_id=aws_access_key_id,
-                                  aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token)
-    s3_client = boto3.resource('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,
-                               aws_session_token=aws_session_token)
-
-    file_location = sys.argv[1]
-    filename = os.path.basename(file_location)
-    backup_file_location = file_location + '.bak'
-
-    try:
-        if os.path.isfile(backup_file_location):
-            logger.info('Found pre-existing backup file.  Uploading to S3.')
-            backup_file_to_s3(s3_client, S3_BACKUP_BUCKET, backup_file_location,
-                              '{0}.{1}'.format(filename, datetime.now().isoformat()))
-
-        if os.path.isfile(file_location):
-            shutil.copyfile(file_location, backup_file_location)
-            backup_file_to_s3(s3_client, S3_BACKUP_BUCKET, backup_file_location,
-                              '{0}.{1}'.format(filename, datetime.now().isoformat()))
-
-            logger.info('Opening metrics file {0}'.format(file_location))
-            with open(file_location, 'r+') as f:
-                records = splitFileByRecord(f, MAX_RECORD_SIZE)
-                i = 0
-                retries = 0
-                while i < len(records):
-                    record = records[i]
-                    try:
-                        logger.info('Uploading {0} bytes of metrics to Kinesis...'.format(len(record['Data'])))
-                        kinesis_client.put_record(StreamName=KINESIS_STREAM_NAME,
-                                                  Data=record['Data'],
-                                                  PartitionKey=record['PartitionKey'])
-                        retries = 0
-                    except ClientError as ex:
-                        if ex.response['Error']['Code'] not in RETRY_EXCEPTIONS:
-                            raise
-
-                        sleep_time = 2 ** retries
-                        logger.warning('Request throttled by Kinesis, '
-                                       'sleeping and retrying in {0} seconds'.format(sleep_time))
-                        time.sleep(sleep_time)
-                        retries += 1
-                        i -= 1
-
-                    i += 1
-
-                f.truncate(0)
-    except:
-        logger.error(traceback.print_exc())
-
-if __name__ == '__main__':
-    main()
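
Side note: on an EC2 Jenkins node, boto3 can resolve instance-role credentials itself, so the manual metadata-service lookup above is one possible approach rather than a requirement. A minimal sketch of the same put_record call relying on boto3's default credential chain (the payload and partition key are illustrative; the stream name is the one used by this script):

import boto3

# boto3 falls back to the EC2 instance metadata service for credentials,
# so no explicit access keys are needed on a correctly-configured node.
kinesis = boto3.client('kinesis', region_name='us-west-2')
kinesis.put_record(
    StreamName='lumberyard-metrics-stream',
    Data=b'1\n{"example": "metric"}',  # version header + payload, matching splitFileByRecord
    PartitionKey='example-partition-key',
)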

+ 0 - 107
Tools/build/JenkinsScripts/build/utils/upload_to_s3.py

@@ -1,107 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-'''
-Usage:
-Use EC2 role to upload all .zip and .MD5 files in %WORKSPACE% folder to bucket ly-packages-mainline:
-python upload_to_s3.py --base_dir %WORKSPACE% --file_regex "(.*zip$|.*MD5$)" --bucket ly-packages-mainline
-
-Use profile to upload all .zip and .MD5 files in %WORKSPACE% folder to bucket ly-packages-mainline:
-python upload_to_s3.py --base_dir %WORKSPACE% --profile profile --file_regex "(.*zip$|.*MD5$)" --bucket ly-packages-mainline
-
-'''
-
-
-import os
-import re
-import json
-import boto3
-from optparse import OptionParser
-from util import error
-
-
-def parse_args():
-    parser = OptionParser()
-    parser.add_option("--base_dir", dest="base_dir", default=os.getcwd(), help="Base directory containing the files to upload. If not given, the current directory is used.")
-    parser.add_option("--file_regex", dest="file_regex", default=None, help="Regular expression used to match file names to upload.")
-    parser.add_option("--profile", dest="profile", default=None, help="The name of a profile to use. If not given, then the default profile is used.")
-    parser.add_option("--bucket", dest="bucket", default=None, help="S3 bucket the files are uploaded to.")
-    parser.add_option("--key_prefix", dest="key_prefix", default='', help="Object key prefix.")
-    '''
-    ExtraArgs used to call s3.upload_file(), should be in json format. extra_args key must be one of: ACL, CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentType, Expires,
-    GrantFullControl, GrantRead, GrantReadACP, GrantWriteACP, Metadata, RequestPayer, ServerSideEncryption, StorageClass,
-    SSECustomerAlgorithm, SSECustomerKey, SSECustomerKeyMD5, SSEKMSKeyId, WebsiteRedirectLocation
-    '''
-    parser.add_option("--extra_args", dest="extra_args", default=None, help="Additional parameters used to upload file.")
-    parser.add_option("--max_retry", dest="max_retry", default=1, help="Maximum retry times to upload file.")
-    (options, args) = parser.parse_args()
-    if not os.path.isdir(options.base_dir):
-        error('{} is not a valid directory'.format(options.base_dir))
-    if not options.file_regex:
-        error('Use --file_regex to specify the regular expression used to match file names to upload.')
-    if not options.bucket:
-        error('Use --bucket to specify bucket that the files are uploaded to.')
-    return options
-
-
-def get_client(service_name, profile_name):
-    session = boto3.session.Session(profile_name=profile_name)
-    client = session.client(service_name)
-    return client
-
-
-def get_files_to_upload(base_dir, regex):
-    # Get all file names in base directory
-    files = [x for x in os.listdir(base_dir) if os.path.isfile(os.path.join(base_dir, x))]
-    # Get all file names matching the regular expression; those files will be uploaded to S3
-    files_to_upload = [x for x in files if re.match(regex, x)]
-    return files_to_upload
-
-
-def s3_upload_file(client, base_dir, file, bucket, key_prefix=None, extra_args=None, max_retry=1):
-    print('Uploading file {} to bucket {}.'.format(file, bucket))
-    key = file if not key_prefix else '{}/{}'.format(key_prefix, file)
-    for x in range(max_retry):
-        try:
-            client.upload_file(
-                os.path.join(base_dir, file), bucket, key,
-                ExtraArgs=extra_args
-            )
-            print('Upload succeeded')
-            return True
-        except Exception as err:
-            print('exception while uploading: {}'.format(err))
-            print('Retrying upload...')
-    print('Upload failed')
-    return False
-
-
-if __name__ == "__main__":
-    options = parse_args()
-    client = get_client('s3', options.profile)
-    files_to_upload = get_files_to_upload(options.base_dir, options.file_regex)
-    extra_args = json.loads(options.extra_args) if options.extra_args else None
-
-    print('Uploading {} files to bucket {}.'.format(len(files_to_upload), options.bucket))
-    failure = []
-    success = []
-    for file in files_to_upload:
-        if not s3_upload_file(client, options.base_dir, file, options.bucket, options.key_prefix, extra_args, int(options.max_retry)):
-            failure.append(file)
-        else:
-            success.append(file)
-    print('Upload finished.')
-    print('{} files are uploaded successfully:'.format(len(success)))
-    print('\n'.join(success))
-    if len(failure) > 0:
-        print('{} files failed to upload:'.format(len(failure)))
-        print('\n'.join(failure))
-        # Exit with error code 1 if any file failed to upload
-        exit(1)
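
Side note: the --extra_args value is parsed with json.loads and forwarded unchanged to boto3's upload_file as ExtraArgs. A minimal sketch of the equivalent direct call (the bucket name, key, and ACL are illustrative only):

import boto3

s3 = boto3.session.Session(profile_name='default').client('s3')
s3.upload_file(
    'lumberyard_package.zip',            # local file
    'my-example-bucket',                 # bucket
    'nightly/lumberyard_package.zip',    # key (key_prefix + file name)
    ExtraArgs={'ACL': 'bucket-owner-full-control', 'ServerSideEncryption': 'AES256'},
)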

+ 0 - 65
Tools/build/JenkinsScripts/build/utils/util.py

@@ -1,65 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import json
-import os
-import re
-import subprocess
-
-
-class LyBuildError(Exception):
-    def __init__(self, message):
-        super(LyBuildError, self).__init__(message)
-        self.message = message
-
-    def __str__(self):
-        return str(self.message)
-
-
-def ly_build_error(message):
-    raise LyBuildError(message)
-
-
-def error(message):
-    print('Error: {}'.format(message))
-    exit(1)
-
-
-# Exiting with status code 0 means this won't fail the whole build process
-def safe_exit_with_error(message):
-    print('Error: {}'.format(message))
-    exit(0)
-
-
-def warn(message):
-    print('Warning: {}'.format(message))
-
-
-def execute_system_call(command, **kwargs):
-    print('Executing subprocess.check_call({})'.format(command))
-    try:
-        subprocess.check_call(command, **kwargs)
-    except subprocess.CalledProcessError as e:
-        print(e.output)
-        error('Executing subprocess.check_call({}) failed with error {}'.format(command, e))
-    except FileNotFoundError as e:
-        error("File Not Found - Failed to call {} with error {}".format(command, e))
-
-
-def safe_execute_system_call(command, **kwargs):
-    print('Executing subprocess.check_call({})'.format(command))
-    try:
-        subprocess.check_call(command, **kwargs)
-    except subprocess.CalledProcessError as e:
-        print(e.output)
-        warn('Executing subprocess.check_call({}) failed'.format(command))
-        return e.returncode
-    return 0

+ 0 - 64
Tools/build/JenkinsScripts/distribution/AWS_PyTools/LyChecksum.py

@@ -1,64 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import hashlib
-import re
-
-
-def generateFilesetChecksum(filePaths):
-    filesetHash = hashlib.sha512()
-    for filePath in filePaths:
-        updateHashWithFileChecksum(filePath, filesetHash)
-    return filesetHash
-
-
-def getChecksumForSingleFile(filePath, openMode='rb'):
-    filesetHash = hashlib.sha512()
-    updateHashWithFileChecksum(filePath, filesetHash, openMode)
-    return filesetHash
-
-
-def getMD5ChecksumForSingleFile(filePath, openMode='rb'):
-    filesetHash = hashlib.md5()
-    updateHashWithFileChecksum(filePath, filesetHash, openMode)
-    return filesetHash
-
-
-def updateHashWithFileChecksum(filePath, filesetHash, openMode='rb'):
-    BLOCKSIZE = 65536
-    with open(filePath.strip('\n'), openMode) as file:
-        buf = file.read(BLOCKSIZE)
-        while len(buf) > 0:
-            filesetHash.update(buf)
-            buf = file.read(BLOCKSIZE)
-
-
-def is_valid_hash_sha1(checksum):
-    # sha1 hashes are 40 hex characters long.
-    if len(checksum) != 40:
-        return False
-    sha1_re = re.compile("(^[0-9A-Fa-f]{40}$)")
-    result = sha1_re.match(checksum)
-    if not result:
-        return False
-    return True
-
-
-def is_valid_hash_sha512(checksum):
-    # sha512 hashes are 128 hex characters long.
-    if len(checksum) != 128:
-        return False
-    sha512_re = re.compile("(^[0-9A-Fa-f]{128}$)")
-    result = sha512_re.match(checksum)
-    if not result:
-        return False
-    return True
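
A minimal usage sketch for these helpers, assuming the AWS_PyTools directory is on sys.path and a local file named package.zip exists:

import LyChecksum

sha512_hash = LyChecksum.getChecksumForSingleFile('package.zip')
digest = sha512_hash.hexdigest()
print(digest)
print(LyChecksum.is_valid_hash_sha512(digest))  # True: a sha512 hex digest is 128 characters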

+ 0 - 76
Tools/build/JenkinsScripts/distribution/AWS_PyTools/LyCloudfrontOps.py

@@ -1,76 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import os.path
-import boto3
-try:
-    import urllib.parse as urllib_compat
-except:
-    import urlparse as urllib_compat
-
-def getCloudfrontDistPath(uploadURL):
-    pathFromDomainName = urllib_compat.urlparse(uploadURL)[2]
-    # need to remove the first slash, otherwise it will create a nameless directory on S3
-    return pathFromDomainName[1:]
-
-
-def getCloudfrontDistribution(cloudfrontUrl, awsCredentialProfileName):
-    session = boto3.session.Session(profile_name=awsCredentialProfileName)
-    cloudfront = session.client('cloudfront')
-    distributionList = cloudfront.list_distributions()
-    targetDistId = None
-    for distribution in distributionList["DistributionList"]["Items"]:
-        if distribution["DomainName"] == urllib_compat.urlparse(cloudfrontUrl)[1]:
-            targetDistId = distribution["Id"]
-            pass
-    assert (targetDistId is not None), "No distribution with the domain name {} found.".format(cloudfrontUrl)
-    targetDist = cloudfront.get_distribution(Id=targetDistId)
-    return targetDist
-
-
-def getBucket(cloudfrontDistribution, awsCredentialProfileName):
-    bucketName = getBucketName(cloudfrontDistribution)
-    session = boto3.session.Session(profile_name=awsCredentialProfileName)
-    s3 = session.resource('s3')
-    return s3.Bucket(bucketName)
-
-
-def getBucketName(cloudfrontDistribution):
-    s3Info = cloudfrontDistribution["Distribution"]["DistributionConfig"]["Origins"]["Items"][0]
-    bucketDomainName = s3Info["DomainName"]
-    return bucketDomainName.split('.')[0] # first part of the domain name is the bucket name
-
-
-def buildBucketPath(cloudfrontUrl, cloudfrontDistribution):
-    s3Info = cloudfrontDistribution["Distribution"]["DistributionConfig"]["Origins"]["Items"][0]
-    originPath = s3Info["OriginPath"]
-    bucketPath = None
-    if originPath:
-        # Start originPath after the first character (presumed to be '/') to avoid nameless directory in S3.
-        bucketPath = str.format("{0}/{1}", originPath[1:], getCloudfrontDistPath(cloudfrontUrl))
-    else:
-        bucketPath = getCloudfrontDistPath(cloudfrontUrl)
-    return bucketPath
-
-
-def uploadFileToCloudfrontURL(absFilePath, cloudfrontBaseUrl, awsCredentialProfileName, overwrite):
-    cloudfrontDist = getCloudfrontDistribution(cloudfrontBaseUrl, awsCredentialProfileName)
-    s3Bucket = getBucket(cloudfrontDist, awsCredentialProfileName)
-    s3BucketPath = buildBucketPath(cloudfrontBaseUrl, cloudfrontDist)
-    targetBucketPath = urllib_compat.urljoin(s3BucketPath, os.path.basename(absFilePath))
-
-    # Check if file already exists in the S3 bucket.
-    file_exists = len(list(s3Bucket.objects.filter(Prefix=targetBucketPath))) > 0
-    if not file_exists or overwrite:
-        s3Bucket.upload_file(absFilePath, targetBucketPath)
-
-    return targetBucketPath
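
Side note: the path handling above reduces to standard URL parsing. A minimal standalone sketch with a hypothetical CloudFront URL:

from urllib.parse import urlparse

url = 'https://d111111abcdef8.cloudfront.net/releases/1.0/installer/setup.exe'  # hypothetical
domain_name = urlparse(url).netloc         # compared against each distribution's DomainName
key_path = urlparse(url).path.lstrip('/')  # leading slash removed to avoid a nameless S3 folder
print(domain_name, key_path)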

+ 0 - 12
Tools/build/JenkinsScripts/distribution/AWS_PyTools/__init__.py

@@ -1,12 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-

+ 0 - 303
Tools/build/JenkinsScripts/distribution/AWS_WAF_Updater/update_internal_whitelist.py

@@ -1,303 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import argparse
-import boto3
-import hashlib
-import json
-import math
-import netaddr
-import time
-import urllib2
-
-# Order of operations
-#   1) Get list of internal IPs from dogfish
-#   2) Translate IP ranges into WAF-vaild CIDR Subnets
-#   3) Get InternalIPWhitelist rule from WAF
-#   4) Get all list of all IP Sets on that rule
-#   5) Loop through all IP ranges in all IP sets
-#       a) If the IP address is not in the set of WAF-valid CIDR Subnets, remove it from the IPSet lists
-#       b) If the IP address is in the set of WAF-valid CIDR Subnets, remove it from the WAF-valid CIDR subnet list
-#   6) Anything left in the WAF-valid CIDR Subnet list needs to be added to IP Sets
-#       a) create a new IP set if necessary (max 1000 ranges per IP Set)
-#   7) Push updates to WAF
-
-rule_name = "Internal_IP_Whitelist"
-ip_set_basename = "Amazon_Internal_IPs"
-
-# Maximum number of IP descriptors per IP Set
-max_ranges_per_ip_set = 1000
-
-# Maximum number of IP descriptors updates per call
-max_ranges_per_update = 1000
-
-ip_version = "IPV4"
-
-
-def parse_args():
-    """Handle argument parsing and validate destination folder exists before returning argparse args"""
-    parser = argparse.ArgumentParser(description='Use AWS CLI to update the Internal IP Whitelist in dev or prod')
-
-    parser.add_argument('-l', '--list', action='store_true',
-                        help="List the WAF Valid IPs that the list of IPs translates to instead of running the update.")
-    parser.add_argument('-p', '--profile', default='default', help="The name of the AWS CLI profile to use.")
-
-    args = parser.parse_args()
-    return args
-
-
-def translate_and_apply_to_waf(ip_ranges, args):
-    subnet_list = list()
-    for ip_range in ip_ranges:
-        subnet_list.extend(translate_range_to_subnet_list(ip_range))
-
-    session = boto3.Session(profile_name=args.profile)
-    waf = session.client('waf')
-    change_tokens = apply_updates_to_waf(subnet_list, waf)
-
-    # wait for every change to finish before ending
-    waiting_on_changes = True
-    while waiting_on_changes:
-        change_in_progress = False
-        time.sleep(.3)
-        for token in change_tokens:
-            status = waf.get_change_token_status(ChangeToken=token["ChangeToken"])
-            change_in_progress = change_in_progress and (status == "PENDING")
-            if change_in_progress:
-                break
-        waiting_on_changes = change_in_progress
-
-    return not waiting_on_changes
-
-
-def apply_updates_to_waf(ip_list, waf):
-    white_list_rule = get_ip_whitelist_rule(waf)
-    ip_set_list, negate_conditions = get_ip_sets_from_rule(white_list_rule, waf)
-
-    ip_to_add = list(ip_list)
-    adds = list()
-
-    change_tokens = list()
-
-    # for each ip set, get the ip ranges in each descriptor and compare against the ip list
-    # remove duplicates from the list to add
-    for ip_set in ip_set_list:
-        for ip_range_descriptor in ip_set["IPSetDescriptors"]:
-            ip = netaddr.IPNetwork(ip_range_descriptor["Value"])
-            # if an ip in the list is already in an IPSet, then remove it from the list of stuff to add
-            if ip in ip_list:
-                ip_to_add.remove(ip)
-
-    # create add operations
-    for ip in ip_to_add:
-        adds.append(
-            {
-                'Action': 'INSERT',
-                'IPSetDescriptor': {
-                    'Type': ip_version,
-                    'Value': str(ip)
-                }
-            }
-        )
-
-    # make delete operations for anything in a set that isn't in the list of IPs, then populate the rest of the list
-    #   with adds until we hit max of the IPSet
-    for ip_set in ip_set_list:
-        removes = list()
-
-        for ip_range_descriptor in ip_set["IPSetDescriptors"]:
-            ip = netaddr.IPNetwork(ip_range_descriptor["Value"])
-            if ip not in ip_list:
-                removes.append(
-                    {
-                        'Action': 'DELETE',
-                        'IPSetDescriptor': ip_range_descriptor
-                    }
-                )
-
-        # perform the updates to this set
-        perform_updates_to_existing_ipset(ip_set, removes, adds, waf, change_tokens)
-
-    # we've done all the removes, and filled all existing ip_sets with adds. if we have leftovers, we need to make a
-    #   new ip_set and add it to the rule
-    if len(adds) > 0:
-        num_ip_sets = len(ip_set_list)
-        rule_updates = list()
-        while len(adds) > 0:
-            create_token = waf.get_change_token()
-            change_tokens.append(create_token)
-            ip_set = waf.create_ip_set(Name="{0}_{1}".format(ip_set_basename, num_ip_sets),
-                                       ChangeToken=create_token["ChangeToken"])["IPSet"]
-            num_ip_sets += 1
-            ip_set_id = ip_set["IPSetId"]
-            rule_updates.append(
-                {
-                    'Action': 'INSERT',
-                    'Predicate': {
-                        'Negated': negate_conditions,
-                        'Type': 'IPMatch',
-                        'DataId': ip_set_id
-                    }
-                }
-            )
-
-            # populate the new ip_set
-            batch = move_updates_to_batch(adds)
-            update_set_token = waf.get_change_token()
-            change_tokens.append(update_set_token)
-            waf.update_ip_set(IPSetId=ip_set_id,
-                              ChangeToken=update_set_token["ChangeToken"],
-                              Updates=batch)
-
-        # update the rule with the new ip_lists
-        update_rule_token = waf.get_change_token()
-        change_tokens.append(update_rule_token)
-        waf.update_rule(RuleId=white_list_rule["RuleId"],
-                        ChangeToken=update_rule_token["ChangeToken"],
-                        Updates=rule_updates)
-
-    return change_tokens
-
-
-def perform_updates_to_existing_ipset(ip_set, removes, adds, waf, change_tokens):
-    # figure out how many adds can be done on this ip_set after all of the removes
-    space_remaining_in_set = max_ranges_per_ip_set - (len(ip_set["IPSetDescriptors"]) - len(removes))
-
-    # fill the list of updates to perform with removes and adds until the end result is either a full set or
-    #   all operations have been performed.
-    updates = list(removes)
-    updates.extend(list(adds[0:space_remaining_in_set]))
-    adds[0:space_remaining_in_set] = []
-
-    # make batches of updates (max per batch = max_ranges_per_update)
-    update_batches = list()
-    while len(updates) > 0:
-        batch = move_updates_to_batch(updates)
-        update_batches.append(batch)
-
-    # submit an update set request for each batch
-    for batch in update_batches:
-        change_token = waf.get_change_token()
-        change_tokens.append(change_token)
-        waf.update_ip_set(IPSetId=ip_set["IPSetId"],
-                          ChangeToken=change_token["ChangeToken"],
-                          Updates=batch)
-
-
-def move_updates_to_batch(original_list):
-    items_to_batch = min(len(original_list), max_ranges_per_update)
-    batch = list(original_list[0:items_to_batch])
-    original_list[0:items_to_batch] = []
-    return batch
-
-
-def get_ip_whitelist_rule(waf_client):
-    rules_list = waf_client.list_rules(Limit=100)
-
-    whitelist_rule_id = None
-    for rule in rules_list["Rules"]:
-        if rule["Name"] == rule_name:
-            whitelist_rule_id = rule["RuleId"]
-
-    if whitelist_rule_id is None:
-        return None
-
-    return waf_client.get_rule(RuleId=whitelist_rule_id)["Rule"]
-
-
-def get_ip_sets_from_rule(rule, waf_client):
-    ip_sets = list()
-    negate_conditions = False
-    for condition in rule["Predicates"]:
-        if condition["Type"] == 'IPMatch':
-            ip_set_id = condition["DataId"]
-            ip_set = waf_client.get_ip_set(IPSetId=ip_set_id)
-
-            if ip_set is not None and ip_set_basename in ip_set["IPSet"]["Name"]:
-                ip_sets.append(ip_set["IPSet"])
-            else:
-                print "No IPSet with ID {0}".format(ip_set_id)
-
-            negate_conditions = condition["Negated"]
-
-    return ip_sets, negate_conditions
-
-
-def translate_range_to_subnet_list(ip_range):
-    """
-
-    :param ip_range: The IP address + CIDR prefix (IPNetwork object)
-    :return: A list of all WAF-valid subnets that compose the given IP range
-    """
-    prefix = ip_range.prefixlen
-    waf_valid_prefix = find_closest_waf_range(prefix)
-    return list(ip_range.subnet(waf_valid_prefix))
-
-
-def find_closest_waf_range(cidr_prefix_length):
-    """
-    AWS WAF only accepts CIDR ranges of /8, /16, /24, /32. Figure out the closest safe range to convert to.
-    This will always return a smaller range than what is passed in for security reasons.
-
-    :param cidr_prefix_length: The arbitrary CIDR range to convert to a WAF-valid range
-    :return: The closest WAF-valid range. i.e. if cidr_prefix_length = 18, this function will return 24
-    """
-    multiple = math.trunc(cidr_prefix_length / 8)
-    return min((multiple + 1) * 8, 32)   # needs to be 1 based to get accurate range; clamp at /32
-
-
-def list_all_waf_valid_subnets(ip_list):
-    num_ips = 0
-    for ip_range in ip_list:
-        subnets = translate_range_to_subnet_list(ip_range)
-        num_ips += len(subnets)
-        for ip in subnets:
-            print ip
-    return num_ips
-
-
-def main():
-    # IPs taken from https://w.amazon.com/index.php/PublicIPRanges
-    ips = ["207.171.176.0/20",  # SEA
-           "205.251.224.0/22",
-           "176.32.120.0/22",
-           "54.240.196.0/24",
-           "54.231.244.0/22",
-           "52.95.52.0/22",
-           "205.251.232.0/22",  # PDX
-           "54.240.230.0/23",
-           "54.240.248.0/21",
-           "54.231.160.0/19",
-           "54.239.2.0/23",
-           "54.239.48.0/22",
-           "52.93.12.0/22",
-           "52.94.208.0/21",
-           "52.218.128.0/17",
-           "204.246.160.0/22",  # SFO
-           "205.251.228.0/22",
-           "176.32.112.0/21",
-           "54.240.198.0/24",
-           "54.231.232.0/21",
-           "52.219.20.0/22",
-           "52.219.24.0/21"]
-
-    args = parse_args()
-
-    ip_objects = list(netaddr.IPNetwork(ip) for ip in ips)
-
-    if args.list:
-        print list_all_waf_valid_subnets(ip_objects)
-    else:
-        return translate_and_apply_to_waf(ip_objects, args)
-
-if __name__ == '__main__':
-    main()
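
Side note: the CIDR translation in step 2 of the order of operations can be exercised on its own. A minimal sketch using netaddr, with one of the address ranges from main():

import netaddr

ip_range = netaddr.IPNetwork('54.240.230.0/23')
# /23 is not WAF-valid, so round down to the next multiple of 8: /24
waf_prefix = (int(ip_range.prefixlen / 8) + 1) * 8
print(list(ip_range.subnet(waf_prefix)))  # [IPNetwork('54.240.230.0/24'), IPNetwork('54.240.231.0/24')]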

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/BootstrapperLogo.png

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:45c80fc02c64747d3eb8fe76d89979b573401b92482c27d8c95bd01df22c525f
-size 6582

+ 0 - 289
Tools/build/JenkinsScripts/distribution/Installer/BuildInstaller.py

@@ -1,289 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import glob
-import shutil
-import InstallerParams
-from Insignia import *
-from InstallerArgs import *
-from InstallerPackaging import *
-from Light import *
-from SignTool import *
-
-
-# Per Installer:
-#     Heat the includes to build the fragment
-#     candle + light to build the installer / merge module / whatever
-#     Collect the results
-#     If Signing:
-#         Copy installer and cabs to a clean folder
-#         Sign the CAB files
-#         insignia the installer to update its cab file references
-#         Sign the installer
-# For the Bootstrapper:
-#     Generate S3 links or whatever data we need to shove into the final installer.
-#     Generate a WXS file if we need to based on the above data
-#     candle + light generated WXS + pre-built WXS to build the installer (from
-#       the unsigned files or, if signing, the signed files of the installers)
-#     If Signing:
-#         Copy bootstrapper and cabs to a clean folder
-#         Detach the burn engine from the bootstrapper, and sign it
-#         Reattach the burn engine to the bootstrapper, and sign the bootstrapper
-# After completion, clean up temp files if necessary.
-
-
-class OperationMode:
-    StepCounting, BuildInstaller = range(2)
-
-    # It's good practice to define an init for a class in Python, but this
-    # class only exists to serve as an enum for the operation mode.
-    # Defining the __init__ with just a pass is a way to stub out this function so
-    # Python IDEs don't get upset that there is a class with no init.
-    def __init__(self):
-        pass
-
-
-args = createArgs()
-try:
-    params = InstallerParams.InstallerParams(args)
-    validateArgs(args, params)
-except InstallerParams.InstallerParamError as error:
-    raise Exception('Installer Params failed to be created with error:\n{}'.format(error))
-
-# Tracking this like this to simplify calls to performStep.
-# Otherwise, it has to look like this:
-#     def performStep(mode, stepsTaken, maxSteps, message, operation, *operationArgs):
-#         return stepsTaken+1, result
-# and calls to performStep have to also capture the stepsTaken.
-stepsTaken = 0
-maxSteps = 0
-operationMode = OperationMode.StepCounting
-
-
-# The goal of authoring this function is to simplify calls in, which reduces friction when using it.
-# Call this when you want to only call a function during full operational mode, and not during step counting.
-# This will use the function's name as the message for the printout.
-def performStep(operation, *operationArgs):
-    return performStepWithMessage(operation.__name__, operation, *operationArgs)
-
-
-# Call when you want a message that does not match the function's name.
-def performStepWithMessage(message, operation, *operationArgs):
-    global stepsTaken
-    result = None
-    if operationMode == OperationMode.BuildInstaller:
-        printProgress(message, stepsTaken, maxSteps)
-        result = operation(*operationArgs)
-    stepsTaken += 1
-    return result
-
-
-# Call when you want to handle branching yourself. Can be used to trigger a branch for the operation mode.
-def describeStep(message):
-    global stepsTaken
-    if operationMode == OperationMode.BuildInstaller:
-        printProgress(message, stepsTaken, maxSteps)
-    stepsTaken += 1
-
-
-def buildInstaller():
-    global stepsTaken
-    stepsTaken = 0
-    # CREATE PACKAGES
-    if not params.skipMsiAndCabCreation:
-        performStep(createThirdPartyPackages, args, params)
-        performStep(createDevPackage, args, params)
-        performStep(createRootPackage, args, params)
-    else:
-        describeStep("Skipping Create Packages (MSIs and Cabs) step, re-using existing packages.")
-
-    if not params.doSigning or not args.bootstrapOnly:
-        params.msiFileNameList = performStepWithMessage("Gathering MSIs",
-                                                        get_file_names_in_directory,
-                                                        params.intermediateInstallerPath,
-                                                        ".msi")
-        params.cabFileNameList = performStepWithMessage("Gathering CABs",
-                                                        get_file_names_in_directory,
-                                                        params.intermediateInstallerPath,
-                                                        ".cab")
-
-    # SIGN CABs AND MSIs
-    if params.doSigning and not args.bootstrapOnly:
-        # we don't want to modify the original metrics exe, so copy it to where the
-        #   other clean files are, and update the path to it
-        if params.metricsPath != params.intermediateInstallerPath:
-            describeStep("Copying metrics to signing path")
-            if operationMode == OperationMode.BuildInstaller:
-                params.metricsPath = params.intermediateInstallerPath
-                safe_shutil_file_copy(params.fullPathToMetrics, os.path.join(params.metricsPath, params.metricsExe))
-
-        # copy the clean cab and msi files to a new directory to sign them.
-        if params.installerPath != params.intermediateInstallerPath:
-            describeStep("Copying clean MSI, CAB, and EXE files to signing path")
-            if operationMode == OperationMode.BuildInstaller:
-                if os.path.exists(params.installerPath):
-                    verbose_print(args.verbose, "Removing old files from signing path.")
-                    shutil.rmtree(params.installerPath)
-                shutil.copytree(params.intermediateInstallerPath, params.installerPath)
-                # don't need the wixpdb files in the signing folder, so get rid of them
-                for filepath in glob.glob(os.path.join(params.installerPath, "*.wixpdb")):
-                    os.remove(filepath)
-
-        # Sign the Cab files, verify signing was successful
-        performStep(signtoolSignAndVerifyFiles,
-                    params.cabFileNameList,
-                    params.installerPath,
-                    params.intermediateInstallerPath,
-                    params.signingType,
-                    args.timestampServer,
-                    args.verbose)
-
-        # Run Insignia on the MSI files
-        performStep(insigniaMSIs,
-                    params.installerPath,
-                    args.verbose,
-                    params.msiFileNameList)
-
-        # Sign the MSIs, verify signing was successful
-        performStepWithMessage("Signing and verifying MSIs",
-                               signtoolSignAndVerifyFiles,
-                               params.msiFileNameList,
-                               params.installerPath,
-                               params.intermediateInstallerPath,
-                               params.signingType,
-                               args.timestampServer,
-                               args.verbose)
-
-        # Sign the Metrics executable
-        performStepWithMessage("Signing and verifying metrics.exe",
-                               signtoolSignAndVerifyFile,
-                               params.metricsExe,
-                               params.installerPath,
-                               params.intermediateInstallerPath,
-                               params.signingType,
-                               args.timestampServer,
-                               args.verbose)
-
-        # make sure that the bootstrapper will get the metrics exe from the right place
-        params.metricsPath = params.installerPath
-
-    # CREATE BOOTSTRAP
-    packageNameList = ""
-    if operationMode is OperationMode.BuildInstaller:
-        for msiName in params.msiFileNameList:
-            packageName = os.path.splitext(msiName)[0]
-            packageNameList += '{};'.format(packageName)
-        # Remove the last semi-colon from the list.
-        packageNameList = packageNameList[:-1]
-
-    # CANDLE BOOTSTRAP
-    success = performStep(candleBootstrap,
-                          params.bootstrapWixObjDir,
-                          params.installerPath,
-                          packageNameList,
-                          "LumberyardBootstrapper.wxs Redistributables.wxs",
-                          args.hostURL,
-                          args.verbose,
-                          args.lyVersion,
-                          params.metricsPath,
-                          params.metricsExe,
-                          params.pathTo2015Thru2019Redist,
-                          params.redist2015Thru2019Exe,
-                          create_id('LumberyardBootstrapper', 'BOOTSTRAPPER', args.lyVersion, args.buildId))
-
-    assert (operationMode is not OperationMode.BuildInstaller or success == 0), \
-        "Failed to generate wixobj file for bootstrapper."
-
-    # LIGHT BOOTSTRAP
-    success = performStep(lightBootstrap,
-                          params.bootstrapOutputPath,
-                          os.path.join(params.bootstrapWixObjDir, "*.wixobj"),
-                          args.verbose,
-                          args.cabCachePath)
-
-    assert (operationMode is not OperationMode.BuildInstaller or success == 0), \
-        "Failed to generate executable file for bootstrapper."
-
-    # SIGN ENGINE AND BOOTSTRAPPER
-    if params.doSigning:
-        # make sure the c++ redist gets copied from the temp directory to the actual
-        #   installer output path with the bootstrapper.
-        if operationMode == OperationMode.BuildInstaller:
-            safe_shutil_file_copy(os.path.join(params.tempBootstrapOutputDir, params.redist2015Thru2019Exe),
-                                  os.path.join(params.installerPath, params.redist2015Thru2019Exe))
-
-        unsignedBootstrapPath = os.path.join(params.installerPath, params.tempBootstrapName)
-        signingBootstrapPath = os.path.join(params.installerPath, params.bootstrapName)
-        signingEnginePath = os.path.join(params.installerPath, "engine.exe")
-        if operationMode == OperationMode.BuildInstaller:
-            shutil.copy(params.bootstrapOutputPath, unsignedBootstrapPath)
-
-        # copy bootstrapper to installerPath?
-        if operationMode == OperationMode.BuildInstaller and \
-           params.installerPath != params.intermediateInstallerPath and \
-           os.path.exists(signingEnginePath):
-            os.remove(signingEnginePath)
-
-        # extract the engine from the bootstrapper with Insignia
-        success = performStep(insigniaDetachBurnEngine,
-                              unsignedBootstrapPath,
-                              signingEnginePath,
-                              args.verbose)
-        assert (operationMode is not OperationMode.BuildInstaller or success == 0), \
-            "Failed to detach burn engine from bootstrapper."
-
-        # sign the engine, verify signing was successful
-        performStep(signtoolSignAndVerifyFile,
-                    signingEnginePath,
-                    params.installerPath,
-                    params.intermediateInstallerPath,
-                    params.signingType,
-                    args.timestampServer,
-                    args.verbose)
-
-        # attach the engine back to the bootstrapper with Insignia
-        success = performStep(insigniaAttachBurnEngine,
-                              unsignedBootstrapPath,
-                              signingEnginePath,
-                              signingBootstrapPath,
-                              args.verbose)
-        assert (operationMode is not OperationMode.BuildInstaller or success == -1 or success == 0), \
-            "Failed to reattach burn engine to bootstrapper with error {}.".format(success)
-
-        # delete the stray engine file since it has been reattached to the installer
-        if operationMode is OperationMode.BuildInstaller:
-            os.remove(signingEnginePath)
-            os.remove(unsignedBootstrapPath)
-
-        # sign the bootstrapper, verify the signing was successful
-        performStep(signtoolSignAndVerifyFile,
-                    signingBootstrapPath,
-                    params.installerPath,
-                    params.intermediateInstallerPath,
-                    params.signingType,
-                    args.timestampServer,
-                    args.verbose)
-
-    if args.buildId is not None:
-        performStep(create_version_file, args.buildId, params.installerPath)
-
-    if not args.keep:
-        performStep(cleanTempFiles, params)
-    describeStep("All steps completed")
-
-
-operationMode = OperationMode.StepCounting
-maxSteps = 0
-buildInstaller()
-
-operationMode = OperationMode.BuildInstaller
-maxSteps = stepsTaken
-buildInstaller()
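
Side note: the two calls to buildInstaller() implement a count-then-run pattern: the first pass in StepCounting mode only tallies steps so maxSteps is known, and the second pass does the work while printing accurate "step N of M" progress. A minimal sketch of that pattern, independent of the installer specifics (names are illustrative):

class Mode:
    StepCounting, Build = range(2)

steps_taken = 0
max_steps = 0
mode = Mode.StepCounting

def perform_step(operation, *args):
    global steps_taken
    result = None
    if mode == Mode.Build:
        print('[{}/{}] {}'.format(steps_taken + 1, max_steps, operation.__name__))
        result = operation(*args)
    steps_taken += 1
    return result

def build():
    global steps_taken
    steps_taken = 0
    perform_step(print, 'package')  # stand-ins for the real packaging/signing steps
    perform_step(print, 'sign')

build()  # first pass: count steps only
mode, max_steps = Mode.Build, steps_taken
build()  # second pass: do the work with accurate progress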

+ 0 - 206
Tools/build/JenkinsScripts/distribution/Installer/BuildInstallerUtils.py

@@ -1,206 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import json
-import os
-import re
-import shutil
-import uuid
-from urllib.parse import urlparse
-
-
-download_url_base = "http://gamedev.amazon.com/lumberyard/releases/"
-
-
-def set_download_url_base(url):
-    global download_url_base
-    download_url_base = url
-
-
-def is_url(potential_url):
-    return urlparse(potential_url)[0] == 'https'
-
-
-def get_package_name(package_path):
-    path_to_file = package_path
-    if is_url(package_path):
-        # index 2 is everything that isn't a parameter in the URL after the high level domain
-        #   see https://docs.python.org/2/library/urlparse.html#urlparse.urlparse for more info
-        path_to_file = urlparse(package_path)[2]
-    return os.path.basename(path_to_file)
-
-
-def get_ly_version_from_package(args, unpacked_location):
-    version = None
-    path_to_default_settings = os.path.join(unpacked_location, 'dev/_WAF_/default_settings.json')
-    verbose_print(args.verbose, 'Searching for Lumberyard version in {}'.format(path_to_default_settings))
-    with open(path_to_default_settings) as default_settings_file:
-        default_settings_data = json.load(default_settings_file)
-        default_settings_file.close()
-        for build_option in default_settings_data['Build Options']:
-            if 'attribute' in build_option and 'default_value' in build_option:
-                if build_option['attribute'] == 'version':
-                    version = build_option['default_value']
-
-    if version is None:
-        verbose_print(args.verbose, 'Version not found in package')
-        raise Exception('Version was not available in default settings.json')
-
-    return version
-
-
-def append_trailing_slash_to_url(url):
-    if not url.endswith(tuple(['/', '\\'])):
-        url += '/'
-    return url
-
-
-def generate_target_url(base_target_url, version, build_id, suppress_version_in_path, append_build_id):
-    output_url = base_target_url
-    if append_build_id:
-        output_url = append_trailing_slash_to_url(output_url)
-        output_url += build_id
-    if not suppress_version_in_path:
-        output_url = append_trailing_slash_to_url(output_url)
-        output_url += '{}/installer'.format(version)
-    return output_url
-
-
-# PRODUCT & UPGRADE/PATCH GUID CREATION
-def create_id(name, seed, version, build_id):
-    """
-    Generate the Product GUID using the name, the version, and a changelist value.
-    @param name - Name of the product.
-    @param seed - String used to create a unique GUID.
-    @param version - The version of the product in the form "PRODUCT.MAJOR.MINOR.PATCH".
-    @param build_id - An optional identifier for the build to be used in GUID generation.
-    @return - A GUID for this version of the product.
-    """
-    # Temporary. Replace the download_url_base with wherever we pass in to the public host or host url.
-    uuid_seed = download_url_base + seed + name + version
-    if build_id is not None:
-        uuid_seed += build_id
-    return str(uuid.uuid3(uuid.NAMESPACE_URL, uuid_seed)).upper()
-# END PRODUCT & UPGRADE/PATCH GUID CREATION
-
-
-def replace_leading_numbers(source_string):
-    pattern = re.compile(r'^[0-9]')
-    return pattern.sub('N', source_string)
-
-
-def strip_special_characters(source_string):
-    """
-    Remove all non-alphanumeric characters from the given sourceString.
-    @return - A new string that is a copy of the original, containing only
-        letters and numbers.
-    """
-    if source_string.isalnum():
-        return source_string
-
-    pattern = re.compile(r'[\W_]+')
-    return pattern.sub('', source_string)
-
-
-def check_for_empty_subfolders(package_root, allowed_empty_folders):
-    # find empty folders
-    empty_folders_found = []
-    for root, dirs, files in os.walk(package_root):
-        if dirs == [] and files == []:
-            empty_folders_found.append(os.path.normpath(os.path.relpath(root, package_root)))
-    if allowed_empty_folders is not None:
-        whitelist_root = 'Whitelist'
-        whitelist_folders = []
-        assert (os.path.exists(allowed_empty_folders)), 'The whitelist file specified at {} does not exist.'.format(allowed_empty_folders)
-        with open(allowed_empty_folders, 'r') as source:
-            json_whitelist = json.load(source)
-            try:
-                for allowed_folder in json_whitelist[whitelist_root]:
-                    whitelist_folders.append(os.path.normpath(allowed_folder))
-            except KeyError:
-                print('Unknown json root {}, please check the json root specified.'.format(whitelist_root))
-                exit(1)
-        assert (len(whitelist_folders) > 0), 'The whitelist in the file specified at {} is empty. Either populate the list or omit the argument.'.format(allowed_empty_folders)
-        for folder in empty_folders_found:
-            assert (folder in whitelist_folders), 'The empty folder {} could not be found in the whitelist of empty folders.'.format(folder)
-
-
-def get_immediate_subdirectories(root_dir):
-    """
-    Create a list of all directories that exist in the given directory, without
-        recursing through the subdirectories' children.
-    @param root_dir - The directory to search for subdirectories.
-    @return - A list of subdirectories in this directory, excluding their children.
-    """
-    directories = []
-    for directory in os.listdir(root_dir):
-        if os.path.isdir(os.path.join(root_dir, directory)):
-            directories.append(directory)
-
-    return directories
-
-
-def get_file_names_in_directory(directory, file_extension=None):
-    """
-    Create a list of all files in a directory. If given a file extension, it
-        will list all files with that extension.
-    @param directory - The directory to gather files from.
-    @param file_extension - (Optional) The type of files to find. Must be a
-        string in the ".extension" format. (Default None)
-    @return - A list of names of all files in this directory (that match the given
-        extension if provided).
-    """
-    file_list = []
-    if file_extension:
-        for file in os.listdir(directory):
-            if file.endswith(file_extension):
-                file_list.append(os.path.basename(file))
-    else:
-        for file in os.listdir(directory):
-            file_list.append(os.path.basename(file))
-
-    return file_list
-
-
-# VERBOSE RELATED FUNCTIONS
-
-def verbose_print(isVerbose, message):
-    if isVerbose:
-        print(message)
-
-
-def find_file_in_package(packageRoot, fileToFind, pathFilters=None):
-    """
-    Searches the package path for a file.
-    @param packageRoot: Path to the root of content.
-    @param fileToFind: Name of the file to find.
-    @param pathFilters: Path filter to apply to find.
-    @return: The full path to the file if found, otherwise None.
-    """
-    for root, dirs, files in os.walk(packageRoot):
-        if fileToFind in files:
-            if pathFilters is None or any(pathFilter in root for pathFilter in pathFilters):
-                return os.path.join(root, fileToFind)
-    return None
-
-
-def safe_shutil_file_copy(src, dst):
-    # need to remove the old version of dst if it already exists due to a bug
-    #   in shutil.copy that causes both the src and dst files to be zeroed out
-    #   if they are identical files.
-    if os.path.exists(dst):
-        os.remove(dst)
-    shutil.copy(src, dst)
-
-
-def create_version_file(buildId, installerPath):
-    with open(os.path.join(installerPath, 'version.txt'), 'w') as versionFile:
-        versionFile.write(buildId)
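
A minimal, hypothetical sketch (not part of the deleted scripts, and not the real whitelist file) of the JSON shape check_for_empty_subfolders above expects: a single "Whitelist" root listing the relative folder paths that are allowed to be empty.

import json
import os
import tempfile

# Hypothetical allowed-empty-folder whitelist; the folder names are examples only.
whitelist = {'Whitelist': [os.path.normpath('dev/Cache'), os.path.normpath('3rdParty/placeholder')]}

whitelist_path = os.path.join(tempfile.gettempdir(), 'allowed_empty_folders_example.json')
with open(whitelist_path, 'w') as f:
    json.dump(whitelist, f, indent=4)
# check_for_empty_subfolders(package_root, whitelist_path) would then only accept
# empty folders whose normalized relative paths appear in that list.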

+ 0 - 85
Tools/build/JenkinsScripts/distribution/Installer/BuildInstallerWixUtils.py

@@ -1,85 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import glob
-import os
-import shutil
-from BuildInstallerUtils import *
-
-
-def boolToWixBool(value):
-    """
-    WiX uses the strings "yes" and "no" for its boolean values.
-    @return - "yes" if value == True, otherwise "no".
-    """
-    if value:
-        return "yes"
-    else:
-        return "no"
-
-
-def createPackageInfo(directoryName, rootDirectory, outputDirectory, outputPrefix=""):
-    """
-    Generate commonly used information about a package that is used for WiX functions.
-    @param directoryName - The name of the directory that will be packaged.
-    @param rootDirectory - The path to the given package.
-    @param outputDirectory - The full path (including file name) to the wxs file
-        generated for this package.
-    @param outputPrefix - A string to prepend to the name of the
-        package when creating the output file. (Default = "").
-    @return - A dictionary containing the package name, source information, and
-        output information.
-    """
-    safeDirectoryName = directoryName
-    if not safeDirectoryName:
-        safeDirectoryName = "packageRoot"
-    moduleName = strip_special_characters(safeDirectoryName)
-    sourceDirectory = os.path.join(rootDirectory, directoryName)
-    wxsName = '{}{}'.format(outputPrefix, moduleName)
-    outputPath = os.path.join(outputDirectory, '{}.wxs'.format(wxsName))
-    componentGroupRef = '{}CG'.format(replace_leading_numbers(wxsName))
-
-    packageInfo = {
-        'name': moduleName,
-        'wxsName': wxsName,
-        'wxsPath': outputPath,
-        'sourceName': safeDirectoryName,
-        'sourcePath': sourceDirectory,
-        'componentGroupRefs': componentGroupRef
-    }
-    return packageInfo
-
-
-def getVerboseCommand(verboseMode):
-    if verboseMode:
-        return " -v"
-    else:
-        return ""
-
-
-def printProgress(message, stepCount, maxSteps):
-    # We want the final step count to match the total number of steps, so add one.
-    print('{}/{}: {}'.format(stepCount + 1, maxSteps, message))
-
-
-def cleanTempFiles(params):
-    dirs_to_delete = [
-        params.wxsRoot,
-        params.wixObjOutput,
-        params.bootstrapWixObjDir,
-        params.tempBootstrapOutputDir,
-    ]
-
-    for del_dir in dirs_to_delete:
-        if os.path.exists(del_dir):
-            shutil.rmtree(del_dir)
-    for filepath in glob.glob(os.path.join(params.intermediateInstallerPath, "*.wixpdb")):
-        os.remove(filepath)
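
As a quick illustration (hypothetical paths, assuming the deleted BuildInstallerWixUtils module is importable), createPackageInfo above bundles the names and paths that the later Heat/Candle/Light steps consume:

from BuildInstallerWixUtils import createPackageInfo

info = createPackageInfo('AWSNativeSDK', 'C:/pkg/3rdParty', 'C:/temp/wxs', outputPrefix='ThirdParty')
print(info['wxsName'])             # 'ThirdPartyAWSNativeSDK'
print(info['componentGroupRefs'])  # 'ThirdPartyAWSNativeSDKCG'
print(info['wxsPath'])             # os.path.join('C:/temp/wxs', 'ThirdPartyAWSNativeSDK.wxs')
print(info['sourcePath'])          # os.path.join('C:/pkg/3rdParty', 'AWSNativeSDK')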

+ 0 - 145
Tools/build/JenkinsScripts/distribution/Installer/Candle.py

@@ -1,145 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-from BuildInstallerWixUtils import *
-
-# CANDLE COMMANDLINE TEMPLATES
-candleCommandBase = "candle.exe -nologo -o {outputDirectory} {verbose} {preprocessorParams} {wxsFile}"
-candlePackagePreprocessor = " -dProductGUID={productGUID} -dProductUpgradeGUID={upgradeCodeGUID}" \
-                            " -dLumberyardVersion={lumberyardVersion}" \
-                            " -dCabPrefix={cabPrefix}" \
-                            " -dComponentGroupRefs={packageComponentGroup}" \
-                            ' -dComponentRefs={componentRefs} -dPackageName="{packageName}"'
-candleDownloadPreprocessor = " -dROOTURL={downloadURL}"
-candleBootstrapPreprocessor = "-dLYInstallerPath={installerPath} -dLYInstallerNameList={installerList}" \
-                              " -dLumberyardVersion={lumberyardVersion}" \
-                              " -dUseStreaming={useStreaming}" \
-                              ' -dMetricsSourcePath="{metricsSourcePath}"' \
-                              ' -dMetricsExeName="{metricsExe}"' \
-                              ' -dPathTo2015Thru2019Redist="{pathTo2015Thru2019Redist}"' \
-                              ' -dFilename2015Thru2019Redist="{redist2015Thru2019Exe}"' \
-                              " -dUpgradeCode={upgradeGUID} -ext WixBalExtension -ext WixUtilExtension"
-usedCabPrefixList = set()
-
-
-def candlePackageContent(outputDir, wxsPath, verboseMode):
-    verboseCmd = getVerboseCommand(verboseMode)
-    candleCommand = candleCommandBase.format(outputDirectory=os.path.join(outputDir, ''),
-                                             verbose=verboseCmd, preprocessorParams="", wxsFile=wxsPath)
-
-    verbose_print(verboseMode, '\n{}\n'.format(candleCommand))
-    return os.system(candleCommand)
-
-
-def candleAllPackagesContent(packageInfoMap, wixobjDir, verboseMode):
-    for packageInfo in packageInfoMap.values():
-        outputDir = os.path.join(wixobjDir, packageInfo['wxsName'])
-        success = candlePackageContent(outputDir, packageInfo['wxsPath'], verboseMode)
-        assert (success == 0), 'Failed to generate wixobj file for {} content.'.format(packageInfo['name'])
-
-
-# Cab prefixes have to be less than 8 characters, including the sequential numbers for multiple cabs.
-# To give room for multiple cabs for an MSI, we're dropping down to 5 characters.
-# When we strip some paths in 3rd party to 5 characters, they collide, so we do a little extra
-# logic to help with collisions.
-def generateCabPrefix(packageName):
-    # We want a cab prefix length of five to give room for 100+ cabs for an MSI
-    cabPrefixLength = 5
-
-    uniquenessIndex = -1
-    uniquenessValue = 0
-    cabName = packageName[:cabPrefixLength]
-
-    # If there is a cab name collision, i.e. two packages truncate to the same five-character
-    # value, then we fiddle with the cab prefix to get something unique.
-    # This simple logic starts at the last character in the prefix and replaces it with a numeral;
-    # it keeps trying that, moving earlier in the string as it does so.
-    while cabName in usedCabPrefixList:
-        cabName = cabName[:cabPrefixLength+uniquenessIndex] + str(uniquenessValue) + cabName[cabPrefixLength+uniquenessIndex+1:]
-        uniquenessValue += 1
-        if uniquenessValue > 9:
-            uniquenessValue = 0
-            uniquenessIndex -= 1
-            if uniquenessIndex <= -cabPrefixLength:
-                raise Exception("Could not generate unique cab name for {}".format(packageName))
-
-    usedCabPrefixList.add(cabName)
-    return cabName
-
-
-def candlePackage(outputDir, wxsTemplatePath, packageInfo, verboseMode, lyVersion, buildId):
-    verboseCmd = getVerboseCommand(verboseMode)
-
-    cabPrefix = generateCabPrefix(packageInfo['name'])
-    productGUID = create_id(packageInfo['name'], 'PRODUCT', lyVersion, buildId)
-    productUpgradeGUID = create_id(packageInfo['name'], 'PRODUCTUPDATE', lyVersion, buildId)
-    componentRefs = packageInfo.get('componentRefs', '')
-
-    candlePreprocessor = candlePackagePreprocessor.format(productGUID=productGUID,
-                                                          upgradeCodeGUID=productUpgradeGUID,
-                                                          lumberyardVersion=lyVersion,
-                                                          componentRefs=componentRefs,
-                                                          cabPrefix=cabPrefix,
-                                                          packageComponentGroup=packageInfo['componentGroupRefs'],
-                                                          packageName=packageInfo['sourceName'])
-
-    candleCommand = candleCommandBase.format(outputDirectory=outputDir,
-                                             verbose=verboseCmd,
-                                             preprocessorParams=candlePreprocessor,
-                                             wxsFile=wxsTemplatePath)
-
-    verbose_print(verboseMode, '\n{}\n'.format(candleCommand))
-    return os.system(candleCommand)
-
-
-def candlePackages(packageInfoMap, wixobjDir, wxsTemplatePath, verboseMode, lyVersion, buildId):
-    for packageInfo in packageInfoMap.values():
-        outputDir = os.path.join(wixobjDir, packageInfo['wxsName'], 'HeatPackage{}.wixobj'.format(packageInfo['wxsName']))
-        success = candlePackage(outputDir, wxsTemplatePath, packageInfo, verboseMode, lyVersion, buildId)
-        assert (success == 0), 'Failed to generate wixobj file for {}.'.format(packageInfo['name'])
-
-
-def candleBootstrap(outputDir,
-                    installersPath,
-                    installerNameList,
-                    wxsFileName,
-                    downloadURL,
-                    verboseMode,
-                    lyVersion,
-                    metricsSourcePath,
-                    metricsExe,
-                    pathTo2015Thru2019Redist,
-                    redist2015Thru2019Exe,
-                    upgradeCodeGUID):
-    verboseCmd = getVerboseCommand(verboseMode)
-    useStreaming = downloadURL is not None
-    useStreamingText = boolToWixBool(useStreaming)
-    candlePreprocessor = candleBootstrapPreprocessor.format(installerPath=os.path.join(installersPath, ''),
-                                                            installerList=installerNameList,
-                                                            lumberyardVersion=lyVersion,
-                                                            useStreaming=useStreamingText,
-                                                            upgradeGUID=upgradeCodeGUID,
-                                                            metricsSourcePath=metricsSourcePath,
-                                                            metricsExe=metricsExe,
-                                                            pathTo2015Thru2019Redist=pathTo2015Thru2019Redist,
-                                                            redist2015Thru2019Exe=redist2015Thru2019Exe)
-    if useStreaming:
-        # The WXS file already inserts a slash between the root URL and the package name: "$(var.ROOTURL)/{2}"
-        # We need to strip any trailing slash from the URL here so it does not get doubled up.
-        downloadURL = downloadURL.rstrip('/')
-        candlePreprocessor += candleDownloadPreprocessor.format(downloadURL=downloadURL)
-
-    candleCommand = candleCommandBase.format(outputDirectory=outputDir, verbose=verboseCmd,
-                                             preprocessorParams=candlePreprocessor, wxsFile=wxsFileName)
-
-    verbose_print(verboseMode, '\n{}\n'.format(candleCommand))
-    return os.system(candleCommand)
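
A small sketch (hypothetical package names, assuming the deleted Candle module is importable) of how generateCabPrefix above resolves collisions between names that truncate to the same five characters:

from Candle import generateCabPrefix

print(generateCabPrefix('AWSNativeSDK'))      # 'AWSNa' - plain five-character truncation
print(generateCabPrefix('AWSNativeSDKCore'))  # 'AWSN0' - last character replaced with a numeral
print(generateCabPrefix('AWSNativeSDKUtil'))  # 'AWSN1' - the numeral keeps incrementing per collision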

+ 0 - 257
Tools/build/JenkinsScripts/distribution/Installer/Heat.py

@@ -1,257 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-import sys
-import traceback
-import xml.etree.ElementTree as ET
-
-from BuildInstallerWixUtils import *
-
-# HEAT COMMANDLINE TEMPLATES
-heatCommandBase = 'heat.exe {harvestType} "{harvestSource}" -nologo -sreg -gg'
-heatCommandRequiredArgs = " -dr {directoryRef} -cg {componentGroup} -out {outputPath}"
-
-
-def convertToForwardSlashAndLower(pathString):
-    """
-    Used to ensure path strings are formatted in a consistent manner.
-    """
-    return pathString.replace('\\', '/').lower()
-
-
-def lowerDictValues(dictToLower):
-    """
-    Lower-cases (and forward-slashes) every entry in the list values of the given dictionary, in place.
-    """
-    for key in dictToLower.keys():
-        if isinstance(dictToLower[key], list):
-            dictToLower[key][:] = [convertToForwardSlashAndLower(valueStr) for valueStr in dictToLower[key]]
-
-
-def removeWxsEntriesWithJsonDirFilelist(wxsFilepath, jsonDirFilelist, directoryKey):
-    """
-    Removes entries from the given *.wxs file with the given jsonDirFilelist.
-    @param wxsFilepath - Path to *.wxs file to operate the given jsonDirFilelist against
-    @param jsonDirFilelist - Dictionary populated from given "dirFilelist" JSON. Files
-        in the *.wxs files that don't have corresponding entries in dirFilelist will be
-        removed from the *.wxs XML content.
-    @param directoryKey - The key in jsonDirFilelist whose file list applies to this *.wxs file.
-    """
-    # Without registering the namespace, the ET XML serialized output
-    # is pretty funky, and WiX will not be happy.
-    namespaceStr = 'http://schemas.microsoft.com/wix/2006/wi'
-    ET.register_namespace('', namespaceStr)
-    tree = ET.ElementTree()
-    tree.parse(wxsFilepath)
-    xmlRoot = tree.getroot()
-
-    # Find all Component tags that have File tags
-    ns = {'wixns': namespaceStr}
-    componentGroupList = xmlRoot.findall('.//wixns:ComponentGroup', ns)
-    for componentGroup in componentGroupList:
-        componentList = componentGroup.findall('.//wixns:Component[wixns:File]', ns)
-
-        for component in componentList:
-            for childFileTag in list(component):  # copy the children so removal is safe while iterating
-                # Entries are typically prefixed with an unnecessary 'SourceDir\' string
-                sourceValue = convertToForwardSlashAndLower(childFileTag.attrib['Source']).replace('sourcedir/', '')
-                if sourceValue not in jsonDirFilelist[directoryKey]:
-                    component.remove(childFileTag)
-
-            # WXS files generated by heat typically only have one File tag per Component,
-            # but we'll check whether the component is now empty, just in case.
-            if len(component) < 1:  # Element supports len(); getchildren() was removed in newer Python 3
-                componentGroup.remove(component)
-
-    tree.write(wxsFilepath)
-
-
-def heatDirectory(wxsName, sourceDirectory, outputPath, componentGroup, directoryRefName, verbose):
-    """
-    Generate a .wxs file for a given directory, including all subdirectories,
-        and place it at the given outputPath.
-    @remarks - Intentionally hardcoding harvest type to "dir" here. For other
-        types of harvests, a new function should be created. Any logic not
-        directly related to executing the heat command should exist outside this
-        function.
-    """
-    verboseCmd = getVerboseCommand(verbose)
-
-    # Intentionally hardcoding harvest type to "dir" here. For other types of harvests, a new
-    #   function should be created. Any logic not directly related to executing
-    #   the heat command should exist outside this function.
-    heatCommand = heatCommandBase.format(harvestType="dir",
-                                         harvestSource=sourceDirectory)
-    heatCommand += verboseCmd
-    heatCommand += heatCommandRequiredArgs.format(directoryRef=directoryRefName,
-                                                  componentGroup=componentGroup, outputPath=outputPath)
-
-    verbose_print(verbose, '\n{}\n'.format(heatCommand))
-    return os.system(heatCommand)
-
-
-def heatDirectories(rootDirectory, outputDirectory, directoryRefName, verbose, outputPrefix="", dirFilelist=None):
-    """
-    Generates package info for every directory in the given rootDirectory, and
-        gathers each package's content information into a .wxs file.
-    @param rootDirectory - The directory containing the source of many packages.
-    @param outputDirectory - The directory to put all generated .wxs files.
-    @param directoryRefName - The ID of the directory for these source files to
-        be placed when installed. (Must match HeatPackageBase directory ID.)
-    @param verbose - Running in verbose mode?
-    @param outputPrefix - A string to prepend to the name of the
-        package when creating the output file. (Default = "").
-    @param dirFilelist - A string that gives a path to a JSON file containing a
-        list of directories, and for each directory, a list of files. Only the 
-        files listed in the JSON file will be included in the installer output
-        for the directory specified. This can be used to remove unnecessary
-        files that aren't needed on a customer's machine, but are included in a
-        packaged build created by Jenkins, for example.
-    @return - A dictionary of package names to their associated package information (dictionaries).
-    """
-
-    # Attempt to parse jsonDirFilelist JSON.
-    jsonDirFilelist = {}
-    if dirFilelist is not None:
-        assert (os.path.exists(dirFilelist)), 'The "dirFilelist" argument was provided but the JSON file at {} does not exist.'.format(dirFilelist)
-        with open(dirFilelist, 'r') as source:
-            try:
-                jsonDirFilelist = json.load(source)
-            except ValueError as e:
-                print(traceback.format_exc())
-                print('Error parsing the given JSON file at {} with exception: {}'.format(dirFilelist, e))
-                sys.exit()
-            except:
-                print(traceback.format_exc())
-                print('Unexpected error parsing the given JSON file at {}. Please verify that the file is correctly formatted.'.format(dirFilelist))
-                sys.exit()
-
-    combinedWxsResults = {}
-    jsonValuesLowered = False
-
-    for directoryName in get_immediate_subdirectories(rootDirectory):
-        packageInfo = createPackageInfo(directoryName, rootDirectory, outputDirectory, outputPrefix)
-
-        moduleName = packageInfo['name']
-        sourceDirectory = packageInfo['sourcePath']
-        wxsName = packageInfo['wxsName']
-        outputPath = packageInfo['wxsPath']
-        # There will only be one component group in the reference list at this point.
-        componentGroup = packageInfo['componentGroupRefs']
-
-        # check for existence of name collision
-        if moduleName in combinedWxsResults:
-            print('ERROR when creating module "{0}" from "{1}". A module with the name "{0}" already exists.'.format(moduleName, sourceDirectory))
-            # Passing lets us rapidly iterate on this tool; feel free to upgrade to raising an exception.
-            pass
-
-        combinedWxsResults[moduleName] = packageInfo
-        success = heatDirectory(wxsName, sourceDirectory, outputPath, componentGroup, directoryRefName, verbose)
-        assert (success == 0), 'Failed to generate WXS file for {}.'.format(moduleName)
-
-        sourceDirFormatted = convertToForwardSlashAndLower(sourceDirectory)
-        for key in jsonDirFilelist.keys():
-            if sourceDirFormatted.endswith(convertToForwardSlashAndLower(key)):
-
-                # Lower-case all entries to allow case-insensitive compare
-                if not jsonValuesLowered:
-                    lowerDictValues(jsonDirFilelist)
-                    jsonValuesLowered = True
-
-                # Alter XML contents of WXS file by filtering it against the JSON
-                # directory list of files.
-                removeWxsEntriesWithJsonDirFilelist(outputPath, jsonDirFilelist, key)
-
-    return combinedWxsResults
-
-
-def heatFile(file,
-             directoryRefName,
-             rootDirectory,
-             verbose,
-             componentGroup,
-             outputPath):
-    """
-    Generates a WXS file for an individual file.
-    @param file: Full path to the file to heat.
-    @param directoryRefName: The ID of the directory for these source files to
-        be placed when installed. (Must match HeatPackageBase directory ID.)
-    @param rootDirectory: The root directory passed to heat's -srd switch so generated paths are relative to it.
-    @param verbose: Running in verbose mode?
-    @param componentGroup: The component group to set in the file.
-    @param outputPath: Where to output the generated WXS file.
-    @return: The exit code returned by the heat command.
-    """
-    heatCommand = heatCommandBase.format(harvestType="file", harvestSource=file)
-    heatCommand += getVerboseCommand(verbose)
-    # According to Wix's docs ( http://wixtoolset.org/documentation/manual/v3/overview/heat.html )
-    # SRD's description implies that it suppresses root directory harvesting, which seems to imply it works only
-    # in directory harvesting mode. It also implies that it takes in no parameters.
-    # This is either poor documentation, or incorrect (I'm assuming poor documentation).
-    # The actual behavior is:
-    # Normally when harvesting with Heat (directory or file), directories and directory references are generated
-    # based on the path to the root directory. When root directory harvesting is suppressed and a directory is
-    # passed in, the nested directory path in the generated wxs file will not include pathing based on that root.
-    # This is necessary when harvesting the loose files in a folder's root: Heat's harvesting does not support
-    # whitelist / blacklist functionality, and all subfolders of the package have been harvested in other ways.
-    # This means that all of the loose files in the directory roots that weren't included elsewhere need to be
-    # included in some other installers. If the root directory is not suppressed, then they generate a directory
-    # hierarchy that collides with each other.
-    heatCommand += " -srd " + rootDirectory
-    heatCommand += heatCommandRequiredArgs.format(directoryRef=directoryRefName,
-                                                  componentGroup=componentGroup,
-                                                  outputPath=outputPath)
-
-    verbose_print(verbose, '\n{}\n'.format(heatCommand))
-    return os.system(heatCommand)
-
-
-def heatFiles(fileList,
-              rootDirectory,
-              outputDirectory,
-              directoryRefName,
-              verbose,
-              outputPrefix = ""):
-    """
-    Generates WXS files for every file in the file list, and returns a mapping containing the associated
-    WXS files and component groups.
-    @param fileList: The list of files to generate WXS files for.
-    @param outputDirectory: The directory to put generated WXS files.
-    @param directoryRefName: The ID of the directory for these source files to
-        be placed when installed. (Must match HeatPackageBase directory ID.)
-    @param verbose: Running in verbose mode?
-    @param outputPrefix: A string to prepend to the name of the
-        package when creating the output file. (Default = "").
-    @return: A dictionary of input files to their associated WXS information (component group, WXS file location).
-    """
-    combinedWxsResults = {}
-    for file in fileList:
-        moduleName = strip_special_characters(file)
-        wxsName = '{}{}'.format(outputPrefix, moduleName)
-        outputPath = os.path.join(outputDirectory, '{}.wxs'.format(wxsName))
-        componentGroup = '{}CG'.format(replace_leading_numbers(strip_special_characters(file)))
-
-        wxsInfo = {
-            'name': moduleName,
-            'wxsName': wxsName,
-            'wxsPath': outputPath,
-            'componentGroupRefs': componentGroup
-        }
-        combinedWxsResults[file] = wxsInfo
-
-        success = heatFile(os.path.join(rootDirectory,file),
-                 directoryRefName,
-                 rootDirectory,
-                 verbose,
-                 componentGroup,
-                 outputPath)
-        assert (success == 0), 'Failed to generate WXS file for {}.'.format(moduleName)
-
-    return combinedWxsResults
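
For reference, a hypothetical sketch (not the real dir_filelist.json) of the dirFilelist structure heatDirectories above accepts: each key is a directory suffix under the harvested root, and its value lists the only files from that directory to keep (comparisons are case-insensitive and use forward slashes).

import json

dir_filelist_example = {
    'dev/Tools': [
        'LmbrSetup/Win/SetupAssistant.exe',
        'LmbrSetup/Win/SetupAssistantConfig.json'
    ]
}
with open('dir_filelist_example.json', 'w') as f:
    json.dump(dir_filelist_example, f, indent=4)
# Passing dirFilelist='dir_filelist_example.json' to heatDirectories would then strip
# every other file under the dev/Tools sub-package from the generated *.wxs output.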

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/HeatDevPackageBase.wxs

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:92fb31b309b613d1f47feb3a728d4528d11d27d9a5e539b109800e209cab0580
-size 5754

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/HeatPackageBase.wxs

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e708bd9a6e9191431ecc5840e6adaf3f4e5ca52f3f994acefa454bb908476c97
-size 5435

+ 0 - 67
Tools/build/JenkinsScripts/distribution/Installer/Insignia.py

@@ -1,67 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-from BuildInstallerWixUtils import *
-
-# Insignia COMMANDLINE TEMPLATES
-insigniaCommandBase = "insignia.exe -nologo {verbose} {commandType} {filename} {commandTypeParams}"
-insigniaDetachEngineParams = "-o {outputPath}"
-insigniaAttachEngineParams = "{bootstrapName} -o {outputPath}"
-
-
-def insigniaMSI(filename, verbose):
-    verboseCmd = getVerboseCommand(verbose)
-    insigniaCommand = insigniaCommandBase.format(verbose=verboseCmd,
-        commandType="-im", filename=filename, commandTypeParams="")
-
-    verbose_print(verbose, '\n{}\n'.format(insigniaCommand))
-    return os.system(insigniaCommand)
-
-
-def insigniaMSIs(directory, verbose, fileList=None):
-    if fileList is None:
-        for file in os.listdir(directory):
-            if file.endswith(".msi"):
-                success = insigniaMSI(file, verbose)
-                assert (success == 0), "Failed to update {} with the signed CAB files' information.".format(os.path.basename(file))
-    else:
-        for file in fileList:
-            filepath = os.path.join(directory, file)
-            success = insigniaMSI(filepath, verbose)
-            assert (success == 0), "Failed to update {} with the signed CAB files' information.".format(os.path.basename(file))
-
-
-def insigniaDetachBurnEngine(bootstrapName, engineName, verbose):
-    verboseCmd = getVerboseCommand(verbose)
-
-    insigniaParams = insigniaDetachEngineParams.format(outputPath=engineName)
-    insigniaCommand = insigniaCommandBase.format(verbose=verboseCmd,
-                                                 commandType="-ib",
-                                                 filename=bootstrapName,
-                                                 commandTypeParams=insigniaParams)
-
-    verbose_print(verbose, '\n{}\n'.format(insigniaCommand))
-    return os.system(insigniaCommand)
-
-
-def insigniaAttachBurnEngine(bootstrapName, engineName, outputName, verbose):
-    verboseCmd = getVerboseCommand(verbose)
-
-    insigniaParams = insigniaAttachEngineParams.format(outputPath=outputName,
-                                                       bootstrapName=bootstrapName)
-    insigniaCommand = insigniaCommandBase.format(verbose=verboseCmd,
-                                                 commandType="-ab",
-                                                 filename=engineName,
-                                                 commandTypeParams=insigniaParams)
-
-    verbose_print(verbose, '\n{}\n'.format(insigniaCommand))
-    return os.system(insigniaCommand)
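
A hedged sketch (hypothetical file names, assuming the deleted Insignia module is importable and WiX's insignia.exe is on PATH) of the detach/sign/re-attach sequence these wrappers exist to support when signing a Burn bootstrapper:

from Insignia import insigniaDetachBurnEngine, insigniaAttachBurnEngine

bootstrap = 'LumberyardInstaller.exe'
engine = 'burn_engine.exe'
# 1. Pull the Burn engine out of the bundle so it can be signed on its own.
insigniaDetachBurnEngine(bootstrap, engine, verbose=True)
# 2. Sign burn_engine.exe with signtool here (outside the scope of this sketch).
# 3. Re-attach the signed engine, producing the final bundle, which is then signed as a whole.
insigniaAttachBurnEngine(bootstrap, engine, 'LumberyardInstallerSigned.exe', verbose=True)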

+ 0 - 115
Tools/build/JenkinsScripts/distribution/Installer/InstallerArgs.py

@@ -1,115 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-import BuildInstallerUtils
-from SignTool import *
-import os
-import tempfile
-import ctypes
-
-
-def createArgs():
-    defaultTempLocation = os.path.join(tempfile.gettempdir(), "LYPackage")
-
-    parser = argparse.ArgumentParser(description='Builds the WiX based Lumberyard Installer for Windows.')
-    # PRIMARY ARGS - many if not all will be used in production
-    # INPUT/OUTPUT
-    parser.add_argument('--packageRoot', default=None, help="Path to the root of the package (default None)")
-    parser.add_argument('--genRoot', default=defaultTempLocation, help="Path for temp data (default Python's temp directory + '/LYPackage')")
-    parser.add_argument('--allowedEmptyFolders', default=None, help="Path to a JSON file containing a list of empty folders that are allowed to exist in --packageRoot")
-    # The default is applied in Build
-    parser.add_argument('--bootstrapName', default=None, help="Bootstrap name (default 'LumberyardInstaller{}.exe'.format(args.lyVersion))")
-    parser.add_argument('--metricsExe', default=None, help="Override path to metrics executable, if not provided this script will search for LyInstallerMetrics.exe in the package root (default None)")
-    # VERSION & GUID
-    parser.add_argument('--lyVersion', default="0.2.2.1", help="Lumberyard Version (default '0.2.2.1')")
-    parser.add_argument('--buildId', default=None, help="The build number of the package that the installer was made from. If not provided, there will be no version file created in the output. (default None)")
-    # SIGNING
-    # more information on the step by step details can be found at:
-    #   https://wiki.labcollab.net/confluence/display/lmbr/Sign+Lumberyard+Binaries
-    parser.add_argument('--privateKey', default=None, help="The signing private key to use to sign the output of this script. Will only attempt to sign if this switch or --certName is specified. Use only one of these two switches. (default None)")
-    parser.add_argument('--password', default=None, help="The password for using the signing private key. Must include this if signing should occur. (default None)")
-    parser.add_argument('--certName', default=None, help="The subject name of the signing certificate to use to sign with. Will only attempt to sign if this switch or --privateKey is specified. Use only one of these two switches. (default None)")
-    parser.add_argument('--timestampServer', default="http://tsa.starfieldtech.com", help="The timestamp server to use for signing. (default http://tsa.starfieldtech.com)")
-    # VERBOSE OUTPUT
-    parser.add_argument('-v', '--verbose', action='store_true', help='Enables logging messages (default False)')
-    # URL INFO
-    parser.add_argument('--hostURL', default=None, help='The URL for the installer to download its packages from (msi + cab files). No URL implies the files will be on local disk already. (default None)')
-    # RAPID ITERATION ARGS
-    parser.add_argument('--bootstrapOnly', action='store_true', help="Only create a bootstrapper. Will assume packageRoot contains all necessary MSIs and CABs. Ignores cabCachePath if it was provided.")
-    parser.add_argument('--signOnly', action="store_true", help="Will sign already existing CAB and MSI files. Will then generate a bootstrapper with those signed MSIs, and sign the bootstrapper.")
-    parser.add_argument('--cabCachePath', default=None, help='Path to a cache of the cab files. Should only be used if no new packages are being added, and everything already has cabs built. (default None)')
-    parser.add_argument('-k', '--keep', action='store_true', help="Don't delete temp files")
-    parser.add_argument('--dirFilelist', default=None, help="A list of files (by directory) to include in the install content.")
-    args = parser.parse_args()
-    print("Installer arguments:")
-    print(args)
-
-    # don't allow ambiguity of which way to sign. Have to do this here as we need to only create one SignType to keep in the params object
-    assert (args.privateKey is None or args.certName is None), "Both a private key and a certificate name were provided, introducing ambiguity. Please only specify one way to sign."
-
-    return args
-
-
-def validateArgs(args, params):
-    assert (args.hostURL is not None), "No URL to provide to the bootstrapper was given. Please use --hostURL to specify where the bootstrapper will download the MSI and CAB files from."
-
-    # find empty folders
-    BuildInstallerUtils.check_for_empty_subfolders(args.packageRoot, args.allowedEmptyFolders)
-    
-    # BootstrapOnly validation
-    if args.bootstrapOnly:
-        assert (not args.cabCachePath), "Error: Ignoring cabCachePath since bootstrapOnly was specified. Please choose one to use."
-        if args.packageRoot:
-            print("Warning: Specifying packageRoot with bootstrapOnly. Base package not used in bootstrapOnly builds.")
-        # make sure there are MSI files in the installer directory
-        hasInstallerFiles = False
-        for file in os.listdir(params.intermediateInstallerPath):
-            hasInstallerFiles = file.endswith(".msi") or file.endswith(".cab")
-            if hasInstallerFiles:
-                return
-        assert hasInstallerFiles, "The packageRoot given ({}) does not contain any MSI or CAB files. When using bootstrapOnly, packageRoot should point to the directory with MSIs and CAB files.".format(params.intermediateInstallerPath)
-
-    # Make sure metrics and Visual Studio 2015-2019 redist exist somewhere
-    assert (params.fullPathToMetrics is not None), "Metrics executable path was not provided and was not found in the package."
-    assert (os.path.exists(params.fullPathToMetrics)), "Metrics executable expected at {}, but cannot be found.".format(params.fullPathToMetrics)
-    assert (params.fullPathTo2015Thru2019Redist is not None), "Visual Studio 2015-2019 redist could not be found in the package."
-    assert (os.path.exists(params.fullPathTo2015Thru2019Redist)), "Visual Studio 2015-2019 redist expected at {}, but cannot be found.".format(params.fullPathTo2015Thru2019Redist)
-
-    # Signing parameter asserts
-    if params.doSigning:
-        # Installation requires administration privileges.
-        try:
-            is_admin = os.getuid() == 0
-        except AttributeError:
-            is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
-        assert is_admin, "Administrator privileges must be enabled to sign an installer."
-
-        if args.privateKey is not None:
-            # make sure that private key is valid and we were given a password to use.
-            assert (os.path.exists(args.privateKey)), "No private key exists at the given path."
-            assert (args.privateKey.endswith(".pfx")), "The file at {} is not a signing private key.".format(args.privateKey)
-            assert (args.password is not None), "Must include the password needed for the signing private key to sign."
-        # if --certName is specified, the verification of that name being valid must be done prior to this script being run, as it is platform dependent.
-
-        # Make sure that all CABs and MSI files are signed if only the bootstrapper needs to be built.
-        if args.bootstrapOnly:
-            for filename in params.msiFileNameList:
-                signed = signtoolVerifySign(os.path.join(params.installerPath, filename), args.verbose)
-                assert signed, "Not all MSI and CAB files in {} are verified to have been signed. Please use --signOnly instead.".format(params.installerPath)
-            for filename in params.cabFileNameList:
-                signed = signtoolVerifySign(os.path.join(params.installerPath, filename), args.verbose)
-                assert signed, "Not all MSI and CAB files in {} are verified to have been signed. Please use --signOnly False instead.".format(params.installerPath)
-            signed = signtoolVerifySign(params.fullPathToMetrics, args.verbose)
-            assert signed, "Metrics executable at {} is not verified to have been signed. Please either create a signed executable or use --signOnly instead.".format(params.fullPathToMetrics)
-
-    if args.signOnly:
-        assert params.doSigning, "Cannot specify --signOnly if not given a private key and password, or a certificate name for signing."

+ 0 - 273
Tools/build/JenkinsScripts/distribution/Installer/InstallerAutomation.py

@@ -1,273 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-import os
-import re
-import shutil
-import time
-import zipfile
-from urllib.parse import urlparse
-from urllib.request import urlopen
-
-import BuildInstallerUtils
-import PackageExeSigning
-import SignTool
-import boto3
-
-
-def getCloudfrontDistDomain(uploadURL):
-    return urlparse(uploadURL)[1]
-
-
-def getCloudfrontDistPath(uploadURL):
-    pathFromDomainName = urlparse(uploadURL)[2]
-    return pathFromDomainName[1:]   # need to remove the first slash, otherwise it will create a nameless directory on S3
-
-
-def testSigningCredentials(args):
-    # there are no credentials to test when certName was specified.
-    if args.certName is not None:
-        return True
-
-    result = SignTool.signtoolTestCredentials(args.signingType,
-                                              args.timestampServer,
-                                              False)
-    return result
-
-
-defaultFilesToSign = ["dev/Tools/LmbrSetup/Win/SetupAssistant.exe",
-                      "dev/Tools/LmbrSetup/Win/SetupAssistantBatch.exe",
-                      "dev/Bin64vc141/ProjectConfigurator.exe",
-                      "dev/Bin64vc141/lmbr.exe",
-                      "dev/Bin64vc141/Lyzard.exe",
-                      "dev/Bin64vc141/Editor.exe",
-                      "dev/Bin64vc142/ProjectConfigurator.exe",
-                      "dev/Bin64vc142/lmbr.exe",
-                      "dev/Bin64vc142/Lyzard.exe",
-                      "dev/Bin64vc142/Editor.exe"]
-
-defaultFilesToSignHelpText = 'Additional files to sign, if signing. (default {})'.format(', '.join(defaultFilesToSign))
-
-
-def createArgs():
-    parser = argparse.ArgumentParser(description='Builds the WiX based Lumberyard Installer for Windows.')
-    parser.add_argument('--packagePath', required=True, help="Path to package, can be url or local.")
-    parser.add_argument('--workingDir', default="%TEMP%/installerAuto", help="Working directory (default '%%TEMP%%/installerAuto')")
-    parser.add_argument('--allowedEmptyFolders', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), "allowed_empty_folders.json"), help="The JSON file containing the whitelist of empty folders that we expect in the source package.")
-    parser.add_argument('--targetURL', required=True, help="Target URL to download the installer from.")
-    parser.add_argument('--awsProfile', default=None, help='The aws cli profile to use to read from s3 and cloudfront, and upload to s3. (Default None)') # if on a build machine, it will use the IAM role, if local, it will use [default] in aws credentials file.
-    parser.add_argument('--lyVersion', default=None, help='Specifies the version used to identify the version of LY installed by this installer. Use of this field will ignore the default behavior of reading the value for this field from the default_settings.json file in the package. (DO NOT USE unless you know what you are doing.)')
-    parser.add_argument('--suppressVersionInPath', action='store_true', help="Suppresses modification to the target paths with a version (default False)")
-    parser.add_argument('-bi', '--addBuildIdToPath', action='store_true', help="Add the build id to the target paths, prepending it to the Lumberyard version, i.e. buildId/version/installer. (default False)")
-    parser.add_argument('--privateKey', default=None, help="The signing private key to use to sign the output of this script. Will only attempt to sign if this switch or --certName is specified. Use only one of these two switches. (default None)")
-    parser.add_argument('--certName', default=None, help="The subject name of the signing certificate to use to sign with. Will only attempt to sign if this switch or --privateKey is specified. Use only one of these two switches. (default None)")
-    parser.add_argument('-v', '--verbose', action='store_true', help='Enables logging messages (default False)')
-    parser.add_argument('-k', '--keep', action='store_true', help='Keeps temp files (default False)')
-    parser.add_argument('--timestampServer', default="http://tsa.starfieldtech.com", help="The timestamp server to use for signing. (default http://tsa.starfieldtech.com)")
-    parser.add_argument('--filesToSign', nargs='+', default=defaultFilesToSign, help=defaultFilesToSignHelpText)
-    args, unknown = parser.parse_known_args()
-    print("Installer automation arguments:")
-    print(args)
-    return args
-
-
-def validateArgs(args):
-    args.signingPassword = None
-    assert (os.path.exists(args.allowedEmptyFolders)), 'The whitelist file specified at {} does not exist.'.format(args.allowedEmptyFolders)
-    # don't allow ambiguity of which way to sign. Have to do this here as we need to only create one SignType to keep in the params object
-    assert (args.privateKey is None or args.certName is None), "Both a private key and a certificate name were provided, introducing ambiguity. Please only specify one way to sign."
-
-    args.signingType = None
-    if args.privateKey is not None:
-        # get password for signing
-        import getpass
-        args.signingPassword = getpass.getpass("Please provide the signing password: ")
-        args.signingType = SignTool.KeySigning(args.privateKey, args.signingPassword)
-    elif args.certName is not None:
-        args.signingType = SignTool.NameSigning(args.certName)
-    args.doSigning = args.signingType is not None
-
-    if args.doSigning is True:
-        # Signing requires administration privileges.
-        import ctypes
-        try:
-            is_admin = os.getuid() == 0
-        except AttributeError:
-            is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
-        assert is_admin, "Administrator privileges must be enabled to sign an installer."
-        assert (testSigningCredentials(args)), "Signing password is incorrect. Failed to sign and verify test file."
-
-    if args.awsProfile:
-        if args.awsProfile is "":   # ANT jobs might pass an empty string to represent None, since ant doesn't have a concept of None or null
-            args.awsProfile = None
-        assert (boto3.Session(profile_name=args.awsProfile) is not None), "The AWS CLI profile name specified does not exist on this machine. Please specify an existing AWS CLI profile."
-
-    if args.lyVersion:
-        # check to make sure the value of lyVersion matches the format #.#.#.#
-        r = re.compile(r"\d+\.\d+\.\d+\.\d+")
-        assert (r.match(args.lyVersion) is not None), "The value of lyVersion given is not in the form of '<Product>.<Major>.<Minor>.<Patch>'. Please input a version with the correct format."
-
-
-def run(args):
-    expandedWorkingDir = os.path.expandvars(args.workingDir)
-    expandedPackagePath = os.path.expandvars(args.packagePath)
-
-    unpackedLocation = os.path.join(expandedWorkingDir, 'unpacked')
-    fileName = BuildInstallerUtils.get_package_name(expandedPackagePath)
-    downloadFileOnDisk = os.path.join(expandedWorkingDir, fileName)
-
-    # Make sure temp directories exist
-    BuildInstallerUtils.verbose_print(args.verbose, "Cleaning temp working directories")
-    if not os.path.exists(expandedWorkingDir):
-        os.makedirs(expandedWorkingDir)
-
-    if os.path.exists(unpackedLocation):
-        shutil.rmtree(unpackedLocation)
-
-    os.makedirs(unpackedLocation)
-
-    if os.path.isfile(downloadFileOnDisk):
-        os.remove(downloadFileOnDisk)
-
-    isDownloadFileTemp = False
-    # 1. Download zip from S3 if it is an URL
-    if BuildInstallerUtils.is_url(expandedPackagePath):
-        isDownloadFileTemp = True
-        BuildInstallerUtils.verbose_print(args.verbose, "Downloading package {}".format(expandedPackagePath))
-        package = urlopen(expandedPackagePath)
-        with open(downloadFileOnDisk, 'wb') as output:
-            output.write(package.read())
-    elif os.path.isfile(expandedPackagePath):
-        BuildInstallerUtils.verbose_print(args.verbose, "using on disk package at {}".format(expandedPackagePath))
-        downloadFileOnDisk = expandedPackagePath
-    else:
-        raise Exception('Could not find package "{}" at path {}'.format(fileName, expandedPackagePath))
-
-    # 2. Unzip zip file.
-    BuildInstallerUtils.verbose_print(args.verbose, "Unpacking package to {}".format(unpackedLocation))
-    z = zipfile.ZipFile(downloadFileOnDisk, "r")
-    z.extractall(unpackedLocation)
-    # Preserve each file's original timestamp
-    for f in z.infolist():
-        name, date_time = f.filename, f.date_time
-        name = os.path.join(unpackedLocation, name)
-        date_time = time.mktime(date_time + (0, 0, -1))
-        os.utime(name, (date_time, date_time))
-    z.close()
-
-    #  Sign exes in Lumberyard
-    if args.privateKey is not None or args.certName is not None:
-        PackageExeSigning.SignLumberyardExes(unpackedLocation,
-                                             args.signingType,
-                                             args.timestampServer,
-                                             args.verbose,
-                                             args.filesToSign)
-
-    # 3. Discover Lumberyard version.
-    buildId = os.path.splitext(fileName)[0]
-    packageVersion = BuildInstallerUtils.get_ly_version_from_package(args, unpackedLocation)
-    version = packageVersion
-    if args.lyVersion:
-        version = args.lyVersion
-        BuildInstallerUtils.verbose_print(args.verbose, "Package version is {}, but forcing version to value given for --lyVersion of {}".format(packageVersion, args.lyVersion))
-
-    BuildInstallerUtils.verbose_print(args.verbose, "Building installer for Lumberyard v{}".format(version))
-
-    # 4. Build installer.
-    pathToBuild = os.path.join(expandedWorkingDir, version)
-
-    targetUrl = BuildInstallerUtils.generate_target_url(args.targetURL, version, buildId, args.suppressVersionInPath, args.addBuildIdToPath)
-
-    pathToDirFilelist = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'dir_filelist.json'
-
-    # take the name of the package without the file extension to use as the buildId
-    buildCommand = "python BuildInstaller.py --packageRoot {} " \
-                   "--lyVersion {} " \
-                   "--genRoot {} " \
-                   "--hostURL {} " \
-                   "--allowedEmptyFolders {} " \
-                   "--buildId {} " \
-                   "--dirFilelist {}".format(unpackedLocation, version, pathToBuild, targetUrl,
-                                             args.allowedEmptyFolders, buildId, pathToDirFilelist)
-
-    if args.verbose:
-        buildCommand += " -v"
-    if args.keep:
-        buildCommand += " --keep"
-    if args.privateKey is not None:
-        buildCommand += " --privateKey {} --password {}".format(args.privateKey, args.signingPassword)
-    elif args.certName is not None:
-        buildCommand += ' --certName "{}"'.format(args.certName)
-    if args.doSigning:
-        buildCommand += " --timestampServer {}".format(args.timestampServer)
-
-    BuildInstallerUtils.verbose_print(args.verbose, "Creating build of installer with command:")
-    BuildInstallerUtils.verbose_print(args.verbose, buildCommand)
-    build_result = os.system(buildCommand)
-    assert(build_result == 0), "Running BuildInstaller.py failed with result {}".format(build_result)
-    BuildInstallerUtils.verbose_print(args.verbose, "Installer creation completed, build is available at {}".format(pathToBuild))
-
-    # 5. Upload to the proper S3 bucket
-    # Get the Cloudfront Distribution ID from the URL we expect to download from (targetUrl)
-    BuildInstallerUtils.verbose_print(args.verbose, "Beginning upload of installer to S3")
-    session = boto3.Session(profile_name=args.awsProfile)
-    client = session.client('cloudfront')
-    targetDomain = getCloudfrontDistDomain(targetUrl)
-    distributionList = client.list_distributions()
-    targetDistId = None
-    for distribution in distributionList["DistributionList"]["Items"]:
-        if distribution["DomainName"] == targetDomain:
-            targetDistId = distribution["Id"]
-            break  # stop at the first distribution whose domain matches
-    assert (targetDistId is not None), "No distribution with the domain name {} found.".format(targetDomain)
-
-    # Get the s3 bucket info from the Distribution ID, and figure out where we are putting files in the bucket
-    targetDist = client.get_distribution(Id=targetDistId)
-    s3Info = targetDist["Distribution"]["DistributionConfig"]["Origins"]["Items"][0]
-    bucketDomainName = s3Info["DomainName"]
-    bucketName = bucketDomainName.split('.')[0] # first part of the domain name is the bucket name
-    BuildInstallerUtils.verbose_print(args.verbose, "S3 bucket associated with targetUrl: {}".format(bucketName))
-    originPath = s3Info["OriginPath"]
-    bucketPath = None
-    if originPath:
-        # Start originPath after the first character (presumed to be '/') to avoid nameless directory in S3.
-        bucketPath = '{}/{}'.format(originPath[1:], getCloudfrontDistPath(targetUrl))
-    else:
-        bucketPath = getCloudfrontDistPath(targetUrl)
-    BuildInstallerUtils.verbose_print(args.verbose, "Uploading completed installer to S3 location: {}/{}".format(bucketName, bucketPath))
-
-    # Upload each file to the S3 bucket
-    s3 = session.resource('s3')
-    s3Bucket = s3.Bucket(bucketName)
-    installerOutputDir = None
-    if args.signingType is not None:
-        installerOutputDir = os.path.join(pathToBuild, "installer")
-    else:
-        installerOutputDir = os.path.join(pathToBuild, "unsignedInstaller")
-    for file in os.listdir(installerOutputDir):
-        fullFilePath = os.path.join(installerOutputDir, file)
-        targetBucketPath = '{}/{}'.format(bucketPath, os.path.basename(file))
-        s3Bucket.upload_file(fullFilePath, targetBucketPath)
-
-    if not args.keep:
-        if os.path.isfile(downloadFileOnDisk) and isDownloadFileTemp:
-            os.remove(downloadFileOnDisk)
-
-
-def main():
-    args = createArgs()
-    validateArgs(args)
-    run(args)
-
-
-if __name__ == "__main__":
-    main()
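
A quick sketch (hypothetical CloudFront URL, assuming the module above is importable) of the URL helpers at the top of InstallerAutomation.py: urlparse splits the target URL into the distribution domain used to look up the CloudFront distribution and the key prefix used when uploading to S3.

from InstallerAutomation import getCloudfrontDistDomain, getCloudfrontDistPath

target_url = 'https://d111111abcdef8.cloudfront.net/installer/1.28.0.0'
print(getCloudfrontDistDomain(target_url))  # 'd111111abcdef8.cloudfront.net'
print(getCloudfrontDistPath(target_url))    # 'installer/1.28.0.0' (leading slash removed)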

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/InstallerIcon.ico

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:30befe12828ae67d1f3b4a9725c98d910c2190b81caa0f9d919e9c42c432c33a
-size 101185

+ 0 - 225
Tools/build/JenkinsScripts/distribution/Installer/InstallerPackaging.py

@@ -1,225 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-from Heat import *
-from Candle import *
-from Light import *
-from BuildInstallerWixUtils import *
-
-
-def createLooseFilePackage(args,
-                           params,
-                           packageGroupInfoMap,
-                           path,
-                           wixDirId,
-                           directory,
-                           wixSafePathId):
-    # DISCOVER LOOSE FILES IN PATH
-    looseFiles = []
-    for (dirpath, dirnames, filenames) in os.walk(path):
-        looseFiles.extend(filenames)
-        # Only the files in the root directory need to
-        # be discovered this way, so break on first result.
-        break
-
-    # If there are no loose files, there is no package to create.
-    if not looseFiles:
-        return False
-
-    # HEAT LOOSE FILES
-    looseFileMap = heatFiles(looseFiles,
-                             path,
-                             params.wxsRoot,
-                             wixDirId,
-                             args.verbose,
-                             wixSafePathId)
-
-    # CANDLE LOOSE FILES
-    rootInfo = createPackageInfo(directory, args.packageRoot, params.wxsRoot)
-
-    # There is no general component group for loose files, so clear it out.
-    rootInfo['componentGroupRefs'] = ''
-    packageGroupInfoMap[rootInfo['name']] = rootInfo
-
-    candleSubDirectory = directory
-    if not candleSubDirectory:
-        candleSubDirectory = wixSafePathId
-
-    for looseFile in looseFileMap:
-        candlePackageContent(os.path.join(params.wixObjOutput, candleSubDirectory),
-                             looseFileMap[looseFile]['wxsPath'],
-                             args.verbose)
-        if rootInfo.get('componentGroupRefs', ''):
-            rootInfo['componentGroupRefs'] += ';' + looseFileMap[looseFile]['componentGroupRefs']
-        else:
-            rootInfo['componentGroupRefs'] = looseFileMap[looseFile]['componentGroupRefs']
-
-    return True
-
-
-def createThirdPartyPackages(args, params):
-    """
-    Creates a .msi and .cab files for each folder in 3rdParty, and places them at
-        intermediateInstallerPath to be used when creating the bootstrapper.
-    @return - A dictionary of the package information that was generated for the
-        3rd Party directories.
-    """
-    thirdPartyWixDirId = "THIRDPARTYDIR"
-    thirdPartyPath = os.path.join(args.packageRoot, "3rdParty")
-    # HEAT PACKAGE
-    thirdPartyInfoMap = heatDirectories(thirdPartyPath, params.wxsRoot,
-                                    thirdPartyWixDirId, args.verbose, "ThirdParty")
-    # CANDLE PACKAGE CONTENTS
-    candleAllPackagesContent(thirdPartyInfoMap, params.wixObjOutput, args.verbose)
-
-    # CREATE PACKAGE INFORMATION FOR LOOSE FILES IN 3RD PARTY ROOT
-    createLooseFilePackage(args,
-                           params,
-                           thirdPartyInfoMap,
-                           thirdPartyPath,
-                           thirdPartyWixDirId,
-                           "3rdParty",
-                           "ThirdParty")
-
-    # CANDLE PACKAGES
-    candlePackages(thirdPartyInfoMap,
-                   params.wixObjOutput,
-                   params.heatPackageBase,
-                   args.verbose,
-                   args.lyVersion,
-                   args.buildId)
-
-    # LIGHT PACKAGES
-    lightPackages(thirdPartyInfoMap,
-                  params.packagesPath,
-                  params.wixObjOutput,
-                  args.verbose,
-                  args.cabCachePath)
-
-    return thirdPartyInfoMap
-
-
-def createRootPackage(args, params):
-    rootInfoMap = {}
-    if createLooseFilePackage(args,
-                              params,
-                              rootInfoMap,
-                              os.path.join(args.packageRoot, ""),
-                              "INSTALLDIR",
-                              "",
-                              "packageRoot"):
-        # CANDLE PACKAGES
-        candlePackages(rootInfoMap,
-                       params.wixObjOutput,
-                       params.heatPackageBase,
-                       args.verbose,
-                       args.lyVersion,
-                       args.buildId)
-        rootInfoMap['packageRoot']['sourcePath'] = os.path.abspath(args.packageRoot)
-        # LIGHT PACKAGES
-        lightPackages(rootInfoMap,
-                      params.packagesPath,
-                      params.wixObjOutput,
-                      args.verbose,
-                      args.cabCachePath)
-
-    return rootInfoMap
-
-
-def createDevPackage(args, params):
-    """
-    Creates .msi and .cab files for the dev folder, and places them at
-        intermediateInstallerPath to be used when creating the bootstrapper.
-    @return - A dictionary of the package information that was generated for the
-        dev directory.
-    """
-    devWixDirId = "DEVDIR"
-    devPath = os.path.join(args.packageRoot, "dev")
-
-    # HEAT PACKAGE
-    devInfoMap = heatDirectories(devPath, params.wxsRoot, devWixDirId, args.verbose, "dev", args.dirFilelist)
-
-    # CANDLE PACKAGE CONTENTS
-    candleAllPackagesContent(devInfoMap, params.wixObjOutput, args.verbose)
-
-    # CREATE PACKAGE INFORMATION FOR LOOSE FILES IN DEV ROOT
-    createLooseFilePackage(args,
-                           params,
-                           devInfoMap,
-                           devPath,
-                           devWixDirId,
-                           "dev",
-                           "dev")
-
-    devInfo = devInfoMap["dev"]
-    devInfo['componentRefs'] = 'DesktopShortcuts;StartMenuShortcuts;LevelListRegistryKeys'
-
-    # CANDLE PACKAGE
-    candlePackages(devInfoMap,
-                   params.wixObjOutput,
-                   params.heatPackageBase,
-                   args.verbose,
-                   args.lyVersion,
-                   args.buildId)
-
-    # Build the dev-specific wxs components (for shortcuts)
-    candlePackage('{}/{}/'.format(params.wixObjOutput, devInfo['name']),
-                  'HeatDevPackageBase.wxs',
-                  devInfo,
-                  args.verbose,
-                  args.lyVersion,
-                  args.buildId)
-
-    # LIGHT PACKAGE
-    lightPackages(devInfoMap,
-                  params.packagesPath,
-                  params.wixObjOutput,
-                  args.verbose,
-                  args.cabCachePath)
-
-    return devInfoMap
-
-
-def createRootFolderPackage(args, params, folderName):
-    """
-    Creates .msi and .cab files for a folder in the package root, like docs, and places them at
-        intermediateInstallerPath to be used when creating the bootstrapper.
-    @return - A dictionary of the package information that was generated for the
-        given folder.
-    """
-    folderInfo = createPackageInfo(folderName, args.packageRoot, params.wxsRoot)
-    folderInfoMap = {}
-    folderInfoMap[folderInfo['name']] = folderInfo
-
-    # HEAT PACKAGE
-    heatDirectory(folderInfo['wxsName'], folderInfo['sourcePath'], folderInfo['wxsPath'],
-        folderInfo['componentGroupRefs'], "INSTALLDIR", args.verbose)
-
-    # CANDLE PACKAGE CONTENTS
-    candleAllPackagesContent(folderInfoMap, params.wixObjOutput, args.verbose)
-
-    # CANDLE PACKAGE
-    candlePackages(folderInfoMap,
-                   params.wixObjOutput,
-                   params.heatPackageBase,
-                   args.verbose,
-                   args.lyVersion,
-                   args.buildId)
-
-    # LIGHT PACKAGE
-    lightPackages(folderInfoMap,
-                  params.packagesPath,
-                  params.wixObjOutput,
-                  args.verbose,
-                  args.cabCachePath)
-
-    return folderInfoMap

+ 0 - 93
Tools/build/JenkinsScripts/distribution/Installer/InstallerParams.py

@@ -1,93 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-from SignTool import *
-
-
-class InstallerParamError(Exception):
-    def __init__(self, value):
-        self.value = value
-
-    def __str__(self):
-        return repr(self.value)
-
-
-class InstallerParams(object):
-    def __init__(self, args):
-        self.fullPathTo2015Thru2019Redist = None
-        self.signingType = None
-
-        if args.privateKey is not None:
-            if not args.password:
-                import getpass
-                args.password = getpass.getpass("Please provide the signing password: ")
-            self.signingType = KeySigning(args.privateKey, args.password)
-        elif args.certName is not None:
-            self.signingType = NameSigning(args.certName)
-
-        self.doSigning = self.signingType is not None
-
-        self.wxsRoot = os.path.join(args.genRoot, "wxs")
-        self.wixObjOutput = os.path.join(args.genRoot, "wixobj_module")
-        self.heatPackageBase = "HeatPackageBase.wxs"
-        self.packagesPath = os.path.join(args.genRoot, "unsignedInstaller")
-        # Wix primarily differentiates between a file and directory by looking for a trailing slash.
-        # os.path.join with an empty string is a way to guarantee a trailing slash is added to a path.
-        self.bootstrapWixObjDir = os.path.join(os.path.join(args.genRoot, "wixobj_bootstrap"), '')
-        self.intermediateInstallerPath = os.path.join(args.genRoot, "unsignedInstaller")
-
-        self.installerPath = self.intermediateInstallerPath
-        if self.doSigning:
-            self.installerPath = os.path.join(args.genRoot, "installer")
-
-        self.sourcePath = args.packageRoot
-        if args.bootstrapOnly:
-            self.sourcePath = self.installerPath
-
-        self.fullPathToMetrics = args.metricsExe
-        if self.fullPathToMetrics is None:
-            self.fullPathToMetrics = find_file_in_package(self.sourcePath, "LyInstallerMetrics.exe", ["InternalSDKs"])
-        if self.fullPathToMetrics is None:
-            raise InstallerParamError('Path to LyInstallerMetrics.exe could not be found underneath the Tools/InternalSDKs folder in the package ')
-
-        # Gather information on the Visual Studio 2015-2019 redistributable.
-        # Note that this is a hardcoded path, and not a search. This is because there are multiple different redistributables
-        # with the exact same name, the easiest way to identify which is which is based on the location in the package.
-        # Also, this VS 2015-2019 redistributable appears multiple times in the package. For consistency, we want to make sure
-        # the exact same one is used.
-        self.name2015Thru2019Redist = "VC_redist.x64.exe"
-        self.fullPathTo2015Thru2019Redist = os.path.join(args.packageRoot, "dev", "Tools", "Redistributables", "Visual Studio 2015-2019", self.name2015Thru2019Redist)
-        # When rebuilding just the bootstrap for a package, the source package root is a pointer to where the installer was
-        # generated previously. At this point, the redistributable is already in the package root.
-        if args.bootstrapOnly:
-            self.fullPathTo2015Thru2019Redist = find_file_in_package(self.sourcePath, [self.name2015Thru2019Redist])
-
-        self.skipMsiAndCabCreation = args.signOnly or args.bootstrapOnly
-        self.metricsPath, self.metricsExe = os.path.split(self.fullPathToMetrics)
-        self.pathTo2015Thru2019Redist, self.redist2015Thru2019Exe = os.path.split(self.fullPathTo2015Thru2019Redist)
-        self.tempBootstrapOutputDir = os.path.join(args.genRoot, "buildBootstrap")
-
-        if self.doSigning and args.bootstrapOnly:
-            self.msiFileNameList = get_file_names_in_directory(self.installerPath, ".msi")
-            self.cabFileNameList = get_file_names_in_directory(self.installerPath, ".cab")
-
-        # Default to LumberyardInstallerVERSION.exe, unless args.bootstrapName is set
-        if args.bootstrapName:
-            self.bootstrapName = args.bootstrapName
-        else:
-            self.bootstrapName = "LumberyardInstaller{}.exe".format(args.lyVersion)
-
-        if self.doSigning:
-            self.tempBootstrapName = "temp" + self.bootstrapName
-            self.bootstrapOutputPath = os.path.join(self.tempBootstrapOutputDir, self.tempBootstrapName)
-        else:
-            self.bootstrapOutputPath = os.path.join(self.installerPath, self.bootstrapName)

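One detail from the comment above is easy to miss: WiX only treats a path as a directory when it ends with a separator, and joining an empty final component is how the script guarantees that. A tiny standalone illustration (the genRoot value is a made-up placeholder):

    import os

    genRoot = 'C:/installer_gen'  # placeholder; the real value comes from args.genRoot
    bootstrap_dir = os.path.join(genRoot, 'wixobj_bootstrap')
    # Joining '' appends the trailing separator that WiX expects for a directory path.
    print(bootstrap_dir)                    # ...wixobj_bootstrap
    print(os.path.join(bootstrap_dir, ''))  # ...wixobj_bootstrap\ (or .../ on POSIX)
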
+ 0 - 57
Tools/build/JenkinsScripts/distribution/Installer/Light.py

@@ -1,57 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-from BuildInstallerWixUtils import *
-
-# LIGHT COMMANDLINE TEMPLATES
-lightCommandBase = 'light.exe -nologo -o {outputPath} -ext WixUIExtension -ext WiXUtilExtension -b "{packageSource}" {verbose} {wixobjFile}'
-lightCommandBootstrap = 'light.exe -nologo -o {outputPath} -ext WixBalExtension -ext WiXUtilExtension {verbose} {wixobjFile}'
-lightCommandCabCache = " -cc {cabCachePath} -reusecab"
-
-
-def lightPackage(outputPath, sourceDirectory, wixobjFiles, verbose, cabCachePath):
-    verboseCmd = getVerboseCommand(verbose)
-    lightCommand = lightCommandBase.format(outputPath=outputPath,
-                                           verbose=verboseCmd,
-                                           packageSource=sourceDirectory,
-                                           wixobjFile=wixobjFiles)
-
-    if cabCachePath is not None:
-        lightCommand += lightCommandCabCache.format(cabCachePath=cabCachePath)
-
-    verbose_print(verbose, '\n{}\n'.format(lightCommand))
-    return os.system(lightCommand)
-
-
-def lightPackages(packageInfoMap, packagesPath, wixobjFiles, verboseMode, cabCachePath):
-    numPackagesBuilt = 0
-
-    for packageInfo in packageInfoMap.values():
-        outputPath = os.path.join(packagesPath, '{}.msi'.format(packageInfo['wxsName']))
-        packageWixObjPath = os.path.join(os.path.join(wixobjFiles, packageInfo['wxsName']), "*.wixobj")
-
-        success = lightPackage(outputPath, packageInfo['sourcePath'], packageWixObjPath, verboseMode, cabCachePath)
-        assert (success == 0), 'Failed to generate msi and cab files for {}.'.format(packageInfo['name'])
-
-        numPackagesBuilt += 1
-        verbose_print(verboseMode, '\nPackages Built: {}\n\n'.format(numPackagesBuilt))
-
-
-def lightBootstrap(outputPath, wixobjFiles, verbose, cabCachePath):
-    verboseCmd = getVerboseCommand(verbose)
-    lightCommand = lightCommandBootstrap.format(outputPath=outputPath, verbose=verboseCmd, wixobjFile=wixobjFiles)
-
-    if cabCachePath is not None:
-        lightCommand += lightCommandCabCache.format(cabCachePath=cabCachePath)
-
-    verbose_print(verbose, '\n{}\n'.format(lightCommand))
-    return os.system(lightCommand)

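For reference, the lightCommandBase template above expands into an ordinary light.exe command line. The following is a minimal, self-contained sketch of that string formatting only (all paths are hypothetical placeholders, and it does not invoke the WiX toolset):

    import os

    # Same template string as lightCommandBase in Light.py above.
    lightCommandBase = ('light.exe -nologo -o {outputPath} -ext WixUIExtension '
                        '-ext WiXUtilExtension -b "{packageSource}" {verbose} {wixobjFile}')

    cmd = lightCommandBase.format(
        outputPath=os.path.join('unsignedInstaller', 'dev.msi'),      # placeholder output
        packageSource=os.path.join('package', 'dev'),                 # placeholder -b source
        verbose='-v',
        wixobjFile=os.path.join('wixobj_module', 'dev', '*.wixobj'))  # placeholder wixobj glob
    # Mirrors lightCommandCabCache when a cab cache path is supplied.
    cmd += ' -cc {cabCachePath} -reusecab'.format(cabCachePath=os.path.join('cabCache', ''))
    print(cmd)
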
+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/LumberyardBootstrapper.wxs

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:52fa9d61db7371183e66e422e8f959fd613a6c3e46035a8c1d05a4329445855b
-size 5303

+ 0 - 14
Tools/build/JenkinsScripts/distribution/Installer/LumberyardDevCertSetup.bat

@@ -1,14 +0,0 @@
-REM 
-REM 
-REM  All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-REM  its licensors.
-REM 
-REM  For complete copyright and license terms please see the LICENSE at the root of this
-REM  distribution (the "License"). All use of this software is governed by the License,
-REM  or, if provided, by the license below or the license accompanying this file. Do not
-REM  remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-REM  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-REM 
-
-@ECHO off
-certutil -user -addstore Root LumberyardDevCA.cer

+ 0 - 61
Tools/build/JenkinsScripts/distribution/Installer/LumberyardThemeGDC.wxl

@@ -1,61 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<WixLocalization Culture="en-us" Language="1033" xmlns="http://schemas.microsoft.com/wix/2006/localization">
-  <String Id="Caption">[WixBundleName] PC Setup</String>
-  <String Id="Title">[WixBundleName] PC</String>
-  <String Id="InstallHeader">Welcome</String>
-  <String Id="InstallMessage">This setup tool installs the PC version of [WixBundleName] on Local Disk (C:). To change the install directory, click Options.</String>
-  <String Id="InstallVersion">Version [WixBundleVersion] PC</String>
-  <String Id="ConfirmCancelMessage">Are you sure you want to cancel?</String>
-  <String Id="ExecuteUpgradeRelatedBundleMessage">Previous version</String>
-  <String Id="HelpHeader">Setup Help</String>
-  <String Id="HelpText">/install | /repair | /uninstall | /layout [directory] - installs, repairs, uninstalls or
-   creates a complete local copy of the bundle in directory. Install is the default.
-
-/passive | /quiet -  displays minimal UI with no prompts or displays no UI and
-   no prompts. By default UI and all prompts are displayed.
-
-/norestart   - suppress any attempts to restart. By default UI will prompt before restart.
-/log log.txt - logs to a specific file. By default a log file is created in %TEMP%.</String>
-  <String Id="HelpCloseButton">&amp;Close</String>
-  <String Id="InstallEulaAgreementText">By installing Lumberyard you agree to the &lt;a href="https://aws.amazon.com/agreement"&gt;AWS Customer Agreement&lt;/a&gt;, &lt;a href="https://aws.amazon.com/service-terms/#57._Amazon_Lumberyard_Engine"&gt;Lumberyard Service Terms&lt;/a&gt;, and &lt;a href="https://aws.amazon.com/privacy"&gt;Privacy Notice&lt;/a&gt;.</String>
-  <String Id="InstallLicenseLinkText">[WixBundleName] &lt;a href="#"&gt;license terms&lt;/a&gt;.</String>
-  <String Id="InstallAcceptCheckbox">I &amp;agree to the license terms and conditions</String>
-  <String Id="InstallOptionsButton">&amp;Options</String>
-  <String Id="InstallInstallButton">&amp;Install</String>
-  <String Id="InstallCloseButton">&amp;Cancel</String>
-  <String Id="OptionsHeader">Setup options</String>
-  <String Id="OptionsLocationLabel">Install location:</String>
-  <String Id="OptionsBrowseButton">&amp;Browse</String>
-  <String Id="OptionsOkButton">&amp;OK</String>
-  <String Id="OptionsCancelButton">&amp;Cancel</String>
-  <String Id="ProgressHeader">Setup Progress</String>
-  <String Id="DownloadLabel">Acquiring:</String>
-  <String Id="ProgressLabel">Processing:</String>
-  <String Id="OverallProgressPackageText">Initializing...</String>
-  <String Id="ProgressCancelButton">&amp;Cancel</String>
-  <String Id="ModifyHeader">Modify Setup</String>
-  <String Id="ModifyRepairButton">&amp;Repair</String>
-  <String Id="ModifyUninstallButton">&amp;Uninstall</String>
-  <String Id="ModifyCloseButton">&amp;Close</String>
-  <String Id="SuccessRepairHeader">Repair Successfully Completed</String>
-  <String Id="SuccessUninstallHeader">Uninstall Successfully Completed</String>
-  <String Id="SuccessInstallHeader">Installation Successfully Completed</String>
-  <String Id="SuccessHeader">Setup Successful</String>
-  <String Id="SuccessLaunchButton">&amp;Launch Lumberyard Setup Assistant</String>
-  <String Id="SuccessRestartText">You must restart your computer before you can use the software.</String>
-  <String Id="SuccessRestartButton">&amp;Restart</String>
-  <String Id="FailureHeader">Setup Failed</String>
-  <String Id="FailureInstallHeader">Setup Failed</String>
-  <String Id="FailureUninstallHeader">Uninstall Failed</String>
-  <String Id="FailureRepairHeader">Repair Failed</String>
-  <String Id="FailureHyperlinkLogText">One or more issues caused the setup to fail. Please fix the issues and then retry setup. For more information see the &lt;a href="#"&gt;log file&lt;/a&gt;.</String>
-  <String Id="FailureRestartText">You must restart your computer to complete the rollback of the software.</String>
-  <String Id="FailureRestartButton">&amp;Restart</String>
-  <String Id="FailureCloseButton">&amp;Close</String>
-  <String Id="FilesInUseHeader">Files In Use</String>
-  <String Id="FilesInUseLabel">The following applications are using files that need to be updated:</String>
-  <String Id="FilesInUseCloseRadioButton">Close the &amp;applications and attempt to restart them.</String>
-  <String Id="FilesInUseDontCloseRadioButton">&amp;Do not close applications. A reboot will be required.</String>
-  <String Id="FilesInUseOkButton">&amp;OK</String>
-  <String Id="FilesInUseCancelButton">&amp;Cancel</String>
-</WixLocalization>

+ 0 - 85
Tools/build/JenkinsScripts/distribution/Installer/LumberyardThemeGDC.xml

@@ -1,85 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Theme xmlns="http://wixtoolset.org/schemas/thmutil/2010">
-    <Window Width="485" Height="300" HexStyle="100a0000" FontId="0">#(loc.Caption)</Window>
-    <Font Id="0" Height="-12" Weight="500" Foreground="000000" Background="FFFFFF">Segoe UI</Font>
-    <Font Id="1" Height="-24" Weight="500" Foreground="000000">Segoe UI</Font>
-    <Font Id="2" Height="-22" Weight="500" Foreground="666666">Segoe UI</Font>
-    <Font Id="3" Height="-12" Weight="500" Foreground="000000" Background="FFFFFF">Segoe UI</Font>
-    <Font Id="4" Height="-12" Weight="500" Foreground="ff0000" Background="FFFFFF" Underline="yes">Segoe UI</Font>
-
-    <Image X="11" Y="11" Width="64" Height="64" ImageFile="logo.png" Visible="yes"/>
-    <Text X="80" Y="11" Width="-11" Height="64" FontId="1" Visible="yes" DisablePrefix="yes">#(loc.Title)</Text>
-
-    <Page Name="Help">
-        <Text X="11" Y="80" Width="-11" Height="30" FontId="2" DisablePrefix="yes">#(loc.HelpHeader)</Text>
-        <Text X="11" Y="112" Width="-11" Height="-35" FontId="3" DisablePrefix="yes">#(loc.HelpText)</Text>
-        <Button Name="HelpCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.HelpCloseButton)</Button>
-    </Page>
-    <Page Name="Install">
-        <Text X="11" Y="80" Width="-11" Height="32" FontId="2" DisablePrefix="yes">#(loc.InstallHeader)</Text>
-        <Hypertext X="11" Y="121" Width="-11" Height="64" FontId="3">#(loc.InstallMessage)</Hypertext>
-        <Hypertext X="11" Y="161" Width="-11" Height="64" FontId="3">#(loc.InstallEulaAgreementText)</Hypertext>
-        <Text Name="InstallVersion" X="185" Y="-81" Width="-11" Height="17" FontId="3" DisablePrefix="yes" HideWhenDisabled="yes">#(loc.InstallVersion)</Text>
-        <Button Name="OptionsButton" X="-171" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.InstallOptionsButton)</Button>
-        <Button Name="InstallButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.InstallInstallButton)</Button>
-        <Button Name="WelcomeCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.InstallCloseButton)</Button>
-    </Page>
-    <Page Name="Options">
-        <Text X="11" Y="80" Width="-11" Height="30" FontId="2" DisablePrefix="yes">#(loc.OptionsHeader)</Text>
-        <Text X="11" Y="121" Width="-11" Height="17" FontId="3" DisablePrefix="yes">#(loc.OptionsLocationLabel)</Text>
-        <Editbox Name="FolderEditbox" X="11" Y="143" Width="-91" Height="21" TabStop="yes" FontId="3" FileSystemAutoComplete="yes" />
-        <Button Name="BrowseButton" X="-11" Y="142" Width="75" Height="23" TabStop="yes" FontId="3">#(loc.OptionsBrowseButton)</Button>
-        <Button Name="OptionsOkButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.OptionsOkButton)</Button>
-        <Button Name="OptionsCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.OptionsCancelButton)</Button>
-    </Page>
-    <Page Name="FilesInUse">
-      <Text X="11" Y="80" Width="-11" Height="30" FontId="2" DisablePrefix="yes">#(loc.FilesInUseHeader)</Text>
-      <Text X="11" Y="121" Width="-11" Height="34" FontId="3" DisablePrefix="yes">#(loc.FilesInUseLabel)</Text>
-      <Text Name="FilesInUseText"  X="11" Y="150" Width="-11" Height="-86" FontId="3" DisablePrefix="yes" HexStyle="0x0000C000"></Text>
-
-      <Button Name="FilesInUseCloseRadioButton" X="11" Y="-60" Width="-11" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes" HexStyle="0x000009">#(loc.FilesInUseCloseRadioButton)</Button>
-      <Button Name="FilesInUseDontCloseRadioButton" X="11" Y="-40" Width="-11" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes" HexStyle="0x000009">#(loc.FilesInUseDontCloseRadioButton)</Button>
-
-      <Button Name="FilesInUseOkButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.FilesInUseOkButton)</Button>
-      <Button Name="FilesInUseCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.FilesInUseCancelButton)</Button>
-    </Page>
-    <Page Name="Progress">
-        <Text X="11" Y="80" Width="-11" Height="30" FontId="2" DisablePrefix="yes">#(loc.ProgressHeader)</Text>
-
-        <Text X="11" Y="121" Width="75" Height="17" FontId="3" DisablePrefix="yes" >#(loc.DownloadLabel)</Text>
-        <Text Name="CacheProgressPackageText" X="100" Y="121" Width="-5" Height="17" FontId="3" DisablePrefix="yes">#(loc.OverallProgressPackageText)</Text>
-        <Progressbar Name="CacheProgressbar" X="11" Y="143" Width="-11" Height="20" />
-
-        <Text X="11" Y="181" Width="75" Height="17" FontId="3" DisablePrefix="yes">#(loc.ProgressLabel)</Text>
-        <Text Name="ExecuteProgressPackageText" X="100" Y="181" Width="-5" Height="17" FontId="3" DisablePrefix="yes"></Text>
-        <Progressbar Name="ExecuteProgressbar" X="11" Y="203" Width="-11" Height="20" />
-
-        <Button Name="ProgressCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.ProgressCancelButton)</Button>
-    </Page>
-    <Page Name="Modify">
-        <Text X="11" Y="80" Width="-11" Height="30" FontId="2" DisablePrefix="yes">#(loc.ModifyHeader)</Text>
-        <Button Name="RepairButton" X="-171" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.ModifyRepairButton)</Button>
-        <Button Name="UninstallButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.ModifyUninstallButton)</Button>
-        <Button Name="ModifyCancelButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.ModifyCloseButton)</Button>
-    </Page>
-    <Page Name="Success">
-        <Text Name="SuccessHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.SuccessHeader)</Text>
-        <Text Name="SuccessInstallHeader" X="75" Y="110" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.SuccessInstallHeader)</Text>
-        <Text Name="SuccessRepairHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.SuccessRepairHeader)</Text>
-        <Text Name="SuccessUninstallHeader" X="75" Y="110" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.SuccessUninstallHeader)</Text>
-        <Button Name="LaunchButton" X="-142" Y="-30" Width="200" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.SuccessLaunchButton)</Button>
-        <Text Name="SuccessRestartText" X="-11" Y="-51" Width="400" Height="34" FontId="3" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.SuccessRestartText)</Text>
-        <Button Name="SuccessRestartButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.SuccessRestartButton)</Button>
-    </Page>
-    <Page Name="Failure">
-        <Text Name="FailureHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.FailureHeader)</Text>
-        <Text Name="FailureInstallHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.FailureInstallHeader)</Text>
-        <Text Name="FailureUninstallHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.FailureUninstallHeader)</Text>
-        <Text Name="FailureRepairHeader" X="11" Y="80" Width="-11" Height="30" FontId="2" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.FailureRepairHeader)</Text>
-        <Hypertext Name="FailureLogFileLink" X="11" Y="121" Width="-11" Height="42" FontId="3" TabStop="yes" HideWhenDisabled="yes">#(loc.FailureHyperlinkLogText)</Hypertext>
-        <Hypertext Name="FailureMessageText" X="22" Y="163" Width="-11" Height="51" FontId="3" TabStop="yes" HideWhenDisabled="yes" />
-        <Text Name="FailureRestartText" X="-11" Y="-51" Width="400" Height="34" FontId="3" HideWhenDisabled="yes" DisablePrefix="yes">#(loc.FailureRestartText)</Text>
-        <Button Name="FailureRestartButton" X="-91" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0" HideWhenDisabled="yes">#(loc.FailureRestartButton)</Button>
-        <Button Name="FailureCloseButton" X="-11" Y="-11" Width="75" Height="23" TabStop="yes" FontId="0">#(loc.FailureCloseButton)</Button>
-    </Page>
-</Theme>

+ 0 - 29
Tools/build/JenkinsScripts/distribution/Installer/PackageExeSigning.py

@@ -1,29 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import os
-import SignTool
-
-
-def SignLumberyardExes(unpackedLocation,
-                       signingType,
-                       timestampServer,
-                       verbose,
-                       filesToSign):
-    for file in filesToSign:
-        fileFullPath = os.path.join(unpackedLocation, file)
-
-        SignTool.signtoolSignAndVerifyFile(fileFullPath,
-                                           os.path.dirname(fileFullPath),
-                                           os.path.dirname(fileFullPath),
-                                           signingType,
-                                           timestampServer,
-                                           verbose)

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/Redistributables.wxs

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0782cab23b9ad88124296586afb2ed8e88f2ab4020786efe05da396d8f33e8ec
-size 1542

+ 0 - 212
Tools/build/JenkinsScripts/distribution/Installer/SignTool.py

@@ -1,212 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import shutil
-import subprocess
-
-from BuildInstallerUtils import *
-
-# Signing cmd example
-# signtool.exe sign /f c:\codesigning\lumberyard.pfx /tr http://tsa.starfieldtech.com /td SHA256 /p thePasswordSeeNotesBelow %f
-
-# Verify cmd example.
-# signtool.exe verify /v /pa %f
-
-# SignTool COMMANDLINE TEMPLATES
-signtoolCommandBase = "signtool.exe {commandType} {verbose} {commandTypeParams} {filename}"
-signtoolKeySignParams = "/fd SHA256 /f {certificatePath} /p {password}"
-signtoolNameSignParams = '/fd SHA256 /n "{certificateName}"'
-signtoolTimestampParams = "/tr {timestampServerURL} /td SHA256"
-signtoolVerifyParams = "/pa /tw /hash SHA256"
-
-
-class SignType:
-    def __init__(self, signParamsString):
-        self.signtoolParamsTemplate = signParamsString
-
-    def getSigntoolParamsTemplate(self):
-        return self.signtoolParamsTemplate
-
-    def getSigntoolParams(self):
-        raise NotImplementedError(self.getSigntoolParams.__name__)
-
-
-class KeySigning(SignType):
-    def __init__(self, key, password):
-        SignType.__init__(self, signtoolKeySignParams)
-        self.privateKey = key
-        self.password = password
-
-    def getSigntoolParams(self):
-        return self.getSigntoolParamsTemplate().format(certificatePath=self.privateKey, password=self.password)
-
-
-class NameSigning(SignType):
-    def __init__(self, name):
-        SignType.__init__(self, signtoolNameSignParams)
-        self.certName = name
-
-    def getSigntoolParams(self):
-        return self.getSigntoolParamsTemplate().format(certificateName=self.certName)
-
-
-def getSignToolVerboseCommand(verboseMode):
-    if verboseMode:
-        return " /v"
-    else:
-        return ""
-
-
-def buildSignCommand(filename, signingType, verbose):
-    verboseCmd = getSignToolVerboseCommand(verbose)
-
-    signtoolParams = signingType.getSigntoolParams()
-    signtoolCommand = signtoolCommandBase.format(commandType="sign",
-        verbose=verboseCmd, commandTypeParams=signtoolParams, filename=filename)
-    return signtoolCommand
-
-
-def signtoolTestCredentials(signingType, timestampServer, verbose):
-    testFileName = "testFile"
-    if os.path.exists(testFileName):
-        os.remove(testFileName)
-
-    with open(testFileName, "wb") as testSign:
-        testSign.seek(1023)
-        testSign.write("0")
-
-    # error codes for sign tool are only 0 or 1. No way to get a success without
-    #   a valid .exe to sign, so the only way to test is to try to sign a file
-    #   that will be considered an unrecognized format, and parse the output to
-    #   see if the error was with the password.
-    signtoolCommand = buildSignCommand(testFileName, signingType, verbose)
-    sp = subprocess.Popen(signtoolCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    output, error = sp.communicate()
-
-    validPassword = False
-    if error:
-        # If find returns -1, the "password is not correct" text was not found
-        #   in the output, meaning the password is valid.
-        validPassword = error.splitlines()[0].lower().find("password is not correct") == -1
-
-    os.remove(testFileName)
-    return validPassword
-
-
-def signtoolSignFile(filename, signingType, verbose):
-    signtoolCommand = buildSignCommand(filename, signingType, verbose)
-
-    verbose_print(verbose, '\n{}\n'.format(signtoolCommand))
-    return os.system(signtoolCommand) == 0
-
-
-def signtoolTimestamp(filename, timestampServer, verbose):
-    verboseCmd = getSignToolVerboseCommand(verbose)
-
-    signtoolParams = signtoolTimestampParams.format(timestampServerURL=timestampServer)
-    signtoolCommand = signtoolCommandBase.format(commandType="timestamp",
-                                                 verbose=verboseCmd,
-                                                 commandTypeParams=signtoolParams,
-                                                 filename=filename)
-
-    verbose_print(verbose, '\n{}\n'.format(signtoolCommand))
-
-    success = False
-    while not success:
-        success = (os.system(signtoolCommand) == 0)
-
-    return success
-
-
-def signtoolVerifySign(filename, verbose):
-    verboseCmd = getSignToolVerboseCommand(verbose)
-
-    signtoolCommand = signtoolCommandBase.format(commandType="verify",
-                                                 verbose=verboseCmd,
-                                                 commandTypeParams=signtoolVerifyParams,
-                                                 filename=filename)
-
-    verbose_print(verbose, '\n{}\n'.format(signtoolCommand))
-    return_result = os.system(signtoolCommand) == 0
-    return return_result
-
-
-def signtoolSignAndVerifyFile(filename,
-                              workingDir,
-                              sourceDir,
-                              signingType,
-                              timestampServer,
-                              verbose):
-    shouldCopy = workingDir is not sourceDir
-    filePath = os.path.join(workingDir, filename)
-
-    # most of the time that signing fails, it is due to the signing server not responding.
-    # keep retrying until the signing is successful.
-    # NOTE: might want to change this to a for loop to limit number of retries per file.
-    success = False
-    attemptsMade = 0
-
-    while not success:
-        # SIGN THE FILE
-        success = signtoolSignFile(filePath, signingType, verbose)
-        assert success, 'Failed to sign file {}. Most likely the password entered is incorrect.'.format(filename)
-
-        # TIMESTAMP THE FILE
-        success = signtoolTimestamp(filePath, timestampServer, verbose)
-        assert success, "Failed to contact the timestamp server."
-
-        # VERIFY SUCCESSFUL SIGNING
-        success = signtoolVerifySign(filePath, verbose)
-        attemptsMade += 1
-
-        if not success:
-            verbose_print(verbose, 'Failed to sign file {}'.format(filename))
-            if shouldCopy:
-                # delete the original file, copy back from source, and re-sign
-                os.remove(filePath)
-                shutil.copy(os.path.join(sourceDir, filename), filePath)
-
-        verbose_print(verbose, 'Attempts made to sign file {}: {}\n'.format(filename, attemptsMade))
-
-    return success
-
-
-def signtoolSignAndVerifyFiles(fileList,
-                               workingDir,
-                               sourceDir,
-                               signingType,
-                               timestampServer,
-                               verbose):
-
-    for filename in fileList:
-        signtoolSignAndVerifyFile(filename,
-                                  workingDir,
-                                  sourceDir,
-                                  signingType,
-                                  timestampServer,
-                                  verbose)
-
-
-def signtoolSignAndVerifyType(fileExtension,
-                              workingDir,
-                              sourceDir,
-                              signingType,
-                              timestampServer,
-                              verbose):
-    # Sign each file in a directory that has the given file extension
-    for file in os.listdir(workingDir):
-        if file.endswith(fileExtension):
-            signtoolSignAndVerifyFile(os.path.basename(file),
-                                      workingDir,
-                                      sourceDir,
-                                      signingType,
-                                      timestampServer,
-                                      verbose)

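As a quick reference, the two signing modes implemented above (KeySigning and NameSigning) differ only in the parameter string they splice into the signtool.exe command. A small standalone sketch of that formatting, using made-up certificate values (it only builds the strings and does not run signtool.exe):

    # Same templates as signtoolCommandBase / signtoolKeySignParams / signtoolNameSignParams above.
    commandBase = "signtool.exe {commandType} {verbose} {commandTypeParams} {filename}"
    keySignParams = "/fd SHA256 /f {certificatePath} /p {password}"
    nameSignParams = '/fd SHA256 /n "{certificateName}"'

    # Signing with a .pfx key file (placeholder path and password):
    print(commandBase.format(
        commandType="sign", verbose="/v",
        commandTypeParams=keySignParams.format(certificatePath="lumberyard.pfx",
                                               password="<password>"),
        filename="LumberyardInstaller.exe"))

    # Signing with a certificate from the store, selected by subject name (placeholder name):
    print(commandBase.format(
        commandType="sign", verbose="",
        commandTypeParams=nameSignParams.format(certificateName="My Dev Certificate"),
        filename="LumberyardInstaller.exe"))
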
+ 0 - 186
Tools/build/JenkinsScripts/distribution/Installer/TestInstaller.py

@@ -1,186 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-import ctypes
-import filecmp
-import os
-import os.path
-import sys
-import tempfile
-
-parser = argparse.ArgumentParser(description='Tests the Lumberyard Installer bootstrapper.')
-parser.add_argument('-v', '--verbose', action='store_true', help='Enables logging messages (default False)')
-parser.add_argument('-k', '--keep', action='store_true', help="Don't delete temp files")
-parser.add_argument('--packageRoot', default=None, help="Source content to test against, required for use. (default None)")
-parser.add_argument('--lyVersion', default="0.0.0.0", help="Version to use when generating artifacts (default '0.0.0.0')")
-parser.add_argument('--target', default='%temp%\\installertest\\TestInstall', help='The location to install Lumberyard. Make sure to use backslashes in the path. (default "%%temp%%\\installertest\\TestInstall")')
-parser.add_argument('--genRoot', default='%temp%/installertest/', help='Path for temp data (default "%%temp%%/installertest/")')
-parser.add_argument('--hostURL', default="https://s3-us-west-2.amazonaws.com/lumberyard-streaming-install-test/releases/JoeInstallTest/", help='The URL for the installer to download its packages from (msi + cab files). (default https://s3-us-west-2.amazonaws.com/lumberyard-streaming-install-test/releases/JoeInstallTest/)')
-parser.add_argument('--testName', default=None, help='The name of the test to run (case insensitive). Will cause an error if no test with that name exists. Will run all tests if not specified. (default None)')
-bootstrapperName = "automatedTestBootstrapper.exe"
-
-args = parser.parse_args()
-
-# Installation requires administration privileges.
-try:
-    is_admin = os.getuid() == 0
-except AttributeError:
-    is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
-
-if not is_admin:
-    sys.exit("Administrator privileges must be enabled to install Lumberyard")
-
-# Search target for forward slash (/) and replace with back slashes (\)
-# because the install command doesn't like forward slashes.
-installTarget = args.target.replace('/', '\\')
-
-pfxName = "LumberyardDev.pfx"
-signingArgs = "--privateKey {} --password test".format(pfxName)
-
-
-# Creates an installer with the given additional arguments. Will leave temp files
-#   even if the keep argument for this script is specified, in order to correctly
-#   perform some tests
-def makeInstaller(additionalCommands=None):
-    buildInstallerCommand = "BuildInstaller.py --packageRoot {} " \
-                            "--lyVersion {} " \
-                            "--genRoot {} " \
-                            "--bootstrapName {} " \
-                            "--hostURL {}".format(args.packageRoot,
-                                                  args.lyVersion,
-                                                  args.genRoot,
-                                                  bootstrapperName,
-                                                  args.hostURL)
-    if args.verbose:
-        buildInstallerCommand += " -v"
-
-    if additionalCommands:
-        buildInstallerCommand += " " + additionalCommands
-
-    os.system(buildInstallerCommand)
-
-
-# Will create an installer with the given additional arguments, run the installer,
-#   and verify contents of the installed lumberyard with the source package.
-#   Some tests require artifacts of previous installs to exist. If they do not,
-#   a set of artifacts will be created.
-def createAndTestInstaller(installerSubfolder,
-                           additionalCommands,
-                           requiresSigning=False,
-                           requiresArtifacts=False):
-    if requiresArtifacts:
-        # Create a set of artifacts if none are available
-        if not os.path.exists(os.path.join(args.genRoot, installerSubfolder)):
-            if requiresSigning:
-                makeInstaller(signingArgs)
-            else:
-                makeInstaller()
-
-    makeInstaller(additionalCommands)
-    bootstrapperPath = os.path.join(args.genRoot, installerSubfolder, bootstrapperName)
-    installCommand = "{} /silent InstallFolder={}".format(bootstrapperPath, installTarget)
-    if args.verbose:
-        print("Running installation command:")
-        print("\t{}".format(installCommand))
-    os.system(installCommand)
-
-    # Validate install was successful
-    pathToInstalledPackage = os.path.join(installTarget, args.lyVersion)
-    # filecmp.dircmp fails if you pass in a directory containing "%temp%", so %temp% has to be
-    # replaced with the real temp directory before calling it.
-    scrubbedPackagedRoot = args.packageRoot.replace("%temp%", tempfile.gettempdir())
-    pathToInstalledPackage = pathToInstalledPackage.replace("%temp%", tempfile.gettempdir())
-    installVerifier = filecmp.dircmp(scrubbedPackagedRoot, pathToInstalledPackage)
-    if args.verbose:
-        print("Dif between install and source:")
-        installVerifier.report_full_closure()
-    if installVerifier.left_only or installVerifier.right_only:
-        print("Error: Install failed, source and destination do not match")
-
-    # Uninstall Lumberyard
-    uninstallCommand = "start /WAIT {} /silent /uninstall".format(bootstrapperPath)
-    if args.verbose:
-        print("Running uninstallation command:")
-        print("\t{}".format(uninstallCommand))
-    os.system(uninstallCommand)
-
-    # Validate uninstall was successful
-    if os.path.isdir(args.target):
-        print("Error: Uninstall failed.")
-
-
-tests = {}
-
-
-def makeTest(testName,
-             installerSubfolder,
-             additionalCommands,
-             requiresSigning=False,
-             requiresArtifacts=False):
-
-    def _makeTestArgs(testName, *testArgs):
-        tests[testName] = testArgs
-
-    _makeTestArgs(testName, installerSubfolder, additionalCommands, requiresSigning, requiresArtifacts)
-
-
-def listTests():
-    print('Tests with the following names have been defined:')
-    for testName in tests.keys():
-        print(testName)
-
-
-def runTest(testName):
-    if args.verbose:
-        print('Performing test "{}"'.format(testName))
-    testArgs = tests[testName]
-    if testArgs is not None and len(testArgs) > 0:
-        createAndTestInstaller(*testArgs)
-
-
-def runAllTests():
-    for testName in tests.keys():
-        runTest(testName)
-
-
-# DEFINE TEST CASES
-
-# Test success cases
-# Unsigned
-makeTest("unsigned installer", "unsignedInstaller", None)
-makeTest("unsigned bootstrap only", "unsignedInstaller", "--bootstrapOnly", False, True)
-# Signed
-makeTest("signed installer", "installer", signingArgs, True)
-makeTest("signed sign only", "installer", signingArgs + " --signOnly", True, True)
-makeTest("signed bootstrap only", "installer", signingArgs + " --bootstrapOnly", True, True)
-
-# END DEFINE TEST CASES
-
-
-# RUN TEST(S)
-if args.testName is not None:
-    if args.testName.lower() in tests:
-        runTest(args.testName.lower())
-    else:
-        print('Test with the name "{}" does not exist.'.format(args.testName))
-        listTests()
-else:
-    runAllTests()
-# END RUN TEST(S)
-
-
-# CLEAN-UP
-if not args.keep:
-    import shutil
-    if os.path.exists(args.genRoot):
-        shutil.rmtree(args.genRoot)
-# END CLEAN-UP

+ 0 - 10
Tools/build/JenkinsScripts/distribution/Installer/__init__.py

@@ -1,10 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#

+ 0 - 23
Tools/build/JenkinsScripts/distribution/Installer/allowed_empty_folders.json

@@ -1,23 +0,0 @@
-{
-    "Whitelist": [
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrenvmap/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrenvmap/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrheader/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrheader/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrmakepreview/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrmakepreview/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrmaketiled/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/exrstdattr/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImf/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImf/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImfExamples/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImfExamples/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImfTest/Debug",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/IlmImfTest/Release",
-        "3rdParty/OpenEXR/2.0/src/openexr-2.0.0/vc/vc7/OpenEXR/release",
-        "dev/Bin64/EditorPlugins",
-        "dev/SamplesProject/Levels/Samples/Gems_Samples",
-        "dev/Cache/StarterGame/pc/user/log"
-    ]
-}

+ 0 - 50
Tools/build/JenkinsScripts/distribution/Installer/dir_filelist.json

@@ -1,50 +0,0 @@
-{
-    "_comment": "Folders listed here will only include the content listed. All other content within the folder will be omitted from the installer.",
-    "dev/Bin64vc141": [
-        "EditorPlugins/ProceduralMaterialEditorPlugin.dll",
-        "EditorPlugins/ProceduralMaterialEditorPlugin.exp",
-        "EditorPlugins/ProceduralMaterialEditorPlugin.lib",
-        "EditorPlugins/ProceduralMaterialEditorPlugin.dll.manifest",
-        "rc/d3dcompiler_47.dll",
-        "rc/d3dcsx_47.dll",
-        "rc/d3dx11_43.dll",
-        "rc/dbghelp.dll",
-        "rc/opengl32sw.dll",
-        "rc/PVRTexLib_License.txt",
-        "rc/xinput1_3.dll",
-        "D3DCompiler_43.dll",
-        "d3dcompiler_46.dll",
-        "d3dcompiler_47.dll",
-        "d3dcsx_46.dll",
-        "d3dcsx_47.dll",
-        "d3dx11_43.dll",
-        "dbghelp.dll",
-        "glut32.dll",
-        "imguilib.dll",
-        "imguilib.dll.manifest",
-        "imguilib.exp",
-        "imguilib.lib",
-        "libeay32.dll",
-        "LuaCompiler.exe",
-        "Microsoft.VC90.CRT.manifest",
-        "msvcr90.dll",
-        "nvToolsExt64_1.dll",
-        "PhysX3CharacterKinematicPROFILE_x64.dll",
-        "PhysX3CommonPROFILE_x64.dll",
-        "PhysX3CookingPROFILE_x64.dll",
-        "PhysX3GpuPROFILE_x64.dll",
-        "PhysX3PROFILE_x64.dll",
-        "PhysXDevice64.dll",
-        "PVRTexLib_License.txt",
-        "PxFoundationPROFILE_x64.dll",
-        "PxPvdSDKPROFILE_x64.dll",
-        "SDL2.dll",
-        "ssleay32.dll",
-        "substance_d3d11pc_blend.dll",
-        "substance_linker.dll",
-        "substance_sse2_blend.dll",
-        "ToolsCrashUploader.exe",
-        "ToolsCrashUploader.exe.manifest",
-        "xinput1_3.dll"
-    ]
-}

+ 0 - 3
Tools/build/JenkinsScripts/distribution/Installer/editor_icon_setup.ico

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2f464626a1ce6148798c6c378865a66fceefe0e8541c904730485e38d21203b6
-size 43973

BIN
Tools/build/JenkinsScripts/distribution/Installer/license.rtf


+ 0 - 66
Tools/build/JenkinsScripts/distribution/Metrics/GameTemplates/BuildGameTemplateWhitelist.py

@@ -1,66 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import BuildGameTemplateWhitelistArgs
-import json
-import os
-import sys
-
-# Generates a whitelist of project template names we can include in metrics.
-# Input is a path to the project templates folder for a Lumberyard install.
-# Output is a JSON formatted file in that directory, that will be included in the package.
-# A template is defined by a templatedefinition.json file inside of a folder, the template name is the folder containing this file.
-# Output should look like:
-# {
-#   "TemplateListDescription": "This is a list of Amazon provided templates. This is a white list of project template names that are included in usage event tracking. If a template name is not in this list, the name will not be included in the event.",
-#   "TemplateNameWhitelist": [
-#     "SimpleTemplate",
-#     "EmptyTemplate"
-#   ]
-# }
-def main():
-    print "Generating Game Template Whitelist"
-    args = BuildGameTemplateWhitelistArgs.createArgs()
-    
-    validTemplates = []
-    # Search for project templates.
-    for root, dirnames, filenames in os.walk(args.projectTemplatesFolder):
-        templateRoot = os.path.basename(root)
-        for file in filenames:
-            if file == args.templateDefinitionFileName:
-                fullPath = os.path.join(templateRoot, file)
-                print "Found template definition: " + fullPath
-                validTemplates.append(templateRoot)
-
-    templateDescriptionKey = "TemplateListDescription"
-    templatedescriptionValue = "This is a white list of project template names that are included in usage event tracking. "
-    templatedescriptionValue  += "If a template name is not in this list, the name will not be included in the event."
-
-    TemplateNameWhitelistKey = "TemplateNameWhitelist"
-    jsonString = json.dumps({templateDescriptionKey: templatedescriptionValue, 
-                             TemplateNameWhitelistKey: validTemplates},
-                            sort_keys=True,
-                            indent=4,
-                            separators=(',', ': '))
-
-                            
-    templateWhitelistFilePath = os.path.join(args.projectTemplatesFolder, args.templateWhitelistFilename)
-    try:
-        whitelistFile = open(templateWhitelistFilePath, 'w')
-        whitelistFile.write(jsonString)
-        whitelistFile.close()
-    except:
-        print "Error writing template whitelist to file " + templateWhitelistFilePath
-        return 1
-    return 0
-
-if __name__ == "__main__":
-    sys.exit(main())

+ 0 - 30
Tools/build/JenkinsScripts/distribution/Metrics/GameTemplates/BuildGameTemplateWhitelistArgs.py

@@ -1,30 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-
-def createArgs():
-    parser = argparse.ArgumentParser(description='Builds a whitelist of Game Templates to include in reported metrics.')
-    parser.add_argument('--projectTemplatesFolder', 
-                        default=None, 
-                        required=True, 
-                        help='Path to the templates folder. Expected: dev\\ProjectTemplates\\')
-    parser.add_argument('--templateDefinitionFileName', 
-                        default='templatedefinition.json', 
-                        help='Template definition file name (default templatedefinition.json)')
-    parser.add_argument('--templateWhitelistFilename', 
-                        default='TemplateListForMetrics.json', 
-                        help='Template whitelist file name (default TemplateListForMetrics.json)')
-    args = parser.parse_args()
-    print "Building game template whitelist with the following arguments"
-    print(args)
-    return args
-

+ 0 - 309
Tools/build/JenkinsScripts/distribution/ThirdParty/BuildThirdPartyPackages.py

@@ -1,309 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import BuildThirdPartyArgs
-import BuildThirdPartyUtils
-import SDKPackager
-import ThirdPartySDKAWS
-import json
-import os
-import re
-import sys
-import urlparse
-
-importDir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(importDir, "..")) #Required for AWS_PyTools
-from AWS_PyTools import LyChecksum
-from AWS_PyTools import LyCloudfrontOps
-
-# Some files are in the 3rd party folder and not associated with any SDK/Versions.
-fileIgnorelist = [
-    "boost/Boost_Autoexp.dat",
-    "boost/CryEngine Customizations.txt",
-    "boost/CryREADME.txt",
-    "boost/LICENSE_1_0.txt",
-    "boost/lumberyard-1.61.0.patch",
-    "Qwt/license.txt",
-    "lz4/git checkout.txt"
-    "3rdParty.txt"
-]
-
-
-class SDK(object):
-
-    def __init__(self):
-        self.fileList = []
-        self.path = ""
-        self.versionFolder = ""
-
-
-def getListFromFile(file):
-    openFile = open(file, 'r')
-    filePaths = openFile.readlines()
-    openFile.close()
-    return filePaths
-
-
-def addUntrackedSDKs(sdks):
-    # Not all SDKs are represented in SetupAssistantConfig.json yet.
-
-    untrackedSDKs = {
-        "OpenEXR20": ["OpenEXR/2.0", "2.0"],
-        "OpenEXR22": ["OpenEXR/2.2", "2.2"],
-    }
-
-    for sdkName, untrackedSDK in untrackedSDKs.iteritems():
-        sdk = SDK()
-        sdk.path = untrackedSDK[0]
-        sdk.versionFolder = untrackedSDK[1]
-        sdks[sdkName] = sdk
-
-
-def getSDKsToPathsDict(thirdPartyVersionsFile):
-    versionsList = getListFromFile(thirdPartyVersionsFile)
-    sdks = {}
-    for version in versionsList:
-        match = re.match(r"(.*)(\.package.dir=)(.*)", version)
-        if not match:
-            BuildThirdPartyUtils.printError('SDK version file {0} has invalid formatting on line {1}'.format(
-                    thirdPartyVersionsFile,
-                    version))
-
-        sdkName = match.group(1)
-        # Store the SDK Path with forward slashes to make matching easier.
-        sdkPath = match.group(3).replace('\\', '/')
-        sdk = SDK()
-        sdk.path = sdkPath
-
-        versionMatch = re.search(r"([^/\\\\]*)$", sdkPath)
-        if not versionMatch:
-            BuildThirdPartyUtils.printError('SDK version file {0} has invalid formatting on line {1}'.format(
-                    thirdPartyVersionsFile,
-                    version))
-
-        sdk.versionFolder = versionMatch.group(1)
-        sdks[sdkName] = sdk
-
-    addUntrackedSDKs(sdks)
-    return sdks
-
-
-def populateSDKFilePaths(sdks, sdkFileListFile):
-    fileList = getListFromFile(sdkFileListFile)
-    lastSDKName = ""
-    for file in fileList:
-        slashesFixed = file.replace('\\', '/')
-        match = re.search("3rdParty/(.*)", slashesFixed)
-        if not match:
-            BuildThirdPartyUtils.printError("Could not find third party folder in file path {0}".format(file))
-
-        localPath = match.group(1)
-        sdkFound = False
-
-        # Files are generally grouped by SDK in the file list, so caching the
-        # last SDK used can save a search through the loop.
-        if lastSDKName:
-            if localPath.startswith(sdks[lastSDKName].path):
-                sdks[lastSDKName].fileList.append(file)
-                sdkFound = True
-                continue
-
-        for sdkName, sdk in sdks.iteritems():
-            if localPath.startswith(sdk.path):
-                sdk.fileList.append(file)
-                sdkFound = True
-                lastSDKName = sdkName
-                break
-
-        # Some files are loose and not trackable within the current system. For now we're going to ignore them,
-        # and they will need to be included in the package manually.
-        for ignore in fileIgnorelist:
-            if localPath == ignore:
-                sdkFound = True
-                break
-
-        if not sdkFound:
-            BuildThirdPartyUtils.printError("File {0} is not associated with any known SDKs".format(
-                    file,
-                    sdkFileListFile))
-
-
-def checkForSDKStagingErrors(bucket, baseBucketPath, sdkPath, sdkPlatform, filesetHash):
-    tmpDirPath = SDKPackager.getTempDir(sdkPath, sdkPlatform)
-
-    filelistFileName = SDKPackager.getFilelistFileName(sdkPlatform)
-    filelistLocalPath = os.path.join(tmpDirPath, filelistFileName)
-    filelistStagingPath = ThirdPartySDKAWS.getS3StagingPath(baseBucketPath, sdkPath) + filelistFileName
-
-    if not os.path.exists(tmpDirPath):
-        os.makedirs(tmpDirPath)
-    bucket.download_file(filelistStagingPath, filelistLocalPath)
-
-    with open(filelistLocalPath, 'r') as filelistData:
-        filelistJsonData = json.load(filelistData)
-
-    assert filelistJsonData, "Failed to load from " + filelistLocalPath
-
-    filelistChecksum = filelistJsonData["filelist"]["checksum"]
-
-    if filelistChecksum != filesetHash.hexdigest():
-        # If the checksums don't match, then the SDK has been modified without changing the version. This is an error.
-        return True
-    return False
-
-
-def checkForExistingSDK(ignoreExisting,
-                        bucket,
-                        baseBucketPath,
-                        sdkName,
-                        versionFolder,
-                        sdkPath,
-                        sdkPlatform,
-                        filesetHash):
-    returnCode = 0
-    statusMessage = None
-    # Check for existing manifest
-    manifestExists, filelistExists = ThirdPartySDKAWS.getSDKStagingStatus(bucket,
-                                                                          baseBucketPath,
-                                                                          sdkPath,
-                                                                          sdkPlatform)
-    if ignoreExisting:
-        return statusMessage, returnCode
-
-    # If the manifest or filelist is missing, but one is available, then the SDK likely failed to upload.
-    # In this case, just continue on and generate the SDK package.
-    if manifestExists and filelistExists:
-        # If the manifest and filelist both exist, this SDK.version.package has been uploaded already.
-        stagingError = checkForSDKStagingErrors(bucket,
-                                                baseBucketPath,
-                                                sdkPath,
-                                                sdkPlatform,
-                                                filesetHash)
-
-        if stagingError:
-            statusMessage = "ERROR: The file list manifests do not match for SDK {0}, Version {1}, Platform {2}".format(sdkName,
-                                                                                                                 versionFolder,
-                                                                                                                 sdkPlatform)
-            returnCode = 1
-        else:
-            statusMessage = "\tEverything is up to date for SDK {0}, Version {1}, Platform {2}".format(sdkName,
-                                                                                                     versionFolder,
-                                                                                                     sdkPlatform)
-    if statusMessage:
-        print statusMessage
-    return statusMessage, returnCode
-
-
-def getListFromJsonFile(jsonFile, root):
-    if not jsonFile:
-        return []
-    if not os.path.isfile(jsonFile):
-        print "{} is not a valid file, please check the filename specified.".format(jsonFile)
-        exit(1)
-    with open(jsonFile, 'r') as source:
-        source_json = json.load(source)
-        try:
-            sdks_list = source_json[root]
-            return sdks_list
-        except KeyError:
-            print "Unknown json root {}, please check the json root specified.".format(root)
-            exit(1)
-
-
-############################
-
-
-def main():
-    print "Building third party packages"
-    args = BuildThirdPartyArgs.createArgs()
-
-    print "Parsing file lists"
-    ignoreExistingSDKList = getListFromJsonFile(args.ignoreExistingList, args.sdkPlatform)
-    sdkBlacklist = getListFromJsonFile(args.sdkBlacklist, "Blacklist")
-    sdks = getSDKsToPathsDict(args.thirdPartyVersions)
-    populateSDKFilePaths(sdks, args.sdkFilelist)
-
-    cloudfrontDist = LyCloudfrontOps.getCloudfrontDistribution(args.cloudfrontDomain, args.awsProfile)
-    bucket = LyCloudfrontOps.getBucket(cloudfrontDist, args.awsProfile)
-    baseBucketPath = LyCloudfrontOps.buildBucketPath(urlparse.urljoin(args.cloudfrontDomain, args.stagingFolderPath), cloudfrontDist)
-    baseCloudfrontUrl = args.cloudfrontDomain # we assume that the domain ends in a trailing '/'
-    if args.stagingFolderPath:
-        baseCloudfrontUrl += args.stagingFolderPath # we assume that the folder path ends in a trailing '/'
-
-    returnCode = 0
-    sdksGenerated = []
-
-    sdkCount = len(sdks)
-    currentSDK = -1
-    for sdkName, sdk in sdks.iteritems():
-        currentSDK += 1
-        print "{0}/{1} - Processing {2}".format(currentSDK, sdkCount, sdkName)
-
-        # Don't process SDKs in the blacklist.
-        if sdkName in sdkBlacklist:
-            print "\tSkipping blacklist SDK {0}".format(sdkName)
-            continue
-        # Hash all files for the SDK
-        filesetHash = LyChecksum.generateFilesetChecksum(sdk.fileList)
-
-        if args.internalPackage:
-            ignoreExisting = True
-        else:
-            ignoreExisting = sdkName in ignoreExistingSDKList
-
-        statusMessage, sdkReturnCode = checkForExistingSDK(ignoreExisting,
-                                                           bucket,
-                                                           baseBucketPath,
-                                                           sdkName,
-                                                           sdk.versionFolder,
-                                                           sdk.path,
-                                                           args.sdkPlatform,
-                                                           filesetHash)
-        # The final return should be the highest reported return code.
-        returnCode = max(returnCode, sdkReturnCode)
-        if statusMessage:
-            continue
-        manifestPath, filelistPath, zipFiles = SDKPackager.generateSDKPackage(baseCloudfrontUrl,
-                                                                              sdkName,
-                                                                              sdk.versionFolder,
-                                                                              sdk.path,
-                                                                              args.sdkPlatform,
-                                                                              filesetHash,
-                                                                              sdk.fileList,
-                                                                              args.archiveMaxSize)
-        if not args.skipUpload:
-            ThirdPartySDKAWS.uploadSDKToStaging(bucket,
-                                                baseBucketPath,
-                                                sdkName,
-                                                sdk.versionFolder,
-                                                sdk.path,
-                                                args.sdkPlatform,
-                                                manifestPath,
-                                                filelistPath,
-                                                zipFiles)
-        print "{0}/{1} - Completed Processing {2}".format(currentSDK+1, sdkCount, sdkName)
-        sdksGenerated.append(sdkName)
-
-    # If any SDKs were updated, and we were told to make a file to output
-    # the list of updated SDKs to, then make said file and write out the list
-    if len(sdksGenerated) > 0 and args.updatesFile:
-        try:
-            with open(args.updatesFile, 'w') as out:
-                out.writelines('\n'.join(sdksGenerated))
-        except Exception:
-            BuildThirdPartyUtils.printError("Failed to write list of updated SDKs to {0}. Backup zip files will not be created properly for this build.".format(args.updatesFile))
-
-    return returnCode
-
-if __name__ == "__main__":
-    sys.exit(main())
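
The packager above derives its SDK map from a properties-style versions file, where each entry follows the `<sdk>.package.dir=<path>` pattern and the last path component doubles as the version folder. A minimal standalone sketch of that parsing step (the example property line is hypothetical, not taken from the repository):

    import re

    def parse_sdk_versions(lines):
        # Each entry looks like "<name>.package.dir=<relative path>"; the last
        # path component is treated as the version folder.
        sdks = {}
        for line in lines:
            match = re.match(r"(.*)\.package\.dir=(.*)", line.strip())
            if not match:
                continue  # the original script aborts with printError here
            name, path = match.group(1), match.group(2).replace('\\', '/')
            sdks[name] = {"path": path, "version": path.rsplit('/', 1)[-1]}
        return sdks

    print(parse_sdk_versions(["examplesdk.package.dir=examplesdk/1.0.2"]))
    # {'examplesdk': {'path': 'examplesdk/1.0.2', 'version': '1.0.2'}}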

+ 0 - 29
Tools/build/JenkinsScripts/distribution/ThirdParty/BuildThirdPartyUtils.py

@@ -1,29 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import sys
-
-def printError(message):
-    print(message)
-    sys.exit(1)
-
-
-def reportIterationStatus(index, count, reportFrequency, message):
-    # Reporting on the first, last, and at a frequency that is a good balance of not spamming the logs
-    frequency = count / reportFrequency
-    lastPercent = float(index-1) / float(count)
-    percentComplete = float(index) / float(count)
-    lastReportSlice = int(lastPercent * frequency)
-    thisReportSlice = int(percentComplete * frequency)
-    shouldPrint = lastReportSlice != thisReportSlice or index == 1 or index == count
-    if shouldPrint:
-        print "\t{0}% complete, {1}/{2} {3}".format(int(percentComplete*100), index, count, message)

+ 0 - 207
Tools/build/JenkinsScripts/distribution/ThirdParty/SDKPackager.py

@@ -1,207 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import zipfile
-import os.path
-import re
-import json
-import sys
-import ThirdPartySDKAWS
-import BuildThirdPartyUtils
-import tempfile
-
-importDir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(importDir, "..")) #Required for AWS_PyTools
-from AWS_PyTools import LyChecksum
-
-class SDKZipFile(object):
-    def __init__(self):
-        self.file = None
-        self.filePath = None
-        self.contents = []
-        self.compressedSize = 0
-        self.uncompressedSize = 0
-        self.compressedHash = 0
-
-
-def getFilelistVersion():
-    return "1.0.0"
-
-
-def getFilelistFileName(sdkPlatform):
-    return "filelist." + getFilelistVersion() + "." + sdkPlatform + ".json"
-
-
-def getManifestVersion():
-    return "1.0.0"
-
-
-def getManifestFileName(sdkPlatform):
-    return "manifest." + getManifestVersion() + "." + sdkPlatform + ".json"
-
-
-def toArchivePath(filePath):
-    # Archives are generated relative to 3rdParty folder, so strip everything before that in the path.
-    # zipfile is very particular in how paths to files within it are formatted.
-    slashesFixed = filePath.replace('\\', '/')
-    match = re.search("3rdParty/(.*)", slashesFixed)
-    if not match:
-        BuildThirdPartyUtils.printError("Could not find third party folder in file path {0}".format(filePath))
-    archivePath = match.group(1).strip("/").strip("\n")
-    return archivePath
-
-
-def prepFilesystemForFile(filePath):
-    directory = os.path.dirname(filePath)
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-    if os.path.isfile(filePath):
-        os.remove(filePath)
-
-def createJSONString(dataToFormat):
-    return json.dumps(dataToFormat, sort_keys=True, indent=4, separators=(',', ': '))
-
-
-def generateSDKPackage(baseCloudfrontUrl,
-                       sdkName,
-                       sdkVersion,
-                       sdkPath,
-                       sdkPlatform,
-                       filesetHash,
-                       filePaths,
-                       archiveMaxSize):
-    zipFiles = zipPackage(sdkName, sdkVersion, sdkPath, sdkPlatform, filePaths, archiveMaxSize)
-    filelistPath = buildFilelistJSON(sdkPath, sdkPlatform, filesetHash, filePaths)
-    manifestPath = buildManifestJSON(baseCloudfrontUrl,
-                                     sdkName,
-                                     sdkPath,
-                                     sdkPlatform,
-                                     zipFiles,
-                                     filelistPath)
-    return manifestPath, filelistPath, zipFiles
-
-
-def getTempDir(sdkPath, sdkPlatform):
-    tempDir = os.path.join(tempfile.gettempdir(), "LY", "3rdPartySDKs", sdkPath, sdkPlatform)
-    return os.path.expandvars(tempDir)
-
-
-def zipPackage(sdkName, sdkVersion, sdkPath, sdkPlatform, filePaths, archiveMaxSize):
-    try:
-        import zlib
-        compression = zipfile.ZIP_DEFLATED
-    except ImportError:
-        compression = zipfile.ZIP_STORED
-
-    tempDir = getTempDir(sdkPath, sdkPlatform)
-    zipFiles = []
-    currentZipFile = None
-
-    fileIndex = 0
-    fileCount = len(filePaths)
-
-    for filePath in filePaths:
-
-        filePath = filePath.strip("\n")
-        archivePath = toArchivePath(filePath)
-        if currentZipFile is None or currentZipFile.file is None or currentZipFile.compressedSize > archiveMaxSize:
-            if currentZipFile and currentZipFile.file:
-                currentZipFile.file.close()
-            currentZipFile = SDKZipFile()
-            zipFiles.append(currentZipFile)
-            currentZipFile.filePath = os.path.join(tempDir, sdkName + "." + str(sdkPlatform) + "." + str(len(zipFiles)) + ".zip")
-            prepFilesystemForFile(currentZipFile.filePath)
-            currentZipFile.file = zipfile.ZipFile(currentZipFile.filePath, mode='w')
-        currentZipFile.file.write(filePath, compress_type=compression, arcname=archivePath)
-        fileInfo = currentZipFile.file.getinfo(archivePath)
-        currentZipFile.compressedSize += fileInfo.compress_size
-        currentZipFile.uncompressedSize += fileInfo.file_size
-        currentZipFile.contents.append(filePath)
-
-        fileIndex += 1
-        BuildThirdPartyUtils.reportIterationStatus(fileIndex, fileCount, 25, "files zipped")
-
-    if currentZipFile and currentZipFile.file:
-        currentZipFile.file.close()
-
-    for zipFile in zipFiles:
-        zipFile.compressedHash = LyChecksum.getChecksumForSingleFile(zipFile.filePath)
-
-    return zipFiles
-
-
-def buildFilelistJSON(sdkPath, sdkPlatform, filesetHash, filePaths):
-    jsonFormatFiles = []
-    for filePath in filePaths:
-        scrubbedFile = toArchivePath(filePath)
-        jsonFormatFiles.append(scrubbedFile)
-
-    filelistInfo = {
-        "filelist": {
-            "filelistVersion": getFilelistVersion(),
-            "checksum": filesetHash.hexdigest(),
-            "files": jsonFormatFiles,
-        }
-    }
-    filelistJSON = createJSONString(filelistInfo)
-    filelistName = getFilelistFileName(sdkPlatform)
-    filelistPath = getTempDir(sdkPath, sdkPlatform)
-    filelistFullPath = os.path.join(filelistPath, filelistName)
-
-    prepFilesystemForFile(filelistFullPath)
-
-    outputFile = open(filelistFullPath, 'w')
-    outputFile.write(filelistJSON)
-    outputFile.close()
-    return filelistFullPath
-
-
-def buildManifestJSON(baseCloudfrontUrl, sdkName, sdkPath, sdkPlatform, zipFiles, filelistPath):
-    uncompressedSize = 0
-    packageArchives = []
-    for zipFile in zipFiles:
-        uncompressedSize += zipFile.uncompressedSize
-        archiveUrl = ThirdPartySDKAWS.getProductionUrl(baseCloudfrontUrl, sdkPath, zipFile.filePath)
-        packageArchive = {
-            "archiveUrl": archiveUrl,
-            "archiveSize": str(zipFile.compressedSize),
-            "archiveChecksum": zipFile.compressedHash.hexdigest()
-        }
-        packageArchives.append(packageArchive)
-
-    filelistChecksum = LyChecksum.getChecksumForSingleFile(filelistPath)
-
-    filelistUrl = ThirdPartySDKAWS.getProductionUrl(baseCloudfrontUrl, sdkPath, filelistPath)
-    packageInfo = {
-        "package":
-        {
-            "manifestVersion": getManifestVersion(),
-            "identifier": sdkName,
-            "platform": sdkPlatform,
-            "uncompressedSize": str(uncompressedSize),
-            "filelistUrl": filelistUrl,
-            "filelistChecksum": filelistChecksum.hexdigest(),
-            "archives": packageArchives
-
-        }
-    }
-    manifestJSON = createJSONString(packageInfo)
-    manifestName = getManifestFileName(sdkPlatform)
-    manifestPath = getTempDir(sdkPath, sdkPlatform)
-    manifestFullPath = os.path.join(manifestPath, manifestName)
-
-    prepFilesystemForFile(manifestFullPath)
-
-    outputFile = open(manifestFullPath, 'w')
-    outputFile.write(manifestJSON)
-    outputFile.close()
-    return manifestFullPath
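
The archive-splitting loop in `zipPackage` above keeps writing files into the current zip and opens a new one once the running compressed size passes `archiveMaxSize`. A simplified, hedged sketch of that policy (standalone helper with hypothetical names; the real script derives archive names from the SDK name/platform and keys archive paths off the `3rdParty/` prefix):

    import os
    import zipfile

    def split_into_archives(file_paths, archive_max_size, out_dir, base_name):
        # Returns the list of zip paths created; a new archive is started once
        # the compressed bytes written to the current one exceed the limit.
        # Assumes zlib is available so ZIP_DEFLATED can be used.
        archives = []
        current = None
        compressed = 0
        for path in file_paths:
            if current is None or compressed > archive_max_size:
                if current is not None:
                    current.close()
                zip_path = os.path.join(out_dir, "%s.%d.zip" % (base_name, len(archives) + 1))
                current = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_DEFLATED)
                archives.append(zip_path)
                compressed = 0
            arcname = os.path.basename(path)  # the real script uses the path under 3rdParty/
            current.write(path, arcname=arcname)
            compressed += current.getinfo(arcname).compress_size
        if current is not None:
            current.close()
        return archives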

+ 0 - 76
Tools/build/JenkinsScripts/distribution/ThirdParty/ThirdPartySDKAWS.py

@@ -1,76 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import os.path
-import sys
-import urlparse
-import SDKPackager
-import BuildThirdPartyUtils
-
-importDir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(importDir, "..")) #Required for AWS_PyTools
-from AWS_PyTools import LyCloudfrontOps
-
-def getS3StagingPath(stagingFolderPath, sdkPath):
-    # The staging path is used for the build machines to upload new builds of 3rd party packages.
-    # The Setup Assistant supports a global override so Lumberyard team members can pull from the staging location.
-    return urlparse.urljoin(stagingFolderPath, sdkPath.replace(' ', '_')) + '/'
-
-def getProductionUrl(cloudfrontUrl, sdkPath, filePath):
-    # The production path is the final path customers will download the SDK from.
-    # These links get baked into the manifest, which the Setup Assistant executable uses to acquire these SDKs.
-    # TODO : Generate an actual cloudfront production link.
-    #return cloudfrontUrl + getS3StagingPath(stagingFolderPath, sdkPath.replace(' ', '_')) + os.path.basename(filePath)
-    # we assume that the cloudfront url and the result of getS3stagingPath have a trailing '/'
-    return getS3StagingPath(cloudfrontUrl, sdkPath) + os.path.basename(filePath)
-
-def getSDKStagingStatus(bucket, baseBucketPath, sdkPath, sdkPlatform):
-
-    pathToSDK = getS3StagingPath(baseBucketPath, sdkPath)
-    pathToFilelist = pathToSDK + SDKPackager.getFilelistFileName(sdkPlatform)
-    pathToManifest = pathToSDK + SDKPackager.getManifestFileName(sdkPlatform)
-    manifestExists = False
-    filelistExists = False
-
-    objs = list(bucket.objects.filter(Prefix=pathToManifest))
-    if len(objs) > 0 and objs[0].key == pathToManifest:
-        manifestExists = True
-    else:
-        manifestExists = False
-
-    objs = list(bucket.objects.filter(Prefix=pathToFilelist))
-    if len(objs) > 0 and objs[0].key == pathToFilelist:
-        filelistExists = True
-    else:
-        filelistExists = False
-
-    return manifestExists, filelistExists
-
-def uploadSDKToStaging(bucket, baseBucketPath, sdkName, sdkVersion, sdkPath, sdkPlatform, manifestPath, filelistPath, zipFiles):
-    print "\tUploading SDK: {0} ({1},{2})".format(sdkName, sdkVersion, sdkPlatform)
-
-    manifestStagingPath = getS3StagingPath(baseBucketPath, sdkPath) + os.path.basename(manifestPath)
-    bucket.upload_file(manifestPath, manifestStagingPath)
-    print "\tUploaded manifest"
-
-    filelistStagingPath = getS3StagingPath(baseBucketPath, sdkPath) + os.path.basename(filelistPath)
-    bucket.upload_file(filelistPath, filelistStagingPath)
-    print "\tUploaded filelist"
-
-    filesUploaded = 0
-    totalFilesCount = len(zipFiles)
-    for zipFile in zipFiles:
-        fileLocalPath = zipFile.filePath
-        fileStagingPath = getS3StagingPath(baseBucketPath, sdkPath) + os.path.basename(zipFile.filePath)
-        bucket.upload_file(fileLocalPath, fileStagingPath)
-        filesUploaded += 1
-        BuildThirdPartyUtils.reportIterationStatus(filesUploaded, totalFilesCount, 5, "files uploaded")

+ 0 - 12
Tools/build/JenkinsScripts/distribution/__init__.py

@@ -1,12 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-

+ 0 - 12
Tools/build/JenkinsScripts/distribution/copyright.txt

@@ -1,12 +0,0 @@
-/*
-* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or 
-* a third party where indicated.
-*
-* For complete copyright and license terms please see the LICENSE at the root of this
-* distribution (the "License"). All use of this software is governed by the License,  
-* or, if provided, by the license below or the license accompanying this file. Do not
-* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-*
-*/
-

+ 0 - 83
Tools/build/JenkinsScripts/distribution/copyright_prepender.py

@@ -1,83 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import os
-import stat
-import shutil
-import argparse
-
-flagged_extensions = ['.c', '.cpp', '.h', '.hpp', '.inl']
-skippable_extensions = ['.log', '.p4ignore', '.obj', '.dll', '.png', '.pdb', '.dylib', '.lib',\
-                        '.exe', '.flt', '.asi', '.exp', '.ilk', '.pch', '.res', '.bmp', '.cur',\
-                        '.ico', '.resx', '.jpg', '.psd', '.gif', '.a', '.fxcb', '.icns', '.cab',\
-                        '.chm', '.hxc', '.xsd', '.tif']
-
-copyright_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'copyright.txt')
-
-prepend_yes_log = open('prepend_yes.log', 'w')
-prepend_no_log = open('prepend_no.log', 'w')
-prepend_skip_log = open('prepend_skip.log', 'w')
-
-gQuietMode = False
-
-def prepend_copyrights():
-    for dirname, dirnames, filenames in os.walk('.'):
-        for filename in filenames:
-            full_filename = os.path.join(dirname, filename)
-            if copyright_required(filename):
-                apply_copyright(full_filename)
-            elif skippable(filename):
-                if not gQuietMode:
-                    print 'Skipping ' + full_filename
-                prepend_skip_log.write('Skipping ' + full_filename + '\n')
-            else:
-                if not gQuietMode:
-                    print 'Not prepending to ' + full_filename 
-                prepend_no_log.write('Not prepending to ' + full_filename + '\n')
-
-def apply_copyright(full_filename):
-    if not gQuietMode:
-        print 'Prepending copyright to ' +  full_filename 
-    prepend_yes_log.write('Prepending copyright to ' +  full_filename + '\n')
-    temp_file = full_filename + '_temp'
-    os.rename(full_filename, temp_file)
-    shutil.copyfile(copyright_path, full_filename)
-    with open(full_filename, 'a') as f:
-        with open(temp_file) as t:
-            for line in t:
-                f.write(line)
-    os.chmod(temp_file, stat.S_IWRITE)
-    os.remove(temp_file)
-
-def copyright_required(filename):
-    return os.path.splitext(filename)[1] in flagged_extensions
-
-def skippable(filename):
-    return os.path.splitext(filename)[1] in skippable_extensions
-
-
-def main():
-    prepend_copyrights()
-    prepend_yes_log.close()
-    prepend_no_log.close()
-    prepend_skip_log.close()
-
-
-if __name__ =="__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument( '-q', '--quiet', dest='quiet', help='quiet mode - only print output to logs', action='store_true')
-
-    args = parser.parse_args()
-    gQuietMode = args.quiet
-
-    main()
-

+ 0 - 16
Tools/build/JenkinsScripts/distribution/copyright_removal/Categorizer.py

@@ -1,16 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-from StarComment import StarComment
-from SlashComment import SlashComment
-
-
-class Categorizer(object):
-    def __init__(self):
-        self.StarComment = StarComment()
-        self.SlashComment = SlashComment()

+ 0 - 26
Tools/build/JenkinsScripts/distribution/copyright_removal/CommentCategory.py

@@ -1,26 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-class CommentCategory(object):
-    def __init__(self):
-        self.start = -1
-        self.end = -1
-        self.type = None
-
-    def find_start(self):
-        pass
-
-    def find_end(self):
-        pass
-
-    def find_type(self):
-        pass

+ 0 - 20
Tools/build/JenkinsScripts/distribution/copyright_removal/SlashComment.py

@@ -1,20 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-from CommentCategory import CommentCategory
-
-
-class SlashComment(CommentCategory):
-    def find_start(self):
-        pass
-
-    def find_end(self):
-        pass
-
-    def find_type(self):
-        pass

+ 0 - 20
Tools/build/JenkinsScripts/distribution/copyright_removal/StarComment.py

@@ -1,20 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-from CommentCategory import CommentCategory
-
-
-class StarComment(CommentCategory):
-    def find_start(self):
-        pass
-
-    def find_end(self):
-        pass
-
-    def find_type(self):
-        pass

+ 0 - 580
Tools/build/JenkinsScripts/distribution/copyright_removal/copyright_header_manual_tool.py

@@ -1,580 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-# One-time script to inspect pre-copyright-script headers against a particular branch@latest and optionally make
-# corrections
-
-
-import re
-import os
-import codecs
-import stat
-import subprocess
-import argparse
-import tempfile
-import hashlib
-
-# The path to beyond compare local to the machine
-BEYOND_COMPARE_PATH="\"C:\\Program Files (x86)\\Beyond Compare 3\\BCompare.exe\""
-
-# The P4 Path to mainline to get the previous revision
-P4_MAINLINE = '//lyengine/dev'
-
-# The revision number in mainline just before the original copyright header script was applied
-PRECOPYRIGHT_REV = 130741
-
-# The name of the file to keep track of files that were intentionally skipped
-MANUAL_SKIP_FILE = 'manual_skipped.txt'
-
-# The extension of files to analyze
-DEFAULT_FILTERED_EXTENSIONS = ['CS','H','HPP','HXX','INL','C','CPP', 'EXT','PY', 'LUA', 'BAT', 'CFX', 'CFI']
-
-# Words that we will ignore when analyzing the original copyright header for anything meaningful
-TALLY_IGNORE_WORDS = ['COMPILERS',
-                      'VISUAL',
-                      'STUDIO',
-                      'VERSION',
-                      'CREATED',
-                      'STUDIOS',
-                      'FILE',
-                      'SOURCE',
-                      'COPYRIGHT',
-                      'CRYTEK',
-                      'C',
-                      'CREATED',
-                      'HISTORY']
-# The word threshold to use to flag previous revisions for the number of non-ignore words that were detected.
-WORD_THRESHOLD = 10
-
-# Folders to skip during the process
-SKIP_FOLDERS = ['/Code/SDKs',
-                '/Code/Sandbox/SDKs',
-                '/Code/Tools/SDKs',
-                '/Code/Tools/waf-1.7.13',
-                '/Code/Tools/MaxCryExport/Skin/12',
-                '/Code/Tools/MaxCryExport/Skin/13',
-                '/Code/Tools/MaxCryExport/Skin/14',
-                '/Code/Tools/MaxCryExport/Skin/15',
-                '/Code/Tools/MaxCryExport/Skin/16',
-                '/Code/Tools/MaxCryExport/Skin/17',
-                '/Code/Tools/MaxCryExport/Skin/18',
-                '/Code/Tools/HLSLCrossCompiler',
-                '/Code/Tools/HLSLCrossCompilerMETAL',
-                '/BinTemp']
-
-# Particular files to skip
-SKIP_FILES = ['resource.h',
-              '__init__.py']
-
-
-def calculate_hash(file_contents):
-    m = hashlib.md5()
-    for line in file_contents:
-        m.update(line)
-    return m.hexdigest()
-
-
-def calculate_file_hash(file_path):
-    with open(file_path,'r') as f:
-        file_content = f.readlines()
-    return calculate_hash(file_content)
-
-
-# Compare 2 files with beyond compare
-def compare_files(left,right):
-    subprocess.call('{} \"{}\" \"{}\"'.format(BEYOND_COMPARE_PATH,left,right))
-
-
-# Check the read-only flag of a file (assume it represents if a file is checked out or not)
-def check_file_status(filepath):
-    st = os.stat(filepath)
-    return bool(st.st_mode & (stat.S_IWGRP|stat.S_IWUSR|stat.S_IWOTH))
-
-
-# Checkout a file into a ChangeList
-def checkout_file(root_code_path,filepath, p4_path_root, cl_number):
-    p4_file_path = filepath.replace(root_path,'')
-    p4_path = p4_path_root + p4_file_path
-    p4_path = p4_path.replace('\\','/')
-
-    try:
-        result = subprocess.call('p4 edit -c {} \"{}\"'.format(cl_number,p4_path))
-        if result < 0:
-            print('[ABORT] Process terminated by signal')
-            return False
-    except OSError as e:
-        print('[ERROR] p4 call error:{}'.format(e))
-        return False
-
-    return check_file_status(filepath)
-
-
-def replace_source_with_update_temp(source_original_file,source_temp_file):
-    with open(source_temp_file,'r') as rf:
-        file_content = rf.readlines()
-    with open(source_original_file,'w') as wf:
-        skipped_first = False
-        for line in file_content:
-            if not skipped_first:
-                skipped_first = True
-            else:
-                wf.write(line)
-
-
-def pull_source_revision(root_code_path, file_path, p4_path_root, rev_number):
-
-    # Calculate the p4 root path
-    p4_path = p4_path_root + '/' + file_path.replace(root_code_path, '')
-    p4_path = p4_path.replace('\\','/')
-    p4_path = '//' + p4_path[2:].replace('//','/')
-
-    # If the rev number is greater than zero, then this is a specific version
-    if rev_number>0:
-        filename_only = os.path.split(file_path)[1] + '#{}'.format(rev_number)
-        temp_file_path = os.path.join(tempfile.gettempdir(), filename_only)
-        p4_command = ['p4','print','{}@{}'.format(p4_path, rev_number)]
-    else:
-        filename_only = os.path.split(file_path)[1]
-        temp_file_path = os.path.join(tempfile.gettempdir(), filename_only)
-        p4_command = ['p4','print','{}'.format(p4_path)]
-    try:
-        with open(temp_file_path,'w') as f:
-            result = subprocess.call(p4_command, stdout=f)
-        if result < 0:
-            print('[ABORT] Process terminated by signal')
-            return False
-    except OSError as e:
-        print('[ERROR] p4 call error:{}'.format(e))
-        return False
-
-    return temp_file_path
-
-
-# Auto-insert descriptions into a file if the file doesn't already have a description section
-def auto_insert_description(file_to_modify,description_lines):
-    file_contents = []
-    with open(file_to_modify,'r') as r:
-        file_contents = r.readlines()
-
-    line_count = len(file_contents)
-    line_index = 0
-    insert_index = -1
-    comment_block_end_index = -1
-    has_description = False
-    while line_index<line_count:
-        check_line = file_contents[line_index].strip()
-        if check_line=='*/' and comment_block_end_index<0:
-            comment_block_end_index = line_index
-        if check_line=='// Original file Copyright Crytek GMBH or its affiliates, used under license.':
-            insert_index=line_index+1
-            break
-        if 'Description :' in check_line:
-            has_description = True
-            break
-        line_index += 1
-
-    # Already has a description, do not insert
-    if has_description:
-        return file_to_modify
-
-    # No appropriate place to insert the description, abort
-    if comment_block_end_index<0 and insert_index<0:
-        return file_to_modify
-
-    # Cannot locate the 'Original file Copyright..' line.  Add it automatically and set the insert index to right after
-    if insert_index < 0:
-        insert_index = comment_block_end_index+1
-        file_contents.insert(insert_index,'\n')
-        insert_index += 1
-        file_contents.insert(insert_index,'// Original file Copyright Crytek GMBH or its affiliates, used under license.\n')
-        insert_index += 1
-
-    # Insert the description line(s) if we have a place to insert it
-    if insert_index>0:
-        file_contents.insert(insert_index,'\n')
-        insert_index += 1
-        for insert_description_line in description_lines:
-            file_contents.insert(insert_index,insert_description_line+'\n')
-            insert_index += 1
-        file_contents.insert(insert_index,'\n')
-
-        # update the working file
-        with open(file_to_modify,'w') as w:
-            w.writelines(file_contents)
-
-    return file_to_modify
-
-
-def _is_skip_file(filepath):
-    if filepath.endswith('.Designer.cs'):
-        return True
-    normalized = os.path.dirname(filepath).replace('\\','/').upper()
-    for skip_file in SKIP_FILES:
-        if normalized.endswith('/'+skip_file.upper()):
-            return True
-    return False
-
-
-def _is_skip_folder(root_code_path,dirname):
-
-    normalized = '/'+dirname.replace(root_code_path,'').replace('\\','/').upper()
-    for skip_path in SKIP_FOLDERS:
-        if normalized.startswith(skip_path.upper()):
-            return True
-    return False
-
-
-def read_input_files_file(path,root_path):
-    files_to_process = set()
-    if not os.path.exists(path):
-        print('Invalid input file:{}'.format(path))
-    else:
-        with open(path,'r') as f:
-            file_content = f.readlines()
-            for filename in file_content:
-                if filename.startswith('#'):
-                    continue
-                base_name = filename.replace(root_path,'').strip()
-                base_name = os.path.realpath(root_path + '/' + base_name)
-                files_to_process.add(base_name.upper())
-    return files_to_process
-
-
-BOM_ENCODINGS = [ (codecs.BOM_UTF32),
-                  (codecs.BOM_UTF16),
-                  (codecs.BOM_UTF8)]
-
-
-def extract_header(original_file_content):
-    def _clean_bom(line):
-        for bom in BOM_ENCODINGS:
-            if bom in line:
-                return re.sub('[^\040-\176]','',line)
-        return line
-
-    header_content = []
-    for original_line in original_file_content:
-        bom_cleaned_line = _clean_bom(original_line).strip()
-        if bom_cleaned_line.startswith('#'):
-            break
-        if bom_cleaned_line.startswith('using'):
-            break
-        if bom_cleaned_line.startswith('import'):
-            break
-        if bom_cleaned_line.startswith('namespace'):
-            break
-        header_content.append(bom_cleaned_line)
-
-    return  header_content
-
-
-def analyze_header_content(source_file, show_tally, orig_file_path,skip_log, original_description_lines):
-
-    # Read the contents of the file and extract the header
-    with open(source_file,'r') as r:
-        original_file_content = r.readlines()
-    header_content = extract_header(original_file_content)
-
-    if '#' in source_file:
-        filename_and_ext_only = os.path.splitext(os.path.split(source_file)[1].split('#')[0])
-    else:
-        filename_and_ext_only = os.path.splitext(os.path.split(source_file)[1])
-    filename_only = filename_and_ext_only[0]
-    ext_only = filename_and_ext_only[1]
-    filename_only_upper = filename_only.upper()
-
-    # Clean the header of comment tokens and non work tokens
-    clean_header_content = []
-    has_description = False
-    skipped_first = False
-    description_label_added = False
-    found_first_complete_description = False
-    for line in header_content:
-        if not skipped_first:
-            skipped_first = True
-        else:
-            # see if we can pull original description line
-            if not found_first_complete_description:
-                m_original_desc_one_line = re.match('(//|\\*)?(\\s*)(Description|Desc|'+re.escape(filename_only+ext_only)+')\\s*\\:\\s*(.*)',line)
-                if m_original_desc_one_line is not None:
-                    desc_label_key = m_original_desc_one_line.group(3)
-                    if m_original_desc_one_line.group(4) is not None:
-                        has_description_header = True
-                        extracted_description = m_original_desc_one_line.group(4)
-                        if extracted_description.strip().__len__()>0:
-                            original_description_lines.append('// Description : {}'.format(m_original_desc_one_line.group(4)))
-                            description_label_added = True
-                            has_description = True
-                        # If the description label key is the filename, then this is one line only
-                        if desc_label_key=='Description':
-                            found_first_complete_description = True
-            else:
-                multiline_desc_check = re.sub(r'[\W]',' ',line).split(' ')
-                multiline_desc_check_words = [w.upper() for w in multiline_desc_check if w!='']
-                if len(multiline_desc_check_words)>0:
-                    if not description_label_added:
-                        original_description_lines.append('// Description : {}'.format(re.sub(r'[\s/\\]',' ',line).strip()))
-                        description_label_added = True
-                        has_description = True
-                    else:
-                        original_description_lines.append('//               {}'.format(re.sub(r'[\s/\\]',' ',line).strip()))
-                        has_description = True
-                else:
-                    found_first_complete_description = False
-
-            updated_line = re.sub(r'[\W]',' ',line)
-            if show_tally:
-                print(updated_line)
-
-            clean_header_content.append(updated_line)
-
-    # Count the words we care about
-    word_count = 0
-
-    # Analyze each line
-    for line in clean_header_content:
-        # Split each line into capitalized words
-        words = [w.upper() for w in line.split(' ') if w != '']
-        for word in words:
-
-            # Skip the filename itself
-            if word==filename_only_upper:
-                continue
-
-            # Skip any ignore words
-            if word in TALLY_IGNORE_WORDS:
-                continue
-
-            # Skip any numbers
-            if re.match('\d{1,4}',word):
-                continue
-
-            word_count+=1
-
-    if show_tally:
-        print('\n\nTotal Words:{}'.format(word_count))
-
-    # Keep track of files that were skipped but had more than 4 unmatched words, for further analysis
-    if word_count > 4 and not has_description:
-        skip_log.write('***********************************************************************\n')
-        skip_log.write('** Skipping file {}:\n'.format(orig_file_path))
-        skip_log.write('***********************************************************************\n')
-        for skip_line in header_content:
-            skip_log.write(skip_line+'\n')
-        skip_log.write('\n\n\n\n\n\n')
-
-    return has_description or word_count > WORD_THRESHOLD
-
-
-def process_file(cl_number,p4_path_root,root_code_path,orig_filepath, original_file_content, show_details, skip_log, manual_skip_files, index):
-
-    # Pull the previous revision from mainline into a temp file
-    prev_file_content_file = pull_source_revision(root_code_path,orig_filepath,P4_MAINLINE,PRECOPYRIGHT_REV)
-
-    original_description_lines = []
-    if not analyze_header_content(prev_file_content_file, show_details, orig_filepath, skip_log, original_description_lines):
-        return False
-
-    # Pull the latest revision
-    source_revision_file = pull_source_revision(root_code_path,orig_filepath,p4_path_root,-1)
-    if not os.path.exists(source_revision_file):
-        print('...File not in perforce {}:{}'.format(p4_path_root, orig_filepath))
-        return False
-
-    # Keep track of the hash of the original file
-    original_file_hash = calculate_file_hash(source_revision_file)
-
-    # If we detected any description lines in the original, attempt to insert it into the temp copy
-    if len(original_description_lines)>0:
-        source_revision_file = auto_insert_description(source_revision_file,original_description_lines)
-
-    # Bring up the diff viewer
-    compare_files(prev_file_content_file, source_revision_file)
-
-    # If a changelist is supplied, checkout and update the source file into the CL
-    if cl_number > 0:
-        # Calculate the hash of the source file again to detect any changes
-        check_file_hash = calculate_file_hash(source_revision_file)
-
-        # If the hashes differ, then we need to check out the original file, update it with the changes
-        if check_file_hash!=original_file_hash:
-
-            # Checkout the file if necessary
-            if not check_file_status(orig_filepath):
-                if not checkout_file(root_code_path,orig_filepath, p4_path_root,cl_number):
-                    print('...Unable to check out file {}'.format(orig_filepath))
-                    os.remove(source_revision_file)
-                    os.remove(prev_file_content_file)
-                    return False
-            # Update the original with the temp
-            replace_source_with_update_temp(orig_filepath,source_revision_file)
-
-            print('Updated {}'.format(orig_filepath))
-        else:
-            with open(MANUAL_SKIP_FILE,'a') as update_file:
-                update_file.write(orig_filepath.lower()+'\n')
-            manual_skip_files.add(orig_filepath.lower())
-            print('Manually Skipped {}'.format(orig_filepath))
-
-    # Clean up the temporary files
-    os.remove(source_revision_file)
-    os.remove(prev_file_content_file)
-    return True
-
-
-def process(cl_number, p4_path, root_path, code_path, filtered_extensions, is_code_path_input_file,show_details,skip_log, manual_skip_files, skip_to):
-
-    # Collect the files to process
-    files_to_process = set()
-    if is_code_path_input_file:
-        files_to_process = read_input_files_file(code_path,root_path)
-    else:
-        if os.path.isdir(code_path):
-            for (dirpath, dirnames, filenames) in os.walk(code_path):
-                if _is_skip_folder(root_path, dirpath):
-                    continue
-                for file in filenames:
-                    file_name,file_ext = os.path.splitext(file)
-                    file_ext = file_ext.upper()[1:]
-                    if file_ext in filtered_extensions and not _is_skip_file(file):
-                        files_to_process.add(dirpath + "/" + file)
-        else:
-            files_to_process.add(code_path)
-
-    file_count = len(files_to_process)
-    file_progress = 0
-
-    for file_to_process in files_to_process:
-
-        # Open each file and handle in memory
-        with open(file_to_process,'r') as f:
-            file_content = f.readlines()
-
-        file_progress += 1
-
-        # Process each file
-        if file_progress<skip_to and skip_to>0:
-            print('({}/{}) Skipping {}.  (Forced)'.format(file_progress,file_count,file_to_process))
-            continue
-
-        # Skip files that were already skipped previously
-        if file_to_process.lower() in manual_skip_files:
-            print('({}/{}) Skipping {}.  Already Manually Skipped'.format(file_progress,file_count,file_to_process))
-            continue
-        # Skip files that are already checked out
-        if check_file_status(file_to_process):
-            print('({}/{}) Skipping {}.  Already checked out'.format(file_progress,file_count,file_to_process))
-            continue
-        if process_file(cl_number, p4_path,root_path,file_to_process, file_content, show_details, skip_log, manual_skip_files, file_progress):
-            print('({}/{}) Processed {}'.format(file_progress,file_count,file_to_process))
-        else:
-            print('({}/{}) Skipped {}'.format(file_progress,file_count,file_to_process))
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('-i','--input_files', action='store_true', default=False, help='Option to use code_path as an input file of files to process')
-
-    parser.add_argument('-p','--py', action='store_true', default=False, help='Python files only (.py)')
-    parser.add_argument('-c','--cpp', action='store_true', default=False, help='C/C++ files only')
-    parser.add_argument('--cs', action='store_true', default=False, help='C# files only')
-    parser.add_argument('--ext', action='store_true', default=False, help='.EXT files only')
-    parser.add_argument('--lua', action='store_true', default=False, help='.LUA files only')
-    parser.add_argument('--bat', action='store_true', default=False, help='.BAT files only')
-    parser.add_argument('--cf', action='store_true', default=False, help='CFI/CFX files only')
-    parser.add_argument('--details', action='store_true', default=False, help='Show details of the header analysis')
-
-    parser.add_argument('-s','--skip',nargs='?',type=int,default=-1,help='The entry number to skip forward to')
-    parser.add_argument('--cl',nargs='?',type=int,default=0,help='Change list number to apply updates to.  If not set (or zero), then modified files will not be checked out')
-
-    parser.add_argument('p4_path', type=str, help='The base p4 path')
-    parser.add_argument('root_path', help='The base root path')
-    parser.add_argument('code_path', help='The root code path process on', default='.')
-
-    args = parser.parse_args()
-
-    print('Using Perforce environment:')
-    print(' P4PORT={}'.format(os.environ["P4PORT"]))
-    print(' P4CLIENT={}'.format(os.environ["P4CLIENT"]))
-
-    if not os.path.exists(args.root_path):
-        print('[ERROR]: Root path \'{}\' is invalid'.format(args.root_path))
-        exit()
-    else:
-        input_root_path = args.root_path
-
-    if not os.path.isdir(input_root_path):
-        print('[ERROR]: Root path \'{}\' cannot be a file'.format(args.root_path))
-        exit()
-
-    print('Root path {}'.format(args.root_path))
-    print('p4 path {}'.format(args.p4_path))
-
-    if args.input_files:
-        code_path = args.code_path
-        if not os.path.exists(code_path) or not os.path.isfile(code_path):
-            print('[ERROR]: Code input file \'{}\' is invalid'.format(args.code_path))
-            exit()
-    else:
-        code_path = os.path.normpath(input_root_path+'/'+args.code_path)
-        if not os.path.exists(code_path):
-            print('[ERROR]: Code path \'{}\' is invalid'.format(args.code_path))
-            exit()
-
-    if os.path.isdir(code_path):
-        print('Working on code folder {}'.format(code_path))
-    else:
-        print('Working on a single code file {}'.format(code_path))
-
-    filtered_extensions = []
-    if args.py:
-        print('Filtering on Python files')
-        filtered_extensions.append('PY')
-    if args.cpp:
-        print('Filtering on C/C++ files')
-        filtered_extensions.append('H')
-        filtered_extensions.append('HPP')
-        filtered_extensions.append('HXX')
-        filtered_extensions.append('INL')
-        filtered_extensions.append('C')
-        filtered_extensions.append('CPP')
-    if args.cs:
-        print('Filtering on C# files')
-        filtered_extensions.append('CS')
-    if args.ext:
-        print('Filtering on EXT files')
-        filtered_extensions.append('EXT')
-    if args.lua:
-        print('Filtering on LUA files')
-        filtered_extensions.append('LUA')
-    if args.bat:
-        print('Filtering on BAT files')
-        filtered_extensions.append('BAT')
-    if args.cf:
-        print('Filtering on CFI/CFX files')
-        filtered_extensions.append('CFI')
-        filtered_extensions.append('CFX')
-    if len(filtered_extensions)==0:
-        filtered_extensions = DEFAULT_FILTERED_EXTENSIONS
-
-    manual_skipped_files = set()
-    if os.path.exists(MANUAL_SKIP_FILE):
-        with open(MANUAL_SKIP_FILE,'r') as msf:
-            manual_skipped_files_list = msf.readlines()
-            for manual_skipped_file in manual_skipped_files_list:
-                manual_skipped_files.add(manual_skipped_file.lower().strip())
-
-    with open('skipped_logs','w') as skip_log:
-        process(args.cl, args.p4_path, input_root_path, code_path, filtered_extensions,args.input_files, args.details,
-                skip_log, manual_skipped_files,args.skip)
-

+ 0 - 813
Tools/build/JenkinsScripts/distribution/copyright_removal/copyright_update.py

@@ -1,813 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import sys
-import time
-import re
-import os
-import stat
-import subprocess
-import logging
-import argparse
-import shutil
-
-AMAZON_HEADER='''/*
-* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-* its licensors.
-*
-* For complete copyright and license terms please see the LICENSE at the root of this
-* distribution (the "License"). All use of this software is governed by the License,
-* or, if provided, by the license below or the license accompanying this file. Do not
-* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-*
-*/
-'''
-
-PY_AMAZON_HEADER='''#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-'''
-
-BAT_AMAZON_HEADER='''@echo off
-REM
-REM  All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-REM  its licensors.
-REM
-REM  For complete copyright and license terms please see the LICENSE at the root of this
-REM  distribution (the "License"). All use of this software is governed by the License,
-REM  or, if provided, by the license below or the license accompanying this file. Do not
-REM  remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-REM  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-REM
-'''
-
-
-AMAZON_EXT_HEADER_TOP='''////////////////////////////////////////////////////////////////////////////
-//
-// All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-// its licensors.
-//
-// For complete copyright and license terms please see the LICENSE at the root of this
-// distribution (the "License"). All use of this software is governed by the License,
-// or, if provided, by the license below or the license accompanying this file. Do not
-// remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-'''
-
-AMAZON_EXT_HEADER_BOTTOM='''//
-////////////////////////////////////////////////////////////////////////////
-'''
-
-AMAZON_LUA_HEADER_TOP='''----------------------------------------------------------------------------------------------------
---
--- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
--- its licensors.
---
--- For complete copyright and license terms please see the LICENSE at the root of this
--- distribution (the "License"). All use of this software is governed by the License,
--- or, if provided, by the license below or the license accompanying this file. Do not
--- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
---
-'''
-
-AMAZON_LUA_HEADER_BOTTOM='''--
-----------------------------------------------------------------------------------------------------
-'''
-
-
-
-AMAZON_DEPRECATED_HEADER='Copyright 2015 Amazon.com'
-
-FORMER_CRYTEK_HEADER='// Original file Copyright Crytek GMBH or its affiliates, used under license.'
-
-PY_FORMER_CRYTEK_HEADER='# Original file Copyright Crytek GMBH or its affiliates, used under license.'
-
-LUA_FORMER_CRYTEK_HEADER='-- Original file Copyright Crytek GMBH or its affiliates, used under license.'
-
-BAT_FORMER_CRYTEK_HEADER='REM  Original file Copyright Crytek GMBH or its affiliates, used under license.'
-
-# BEYOND_COMPARE_PATH="\"C:\\Program Files (x86)\\Beyond Compare 4\\BCompare.exe\""
-BEYOND_COMPARE_PATH="\"C:\\Program Files (x86)\\Beyond Compare 3\\BCompare.exe\""
-
-
-
-DEFAULT_FILTERED_EXTENSIONS = ['CS','H','HPP','HXX','INL','C','CPP', 'EXT','PY', 'LUA', 'BAT', 'CFX', 'CFI']
-
-# Check if this line had the original (non-amazon) copyright notice
-def _is_former_crytek_header(line):
-    if 'CRYTEK' in line.upper() and line != FORMER_CRYTEK_HEADER and line != PY_FORMER_CRYTEK_HEADER:
-        return True
-    else:
-        return False
-
-
-def _is_amazon_header(line):
-    if 'AMAZON.COM' in line.upper() and not AMAZON_DEPRECATED_HEADER.upper() in line.upper():
-        return True
-    else:
-        return False
-
-def _is_amazon_deprecated_header(line):
-    if AMAZON_DEPRECATED_HEADER.upper() in line.upper():
-        return True
-    else:
-        return False
-
-ORIGINAL_AMAZON_SOURCE_FOLDER_ROOTS = ['./Code/Framework',
-                                       './Code/GameCore',
-                                       './Code/GameCoreTemplate',
-                                       './Gems',
-                                       './Code/Tools/AzCodeGenerator']
-SKIP_FOLDERS = ['/Code/SDKs',
-                '/Code/Sandbox/SDKs',
-                '/Code/Tools/SDKs',
-                '/Code/Tools/waf-1.7.13',
-                '/Code/Tools/MaxCryExport/Skin/12',
-                '/Code/Tools/MaxCryExport/Skin/13',
-                '/Code/Tools/MaxCryExport/Skin/14',
-                '/Code/Tools/MaxCryExport/Skin/15',
-                '/Code/Tools/MaxCryExport/Skin/16',
-                '/Code/Tools/MaxCryExport/Skin/17',
-                '/Code/Tools/MaxCryExport/Skin/18',
-                '/Code/Tools/HLSLCrossCompiler',
-                '/Code/Tools/HLSLCrossCompilerMETAL',
-                '/BinTemp']
-
-
-SKIP_FILES = ['resource.h',
-              '__init__.py']
-
-logging.basicConfig(filename='copyright_update.log',level=logging.INFO)
-
-log_file_updated = open('copyright_updated.files','w')
-log_file_updated.write('# Copyright Updated File List\n#\n')
-
-log_file_skipped = open('copyright_skipped.files','w')
-log_file_skipped.write('# Copyright Skipped File List\n#\n')
-
-log_file_open_source = open('opensource.files','w')
-log_file_open_source.write('# Detected open source files\n#\n')
-
-def log_update(path, detail):
-    logging.info('Updating file {} : {}'.format(path,detail))
-    log_file_updated.write('{}\n'.format(path))
-
-def log_skipped(path, detail):
-    logging.info('Skipping file {} : {}'.format(path,detail))
-    log_file_skipped.write('{}\n'.format(path))
-
-def log_opensource(path, license):
-    logging.info('Skipping file {} : Licensed ({}) File detected'.format(path,license))
-    log_file_open_source.write('{}\n'.format(path))
-
-def close_logs():
-    log_file_updated.close()
-    log_file_skipped.close()
-    log_file_open_source.close()
-
-def _is_skip_folder(root_code_path,dirname):
-
-    normalized = '/'+dirname.replace(root_code_path,'').replace('\\','/').upper()
-    for skip_path in SKIP_FOLDERS:
-        if normalized.startswith(skip_path.upper()):
-            return True
-    return False
-
-def _is_skip_file(filepath):
-    if filepath.endswith('.Designer.cs'):
-        return True
-    normalized = os.path.dirname(filepath).replace('\\','/').upper()
-    for skip_file in SKIP_FILES:
-        if normalized.endswith('/'+skip_file.upper()):
-            return True
-    return False
-
-def _is_file_original_amazon(filepath):
-
-    normalized = filepath.replace('\\','/').upper()
-    for amazon_path in ORIGINAL_AMAZON_SOURCE_FOLDER_ROOTS:
-        if normalized.startswith(amazon_path.upper()):
-            return True
-    return False
-
-def _is_copyright_notice_tbd(line):
-    if 'COPYRIGHT_NOTICE_TBD' in line.upper():
-        return True
-    else:
-        return False
-
-def _has_original_crytek_note(line):
-    if FORMER_CRYTEK_HEADER in line:
-        return True
-    elif PY_FORMER_CRYTEK_HEADER in line:
-        return True
-    else:
-        return False
-
-def _is_cs_auto_generated(line):
-    if 'auto-generated' in line:
-        return True
-    else:
-        return False
-
-def _is_open_source(line,filename):
-    if 'Microsoft Public License' in line:
-        log_opensource(filename,'Microsoft Public License')
-        return True
-    if '$QT_BEGIN_LICENSE:LGPL$' in line:
-        log_opensource(filename,'QT Lesser General Public License')
-        return True
-    if 'LICENSE.LGPL' in line:
-        log_opensource(filename,'GPL License')
-        return True
-    if 'Apache License' in line:
-        log_opensource(filename,'Apache License')
-        return True
-    if 'BSD LICENSE' in line.upper():
-        log_opensource(filename,'BSD License')
-        return True
-    if 'ADOBE SYSTEMS' in  line.upper():
-        log_opensource(filename,'Adobe License')
-        return True
-    if 'Public License' in line:
-        log_opensource(filename,'Public License')
-        return True
-
-    return False
-
-def _is_amazon_old_header_line(line):
-    if 'a third party where indicated' in line:
-        return True
-    else:
-        return False
-
-def check_file_status(filepath):
-    st = os.stat(filepath)
-    return bool(st.st_mode & (stat.S_IWGRP|stat.S_IWUSR|stat.S_IWOTH))
-
-def checkout_file(root_code_path,filepath, p4_path_root,cl_number):
-    p4_file_path = filepath.replace(root_code_path,'')
-    p4_path = p4_path_root + p4_file_path
-    p4_path = p4_path.replace('\\','/')
-
-    try:
-        result = subprocess.call('p4 edit -c {} \"{}\"'.format(cl_number,p4_path))
-        if result < 0:
-            print('[ABORT] Process terminated by signal')
-            return False
-    except OSError as e:
-        print('[ERROR] p4 call error:{}'.format(e))
-        return False
-
-    return check_file_status(filepath)
-
-
-def load_original_crytek_set(path, filtered_extensions):
-    original_set = set()
-    with open(path,'r') as f:
-        file_content = f.readlines()
-        for filename in file_content:
-            if filename.startswith('#'):
-                continue
-            base_name = filename.replace('//crypristine/vendor_branch_3.8.1','').strip()
-            orig_file, orig_ext = os.path.splitext(base_name)
-            if orig_ext.upper()[1:] in filtered_extensions:
-                original_set.add(base_name.upper())
-    return original_set
-
-
-def read_input_files_file(path,root_path):
-    files_to_process = set()
-    if not os.path.exists(path):
-        print('Invalid input file:{}'.format(path))
-    else:
-        with open(path,'r') as f:
-            file_content = f.readlines()
-            for filename in file_content:
-                if filename.startswith('#'):
-                    continue
-                base_name = filename.replace(root_path,'').strip()
-                base_name = os.path.realpath(root_path + '/' + base_name)
-                files_to_process.add(base_name.upper())
-    return files_to_process
-
-
-
-
-def process_file(orig_filepath, original_file_content, updated_file_content, is_original_crytek):
-
-    def _script_non_printable(instr):
-        return re.sub('[^\040-\176]','',instr)
-
-    orig_file, orig_ext = os.path.splitext(orig_filepath)
-    orig_ext = orig_ext[1:].lower()
-
-    is_csharp = orig_ext == 'cs'
-    is_cc = orig_ext in ['h','hpp','hxx','cxx','cpp','c','inl','cc']
-    is_py = orig_ext in ['py']
-    is_java = orig_ext in ['java']
-    is_ext = orig_ext == 'ext'
-    is_lua = orig_ext == 'lua'
-    is_bat = orig_ext == 'bat'
-    is_cf = orig_ext in ['cfx','cfi']
-
-    # Is the header removable (Either Crytek or COPYRIGHT_NOTICE_TBD)
-    header_removable = False
-
-    # Has a comment header, is it original crytek?
-    is_original_crytek_header = False
-
-    # Has a comment header, is it amazon?
-    is_amazon_header = False
-
-    has_original_crytek_note = False
-
-    # Has a comment header, is it deprecated amazon?
-    is_amazon_deprecated_header = False
-
-    # Is this a placeholder for the copyright notice
-    is_copyright_notice_tbd = False
-
-    # No header block?
-    missing_header_block = False
-
-    # Comment header block starts with // (continue until first #)
-    starts_with_cc = False  # Starts with //
-
-    # Comment header block starts with /* (continue until */)
-    starts_with_cs = False  # Starts with /*
-
-    # Mark the start of where the source file should continue from after the new copyright(s)
-    copyright_remove_index = -1
-
-    # Is this an auto-generated csharp file
-    is_cs_auto_generated = False
-
-    # Does this contain the old/deprecated amazon copyright
-    is_amazon_old_header_line = False
-
-    # Does this file have the original crytek note
-    has_original_crytek_note = False
-
-    is_open_source = False
-
-    line_index = 0
-
-    # Examine the contents of the file
-    file_content_len = len(original_file_content)
-
-    # Special case.  There are some c-sharp files that start off with non-ascii characters for some reason. Strip it out
-    # from only the first line
-    if is_csharp:
-        line = _script_non_printable(original_file_content[0])
-        original_file_content[0] = line+'\n'
-
-    # Flag to indicate the file starts with comments
-    has_starting_comments = False
-
-    # Flag to indicate we are inside a /* comment
-    starting_code_index = 0
-
-    # (EXT) flags
-    ext_comment_divider_started = False
-    has_ext_comment_divider = False
-    has_ext_description = False
-    ext_start_description_line = ext_end_description_line = 0
-
-    # (PY) flags
-    py_started_str_quotes = False
-
-    # (LUA) flags
-    lua_has_comment = False
-    has_lua_description = False
-    lua_start_description_line = lua_end_description_line = 0
-
-
-    # First pass is to determine if there exists any comment blocks before any real code
-    while line_index<file_content_len:
-
-        line = original_file_content[line_index].strip()
-        line_index += 1
-
-        # Tally flags based on contents of each line
-
-        # Check if the line has any original crytek copyright (non-amazon)
-        if not is_original_crytek_header and _is_former_crytek_header(line):
-            is_original_crytek_header = True
-
-        # Check if the line has the current (correct) amazon header
-        if not is_amazon_header and _is_amazon_header(line):
-            is_amazon_header = True
-
-        # Check if the line has the deprecated amazon header
-        if not is_amazon_deprecated_header and _is_amazon_deprecated_header(line):
-            is_amazon_deprecated_header = True
-
-        # Check if the line has the TBD copyright placeholder
-        if not is_copyright_notice_tbd and _is_copyright_notice_tbd(line):
-            is_copyright_notice_tbd = True
-
-        # Check if this is a c-sharp auto generated file
-        if is_csharp and not is_cs_auto_generated and _is_cs_auto_generated(line):
-            is_cs_auto_generated = True
-
-        # Check if this has the previous amazon header (3rd party mention)
-        if not is_amazon_old_header_line and _is_amazon_old_header_line(line) and is_amazon_header:
-            is_amazon_old_header_line= True
-
-        # Check if this has the original crytek file note
-        if not has_original_crytek_note and _has_original_crytek_note(line):
-            has_original_crytek_note = True
-
-        if not is_open_source and _is_open_source(line,orig_filepath):
-            is_open_source = True
-
-        # C or CS or java uses C/C++ styled comments
-        if is_cc or is_csharp or is_java or is_cf:
-
-            if has_starting_comments and starts_with_cs:
-                if '*/' in line:
-                    starts_with_cs = False
-                continue
-            if line.startswith('//'):
-                has_starting_comments = True
-                continue
-            elif line.startswith('/*'):
-                has_starting_comments = True
-                if not '*/' in line:
-                    starts_with_cs = True
-                continue
-            elif line=='\n' or line.__len__()==0:
-                continue
-            elif (is_cc or is_cf) and line.startswith('#'):
-                starting_code_index = line_index-1
-                break
-            elif is_csharp and (line.startswith('using') or line.startswith('namespace')):
-                starting_code_index = line_index-1
-                break
-            elif is_java and (line.startswith('import') or line.startswith('namespace')):
-                starting_code_index = line_index-1
-                break
-            else:
-                log_skipped(orig_filepath,'CANNOT PARSE HEADER BLOCK')
-                return False
-
-        elif is_py:
-            if line.startswith('"""'):
-                has_starting_comments = True
-                if py_started_str_quotes:
-                    py_started_str_quotes = False
-                else:
-                    py_started_str_quotes = True
-                continue
-            elif line.endswith('"""'):
-                if py_started_str_quotes:
-                    py_started_str_quotes = False
-                continue
-            elif line.startswith('\'\'\''):
-                has_starting_comments = True
-                if py_started_str_quotes:
-                    py_started_str_quotes = False
-                else:
-                    py_started_str_quotes = True
-                continue
-            elif line.endswith('\'\'\''):
-                has_starting_comments = True
-                if py_started_str_quotes:
-                    py_started_str_quotes = False
-                continue
-            elif line.startswith('#'):
-                has_starting_comments = True
-                continue
-            elif line == '\n' or line.__len__()==0:
-                continue
-            elif py_started_str_quotes:
-                continue
-            else:
-                starting_code_index = line_index-1
-                break
-        elif is_ext:
-            starting_code_index = 0
-            if line.startswith('////'):
-                # Check if we are between the comment dividers ////...
-                if ext_comment_divider_started:
-                    ext_comment_divider_started = False
-                    has_ext_comment_divider = True
-                    starting_code_index = line_index+1
-                else:
-                    ext_comment_divider_started = True
-            elif line.startswith('//  Description:'):
-                has_ext_description = True
-                ext_start_description_line = ext_end_description_line = line_index-1
-            elif has_ext_description and (line.startswith('// -----') or line=='//' or line.startswith('////')):
-                ext_end_description_line = line_index-2
-                has_ext_description = False
-            elif line.startswith('//'):
-                continue
-            else:
-                starting_code_index = line_index-1
-                break
-        elif is_lua:
-            starting_code_index = 0
-            if line == '\n' or line.__len__()==0:
-                continue
-            elif 'DESCRIPTION:' in line.upper():
-                has_lua_description = True
-                lua_start_description_line = lua_end_description_line = line_index-1
-            elif has_lua_description and (line.startswith('-----') or line=='--'):
-                lua_end_description_line = line_index-2
-                has_lua_description = False
-            elif line.startswith('--'):
-                lua_has_comment = True
-                continue
-            else:
-                starting_code_index = line_index-1
-                break
-        elif is_bat:
-            if line == '\n' or line.__len__()==0:
-                continue
-            elif line.startswith('REM'):
-                continue
-            else:
-                starting_code_index = line_index-1
-                break
-
-        else:
-            return False
-
-    # Determine if we need to update the file
-    update_header = False
-
-    # This has an original crytek copyright
-    #   or deprecated amazon header or the copyright TBD header
-    #   or TBD copyright
-    #   AND is not the correct amazon header
-    if (is_original_crytek or is_amazon_deprecated_header or is_copyright_notice_tbd or is_original_crytek_header) and not is_amazon_header:
-        update_header = True
-    # This has the right amazon header (initially)
-    elif is_amazon_header:
-        # This is an original crytek file but does not have the original crytek note
-        if (is_original_crytek or is_original_crytek_header) and not has_original_crytek_note:
-            update_header = True
-        # The amazon header has the older 3rd party mention
-        if is_amazon_old_header_line:
-            update_header = True
-    else:
-        # This file either doesn't have a comment block, or there is no original crytek comment block.
-        # We will just prepend from the beginning
-        starting_code_index = 0
-        update_header = True
-
-    # If this is auto-generated C#, then never update the header
-    if is_cs_auto_generated:
-        log_skipped(orig_filepath,'AUTOGENERATED FILE')
-        update_header = False
-
-    # If this is open source, then never update the header
-    if is_open_source:
-        update_header = False
-
-    if update_header:
-        if is_csharp or is_cc or is_java or is_cf:
-            updated_file_content.append(AMAZON_HEADER)
-            if is_original_crytek:
-                updated_file_content.append(FORMER_CRYTEK_HEADER+'\n\n')
-        elif is_ext:
-            updated_file_content.append(AMAZON_EXT_HEADER_TOP)
-            if is_original_crytek:
-                updated_file_content.append(FORMER_CRYTEK_HEADER+'\n')
-                updated_file_content.append('//\n\n')
-            if ext_start_description_line>0:
-                desc_line = ext_start_description_line
-                while desc_line <= ext_end_description_line:
-                    updated_file_content.append(original_file_content[desc_line])
-                    desc_line += 1
-            updated_file_content.append(AMAZON_EXT_HEADER_BOTTOM)
-
-        elif is_lua:
-            updated_file_content.append(AMAZON_LUA_HEADER_TOP)
-            if is_original_crytek or is_original_crytek_header:
-                updated_file_content.append(LUA_FORMER_CRYTEK_HEADER+'\n')
-                updated_file_content.append('--\n--\n')
-            if lua_start_description_line>0:
-                desc_line = lua_start_description_line
-                while desc_line <= lua_end_description_line:
-                    updated_file_content.append(original_file_content[desc_line])
-                    desc_line += 1
-            updated_file_content.append(AMAZON_LUA_HEADER_BOTTOM)
-
-
-        elif is_py:
-            updated_file_content.append(PY_AMAZON_HEADER)
-            if is_original_crytek:
-                updated_file_content.append(PY_FORMER_CRYTEK_HEADER+'\n')
-                updated_file_content.append('#\n\n')
-
-        elif is_bat:
-            updated_file_content.append(BAT_AMAZON_HEADER+'REM\n')
-            if is_original_crytek:
-                updated_file_content.append(BAT_FORMER_CRYTEK_HEADER+'\n')
-                updated_file_content.append('REM\n')
-            updated_file_content.append('\n')
-            if original_file_content[starting_code_index].lower().startswith('@echo off'):
-                starting_code_index += 1
-
-        line_index = starting_code_index
-        while line_index<file_content_len:
-            line = original_file_content[line_index]
-            updated_file_content.append(line)
-            line_index += 1
-
-        if update_header:
-            log_update(orig_filepath,'UPDATED')
-
-    return update_header
-
-
-def compare_files(left,right):
-    subprocess.call('{} \"{}\" \"{}\"'.format(BEYOND_COMPARE_PATH,left,right))
-
-
-def process(cl_number, p4_path, root_path, code_path, eval_only, show_diff, filtered_extensions, is_code_path_input_file):
-
-
-    original_crytek_source = load_original_crytek_set('crytek_3.8.1_source.txt',filtered_extensions)
-
-    # Collect the files to process
-    files_to_process = set()
-    if is_code_path_input_file:
-        files_to_process = read_input_files_file(code_path,root_path)
-    else:
-        if os.path.isdir(code_path):
-            for (dirpath, dirnames, filenames) in os.walk(code_path):
-                if _is_skip_folder(root_path, dirpath):
-                    continue
-                for file in filenames:
-                    file_name,file_ext = os.path.splitext(file)
-                    file_ext = file_ext.upper()[1:]
-                    if file_ext in filtered_extensions and not _is_skip_file(file):
-                        files_to_process.add(dirpath + "/" + file)
-        else:
-            files_to_process.add(code_path)
-
-    for file_to_process in files_to_process:
-        file_to_process_normalized = file_to_process.replace('\\','/').upper()
-        file_to_process_normalized = '/' + file_to_process_normalized.replace(root_path.replace('\\','/').upper(),'')
-
-        # Determine if this is an original crytek file
-        is_original_crytek = file_to_process_normalized.upper() in original_crytek_source
-
-        # Open each file and handle in memory
-        with open(file_to_process,'r') as f:
-            file_content = f.readlines()
-        updated_file_content = []
-
-        # Process each file
-        if process_file(file_to_process, file_content, updated_file_content, is_original_crytek):
-
-            # If the file needs a header update, prepare a temp file for this and write the contents of the updated
-            # file to the temp file
-            temp_file_path = os.path.realpath(file_to_process + '_tmp')
-            with open(temp_file_path, 'w') as w:
-                for updated_file_line in updated_file_content:
-                    w.write(updated_file_line)
-
-            # Optional: Show a diff on the file before (optional) committing
-            if show_diff:
-                compare_files(file_to_process,temp_file_path)
-
-            # If not just eval, check out the file and commit
-            if not eval_only:
-                if not check_file_status(file_to_process):
-
-                    if not checkout_file(root_path,file_to_process,p4_path,cl_number):
-                        logging.error('...Unable to check out file {}'.format(file_to_process))
-                        print('...Unable to check out file {}'.format(file_to_process))
-                        os.remove(temp_file_path)
-                        continue
-                os.remove(file_to_process)
-                shutil.copyfile(temp_file_path,file_to_process)
-
-            print('Processed {}'.format(file_to_process))
-
-        else:
-            print('Skipped {}'.format(file_to_process))
-
-    close_logs()
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('-e','--eval', action='store_true', default=False, help='Evaluate only without processing')
-    parser.add_argument('-d','--diff', action='store_true', default=False, help='Show diff window')
-    parser.add_argument('-i','--input_files', action='store_true', default=False, help='Option to use code_path as an input file of files to process')
-
-    parser.add_argument('-p','--py', action='store_true', default=False, help='Python files only (.py)')
-    parser.add_argument('-c','--cpp', action='store_true', default=False, help='C/C++ files only')
-    parser.add_argument('--cs', action='store_true', default=False, help='C# files only')
-    parser.add_argument('--ext', action='store_true', default=False, help='.EXT files only')
-    parser.add_argument('--lua', action='store_true', default=False, help='.LUA files only')
-    #parser.add_argument('--js', action='store_true', default=False, help='.JS files only')
-    parser.add_argument('--bat', action='store_true', default=False, help='.BAT files only')
-    parser.add_argument('--cf', action='store_true', default=False, help='CFI/CFX files only')
-
-    parser.add_argument('cl', type=int, help='Change list number to checkout the files to')
-    parser.add_argument('p4_path', type=str, help='The base p4 path')
-    parser.add_argument('root_path', help='The base root path')
-    parser.add_argument('code_path', help='The root code path process on', default='.')
-
-    args = parser.parse_args()
-
-    print('P4PORT={}'.format(os.environ["P4PORT"]))
-    print('P4CLIENT={}'.format(os.environ["P4CLIENT"]))
-    print('Changelist #{}'.format(args.cl))
-
-    if not os.path.exists(args.root_path):
-        print('[ERROR]: Root path \'{}\' is invalid'.format(args.root_path))
-        exit()
-    else:
-        root_path = args.root_path
-
-    if not os.path.isdir(root_path):
-        print('[ERROR]: Root path \'{}\' cannot be a file'.format(args.root_path))
-        exit()
-
-    print('Root path {}'.format(args.root_path))
-    print('p4 path {}'.format(args.p4_path))
-
-    if args.input_files:
-        code_path = args.code_path
-        if not os.path.exists(code_path) or not os.path.isfile(code_path):
-            print('[ERROR]: Code input file \'{}\' is invalid'.format(args.code_path))
-            exit()
-    else:
-        code_path = os.path.normpath(root_path+'/'+args.code_path)
-        if not os.path.exists(code_path):
-            print('[ERROR]: Code path \'{}\' is invalid'.format(args.code_path))
-            exit()
-
-    root_path = args.root_path
-
-    if os.path.isdir(code_path):
-        print('Working on code folder {}'.format(code_path))
-    else:
-        print('Working on a single code file {}'.format(code_path))
-
-    filtered_extensions = []
-    if args.py:
-        print('Filtering on Python files')
-        filtered_extensions.append('PY')
-    if args.cpp:
-        print('Filtering on C/C++ files')
-        filtered_extensions.append('H')
-        filtered_extensions.append('HPP')
-        filtered_extensions.append('HXX')
-        filtered_extensions.append('INL')
-        filtered_extensions.append('C')
-        filtered_extensions.append('CPP')
-    if args.cs:
-        print('Filtering on C# files')
-        filtered_extensions.append('CS')
-    if args.ext:
-        print('Filtering on EXT files')
-        filtered_extensions.append('EXT')
-    if args.lua:
-        print('Filtering on LUA files')
-        filtered_extensions.append('LUA')
-    if args.bat:
-        print('Filtering on BAT files')
-        filtered_extensions.append('BAT')
-    if args.cf:
-        print('Filtering on CFI/CFX files')
-        filtered_extensions.append('CFI')
-        filtered_extensions.append('CFX')
-    if len(filtered_extensions)==0:
-        filtered_extensions = DEFAULT_FILTERED_EXTENSIONS
-
-
-    process(args.cl, args.p4_path, root_path, code_path, args.eval, args.diff,filtered_extensions,args.input_files)
-
-
-
-
-
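Note: the copyright_update.py removed above walks the tree, classifies each file by extension, and prepends the matching comment-style header when no current notice is present. A minimal Python 3 sketch of that prepend step, under stated assumptions (the header strings and extension map here are placeholders, not the tool's real templates):

import os

# Hypothetical header templates keyed by extension; the deleted tool keeps one per language.
HEADERS = {
    '.py': '# Example copyright header\n#\n',
    '.cpp': '/*\n* Example copyright header\n*/\n',
}

def prepend_header(path):
    # Pick a header template for this file type, if any.
    ext = os.path.splitext(path)[1].lower()
    header = HEADERS.get(ext)
    if header is None:
        return False
    with open(path, 'r', encoding='utf-8', errors='ignore') as f:
        content = f.read()
    # Assume a notice near the top means the file is already stamped.
    if 'copyright' in content[:2000].lower():
        return False
    with open(path, 'w', encoding='utf-8') as f:
        f.write(header + content)
    return True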

+ 0 - 473
Tools/build/JenkinsScripts/distribution/copyright_removal/copyrighttool.py

@@ -1,473 +0,0 @@
-# This script updates all copyright messages in the code to the Amazon standard copyright messages
-# cleans up Crytek original messages (if present) and preserves copyright messages from any other 3rd party.
-
-# The tool must be run from the root of the workspace corresponding to the branch selected below.
-# You can choose either to replace all copyright messages, or skip files that already have been stamped.
-# If the official copyright message has changed, you should run the tool to apply to all files.
-# The tool will strip old Amazon headers, and replace them with new ones.
-
-# CONFIGURATION VARIABLES
-# These variables change the behavior of the script
-# The root of the perforce branch to operate in
-
-perforceRoot="//lyengine/dev"
-
-# The root of the directory to actually scan for copyright updates. This may be below the branch root.
-# Normally you only pick something below the branch root in order to do a quick scan of the source
-# while making changes to this script.
-scanRoot="//lyengine/dev"
-
-# Test paths used during development
-#scanRoot="//lyengine/dev/Code/CryEngine/CryCommon"
-#scanRoot="//lyengine/dev/Code/Sandbox/Editor"
-#scanRoot="//lyengine/dev/Code/Sandbox/Plugins/EditorCommon/Serialization"
-
-# Skip files that already have the Amazon standard notice. Useful for incremental runs of the tool.
-
-skipFilesWithAmazonNotice=True
-#skipFilesWithAmazonNotice=False
-
-# Bare text of the current official notice. This gets wrapped in comment markers depending on the
-# type of file it is applied to.
-
-OfficialNotice = """
-All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-its licensors.
-
-For complete copyright and license terms please see the LICENSE at the root of this
-distribution (the "License"). All use of this software is governed by the License,
-or, if provided, by the license below or the license accompanying this file. Do not
-remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-
-# END OF CONFIGURATION VARIABLES
-
-
-import os
-import os.path
-import re
-import shutil
-import subprocess
-
-# We will only apply copyright notices to files in this list, unless Crytek asserts a copyright in the file
-# This is a list of those suffixes
-codeExtensions = [
-    'cpp',    # C++
-    'cc',    # Objective C
-    'c',    # C
-    'cs',    # C#
-    'mm',    # C++ file for Objective C compiler
-    'hpp',    # c++ include
-    'hxx',    # c++ include
-    'h',    # C or C++ include
-    'inl',    # C++ inline include file
-    #'rc',    # MFC resource file
-    #'bat',    # Batch file
-    #'sh',    # Shell script
-    #'py',    # Python file
-    #'lua',    # Lua file
-    'ext',    # Material description file
-    'cfi',    # Shader language include
-    'cfx',    # Shader language
-]
-
-# This pattern matches those suffixes
-codeFilePat = re.compile("^.*\.(" + "|".join(codeExtensions) + ")$",re.IGNORECASE) 
-
-def partition_files(root):
-
-    vendorFiles = {}
-
-    # Stage 1: sometimes we copied files directly into the tree without branching, so we start out by getting
-    # a dictionary of all file path suffixes that occur in vendor branches under /lyengine/vendor
-    # Each of these branches is of the form //lyengine/vendor/<name>, which we strip off to generate the suffix
-    # We do this with a single p4 command for speed.
-
-    print "Fetching vendor files dictionary"
-
-    p = subprocess.Popen("p4 files //lyengine/vendor/...",shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
-    pat = re.compile("^//lyengine/vendor/[^/]*/(.*)#.*")
-    for line in p.stdout:
-        line = line.strip()
-        m = pat.match(line)
-        if m:
-            vendorFiles[m.group(1)] = 1
-
-    # Stage 2, we look at all files under the provided root and check if the suffix minus the root is a file
-    # that occurs in a crytek original branch.
-
-    print "Identified {0} unique file paths in original CryEngine branches(s).".format(len(vendorFiles))
-
-    print "Scanning files under: " + root
-
-    # Now we scan the non-deleted files under our root (note the "p4 files -e" flag to skip deleted files!)
-    # For each of these files, we check if it was originally in a vendor drop or not
-    # partitioning into two lists, amazonFiles and crytekFiles.
-    # This is the first stage of determining which files get which copyright message.
-    # Because we may also have open source files, we must do further investigations to see
-    # which case we are in with each file.
-
-    p = subprocess.Popen("p4 files -e " + root + "/...",shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
-    pat = re.compile("^" + perforceRoot + "/(.*)#.*")
-    epat = re.compile("^.*/(SDKs|sdk|waf-.*)/")
-
-    for line in p.stdout:
-        line = line.strip()
-
-        # Skip files that are in external SDK directories, in the waf source or are not known code file types
-        if epat.match(line):
-            continue
-
-        m = pat.match(line)
-        if m:
-            suffix = m.group(1)
-            # Skip files that are not code files unless they have a crytek copyright
-            if not codeFilePat.match(suffix) and not hasCrytekCopyrights(suffix):
-                continue
-
-            if suffix in vendorFiles:
-                crytekFiles[suffix] = "Pure"
-            else:
-                amazonFiles[suffix] = "Pure"
-
-    for file in amazonFiles.keys():
-        if skipFilesWithAmazonNotice and hasAmazonNotice(file):
-            amazonFiles[file] = "Skip"
-        elif has3rdPartyCopyrights(file):
-            amazonFiles[file] = "3rdParty"
-
-    for file in crytekFiles.keys():
-        if skipFilesWithAmazonNotice and hasAmazonNotice(file):
-            crytekFiles[file] = "Skip"
-        elif has3rdPartyCopyrights(file):
-            crytekFiles[file] = "3rdParty"
-
-    return
-
-# This function scans a file to see if there are copyright notices or license notices from Crytek
-def hasCrytekCopyrights(file):
-    try:
-        count = 0
-        f=open(file)
-        p = re.compile(r".*\bcopyright\b|(c) ?[12][0-9][0-9][0-9]",re.IGNORECASE)
-        pCrytek = re.compile(r".*\bcrytek\b",re.IGNORECASE)
-        for line in f:
-            if p.match(line) and pCrytek.match(line):
-                return True
-            count += 1
-            # Optimization. Crytek copyrights occur early in the file. Don't look too far into binary files.
-            if count > 100:
-                return False
-        return False
-    except:
-        return False
-
-# This function scans a file to see if there are official copyright headers from Amazon
-def hasAmazonNotice(file):
-    try:
-        count = 0
-        f=open(file)
-
-        # Break up the notice lines to check in order
-        lines = OfficialNotice.strip('\n').splitlines()
-        # Index of the currently matched line for the whole notice
-        matchIndex = 0
-
-        for line in f:
-            line = line.strip('\n')
-            if line.endswith(lines[matchIndex]):
-                matchIndex += 1
-                if matchIndex == len(lines):
-                    return True
-
-            count += 1
-            # Optimization. Amazon copyrights occur early in the file. Don't look too far into binary files.
-            if count > 100:
-                return False
-        return False
-    except:
-        return False
-
-# This function scans a file to see if there are copyright notices or license notices
-# from some party other than Amazon or Crytek
-def has3rdPartyCopyrights(file):
-    try:
-        f=open(file)
-        p = re.compile(r".*\bcopyright\b|(c) ?[12][0-9][0-9][0-9]",re.IGNORECASE)
-        pAmazon = re.compile(r".*\bamazon\b",re.IGNORECASE)
-        pCrytek = re.compile(r".*\bcrytek\b",re.IGNORECASE)
-        pWAITING = re.compile(r".*COPYRIGHT_NOTICE_TBD",re.IGNORECASE)
-        for line in f:
-            if p.match(line) and not pAmazon.match(line) and not pCrytek.match(line) and not pWAITING.match(line):
-                return True
-        return False
-    except:
-        return False
-
-# We define some patterns to find and remove Amazon copyright header blocks here
-
-# This pattern must occur in every comment block we want to erase
-
-removeBlock=re.compile(r".*COPYRIGHT_NOTICE_TBD|.*Copyright.*Amazon.com",re.IGNORECASE)
-
-# Matches any # or // style comment
-CStartBlock = re.compile(r"[ \t]*/\*")
-CStopBlock = re.compile(r".*\*/[ \t]*\n")
-XMLStartBlock = re.compile(r"[ \t]*<!--")
-XMLStopBlock = re.compile(r".*-->[ \t]*\n")
-CPPComment = re.compile(r"[ \t]*//")
-LuaComment = re.compile(r"[ \t]*--")
-BatchComment = re.compile(r"[ \t]*#(?!(def|if|endif|undef|pragma|include))")
-
-CrytekCopyright = re.compile(r"^.*(crytek.*copyright|crytek.*\(c\)|copyright.*crytek|\(c\).*crytek)",re.IGNORECASE)
-
-EngineIDLine = re.compile(r"^(.*)\b(Crytek Engine|Crytek CryEngine|(?<!Crytek )CryEngine)\b(.*)",re.IGNORECASE)
-
-
-# This function applies a notice at the start of a copyright file.
-# It also removes any Amazon block copyright notices, so that they can be replaced
-# with the refreshed block
-
-def startOrContinueBlock(line,state):
-    if state=="code":
-        if CStartBlock.match(line):              state = "cblock"
-        elif XMLStartBlock.match(line):          state = "xmlblock"
-        elif BatchComment.match(line):           state = "batch"
-        elif CPPComment.match(line):             state = "cpp"
-        elif LuaComment.match(line):             state = "lua"
-
-    elif state=="cblock" and CStopBlock.match(line):      state = "finish"
-
-    elif state=="xmlblock" and XMLStopBlock.match(line):  state = "finish"
-
-    elif state=="batch":
-        if BatchComment.match(line):             state = "batch"
-        else:                                    state = "code"
-
-    elif state=="cpp":
-        if CPPComment.match(line):               state = "cpp"
-        else:                                    state = "code"
-
-    elif state=="lua":
-        if LuaComment.match(line):               state = "lua"
-        else:                                    state = "code"
-
-    return state
-
-def createNotice(bareNotice,commentStart,commentEnd,commentContinue):
-    result = commentStart + commentStart[-1:]*100 + "\n"
-    lines = bareNotice.strip('\n').splitlines()
-    result += commentContinue + "\n"
-    for line in lines:
-        result += commentContinue + line + "\n"
-    result += commentContinue + "\n"
-    result += commentEnd[0:1]*100 + commentEnd + "\n"
-    return result
-
-COfficialNotice = createNotice(OfficialNotice,"/**","**/","* ")
-CPPOfficialNotice = createNotice(OfficialNotice,"///","///","// ")
-XMLOfficialNotice = createNotice(OfficialNotice,"<!-- -","- -->","-- ")
-LuaOfficialNotice = createNotice(OfficialNotice,"---","---","-- ")
-BatchOfficialNotice = createNotice(OfficialNotice,"###","###","# ")
-
-def cleanLineofCrytek(line):
-    # Clean away original Crytek copyright notices
-    if CrytekCopyright.match(line):
-        line = re.sub(r"^([ \t]*)(//|#|)([ \t]*).*$", r"\1\2\3",line)
-    # Clean up references to CryEngine or Crytek Engine -> Lumberyard in block headers
-    if EngineIDLine.match(line):
-        line = EngineIDLine.sub(r"\1Lumberyard\3",line)
-    return line
-
-
-def applyCopyrightNotice(file,extraNotice):
-
-    # Check out for edit
-    p = subprocess.Popen("p4 edit \"" + file + "\"",shell=True)
-    p.wait()
-
-    f=open(file)
-    fo=open("tmpfile","w")
-
-    content = ""
-    count = 0
-
-    # Grab the first 50 lines to work with
-    # The rest we copy over without looking at them
-
-    block=""
-    state="code"
-    remove=False
-    placeBlock=0
-
-    for line in f:
-
-        # Try to put the comment underneath any include guards and pragma once statement
-        if placeBlock <= 3 and re.match("^[ \t]*\n",line): placeBlock = placeBlock
-        elif placeBlock == 0 and re.match("^#ifndef ",line): placeBlock = 1
-        elif placeBlock == 1 and re.match("^#define ",line): placeBlock = 2
-        elif placeBlock == 2 and re.match("^#pragma once",line): placeBlock = 3
-        elif placeBlock <= 4: placeBlock = 4
-
-        if placeBlock == 4:
-            placeBlock = 5
-            # Place the official notice and the extra notice in place depending on file extension
-            extension = os.path.splitext(file)[1].lower()
-            if extension in ['.c','.h']:
-                fo.write(COfficialNotice)
-                if extraNotice:
-                    fo.write("\n/* {0} */\n".format(extraNotice))
-            elif extension in ['.cpp','.cc','.cs','.mm','.hpp','.hxx','.inl','.rc','.ext','.cfx','.cfi']:
-                fo.write(CPPOfficialNotice)
-                if extraNotice:
-                    fo.write("\n// {0}\n".format(extraNotice))
-            elif extension == ".lua":
-                fo.write(LuaOfficialNotice)
-                if extraNotice:
-                    fo.write("\n-- {0}\n".format(extraNotice))
-            elif extension == ".targets": # XML style comments
-                fo.write(XMLOfficialNotice)
-                if extraNotice:
-                    fo.write("\n<!-- {0} -->\n".format(extraNotice))
-            else:
-                fo.write(BatchOfficialNotice)
-                if extraNotice:
-                    fo.write("\n# {0}\n".format(extraNotice))
-
-        # After placing the main header
-        # Eat blank lines
-        if placeBlock == 5:
-            if line == "\n": continue
-            else: placeBlock = 6
-
-        count += 1
-        state = startOrContinueBlock(line,state)
-
-        if state != "code" and state != "finish":
-            line = cleanLineofCrytek(line)
-            if line != "\n":
-                block+=line
-            if removeBlock.match(line): remove=True
-        else:
-            if state == "finish":
-                line = cleanLineofCrytek(line)
-                if line != "\n":
-                    block+=line
-            if block != "":
-                if not remove: fo.write(block)
-                remove=False
-                block=""
-            if state != "finish":
-                fo.write(line)
-            state = "code"
-
-        if count > 50 and state == "code":
-            break
-
-    # If we get out of the loop and we are in a block, the file ended
-    if state != "code" and block != "":
-        if not remove: fo.write(block)
-        remove=False
-        block=""
-
-    # and now copy the rest of the file as is
-    for line in f:
-        fo.write(line)
-
-    f.close()
-    fo.close()
-
-    # Make a backup
-    #shutil.copyfile(file,file+".bak")
-    # Make the change
-    shutil.copyfile("tmpfile",file)
-
-
-# Set up two lists to contain files originating with either Crytek or Amazon
-# Each of these dictionaries will either have "Pure" or "3rdParty" for each file.
-# Pure denotes files that have no 3rd party copyright notices associated with them.
-# 3rdParty indicates a copyright message for some other party is present.
-
-amazonFiles = {}
-crytekFiles = {}
-
-
-# Partition the files by copyright notice conditions
-partition_files(scanRoot)
-
-print "Done partition: classified {0} files.".format(len(amazonFiles)+len(crytekFiles))
-
-
-# Apply the appropriate copyright notices to each file
-
-changeCount = 0
-skipCount = 0
-
-# log the files that we modify
-amzn_pure = open('copyright_removal_amzn_pure.log', 'w')
-amzn_3rdparty = open('copyright_removal_amzn_3rdparty.log', 'w')
-amzn_skip = open('copyright_removal_amzn_skip.log', 'w')
-crytek_pure = open('copyright_removal_crytek_pure.log', 'w')
-crytek_copyright_found = open('copyright_removal_crytek_copyright_found.log', 'w')
-crytek_3rdparty = open('copyright_removal_crytek_3rdparty.log', 'w')
-crytek_skip = open('copyright_removal_crytek_skip.log', 'w')
-
-
-for file in amazonFiles.keys():
-    if amazonFiles[file] == "Pure":
-        print "Apply Amazon Copyright: " + file
-        applyCopyrightNotice(file,"")
-        amzn_pure.write(file + '\n')
-        changeCount += 1
-    elif amazonFiles[file] != "Skip":
-        print "Apply Amazon 3rd party notice: " + file
-        applyCopyrightNotice(file,"Modifications copyright Amazon.com, Inc. or its affiliates.")
-        amzn_3rdparty.write(file + '\n')
-        changeCount += 1
-    else:
-        amzn_skip.write(file + '\n')
-        skipCount += 1
-
-for file in crytekFiles.keys():
-    if crytekFiles[file] == "Pure":
-        print "Apply Amazon sublicense of Crytek Notice: " + file
-        # In this case, even if Crytek did not assert copyright, we will do it for them?
-        applyCopyrightNotice(file,"Original file Copyright Crytek GMBH or its affiliates, used under license.")
-        crytek_pure.write(file + '\n')
-        changeCount += 1
-    elif crytekFiles[file] != "Skip":
-        # in this case, double check if there is a crytek notice in the file
-        # and if the copyright notice that is there is near the top
-        if hasCrytekCopyrights(file):
-            print "Apply Amazon 3rd party notice and Crytek notice: " + file
-            applyCopyrightNotice(file,"Original file Copyright Crytek GMBH or its affiliates, used under license.")
-            crytek_copyright_found.write(file + '\n')
-            changeCount += 1
-        else:
-            # This is a weird case. Crytek may or may not have changed the file, and have not asserted copyright.
-            # We assume that they did not, because it is all from some other party.
-            # These cases are few and probably bear investigation
-            print "Apply Amazon 3rd party notice and Crytek notice of origin?: " + file
-            applyCopyrightNotice(file,"Modifications copyright Amazon.com, Inc. or its affiliates.")
-            crytek_3rdparty.write(file + '\n')
-            changeCount += 1
-    else:
-        crytek_skip.write(file + '\n')
-        skipCount += 1
-
-amzn_pure.close()
-amzn_3rdparty.close()
-amzn_skip.close()
-crytek_pure.close()
-crytek_copyright_found.close()
-crytek_3rdparty.close()
-crytek_skip.close()
-
-
-
-print "Done editing. {0} files checked out and changed.".format(changeCount)
-if skipCount > 0:
-    print "Skipped {0} files with existing Amazon notices.".format(skipCount)

+ 0 - 0
Tools/build/JenkinsScripts/distribution/copyright_removal/crytek_3.8.1_source.txt


+ 0 - 126
Tools/build/JenkinsScripts/distribution/copyright_removal/replace_crytek_copyright.py

@@ -1,126 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import re
-import os
-import shutil
-import stat
-
-max_num_lines_to_read = 30
-copyright_removed_count = 0
-special_case_count = 0
-star_comments = re.compile(r'/\*.*\*/', re.DOTALL)
-flagged_extensions = ['.c', '.cpp', '.h', '.hpp', '.inl']
-skippable_extensions = ['.log', '.p4ignore', '.obj', '.dll', '.png', '.pdb', '.dylib', '.lib',\
-                        '.exe', '.flt', '.asi', '.exp', '.ilk', '.pch', '.res', '.bmp', '.cur',\
-                        '.ico', '.resx', '.jpg', '.psd', '.gif', '.a', '.fxcb', '.icns', '.cab',\
-                        '.chm', '.hxc', '.xsd', '.tif', '.xml']
-crytek_replacement_header = '// Original file Copyright Crytek GMBH or its affiliates, used under license.\n'
-#categorizer = Categorizer()
-
-def remove_crytek_copyrights():
-    for dirname, dirnames, filenames in os.walk('.'):
-        if skippable_directory(dirname):
-            continue
-        for filename in filenames: 
-            full_filename = os.path.join(dirname, filename)
-            if skippable_file(filename):
-                continue
-            elif cpp_style_file(filename):
-                remove_crytek_copyright_from_file(full_filename)
-
-
-def remove_crytek_copyright_from_file(full_filename): 
-    comment_start_index, comment_end_index, comment_type = fetch_comment_indices_and_comment_type(full_filename)
-    if crytek_copyright_not_found(comment_start_index, comment_end_index, full_filename):
-        return
-    temp_file = full_filename + '_temp'
-    shutil.copyfile(full_filename, temp_file)
-    os.chmod(full_filename, stat.S_IWRITE)
-    with open(temp_file, 'r') as t, open(full_filename, 'w') as f:
-        # first, put in our replacement header
-        f.write(crytek_replacement_header)
-        for line_index, line in enumerate(t):
-            if inside_copyright_block(line_index, comment_start_index, comment_end_index):
-                continue
-            else:
-                f.write(line)
-    os.chmod(temp_file, stat.S_IWRITE)
-    os.remove(temp_file)
-
-def fetch_comment_indices_and_comment_type(full_filename):
-    start_index = -1
-    end_index = -1
-    comment_type = None
-    with open(full_filename, 'r') as f:
-        for index, line in enumerate(f):
-            if index <= max_num_lines_to_read:
-                start_index, end_index, comment_type = update_indices_and_type(start_index, end_index, comment_type, index, line)
-            else:
-                break
-    return start_index, end_index, comment_type
-
-def update_indices_and_type(start_index, end_index, comment_type, index, line):
-    # set start, end, or comment type if they haven't been set yet
-    if start_index == -1:
-        if '/*' in line:
-            start_index = index
-    if end_index == -1:
-        if '*/' in line:
-            end_index = index
-
-    if comment_type == None:
-        if '/*' in line:
-            comment_type = 'star'
-    return start_index, end_index, comment_type
-
-def crytek_copyright_detected(lines):
-    return 'Crytek' in lines and 'Copyright' in lines
-
-def skippable_directory(directory):
-    skippable = 'SDKs' in directory
-    if skippable:
-        log('skipping directory' + directory)
-    return skippable
-
-def skippable_file(filename):
-    skippable = os.path.splitext(filename)[1] in skippable_extensions
-    if skippable:
-        log('skipping file ' + filename)
-    return skippable
-
-def cpp_style_file(filename):
-    return os.path.splitext(filename)[1] in flagged_extensions
-
-def crytek_copyright_not_found(comment_start_index, comment_end_index, filename):
-    not_found = comment_start_index == -1 or comment_end_index == -1
-    if not_found:
-        log('Crytek copyright not found in file: {}.'.format(filename))
-    else:
-        log('replacing copyright of ' + filename)
-    return not_found
-
-def inside_copyright_block(line_index, comment_start_index, comment_end_index):
-    return line_index >= comment_start_index and line_index <= comment_end_index
-
-
-def main():
-    remove_crytek_copyrights()
-
-
-def log(msg, log_name=None):
-    print msg
-    if log_name != None:
-        log_name.write(msg)
-
-if __name__ == '__main__':
-    main()

+ 0 - 20
Tools/build/JenkinsScripts/distribution/get_changelist_number.py

@@ -1,20 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import subprocess 
-import sys
-
-out = subprocess.check_output('p4 changes -m1  //lyengine/promotions/release_candidate_stable/...')
-
-changelist = out.split()[1]
-
-print changelist,
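Note: the get_changelist_number.py removed above is Python 2 (print statement, byte string from check_output). A hedged Python 3 sketch of the same query, assuming the identical p4 command and its usual one-line output format:

import subprocess

# Depot path copied from the deleted script; substitute the branch you are querying.
DEPOT_PATH = '//lyengine/promotions/release_candidate_stable/...'

def latest_changelist(depot_path=DEPOT_PATH):
    # 'p4 changes -m1 <path>' prints a single line such as:
    # Change 12345 on 2021/01/01 by user@client 'description'
    out = subprocess.check_output(['p4', 'changes', '-m1', depot_path], text=True)
    return out.split()[1]

if __name__ == '__main__':
    print(latest_changelist(), end='')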

+ 0 - 143
Tools/build/JenkinsScripts/distribution/git_release/GitDailyValidation.py

@@ -1,143 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-import argparse
-import tempfile
-import shutil
-import sys
-import os
-
-THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, ".."))  # Required for importing Git scripts
-from GitRelease import mirror_repo_from_local
-from GitIntegrityChecker import check_integrity, clone_from_url, IntegrityError, handleRemoveReadonly
-from GitOpsCodeCommit import custom_clone
-from GitOpsGitHub import create_authenticated_https_clone_url, are_credentials_valid
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Performs validation of Git repos, and restores if necessary.")
-    parser.add_argument('--publicRepoURL',
-                        help='The URL for the repository that we are validating the integrity of (expects a GitHub URL).',
-                        required=True)
-    parser.add_argument('--backupRepoURL',
-                        help='The URL for the repository that we are baselining our check of the public repository against. (expects a CodeCommit URL).',
-                        required=True)
-    parser.add_argument('--hashFile',
-                        help='Path to the file containing acceptable commit hashes.',
-                        required=True)
-    parser.add_argument('--githubUser',
-                        required=True,
-                        help='Username for the Github account.')
-    parser.add_argument('--githubPassword',
-                        required=True,
-                        help='Password for the Github account.')
-    parser.add_argument('--ccUser',
-                        required=True,
-                        help='Username for the CodeCommit account.')
-    parser.add_argument('--ccPassword',
-                        required=True,
-                        help='Password for the CodeCommit account.')
-    parser.add_argument('--dryRun',
-                        help='Execute without performing any restore.',
-                        action="store_true")
-    args = parser.parse_args()
-    return args
-
-
-def validate_args(args):
-    # ensure that backup repo isn't a github repo
-    github_domain = 'github.com'
-    if github_domain in args.backupRepoURL.lower():
-        raise Exception('Cannot backup repo to another GitHub repo. Please use a git repo not on GitHub.')
-    # ensure that destination repo is a github repo
-    if github_domain not in args.publicRepoURL.lower():
-        raise Exception('Cannot release to any repo other than one hosted on GitHub. Please use a git repo on GitHub.')
-
-    if not os.path.exists(args.hashFile):
-        raise Exception(f'Hash file not found: {args.hashFile}')
-
-    if not are_credentials_valid(args.githubUser, args.githubPassword):
-        raise Exception('Provided GitHub credentials are invalid.')
-
-
-def restore_repo_from_backup(user, password, src_repo_url, dst_repo_url):
-    # Make a temporary workspace for cloning the CodeCommit repo.
-    temp_dir_path = tempfile.mkdtemp()
-    try:
-        clone_from_url(user, password, src_repo_url, temp_dir_path)
-        mirror_repo_from_local(temp_dir_path, dst_repo_url)
-    except:
-        raise
-    finally:
-        # Remove the temporary cloning workspace.
-        shutil.rmtree(temp_dir_path, ignore_errors=False, onerror=handleRemoveReadonly)
-
-def main():
-    args = parse_args()
-    validate_args(args)
-
-    github_repo_url = args.publicRepoURL
-    github_backup_repo_url = args.backupRepoURL
-    github_integrity_valid = True
-    github_hash_list = []  # init as empty list.
-    codecommit_integrity_valid = True
-    codecommit_hash_list = []  # init as empty list
-
-    #
-    # Obtain integrity status
-    #
-    try:
-        print("Checking GitHub repo integrity...")
-        check_integrity(None, args.hashFile, False,
-                        clone_from_url, args.githubUser, args.githubPassword, github_repo_url)
-    except IntegrityError as error:
-        print(error)
-        github_integrity_valid = False
-        github_hash_list = error.repo_hash_list
-
-    try:
-        print("Checking CodeCommit repo integrity...")
-        check_integrity(None, args.hashFile, False,
-                        clone_from_url, args.ccUser, args.ccPassword, github_backup_repo_url)
-    except IntegrityError as error:
-        print(error)
-        codecommit_integrity_valid = False
-        codecommit_hash_list = error.repo_hash_list
-
-    #
-    # Validate integrity and restore if necessary.
-    #
-    print("Inspecting repo integrity results...")
-    if not codecommit_integrity_valid and not github_integrity_valid and codecommit_hash_list == github_hash_list:
-        raise Exception("Internal and external mirrors are identical, but the hashlist is different. "
-                        "Internal hashlist has been compromised! Intervene manually.")
-
-    if not codecommit_integrity_valid:
-        raise Exception("Internal Git mirror has been compromised. Intervene manually.")
-
-    if not github_integrity_valid:
-        print("GitHub repository has been compromised! Executing restore operation.")
-        if args.dryRun:
-            print("Dry run: Skip restore operation")
-        else:
-            authenticated_github_repo_url = create_authenticated_https_clone_url(args.githubUser, args.githubPassword, github_repo_url)
-            restore_repo_from_backup(args.ccUser, args.ccPassword, github_backup_repo_url, authenticated_github_repo_url)
-
-    # If we are here, all possible exceptions have been evaluated.
-    # It is safe to assume all is well.
-    print("Integrity validation succeeded.")
-
-
-if __name__ == "__main__":
-    try:
-        main()
-    except Exception as e:
-        print(e)
-        sys.exit(1)

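The compromise handling above reduces to a small decision table: if both mirrors fail the hash check but agree with each other, the hash file itself is suspect; if only the CodeCommit mirror fails, stop and escalate; if only GitHub fails, restore it from the CodeCommit backup. A minimal sketch of that decision, using illustrative names that are not part of the script's API:

def decide_action(github_ok, codecommit_ok, github_hashes, codecommit_hashes):
    # Both mirrors disagree with the hash file but match each other:
    # the hash file is the most likely point of compromise.
    if not github_ok and not codecommit_ok and github_hashes == codecommit_hashes:
        return "escalate: hash file compromised"
    if not codecommit_ok:
        return "escalate: internal mirror compromised"
    if not github_ok:
        return "restore GitHub from the CodeCommit backup"
    return "no action"
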
+ 0 - 23
Tools/build/JenkinsScripts/distribution/git_release/GitHashList.json

@@ -1,23 +0,0 @@
-{
-    "Hashlist": [
-        "d21172c26536133a4213873469a171f4f0c4280c", 
-        "01c1c1acf52ebe65259df8c3706e7d3c156924fb", 
-        "3c7dbc9b7e33050e99ceede18de61f6db5e15bef", 
-        "44f7b2480f7a9839d3389b08f5945ef56092ebc4", 
-        "cfd94c64e631911e9b3e98db606081c2ea5ff14d", 
-        "e881f3023cc1840650eb7b133e605881d1d4330d", 
-        "247aa1c6eb21e10db0cc566895444c7b5f855052", 
-        "10c18d4c2296622d104be5b1146ffc8630d0fe8f", 
-        "0b34452ef270f6b27896858dc7899c9796efb124", 
-        "4648727c4c84f4bc224656f8adeef050581af344", 
-        "9608bcf905bb60e9f326bd3fe8297381c22d83a6", 
-        "931f5b9a04f7cf156bf7dec3165a1e74cb831209", 
-        "87761bcdca2cf76fd4c6a976daf3e7a2dc08120c", 
-        "164512f8d415d6bdf37e195af319ffe5f96a8f0b", 
-        "6fef201546019126306a6b47d5b9e1f2d82d56ae", 
-        "2d8969362cd5fa5523c231e5e3abb6d855e31d59", 
-        "d10be93eb4c147af77ac90bc36137f7cd4e0e510", 
-        "24840d0102c4ebda6334bf7355ecd34c0fd5eda7", 
-        "6b8dd98ad0e59b1817a79f6aaf5b89afb41b1086"
-    ]
-}

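A hash list in this shape can be regenerated from a trusted clone with git rev-list --all, which is the same command GitIntegrityChecker.py (next file) runs on the repository being verified. A minimal sketch, assuming a local trusted clone; the paths are placeholders:

import json
import subprocess

def write_hash_list(repo_dir, out_path):
    # 'git rev-list --all' prints every commit hash, one per line, newest first.
    hashes = subprocess.check_output(
        ["git", "rev-list", "--all"], cwd=repo_dir, universal_newlines=True
    ).splitlines()
    with open(out_path, "w") as fp:
        json.dump({"Hashlist": hashes}, fp, indent=4)

# write_hash_list("/path/to/trusted/clone", "GitHashList.json")
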
+ 0 - 203
Tools/build/JenkinsScripts/distribution/git_release/GitIntegrityChecker.py

@@ -1,203 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-import argparse
-import errno
-import json
-import os
-import shutil
-import stat
-import subprocess
-import tempfile
-import sys
-
-THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, ".."))  # Required for importing Git scripts
-from GitOpsGitHub import create_authenticated_https_clone_url
-from GitOpsCommon import get_revision_list
-
-HASHLIST_KEY = 'Hashlist'
-
-
-class IntegrityError(RuntimeError):
-    """Exception type for failed integrity check."""
-    def __init__(self, message, file_hash_list, repo_hash_list):
-        self.message = message
-        self.hash_list = file_hash_list
-        self.repo_hash_list = repo_hash_list
-
-
-def handleRemoveReadonly(func, path, exc):
-    """
-    Python has issues removing files and directories on Windows
-    (even if we've just created them) if they were set to 'readonly'.
-    This usually occurs when deleting a '.git' directory, because some internal
-    git repository files become 'readonly' when initializing a new repo.
-    The following function should override general permission issues when
-    deleting.
-    """
-    excvalue = exc[1]
-    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
-        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777
-        func(path)
-    else:
-        raise
-
-
-def validate_args(args):
-    all_github_args_valid = (args.githubUser is not None and
-                             args.githubPassword is not None)
-    assert (all_github_args_valid or args.gitLocation is not None), 'Please provide GitHub information, ' \
-                                                                    'or a path to a git repository on disk.'
-    if all_github_args_valid and args.gitLocation is not None:
-        print("Warning: A GitHub username and a path to a git repository have been provided. These commands are not " \
-              "compatible, and this script will default to using provided GitHub credentials.")
-
-    # If a working directory was given, verify it exists and is empty.
-    if args.workingDirectory is not None:
-        assert (os.path.exists(args.workingDirectory)), 'If using the working directory argument, please provide a ' \
-                                                        'directory that exists on disk.'
-        assert (os.listdir(args.workingDirectory) == []), 'Please provide an empty working directory.'
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Compares the commit hashes of a git repository against"
-                                                 "a known good hash list.")
-    parser.add_argument('-gitRepoURL',
-                        help='The URL for the repository that we are checking the integrity of.',
-                        required=True)
-    parser.add_argument('--hashFile',
-                        help='Path to the file containing acceptable commit hashes.',
-                        required=True)
-
-    # Either a Github username and password need to be passed in, or a location of a git install on disk.
-    parser.add_argument('--githubUser',
-                        default=None,
-                        help='Username for the Github account.')
-    parser.add_argument('--githubPassword',
-                        default=None,
-                        help='Password for the Github account.')
-    parser.add_argument('--preserveGithubClone',
-                        help='If using Github, preserves the cloned data on disk for inspection by the user.',
-                        required=False,
-                        action="store_true")
-
-    parser.add_argument('--gitLocation',
-                        default=None,
-                        help='Path to a git repository on disk. Cloning will not occur if this is set.')
-
-    parser.add_argument('--workingDirectory',
-                        default=None,
-                        help='Path to a temporary working directory. If not supplied, tempfile.mkdtemp will be used.')
-    args = parser.parse_args()
-    validate_args(args)
-    return args
-
-
-def load_json_hashes(json_file_path):
-    file_data = open(json_file_path, 'r')
-    json_file_data = json.load(file_data)
-    file_data.close()
-    file_hashes = json_file_data[HASHLIST_KEY]
-    return file_hashes
-
-
-def clone_from_url(github_user, github_password, github_repo, root_dir=None):
-    if root_dir is None:
-        root_dir = os.getcwd()
-    if os.path.exists(root_dir) and os.path.isdir(root_dir):
-        print("Cloning from GitHub into directory:\n" + root_dir)
-        authenticated_clone_url = create_authenticated_https_clone_url(github_user, github_password, github_repo)
-        subprocess.call(["git", "clone", "--no-checkout", authenticated_clone_url, root_dir])
-    else:
-        raise Exception(root_dir, "Provided path is not a valid directory on disk.")
-
-
-def validate_hash_counts_match(git_hashes, json_hashes):
-    return len(git_hashes) == len(json_hashes)
-
-
-def validate_hashes_match(git_hashes, json_hashes):
-    for git_hash, json_hash in list(zip(git_hashes, json_hashes)):
-        if git_hash != json_hash:
-            return False, git_hash, json_hash
-    return True, None, None
-
-
-def check_integrity(working_directory, hash_file, preserve_github_clone, git_clone_function, *extra_args):
-    """
-    Checks the integrity of a specified repo. Will raise an exception if integrity fails.
-
-    :param working_directory: Where the repo will be cloned.
-    :param hash_file: The hashfile to compare against for verifying integrity.
-    :param preserve_github_clone: True, if we want to keep the repo on disk after integrity check.
-    :param git_clone_function: A clone operation function. GitHub & CodeCommit clone differently due to authentication.
-    :param extra_args: Arguments for the clone operation function.
-    :return:
-    """
-
-    # Create a sub-folder for easy cleanup.
-    if working_directory is not None:
-        local_git_location = os.path.join(working_directory, "temp_git_repo")
-        os.makedirs(local_git_location)
-    else:
-        local_git_location = tempfile.mkdtemp()
-
-    # Change directory to the intended location before cloning. Regardless of success or fail,
-    # we must change back to the initial directory and delete the temp repo, if necessary. It is important
-    # that we return to the initial directory because this function may be called from other Python modules.
-    # We will use try/finally to ensure we always return to the initial directory.
-    try:
-        initial_dir = os.getcwd()
-        os.chdir(local_git_location)
-        git_clone_function(*extra_args)
-
-        json_hashes = load_json_hashes(hash_file)
-
-        # This git logging function results in all hashes for the repository printed out, one per line.
-        git_hashes = get_revision_list(local_git_location)
-
-    finally:
-        # No more Git operations to be made, restore CWD
-        os.chdir(initial_dir)
-
-        # Once we have the list of hashes, we can clean up all of the temp files that were created.
-        if not preserve_github_clone:
-            print("Deleting cloned Git repository.")
-            shutil.rmtree(local_git_location, ignore_errors=False, onerror=handleRemoveReadonly)
-
-    if not validate_hash_counts_match(git_hashes, json_hashes):
-        exception_message = "ERROR: Length of hash lists do not match. There are " + \
-                            str(len(git_hashes)) + " git commits, and " + str(len(json_hashes)) + \
-                            " hashes in the passed in JSON file."
-        raise IntegrityError(exception_message, json_hashes, git_hashes)
-
-    hash_result, git_hash, json_hash = validate_hashes_match(git_hashes, json_hashes)
-
-    if not hash_result:
-        exception_message = "ERROR: Hashes do not match. Git hash '" + git_hash + "'. JSON hash '" + json_hash + "'"
-        raise IntegrityError(exception_message, json_hashes, git_hashes)
-
-    print("All hashes match.")
-
-
-def main():
-    args = parse_args()
-    check_integrity(
-        args.workingDirectory,
-        args.hashFile,
-        args.preserveGithubClone,
-        clone_from_url,
-        args.githubUser,
-        args.githubPassword,
-        args.gitRepoURL
-    )
-if __name__ == "__main__":
-    main()

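check_integrity() is also driven from the restore and release scripts rather than the command line. A hedged sketch of that programmatic use, catching IntegrityError to inspect both hash lists; the URL and credentials are placeholders:

from GitIntegrityChecker import check_integrity, clone_from_url, IntegrityError

try:
    check_integrity(None,                # let the checker create its own temp directory
                    "GitHashList.json",  # known-good hashes
                    False,               # delete the clone when finished
                    clone_from_url, "github-user", "github-password",
                    "https://github.com/example-org/example-repo.git")
except IntegrityError as err:
    print(err.message)
    print("expected:", err.hash_list)
    print("actual:  ", err.repo_hash_list)
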
+ 0 - 47
Tools/build/JenkinsScripts/distribution/git_release/GitIntegrityCheckerTester.py

@@ -1,47 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import unittest
-import GitIntegrityChecker
-
-
-class GitIntegrityCheckerTester(unittest.TestCase):
-
-    def test_hashCountMatch_withEqualCounts_returnsTrue(self):
-        list_1 = ["A", "B", "C"]
-        list_2 = ["A", "B", "C"]
-        self.assertTrue(GitIntegrityChecker.validate_hash_counts_match(list_1, list_2))
-
-    def test_hashCountMatch_withBiggerList1_returnsFalse(self):
-        list_1 = ["A", "B", "C", "D"]
-        list_2 = ["A", "B", "C"]
-        self.assertFalse(GitIntegrityChecker.validate_hash_counts_match(list_1, list_2))
-
-    def test_hashCountMatch_withBiggerList2_returnsFalse(self):
-        list_1 = ["A", "B", "C"]
-        list_2 = ["A", "B", "C", "D"]
-        self.assertFalse(GitIntegrityChecker.validate_hash_counts_match(list_1, list_2))
-
-    def test_hashMatch_withIdenticalLists_returnsTrue(self):
-        list_1 = ["A", "B", "C"]
-        list_2 = ["A", "B", "C"]
-        match_results, list_1_out, list_2_out = GitIntegrityChecker.validate_hashes_match(list_1, list_2)
-        self.assertTrue(match_results)
-
-    def test_hashMatch_withDifferentLists_returnsFalse(self):
-        list_1 = ["A", "B", "D"]
-        list_2 = ["A", "B", "C"]
-        match_results, list_1_out, list_2_out = GitIntegrityChecker.validate_hashes_match(list_1, list_2)
-        self.assertFalse(match_results)
-
-if __name__ == '__main__':
-    unittest.main()

+ 0 - 331
Tools/build/JenkinsScripts/distribution/git_release/GitMoveDetection.py

@@ -1,331 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-from P4 import P4
-import subprocess
-import os
-import re
-
-
-class MoveDetection:
-    """
-    This class scans two Perforce branches at a specific revision to determine which files have been moved from one
-    branch to another. The core logic relies on finding a historical common ancestor of a file split between the two
-    specified branches.
-    """
-    def __init__(self):
-        self.p4 = P4()
-        self.p4.connect()
-        self.parent = dict()
-        self.history_data = dict()
-        """
-        --- Below is a sample structure for intended use of the 'history_data' dictionary.
-        --- This dictionary is constructed and populated by build_parent_hash().
-         history_data:
-        {
-            "//lyengine/releases/ver01_10":
-            {
-                roots:
-                {
-                    [p4_filepath, revision]
-                }
-                rev_roots:
-                {
-                    [p4_filepath, revision]
-                }
-                files:
-                {
-                    [p4_filepath, revision]
-                }
-            }
-            "//lyengine/releases/ver01_11":
-            {
-                ...
-            }
-        }
-        """
-
-    @staticmethod
-    def branch_pathname_to_inclusive_pathspec(branch_key):
-        if branch_key.endswith('/'):
-            return branch_key + '...'
-        else:
-            return branch_key + '/...'
-
-    @staticmethod
-    def generate_filelist_hashes(branch_key, branch_cl_revspec):
-        """
-        Generates iterable listing of files on a branch for use when calculating file ancestor data.
-        :param branch_key:
-            The branch to scan for files.
-        :param branch_cl_revspec:
-            Used to generate the file list at the point in time of the specified P4 revspec.
-            Typically, this value is a P4 CL number (e.g. @44569).
-        :return complete_file_list_hash, file_list_hash:
-            Two iterable collections of file hashes - One including deleted files, another excluding deleted files.
-        """
-        file_list_filename = branch_key.replace('/', '.') + '_files.log'
-        if not os.path.exists(file_list_filename):
-            file_list_fp = open(file_list_filename, "w+")
-
-            command = f'p4 files {MoveDetection.branch_pathname_to_inclusive_pathspec(branch_key)}@{branch_cl_revspec}'
-            print('Performing: ' + command)
-            subprocess.check_call(command.split(), stdout=file_list_fp)
-            # begin reading from the start
-            file_list_fp.seek(0)
-        else:
-            file_list_fp = open(file_list_filename, 'r')
-
-        complete_file_list_hash = {}  # All files, including deleted ones.
-        file_list_hash = {}  # All files, excluding deleted ones.
-
-        for line in file_list_fp:
-            filename = re.sub('(#[0-9][0-9]*) - .*', "", line).strip()
-            revision = re.sub('.*(#[0-9][0-9]*) - .*', "\\1", line).strip()
-            action = re.sub('.*#[0-9]* - ', '', line).strip()
-            if not action.startswith('delete change'):
-                file_list_hash[(filename, revision)] = True
-            complete_file_list_hash[(filename, revision)] = True
-
-        file_list_fp.close()
-        return complete_file_list_hash, file_list_hash
-
-    @staticmethod
-    def generate_history_data(branch_cl_revspec, branch_key, complete_file_list_hash):
-        """
-        Calculates historical data to identify ancestor/roots (original name of a file when added) and
-        descendants/reverse-roots (all the possible permutations of an original file, whether via copy, branch, or move)
-
-        :param branch_cl_revspec:
-            Used to generate the file list at the point in time of the specified P4 revspec.
-            Typically, this value is a P4 CL number (e.g. @44569).
-        :param branch_key:
-            The branch to scan for files.
-        :param complete_file_list_hash:
-            An iterable collection of the latest files on the branch to compare against.
-        :return temp_rev_roots, temp_roots:
-            Dictionaries depicting file ancestors and possible descendants for each file.
-        """
-        file_log_filename = branch_key.replace('/', '.') + '_filelog.log'
-        if not os.path.exists(file_log_filename):
-            file_log = open(file_log_filename, "w+")
-            command = 'p4 filelog -h -s -p {0}@{1}'.format(MoveDetection.branch_pathname_to_inclusive_pathspec(branch_key),
-                                                           branch_cl_revspec)
-            print('Performing: ' + command)
-            subprocess.check_call(command.split(), stdout=file_log)
-            # begin reading from the start, immediately after populating the file.
-            file_log.seek(0)
-        else:
-            file_log = open(file_log_filename, 'r')
-        '''
-        Loop control vars
-        '''
-        DEFAULT_VALUE = (str(), -1)
-        potential_ancestor = DEFAULT_VALUE  # ( filename, revision )
-        current_parsed_filename = DEFAULT_VALUE
-        current_branch_filename = DEFAULT_VALUE
-        temp_roots = dict()  # for calculation purposes
-        temp_rev_roots = dict()  # for calculation purposes
-        cur_line = 0
-        # Begin parsing file log
-        for line in file_log:
-            if line.startswith('//'):  # Filename
-                potential_ancestor = current_parsed_filename
-                current_parsed_filename = (line.strip(), -1)
-
-            elif line.startswith('... #'):  # Revision
-                if current_parsed_filename[1] == -1:  # If no revision has been found yet...
-                    current_parsed_filename = (current_parsed_filename[0], line.split()[1])  # Gets the revision number.
-
-                    # If we are parsing a filename existing in our current/latest revision...
-                    # We use the complete file list hash because we want to account for deleted files when
-                    # building the ancestry data. Unfortunately, 'p4 filelog' does not support excluding deleted files.
-                    # We have to filter this out manually...
-                    if current_parsed_filename in complete_file_list_hash:
-                        # Treat this filename as a child filename, and begin scanning its ancestors.
-                        # This is the starting point of a file's rename/move history.
-
-                        # If the 'current_branch_filename' IS NOT the default value...
-                        # (This basically means we avoid a default-initialization value as a key in the dict.)
-                        if current_branch_filename != DEFAULT_VALUE:
-                            # Close out history on prior file...
-
-                            # Track filename root
-                            temp_roots[current_branch_filename] = potential_ancestor
-                            # Track filename reverse root.
-                            if potential_ancestor not in temp_rev_roots:
-                                temp_rev_roots[potential_ancestor] = list()
-                            temp_rev_roots[potential_ancestor].append(current_branch_filename)
-
-                        # Start tracking history of the next file
-                        current_branch_filename = current_parsed_filename
-
-            cur_line += 1
-        file_log.close()
-        # Close history for the last file in the log file's history/entry.
-        temp_roots[current_branch_filename] = potential_ancestor
-        if potential_ancestor not in temp_rev_roots:
-            temp_rev_roots[potential_ancestor] = list()
-        temp_rev_roots[potential_ancestor].append(current_branch_filename)
-        return temp_rev_roots, temp_roots
-
-    def build_parent_hash(self, branch_key, branch_cl_revspec):
-        """
-        :param branch_key:
-            The branch to scan for files.
-        :param branch_cl_revspec:
-            Used to generate the file list at the point in time of the specified P4 revspec.
-            Typically, this value is a P4 CL number (e.g. @44569).
-        """
-
-        # Get files list
-        complete_file_list_hash, file_list_hash = self.generate_filelist_hashes(branch_key, branch_cl_revspec)
-        # Get file history
-        file_reverse_roots, file_roots = self.generate_history_data(branch_cl_revspec, branch_key,
-                                                                    complete_file_list_hash)
-
-        # Construct results. Save data to class members.
-        self.history_data[branch_key] = dict()
-        self.history_data[branch_key]['roots'] = file_roots
-        self.history_data[branch_key]['rev_roots'] = file_reverse_roots
-        # Below, we save only the currently existing files as a means to iterate over all files, without having to query
-        # Perforce continuously.
-        self.history_data[branch_key]['files'] = file_list_hash
-
-    def find_moved_files_between_branches(self, p4_branch_name_src, p4_branch_name_dst):
-        """
-        Find files in revisionB that have moved from revisionA
-        """
-        file_move = list()
-
-        for Bfile in self.history_data[p4_branch_name_dst]['files']:
-            root_b = self.history_data[p4_branch_name_dst]['roots'][Bfile]
-            dest_filename = Bfile[0].split(p4_branch_name_dst)[1]
-
-            # If 'Bfile' shares a common ancestor with any file in 'branchA'...
-            if root_b in self.history_data[p4_branch_name_src]['rev_roots']:
-                reverse_roots_a = self.history_data[p4_branch_name_src]['rev_roots'][root_b]  # Related candidates
-
-                # Scan the candidates to see if any of them depict the file WAS NOT moved/branched/copied.
-                found_exact_file_in_both_branches = False
-                for Afile in reverse_roots_a:
-                    src_filename = Afile[0].split(p4_branch_name_src)[1]
-                    if src_filename == dest_filename:
-                        found_exact_file_in_both_branches = True
-                        break
-
-                # If there is no sign of the file in the other branch, we have moved the file.
-                if found_exact_file_in_both_branches is False:
-                    # Register a file move
-                    file_move.append((src_filename, dest_filename))
-                    print(file_move[-1])
-
-        return self.filter_file_moves_to_dev(file_move)
-
-    @staticmethod
-    def filter_file_moves_to_dev(file_moves):
-        filtered_moves = list()
-        for move in file_moves:
-            if move[0].startswith('dev/'):
-                filtered_moves.append(move)
-        return filtered_moves
-
-    @staticmethod
-    def chrono_sort_moves(move_list):
-        """
-        Sorts file moves in chronological operations to avoid out-of-order rename stomping.
-        :param move_list:
-            List of tuples {src_filename, dst_filename}
-        :return:
-            A sorted list that can be iterated from beginning to end for rename operations without stomping conflicts.
-        """
-        # Iterate through all the moves to construct a linked list.
-        head_to_tail_mapping = dict()  # All the filenames for the start of a chain. (For discovering insertion points)
-        chains = dict()  # A collection of move chains (A->B->C->D file renames)
-        tail_to_head_mapping = dict()  # All the filenames at the end of a chain. (For discovering insertion points)
-
-        for move in move_list:  # tuple: (src, dst)
-            src = move[0]
-            dst = move[1]
-
-            # Create a chain for this move.
-            chains[src] = [src, dst]
-            head_to_tail_mapping[src] = dst
-            tail_to_head_mapping[dst] = src
-
-            # Possible outcomes:
-            # Extending the end of an existing chain...
-            if src in tail_to_head_mapping:
-
-                # Update our tails & heads
-                new_tail = head_to_tail_mapping[src]  # Tail of the chain starting with 'src'
-                new_head = tail_to_head_mapping[src]  # Head of the chain ending with 'src'
-
-                # Join above two chains together.
-                tail_to_head_mapping[new_tail] = new_head
-                head_to_tail_mapping[new_head] = new_tail
-
-                # Update chain.
-                chains[new_head] = chains[new_head] + chains[src][1:]  # Remove first duplicate entry
-
-                # Clean-up
-                del head_to_tail_mapping[src]
-                del tail_to_head_mapping[src]
-                del chains[src]
-
-            # Extending the beginning of an existing chain...
-            if dst in head_to_tail_mapping:
-                # Update our tails & heads
-                new_tail = head_to_tail_mapping[dst]  # Tail of the chain starting with 'dst'
-                new_head = tail_to_head_mapping[dst]  # Head of the chain ending with 'dst'
-
-                # Extend.
-                chains[new_head] = chains[new_head] + chains[dst][1:]  # Remove first duplicate entry
-
-                # Join above two chains together.
-                tail_to_head_mapping[new_tail] = new_head
-                head_to_tail_mapping[new_head] = new_tail
-
-                # Clean-up.
-                del head_to_tail_mapping[dst]
-                del tail_to_head_mapping[dst]
-                del chains[dst]
-
-        # Construct list from chains
-        return_list = list()
-        for cur_chain in chains:
-            previous_filename = None
-
-            reverse_chain = chains[cur_chain]
-            reverse_chain.reverse()
-
-            for current_filename in reverse_chain:
-                if previous_filename:
-                    # We are appending in reverse order.
-                    # When renaming, we go from the end of the list, to the beginning.
-                    # This way we avoid stomping renames.
-                    return_list.append((current_filename, previous_filename))
-                previous_filename = current_filename
-
-        return return_list
-
-    def generate_list_files_moved_between_branches(self, branch_cl_tuple_src, branch_cl_tuple_dst):
-        """
-        :param branch_cl_tuple_src:
-            {Tuple} (branch, revision/build number)
-        :param branch_cl_tuple_dst:
-            {Tuple} (branch, revision/build number)
-        :return:
-            A list of tuples (filename before, filename after) ordered by intended chronological move operations
-        """
-        self.build_parent_hash(branch_cl_tuple_src[0], branch_cl_tuple_src[1])
-        self.build_parent_hash(branch_cl_tuple_dst[0], branch_cl_tuple_dst[1])
-        file_moves = self.find_moved_files_between_branches(branch_cl_tuple_src[0], branch_cl_tuple_dst[0])
-        return self.chrono_sort_moves(file_moves)

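The chain building in chrono_sort_moves() is easiest to see on a tiny input. Traced against the code above, two moves A->B and B->C are joined into a single chain and emitted tail-first, so applying the returned list in order never overwrites a pending source. The example assumes the P4 Python bindings are importable, since the module imports them at load time:

from GitMoveDetection import MoveDetection

moves = [("A", "B"), ("B", "C")]
ordered = MoveDetection.chrono_sort_moves(moves)  # static method, no P4 connection needed
# ordered == [("B", "C"), ("A", "B")]
# Renaming B -> C first frees B, so A -> B can then be applied without stomping it.
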
+ 0 - 75
Tools/build/JenkinsScripts/distribution/git_release/GitOpsCodeCommit.py

@@ -1,75 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-
-"""
-This file is the central location for functions and operations for CodeCommit that are shared across different scripts.
-"""
-import subprocess
-import os
-import git
-
-# Initializes a Git repository configured with necessary settings to access CodeCommit via AWS CLI credentials.
-def init_git_repo(aws_repo_url, awscli_profile, local_repo_directory):
-    """
-    Initializes a Git repository configured with necessary settings to access CodeCommit via AWS CLI credentials.
-
-    :param aws_repo_url: Clone URL for the CodeCommit repo
-    :param awscli_profile: AWS CLI profile with access/permissions to the repo.
-    :param local_repo_directory: Clone directory
-    :return:
-    """
-
-    parse_key = "amazonaws.com"
-    host_domain_end_index = aws_repo_url.index(parse_key) + len(parse_key)
-    host_domain = aws_repo_url[:host_domain_end_index]
-
-    subprocess.call(["git", "init", local_repo_directory])
-
-    config_append = f"""
-[credential "{host_domain}"]
-\thelper = !aws --profile {awscli_profile} codecommit credential-helper $@
-\tUseHttpPath = true
-
-[remote "origin"]
-\turl = {aws_repo_url}
-\tfetch = +refs/heads/*:refs/remotes/origin/*
-
-[branch "master"]
-\tremote = origin
-\tmerge = refs/heads/master
-"""
-
-    config_filepath = os.path.join(local_repo_directory, '.git', 'config')
-
-    with open(config_filepath, "a") as myfile:
-        myfile.write(config_append)
-
-
-def custom_clone(aws_repo_url, awscli_profile, local_repo_directory, setup_tracking_branches):
-    print("Initializing local repo with custom AWS CodeCommit config...")
-    initial_directory = os.getcwd()
-    os.chdir(local_repo_directory)
-    init_git_repo(aws_repo_url, awscli_profile, local_repo_directory)
-    os.chdir(initial_directory)
-
-    repo = git.Repo(local_repo_directory)
-
-    print(f"Fetching from remote: {aws_repo_url}")
-    repo.remote().fetch()  # Fetch branches
-    repo.remote().fetch("--tags")  # Fetch tags
-
-    if setup_tracking_branches:
-        for remote_branch in repo.remote().refs:
-            branch_name = remote_branch.remote_head
-            repo.create_head(branch_name, remote_branch) \
-                .set_tracking_branch(remote_branch)
-
-    return repo

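A hedged usage sketch of custom_clone() above; the repository URL, profile name, and directory are placeholders. The helper writes the aws codecommit credential-helper settings into .git/config before fetching, so the named AWS CLI profile must already have CodeCommit access:

import os
from GitOpsCodeCommit import custom_clone

workspace = os.path.abspath("codecommit_clone")  # placeholder working directory
os.makedirs(workspace, exist_ok=True)            # custom_clone expects the directory to exist

repo = custom_clone(
    "https://git-codecommit.us-west-2.amazonaws.com/v1/repos/Example",  # placeholder clone URL
    "default",                     # AWS CLI profile with CodeCommit permissions
    workspace,
    setup_tracking_branches=True,  # create local branches tracking each remote branch
)
print([head.name for head in repo.heads])
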
+ 0 - 24
Tools/build/JenkinsScripts/distribution/git_release/GitOpsCommon.py

@@ -1,24 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-"""
-This file is the central location for functions and operations common to GitHub and CodeCommit
-"""
-
-import subprocess
-
-
-def get_revision_list(repo_directory):
-    print(f"Parsing hashes from git repository: {repo_directory}")
-    # universal_newlines=True decodes the output so the hashes compare equal to the strings in the JSON hash list.
-    p = subprocess.Popen(["git", "rev-list", "--all"], stdout=subprocess.PIPE, cwd=repo_directory,
-                         universal_newlines=True)
-    (git_hash_output, git_hash_error) = p.communicate()
-    if git_hash_error is not None:
-        raise Exception(git_hash_error)
-    return git_hash_output.splitlines()

+ 0 - 49
Tools/build/JenkinsScripts/distribution/git_release/GitOpsGitHub.py

@@ -1,49 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-
-"""
-This file is the central location for functions and operations for GitHub that are shared across different scripts.
-"""
-from urllib.parse import quote_plus
-from github import Github, BadCredentialsException
-
-
-def create_authenticated_https_clone_url(github_user, github_password, https_endpoint_url):
-        # Windows does not ship with SSH, and we need a universally usable approach.
-        # This is why we opt for HTTPS authentication. Without user input, we will
-        # need to inject username/password into the repo url at
-        # 'auth_insertion_offset'.
-        # Example resulting URL: https://username:password@github.com/repo.git
-        auth_insertion_offset = 8
-
-        # If you have a symbol like @ in your username or password, it will mess up the url command to clone.
-        # urllib.quote_plus replaces special characters with url safe characters, turning @ into %40.
-        url_safe_password = quote_plus(github_password)
-        url_safe_user = quote_plus(github_user)
-
-        authenticated_url = "{0}{1}:{2}@{3}".format(https_endpoint_url[:auth_insertion_offset],
-                                                    url_safe_user,
-                                                    url_safe_password,
-                                                    https_endpoint_url[auth_insertion_offset:])
-        return authenticated_url
-
-
-def are_credentials_valid(username, password):
-    auth_test_obj = Github(username, password)
-
-    try:
-        for repo in auth_test_obj.get_user().get_repos():
-            # Will raise 'Bad Credentials' exception if can't find name.
-            repo.name
-    except BadCredentialsException:
-        return False
-
-    return True

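The credential injection above is a plain string splice at offset 8, immediately after 'https://', with quote_plus() making both values URL-safe. A small traced example with placeholder credentials:

from GitOpsGitHub import create_authenticated_https_clone_url

url = create_authenticated_https_clone_url("user", "p@ss!", "https://github.com/example-org/example-repo.git")
# quote_plus turns '@' into '%40' and '!' into '%21', so:
# url == "https://user:p%40ss%21@github.com/example-org/example-repo.git"
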
+ 0 - 417
Tools/build/JenkinsScripts/distribution/git_release/GitPromotion.py

@@ -1,417 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-from P4 import P4
-import argparse
-import boto3
-import os
-import sys
-import shutil
-from importlib import reload
-from urllib.parse import urlparse
-from git import Repo, RemoteProgress
-from git.repo.base import InvalidGitRepositoryError, NoSuchPathError
-
-THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, ".."))  # Required for AWS_PyTools
-from GitStaging import handle_remove_readonly, clean_replace_repo_contents
-from GitOpsCodeCommit import custom_clone, init_git_repo
-from GitMoveDetection import MoveDetection
-
-
-class MyProgressPrinter(RemoteProgress):
-    def update(self, op_code, cur_count, max_count=None, message=''):
-        print(op_code, cur_count, max_count, cur_count / (max_count or 100.0), message or "NO MESSAGE")
-
-
-def create_args():
-    parser = argparse.ArgumentParser(description='Promotes a specified release from the \'SignedBuilds\' repository.')
-    parser.add_argument('--sourceRepoURL',
-                        help='The URL for the repository that we are taking the contents of the promotion from.',
-                        required=True)
-
-    parser.add_argument('--destinationRepoURL',
-                        help='The URL for the repository that we are promoting content to.',
-                        required=True)
-
-    parser.add_argument('--commitRef',
-                        help='A valid Git reference from the staging repo to use as a base for building the new commit.'
-                             ' Usually a Perforce changelist number (the staging repo\'s tag names).',
-                        required=True)
-
-    parser.add_argument('--localRepoDirectory',
-                        help='Path to the local repository where the Git work takes place. '
-                             'If the directory does not exist, it will be created. '
-                             'If the directory contains no repository, a new local clone will be created.',
-                        required=True)
-
-    parser.add_argument('--genRoot',
-                        help='Directory for temp files.',
-                        default="",
-                        required=True)
-
-    parser.add_argument('--dryRun',
-                        help='Runs without pushing or modifying remotes.',
-                        action="store_true",
-                        required=False)
-
-    parser.add_argument('--clean',
-                        help='Runs with a clean slate. Deletes any pre-existing files that may cause conflicts.',
-                        action="store_true",
-                        required=False)
-
-    # we should assume that the account containing staging repo is the same as the account containing promotion repo
-    parser.add_argument('--awsProfile',
-                        help='AWS credentials profile generated from AWS CLI. Defaults to \'default\'.',
-                        required=False,
-                        default='default')
-    return parser.parse_args()
-
-
-def validate_args(args):
-    #ensure that source and dest repos aren't github repos
-    github_domain = "github.com"
-    if github_domain in args.sourceRepoURL.lower() or github_domain in args.destinationRepoURL.lower():
-        abort_operation("Cannot promote to or from GitHub directly. Please use a git repo not on GitHub.")
-
-    #ensure that repo urls don't have a trailing slash
-    if args.sourceRepoURL.endswith('/'):
-        args.sourceRepoURL = args.sourceRepoURL[:-1]
-    if args.destinationRepoURL.endswith('/'):
-        args.destinationRepoURL = args.destinationRepoURL[:-1]
-
-    # ensure aws profile exists on the machine
-    if args.awsProfile:
-        if boto3.Session(profile_name=args.awsProfile) is None:
-            abort_operation("The AWS CLI profile name specified does not exist on this machine. Please specify an existing AWS CLI profile.")
-
-
-def get_repo_name(url):
-    url_path = urlparse(url).path
-    return os.path.split(url_path)[-1]
-
-
-def generate_workspace_repo(local_repo_directory, aws_profile_name, source_repo_name, source_repo_url, dest_repo_url):
-    repo_url = dest_repo_url
-    init_git_repo(repo_url, aws_profile_name, local_repo_directory)
-    repo = Repo(local_repo_directory)
-    repo.git.fetch('origin')
-    repo.git.fetch('origin', '--tags')
-
-    # Add remote for 'signed release builds' repo
-    repo.create_remote(source_repo_name, source_repo_url)
-    repo.git.fetch(source_repo_name)
-    repo.git.fetch(source_repo_name, '--tags')
-    return repo
-
-
-def branch_name_from_version_string(version_string):
-    version_split = version_string.split(".")
-    return f"{version_split[0]}.{version_split[1]}"
-
-
-def rename_tag(old_name, new_name, remote_name, repo, dryRun):
-    # Create copy of old tag
-    repo.git.tag(new_name, old_name)
-    if not dryRun:
-        repo.git.push(remote_name, new_name)
-    # Delete old tag
-    repo.git.tag("-d", old_name)
-    if not dryRun:
-        repo.git.push(remote_name, ":" + old_name)
-
-
-def init_mix_repo(local_repo_directory, aws_profile_name, source_repo_name, source_repo_url, dest_repo_url):
-    # Acquire git repo with dependent remotes
-    try:
-        mix_repo = Repo(local_repo_directory)
-
-        # Delete all the local tags before we fetch to ensure tags are synchronized.
-        for tag in mix_repo.tags:
-            mix_repo.delete_tag(tag)
-        mix_repo.remote('origin').fetch(progress=MyProgressPrinter())
-        mix_repo.remote(source_repo_name).fetch(progress=MyProgressPrinter())
-    except InvalidGitRepositoryError:
-        print("No local repo in specified directory. Deleting local contents & creating repo from remote.")
-        shutil.rmtree(local_repo_directory, onerror=handle_remove_readonly)
-        mix_repo = generate_workspace_repo(local_repo_directory, aws_profile_name, source_repo_name, source_repo_url, dest_repo_url)
-    except NoSuchPathError:
-        print("Local directory does not exist. Creating new directory and local repo within it...")
-        mix_repo = generate_workspace_repo(local_repo_directory, aws_profile_name, source_repo_name, source_repo_url, dest_repo_url)
-    return mix_repo
-
-
-def ensure_tag_exists(repo, commit_ref, source_repo_url, dest_repo_url):
-    if commit_ref not in repo.tags:
-        available_tags = ('\n'.join(str(p) for p in repo.tags))
-        raise Exception("ERROR: '{0}' tag does not exist in '{1}' or '{2}' repositories. \
-                Has it already been promoted?\n\nTags available:\n{3}".format(
-            commit_ref,
-            source_repo_url,
-            dest_repo_url,
-            available_tags))
-
-
-def get_ly_version_from_mirror_repo(mirror_repo):
-    # Find tag for the specified commit of GitHubMirror repo.
-    version_tag = get_tag_for_commit(mirror_repo, mirror_repo.head.commit)
-    return str(version_tag)[1:]  # drop the 'v' from tag string.
-
-
-def get_tag_for_commit(repo, commit):
-    return next((tag for tag in repo.tags if tag.commit == commit), None)
-
-
-def find_cl_for_ly_version_from_staging_repo(ly_version_string, mix_repo, repo_remote_name):
-    """
-    Get last promoted commit of version branch in SignedBuilds repo
-    :param ly_version_string: string of a lumberyard version (X.X.X.X)
-    :param mix_repo: repository object containing a remote to the SignedBuilds repo.
-    :param repo_remote_name: Alias for the git remote repository representing the staging repo.
-    :return: The string value of the changelist number.
-    """
-    refs = mix_repo.remotes[repo_remote_name].refs
-    staging_repo_version_branch = refs[ly_version_string]
-    staging_repo_version_branch_head = staging_repo_version_branch.commit
-
-    # Traverse all commits in the branch to find a match for '*-Promoted'.
-    commit = staging_repo_version_branch_head
-    while commit:
-        # If we have a promoted tag...
-        commit_tag = get_tag_for_commit(mix_repo, commit)
-        if commit_tag is not None and 'Promoted' in commit_tag.name:
-            # Return the string value of the CL number
-            return ''.join(filter(str.isdigit, commit_tag.name))
-
-        # Walk to the current commit's parent; stepping from the branch head would never advance the loop.
-        if len(commit.parents) > 1:
-            raise Exception(f'Commit {commit} has more than one parent: {commit.parents}')
-
-        commit = commit.parents[0] if commit.parents else None
-
-    raise Exception('Could not find tagged release. Dev Error')
-
-
-def generate_move_commit(mirror_repo, branch_name_src, build_number_src, branch_name_dst, build_number_dst):
-    """
-    Performs a Git commit containing only file moves between two Lumberyard releases.
-
-    :param mirror_repo:
-        GitPython repository reference
-    :param branch_name_src:
-        Name of the SOURCE Perforce branch
-    :param build_number_src:
-        Build/Changelist number within the SOURCE branch
-    :param branch_name_dst:
-        Name of the DESTINATION Perforce branch
-    :param build_number_dst:
-        Build/Changelist number within the DESTINATION branch
-    :return:
-    """
-
-    # We want to skip generating a move-commit if there is no prior history to create a range from. This condition can
-    # be present in any branch, not just the repo as a whole.
-    # At the time of execution, we expect at least 1 commit already present. The existing commit represents the previous
-    # commit, whereas the incoming commit represents the next commit. In this function, we create the commit that is
-    # lodged in the middle; the move-commit.
-    rev_list_count = int(mirror_repo.git.rev_list('HEAD', '--count'))
-    if rev_list_count < 1:
-        raise Exception('Cannot promote an empty branch.')
-
-    move_detection = MoveDetection()
-    file_moves = move_detection.generate_list_files_moved_between_branches((branch_name_src, build_number_src),
-                                                                           (branch_name_dst, build_number_dst))
-    if len(file_moves) == 0:
-        print('No files moved between releases. Skipping move-commit generation.')
-    else:
-        skipped_files = list()
-        for move in file_moves:
-            filename_before = os.path.join(mirror_repo.working_dir, move[0])
-            filename_after = os.path.join(mirror_repo.working_dir, move[1])
-
-            # If the old file is not found, it's likely due to a file move happening outside the repo's tracked directory.
-            # Such a case occurs when files in the additive zip have moved/renamed. We don't care about these files.
-            if not os.path.exists(filename_before):
-                skipped_files.append(move)
-                continue
-
-            filename_after_dir = os.path.dirname(filename_after)
-            if not os.path.exists(filename_after_dir):
-                os.makedirs(filename_after_dir)
-            
-            # Git is attempting to move to an existing file. Skip this move.
-            if os.path.exists(filename_after):
-                continue
-
-            mirror_repo.git.mv(filename_before, filename_after)
-
-        if skipped_files:
-            print('Skipped processing the following moves not tracked by the git repository:')
-            for entry in skipped_files:
-                print(entry)
-
-        mirror_repo.index.commit("Move Commit")
-
-
-def format_p4_branch_from_ly_version(ly_version):
-    split_version = ly_version.split('.')
-    parsed_version = f'{split_version[0].zfill(2)}_{split_version[1].zfill(2)}'
-    return f'//lyengine/releases/ver{parsed_version}/'
-
-
-def find_ly_version_for_p4_cl(repo, p4_cl):
-    tag_ref = repo.tag('refs/tags/' + p4_cl)
-    branch_list = repo.git.branch('-r', '--contains', tag_ref.commit)
-    branch_list = branch_list.split()
-
-    # We assume the latest branch in the list is always the correct version.
-    latest_branch = branch_list[0]
-
-    # Return right-hand split of [remote]/[branch name].
-    return latest_branch.split('/')[1]
-
-
-def main():
-    args = create_args()
-    validate_args(args)
-
-    if args.clean:
-        print("Running clean. Deleting pre-existing local repo.")
-        if os.path.exists(args.localRepoDirectory):
-            shutil.rmtree(args.localRepoDirectory, onerror=handle_remove_readonly)
-
-    previous_lumberyard_version = None
-    next_lumberyard_version = None
-    source_repo_name = get_repo_name(args.sourceRepoURL)
-    mix_repo = init_mix_repo(args.localRepoDirectory, args.awsProfile, source_repo_name, args.sourceRepoURL, args.destinationRepoURL)
-    remote_origin_refs = mix_repo.remote().refs
-
-    ensure_tag_exists(mix_repo, args.commitRef, args.sourceRepoURL, args.destinationRepoURL)
-
-    # Checkout the to-be-promoted commit
-    mix_repo.git.checkout(args.commitRef)
-
-    # Import Lumberyard version data.
-    sys.path.append(os.path.join(mix_repo.working_dir, 'dev'))
-    import waf_branch_spec
-    next_lumberyard_version = waf_branch_spec.LUMBERYARD_VERSION
-    mirror_repo_branch_name = branch_name_from_version_string(next_lumberyard_version)
-    should_create_version_branch = False
-    empty_master_branch = False
-
-    # We always delete the local repository, regardless of the '--clean' flag because reusing a git repo requires
-    # extensive sanitation to guarantee safe usage. It's easier to just clone a new one specifically for our purposes.
-    if os.path.exists(args.genRoot):
-        print(f"Path: {args.genRoot}\nFound pre-existing temp directory. Clearing contents before proceeding...")
-        shutil.rmtree(args.genRoot, onerror=handle_remove_readonly)
-
-    # We want to move all files of the to-be-promoted commit in a separate directory. This leaves the working directory
-    # bare.
-    print ("Copying repo contents to temp directory.")
-    os.makedirs(args.genRoot)
-    exclude_git_dir = os.path.join(args.localRepoDirectory, ".git")
-    clean_replace_repo_contents(args.localRepoDirectory, args.genRoot, [exclude_git_dir])
-
-    # Checkout the corresponding (mirror repo) branch which our new commit will be based off of.
-    # We must determine if we branch off from a canonical version branch or from 'master'.
-    if hasattr(mix_repo.heads, mirror_repo_branch_name):
-        print(f"Checking out local branch '{mirror_repo_branch_name}'")
-        mix_repo.heads[mirror_repo_branch_name].checkout()
-    elif hasattr(remote_origin_refs, mirror_repo_branch_name):
-        print(f"Checking out remote branch '{mirror_repo_branch_name}'")
-        mix_repo.create_head(mirror_repo_branch_name, remote_origin_refs[mirror_repo_branch_name]) \
-            .set_tracking_branch(remote_origin_refs[mirror_repo_branch_name]) \
-            .checkout()
-    else:
-        print(f"'{mirror_repo_branch_name}' branch not found. Creating version branch after committing in 'master'.")
-        should_create_version_branch = True
-
-        if hasattr(mix_repo.heads, 'master'):
-            print("Performing checkout on 'master'.")
-            mix_repo.heads.master.checkout()
-        else:
-            print("'master' does not exist locally.")
-            if hasattr(remote_origin_refs, 'master'):
-                print("'origin/master' found. Performing checkout from 'origin' remote.")
-                mix_repo.create_head('master', remote_origin_refs.master) \
-                    .set_tracking_branch(remote_origin_refs.master) \
-                    .checkout()
-            else:
-                print("'master' neither exist locally or remotely. Using default-empty 'master' branch.")
-                mix_repo.git.checkout("--orphan", "master")
-                mix_repo.git.reset(".")
-                mix_repo.git.clean("-df")
-                empty_master_branch = True
-
-    # Before performing any new commits, we must set the stage for the incoming commit. That means we must generate a
-    # move-commit to track where the new Lumberyard version's files are destined to exist.
-    print ("Generating Move-Commit...")
-    mix_repo.git.reset('--hard')
-
-    # Reloading this file on versions before v1.24 (Python 2.7) can raise errors. These can be safely ignored.
-    try:
-        reload(waf_branch_spec)
-    except TypeError:
-        pass
-
-    previous_lumberyard_version = waf_branch_spec.LUMBERYARD_VERSION
-    # Try to decode in case waf_branch_spec is still using the old format.
-    try:
-        previous_lumberyard_version = previous_lumberyard_version.decode('utf-8', 'ignore')
-    except (UnicodeDecodeError, AttributeError):
-        pass
-
-    previous_CL = find_cl_for_ly_version_from_staging_repo(previous_lumberyard_version, mix_repo, source_repo_name)
-    previous_branch = format_p4_branch_from_ly_version(previous_lumberyard_version)
-    next_CL = ''.join(filter(str.isdigit, args.commitRef))
-    next_branch = format_p4_branch_from_ly_version(find_ly_version_for_p4_cl(mix_repo, args.commitRef))
-
-    generate_move_commit(mix_repo, previous_branch, previous_CL, next_branch, next_CL)
-
-    # Replace repo contents with the temp files we created early on.
-    exclude_git_dir = os.path.join(args.localRepoDirectory, ".git")
-    clean_replace_repo_contents(args.genRoot, args.localRepoDirectory, [exclude_git_dir])
-    mix_repo.git.add("--all", "--force")
-    promoted_commit_object = mix_repo.commit(args.commitRef)
-    mix_repo.index.commit(promoted_commit_object.message,
-                             author=promoted_commit_object.author,
-                             committer=promoted_commit_object.committer)
-
-    # Tag for customer release
-    print("Tagging new commit.")
-    version_tag_string = "v{0}".format(next_lumberyard_version)
-    mix_repo.create_tag(version_tag_string, force=True)
-
-    # Rename staging repo tag, appending '-Promoted'
-    rename_tag(args.commitRef, args.commitRef + "-Promoted",
-               source_repo_name, mix_repo, args.dryRun)
-
-    # Push commit & tags
-    if args.dryRun:
-        print("Performing dry run. No changes will be pushed to remotes.")
-    else:
-        print("Pushing tag & commit")
-        if empty_master_branch:
-            mix_repo.git.push("-u", "origin", "master")
-        else:
-            mix_repo.git.push("--all")
-        mix_repo.remote().push(version_tag_string)
-
-    # Create version branch, if necessary
-    if should_create_version_branch:
-        print(f"Creating branch '{mirror_repo_branch_name}' off new master head.")
-        mix_repo.create_head(mirror_repo_branch_name)
-        if args.dryRun:
-            print("Performing dry run. No changes will be pushed to remotes.")
-        else:
-            mix_repo.git.push("-u", "origin", "{0}:{0}".format(mirror_repo_branch_name))
-
-
-if __name__ == "__main__":
-    main()
-    sys.exit()

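Two small helpers above encode the version and branch naming conventions the promotion relies on. A traced example, assuming the module's dependencies (P4, boto3, GitPython) are importable and using a placeholder version string:

from GitPromotion import branch_name_from_version_string, format_p4_branch_from_ly_version

print(branch_name_from_version_string("1.24.0.0"))   # "1.24"
print(format_p4_branch_from_ly_version("1.24.0.0"))  # "//lyengine/releases/ver01_24/"
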
+ 0 - 217
Tools/build/JenkinsScripts/distribution/git_release/GitRelease.py

@@ -1,217 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-import argparse
-import boto3
-import sys
-import shutil
-import json
-import os.path
-import re
-from subprocess import Popen, PIPE, check_output
-from git import Repo
-
-THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, '..'))  # Required for importing Git scripts
-from GitStaging import handle_remove_readonly, abort_operation
-from GitOpsCodeCommit import custom_clone
-from GitIntegrityChecker import HASHLIST_KEY
-from GitOpsGitHub import create_authenticated_https_clone_url, are_credentials_valid
-from GitOpsCommon import get_revision_list
-
-def create_args():
-    parser = argparse.ArgumentParser(description='Mirrors all content from the internal to external GitHubMirror.')
-    parser.add_argument('--sourceRepoURL',
-                        help='The URL for the repository that we are mirroring to GitHub (expects a CodeCommit URL).',
-                        required=True)
-    parser.add_argument('--destinationRepoURL',
-                        help='The URL for the repository that we are mirroring on to (expects a GitHub URL).',
-                        required=True)
-    parser.add_argument('--backupRepoURL',
-                        help='The URL for the repository that we are backing up the mirrored content to (expects a CodeCommit URL).',
-                        required=True)
-    parser.add_argument('--hashFile',
-                        help='Hash file to update when release succeeds.',
-                        required=True)
-    parser.add_argument('--genRoot',
-                        help='Directory for temp files.',
-                        required=True)
-    parser.add_argument('--githubUser',
-                        help='Username for the Github account.',
-                        required=True)
-    parser.add_argument('--githubPassword',
-                        help='Password for the Github account.',
-                        required=True)
-    parser.add_argument('--awsProfile',
-                        help='AWS credentials profile generated from AWS CLI. Defaults to \'default\'.',
-                        required=False,
-                        default='default')
-    parser.add_argument('--keep',
-                        help='Keeps all intermediary files after operation completes.',
-                        action='store_true',
-                        required=False)
-    parser.add_argument('--SkipP4Submit',
-                        help='Skips the submission to P4 history.',
-                        action='store_true',
-                        required=False)
-    return parser.parse_args()
-
-
-def validate_args(args):
-    # ensure that source and backup repos aren't github repos
-    github_domain = 'github.com'
-    if github_domain in args.sourceRepoURL.lower():
-        abort_operation('Cannot mirror from GitHub directly. Please use a git repo not on GitHub.')
-
-    if github_domain in args.backupRepoURL.lower():
-        abort_operation('Cannot backup repo to another GitHub repo. Please use a git repo not on GitHub.')
-
-    # ensure that destination repo is a github repo
-    if github_domain not in args.destinationRepoURL.lower():
-        abort_operation('Cannot release to any repo other than one hosted on GitHub. Please use a git repo on GitHub.')
-
-    # ensure aws profile exists on the machine
-    if args.awsProfile:
-        if boto3.Session(profile_name=args.awsProfile) is None:
-            abort_operation('The AWS CLI profile name specified does not exist on this machine. Please specify an existing AWS CLI profile.')
-
-    if not os.path.exists(args.hashFile):
-        abort_operation(f'Hash file not found: {args.hashFile}. If using Perforce, please make sure that the file is mapped to your workspace, is checked out, and is at the latest revision.')
-
-    if not are_credentials_valid(args.githubUser, args.githubPassword):
-        abort_operation('Provided GitHub credentials are invalid. If you are using an account with Two-Factor Authentication enabled, your password should be replaced with the proper access token.')
-
-
-def mirror_repo_from_local(working_dir, dest_repo_url):
-    repo = Repo(working_dir)
-
-    # Perform a mirroring operation by pushing all refs into the remote repo.
-    # This operation will delete stale refs and overwrite outdated ones on the remote repo.
-    print(f'Pushing to remote: {dest_repo_url}')
-    repo.git.push('--mirror', dest_repo_url)
-
-    repo.close()
-
-
-def mirror_repo_from_remote(working_dir, aws_profile, keep, source_repo_url, dest_repo_url):
-    # We begin by 'cloning' the git repo from the internal GitHub mirror at CodeCommit. Normally a git clone would
-    # elegantly set everything ready to mirror, but CodeCommit access is done via AWS CLI, meaning that the git repo
-    # needs a particular configuration in order to communicate with CodeCommit. Said configuration can be set up on the
-    # user's machine, but we inject the configuration directly into the repo initialization in order for the scripts to
-    # be portable across machines.
-    # A drawback with this method is that there are more manual steps to do (such as creating local branches for each
-    # remote branch) in order to correctly mirror the repo to the external GitHub site.
-    if os.path.exists(working_dir):
-        shutil.rmtree(working_dir, onerror=handle_remove_readonly)
-    os.makedirs(working_dir)
-
-    # Clone the remote repo to mirror.
-    custom_clone(source_repo_url, aws_profile, working_dir, True)
-
-    # Perform the actual mirroring operation.
-    # Shortcut:
-    #   Use alternate mirror function for code re-use.
-    mirror_repo_from_local(working_dir, dest_repo_url)
-
-    if not keep:
-        print('Cleaning up temp files.')
-        shutil.rmtree(working_dir, onerror=handle_remove_readonly)
-
-
-def get_last_commit_info(repo_directory):
-    initial_dir = os.getcwd()
-    os.chdir(repo_directory)
-    commit_info = check_output(['git', 'log', '-1', '--all', '--date-order', '--oneline', '--pretty=format:\'%H %s\'']).decode()
-    os.chdir(initial_dir)
-
-    # Sanitize string from stdout for Python usage
-    commit_info = re.sub('[\'\\\]', '', commit_info)
-
-    commit_info_split = commit_info.split(' ', 1)
-    commit_hash = commit_info_split[0]
-    commit_title = commit_info_split[1]
-    print(f'Last commit info found from directory {repo_directory}:\nCommit hash: {commit_hash}\nCommit Title: {commit_title}')
-    return commit_hash, commit_title
-
-
-def create_git_release_p4_changelist(changelist_description):
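-    # Fetch the default changelist spec ('p4 change -o'), fill in its description, then create the CL via 'p4 change -i'.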
-    changelist_config = check_output(['p4', 'change', '-o']).decode()
-    changelist_config = changelist_config.replace('<enter description here>', changelist_description, 1)
-
-    p = Popen(['p4', 'change', '-i'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
-    result_stdout = p.communicate(input=changelist_config.encode())[0].decode()
-
-    # Successful result prints changelist number (example: 'Change 452982 created.\r\n')
-    changelist_number = result_stdout.split()[1]
-    return changelist_number
-
-
-def p4_update_hashlist(commit_hashes_filepath, hash_list, changelist_number):
-    print(f'Checking out hashlist file for edit: {commit_hashes_filepath}')
-    print(check_output(['p4', 'edit', '-c', changelist_number, commit_hashes_filepath]))
-
-    print('Loading hashlist from file.')
-    with open(commit_hashes_filepath) as json_data:
-        json_obj_hashes = json.load(json_data)
-    json_obj_hashes[HASHLIST_KEY] = hash_list
-
-    # Replace the hashlist file with the new updated version
-    print('Writing new hash list to file')
-    os.remove(commit_hashes_filepath)
-    with open(commit_hashes_filepath, 'w') as f:
-        json.dump(json_obj_hashes, f, indent=4)
-
-
-def submit_p4_changelist(changelist_number):
-    print(f'Submitting P4 CL{changelist_number}...')
-    print(check_output(['p4', 'submit', '-c', changelist_number]))
-
-
-def main():
-    args = create_args()
-    validate_args(args)
-
-    https_authenticated_url = \
-        create_authenticated_https_clone_url(args.githubUser, args.githubPassword,
-                                             args.destinationRepoURL)
-
-    working_dir = os.path.join(args.genRoot, 'git_repo_release')
-
-    # Mirror the repo to GitHub.
-    mirror_repo_from_remote(working_dir, args.awsProfile, True,
-                            args.sourceRepoURL,
-                            https_authenticated_url)
-
-    # Reuse the cloned repo to mirror once more to our internal backup
-    mirror_repo_from_local(working_dir, args.backupRepoURL)
-
-    # Do a rev-list to update the hashlist
-    hash_list = get_revision_list(working_dir)
-
-    # Get the last git commit to parse the version number. We need this number to fill the description of the Perforce
-    # changelist used to update the hashlist. Ignore the 'commit_hash'.
-    commit_hash, commit_title = get_last_commit_info(working_dir)
-
-    p4_cl_number = create_git_release_p4_changelist('GitHub ' + commit_title)
-
-    p4_update_hashlist(args.hashFile, hash_list, p4_cl_number)
-
-    if not args.SkipP4Submit:
-        submit_p4_changelist(p4_cl_number)
-    else:
-        print('Skipping P4 Submit.')
-
-    if not args.keep:
-        # Delete the repo
-        shutil.rmtree(working_dir, onerror=handle_remove_readonly)
-
-if __name__ == '__main__':
-    main()
-    sys.exit()

+ 0 - 825
Tools/build/JenkinsScripts/distribution/git_release/GitStaging.py

@@ -1,825 +0,0 @@
-############################################################################################
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates, or
-# a third party where indicated.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#############################################################################################
-
-# Requires following installs via pip:
-# - Boto3
-# Requires following software pre-installed:
-# - Git
-# - Python 3.7.6 x64
-# - Apache Ant
-from distutils import spawn
-from distutils.version import StrictVersion
-from git import Repo, RemoteProgress
-from urllib.parse import urljoin, urlparse
-import argparse
-import boto3
-import botocore.exceptions
-import datetime
-import errno
-import GitOpsCodeCommit
-import glob
-import json
-import locale
-import os
-import shutil
-import stat
-import subprocess
-import sys
-import textwrap
-import time
-
-THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, ".."))  # Required for AWS_PyTools
-sys.path.append(os.path.join(THIS_SCRIPT_DIRECTORY, "..", "Installer"))  # Required for BuildInstallerUtils
-from AWS_PyTools import LyCloudfrontOps
-from AWS_PyTools import LyChecksum
-from Installer import SignTool
-
-
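-# Keys and filename for the bootstrap config (written by create_bootstrap_config and consumed by git_bootstrap.py).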
-URL_KEY = "URL"
-CHECKSUM_KEY = "Checksum"
-SIZE_KEY = "Uncompressed Size"
-BOOTSTRAP_CONFIG_FILENAME = "bootstrap_config.json"
-
-HASH_FILE_NAME = "filehashes.json"
-class ExitCodes(object):
-    # Starting with higher numbers to avoid default system error collisions.
-    INVALID_ARGUMENT = 11
-    UNSIGNED_PACKAGE = 12
-
-
-def print_status_message(message):
-    print("-----------------------------")
-    print(f"{message}")
-    print("-----------------------------\n")
-
-
-def bytes_to_megabytes(size_in_bytes):
-    bytes_in_megabytes = 1048576
-    return size_in_bytes / bytes_in_megabytes
-
-
-def bytes_to_gigabytes(size_in_bytes):
-    megabytes_in_gigabytes = 1024
-    return bytes_to_megabytes(size_in_bytes) / megabytes_in_gigabytes
-
-
-def appendTrailingSlashToUrl(url):
-    if not url.endswith(tuple(['/', '\\'])):
-        url += '/'
-    return url
-
-
-def get_empty_subdirectories(path):
-    empty_directories = []
-    for dirpath, dirnames, filenames in os.walk(path):
-        for cur_dir in dirnames:
-            full_path = os.path.join(dirpath, cur_dir)
-            if not os.listdir(full_path):
-                empty_directories.append(full_path)
-    return empty_directories
-
-
-def get_directory_size_in_bytes(start_path):
-    total_size = 0
-    for dirpath, dirnames, filenames in os.walk(start_path):
-        for f in filenames:
-            fp = os.path.join(dirpath, f)
-            total_size += os.path.getsize(fp)
-    return total_size
-
-
-# Python has issues removing files and directories on Windows
-# (even if we've just created them) if they were set to 'readonly'.
-# This usually occurs when deleting a '.git' directory, because some internal
-# git repository files become 'readonly' when initializing a new repo.
-# The following function should override general permission issues when
-# deleting.
-def handle_remove_readonly(func, path, exc):
-    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
-    func(path)
-
-
-# Ensures a specified directory exists.  If clean is 'True',
-# all existing contents within the directory will be deleted.  The clean argument is
-# intended to be used for retrying script operation from scratch.  Use with
-# care.
-def ensure_directory_is_usable(directory, clean):
-    if os.path.exists(directory):
-        print(f"'{directory}' already exists.")
-        if clean is True:
-            print(f"Clean mode enabled. Deleting contents of '{directory}'")
-            dir_entries = os.listdir(directory)
-            for entry in dir_entries:
-                entry_full_path = os.path.join(directory, entry)
-                if os.path.isfile(entry_full_path):
-                    os.chmod(entry_full_path, stat.S_IWRITE)
-                    os.remove(entry_full_path)
-                elif os.path.isdir(entry_full_path):
-                    shutil.rmtree(entry_full_path,
-                                  ignore_errors=False,
-                                  onerror=handle_remove_readonly)
-            if len(os.listdir(directory)) > 0:
-                print("Required directory for operation is not empty.")
-                print(f"Failed to delete contents of: {directory}")
-                sys.exit()
-        else:
-            print("Reusing contents of existing directory.")
-    else:
-        print(f"'{directory}' does not exist. Creating.")
-        os.makedirs(directory)
-
-
-def parse_script_arguments():
-    parser = argparse.ArgumentParser(description='Creates a git commit from a signed Lumberyard release.')
-
-    parser.add_argument('--gitURL',
-                        help="The url for the repo you would like to push to.",
-                        default=None)
-    parser.add_argument('--gitBranch',
-                        help="The branch which the commit will be made to.",
-                        required=False,
-                        default="master")
-    parser.add_argument('--packagePath',
-                        help='Filepath to the signed Lumberyard package.',
-                        required=True)
-    parser.add_argument('--cloudfrontURL',
-                        help='Cloudfront base URL.',
-                        required=False,
-                        default=None)
-    parser.add_argument('--zipDescriptor',
-                        help='Name of the zip file as appears on a commit message when describing the URL.',
-                        required=True)
-    parser.add_argument('--binZipSuffix',
-                        help='Suffix appended to the newly created binary zip name.',
-                        required=True)
-    # is it safe to assume that the profile being used for S3 and CodeCommit are the same?
-    parser.add_argument('--awsProfile',
-                        help='AWS credentials profile generated from AWS CLI. The codecommit repo and cloudfront distribution should both be accessible by this profile. Defaults to "default".',
-                        required=False,
-                        default='default')
-    parser.add_argument('--uploadProfile',
-                        help='AWS CLI profile to use to upload to cloudfront',
-                        required=False,
-                        default='default')
-    parser.add_argument('--genRoot',
-                        help='Path to where the entire script operation will take place.',
-                        required=True)
-    parser.add_argument('--binDownloader',
-                        help='Path to the binary downloader which is stored in Perforce.',
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", "git_bootstrap.exe"))
-    parser.add_argument('--gitReadme',
-                        help="The readme to be displayed on the Git repo page.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", "README.md"))
-    parser.add_argument('--gitGuidelines',
-                        help="The contribution guidelines for customers submitting changes.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", "CONTRIBUTING.md"))
-    parser.add_argument('--gitIgnore',
-                        help="The default '.gitignore' for the repo.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", ".gitignore"))
-    parser.add_argument('--gitBugTemplate',
-                        help="The bug issue template.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", ".github", "ISSUE_TEMPLATE", "bug_report.md"))
-    parser.add_argument('--gitFeatureTemplate',
-                        help="The feature issue template.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", ".github", "ISSUE_TEMPLATE", "feature_request.md"))
-    parser.add_argument('--gitQuestionTemplate',
-                        help="The question issue template.",
-                        required=False,
-                        default=os.path.join(THIS_SCRIPT_DIRECTORY, "inject", ".github", "ISSUE_TEMPLATE", "question.md"))
-    parser.add_argument('--clean',
-                        help='Clear out existing temp directories before executing this operation.',
-                        required=False,
-                        action="store_true")
-    parser.add_argument('--keep',
-                        help='Skips the clean-up process at the end of the '
-                             'operation, keeping temp files in the working directory.',
-                        required=False,
-                        action="store_true")
-    parser.add_argument('--performUpload',
-                        help='Performs the upload of the Lumberyard build binaries zip. '
-                             'If specified, artifacts are deleted upon successful upload.',
-                        required=False,
-                        action="store_true")
-    parser.add_argument('--performPush',
-                        help='Performs a Git push of the new repo changes created by this process.',
-                        required=False,
-                        action="store_true")
-    parser.add_argument('--allowUnsignedPackages',
-                        help='Allows unsigned Lumberyard packages to be processed.',
-                        required=False,
-                        action="store_true")
-    parser.add_argument('--engineDefaultSettingsPath',
-                        help="Path to 'default_settings.json' relative to the engine root.",
-                        required=False,
-                        default='dev/_WAF_/default_settings.json')
-    parser.add_argument('--zipOnly',
-                        help='Only generate the zip. Do not actually do anything with a git repo.',
-                        required=False,
-                        action="store_true")
-    return parser.parse_args()
-
-
-# Takes a repository directory and replaces its contents with the contents of
-# another directory, all while preserving the state of the repository.
-# The incoming contents are MOVED, not copied.
-def clean_replace_repo_contents(incoming_content_directory, repo_directory, excludes):
-    repo_dir_entries = os.listdir(repo_directory)
-    for entry in repo_dir_entries:
-        src_entry_full_path = os.path.join(repo_directory, entry)
-        if src_entry_full_path in excludes:
-            continue
-        if os.path.isfile(src_entry_full_path):
-            os.remove(src_entry_full_path)
-        elif os.path.isdir(src_entry_full_path):
-            print(f"Removing directory '{src_entry_full_path}'")
-            shutil.rmtree(src_entry_full_path,
-                          ignore_errors=False,
-                          onerror=handle_remove_readonly)
-
-    # Add the incoming content to the repo directory
-    src_dir_entries = os.listdir(incoming_content_directory)
-    for entry in src_dir_entries:
-        src_entry_full_path = os.path.join(incoming_content_directory, entry)
-        dst_entry_full_path = os.path.join(repo_directory, entry)
-        if src_entry_full_path in excludes:
-            continue
-        shutil.move(src_entry_full_path, dst_entry_full_path)
-
-
-# Checks whether a particular temp file should be generated/created.
-# The 'filepath' argument does not necessarily correlate to the file-to-be-created.
-# Example: Checking if FileA should be generated to decide how to process FileB.
-def should_generate_resource(filepath, run_clean):
-    if run_clean:
-        should_create = True
-        print(f"'--clean' flag detected. Checking if '{filepath}' already exists.")
-        if os.path.exists(filepath):
-            print(f"'{filepath}' already exists. Removing.")
-            os.remove(filepath)
-        else:
-            print(f"'{filepath}' is not present.")
-    elif not os.path.exists(filepath):
-        should_create = True
-        print(f"'{filepath}' does not currently exist.")
-    else:
-        should_create = False
-        print(f"'{filepath}' already exists.")
-    return should_create
-
-
-def abort_operation(reason, exit_code=1):
-    print(reason)
-    print("Aborting operation.")
-    sys.exit(exit_code)
-
-
-### ZIP GENERATION FUNCTIONS
-
-# Create a json file that maps the relative path of each file going into the zip to that file's hash.
-def generate_hashes_file(bin_directory):
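-    # Joining with an empty component guarantees a trailing separator, so os.path.dirname() below returns the directory itself rather than its parent.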
-    directory_to_hash = os.path.join(bin_directory, "")
-    out_file_path = os.path.join(directory_to_hash, HASH_FILE_NAME)
-    if os.path.exists(out_file_path):
-        print(f"Hash list already exists. Reusing existing file:\n{out_file_path}")
-    else:
-        file_hashes = {}
-        for root, _, files in os.walk(directory_to_hash):
-            for filename in files:
-                file_to_hash = os.path.join(root, filename)
-                rel_file_path = os.path.relpath(file_to_hash, os.path.dirname(directory_to_hash))
-                # make sure the key (file path) is relative, so that it doesn't matter
-                #   where the customer has their repository.
-                # Opens file in universal mode to reduce unix vs pc line endings issues.
-                file_hashes[rel_file_path] = LyChecksum.getChecksumForSingleFile(file_to_hash).hexdigest()
-        with open(out_file_path, 'w') as out_file:
-            json.dump(file_hashes, out_file, sort_keys=True, indent=4, separators=(',', ': '))
-        print(f"Hash list file output to {out_file_path}")
-
-
-### GIT COMMIT FUNCTIONS
-
-def get_downloader_version(downloader_filepath):
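-    # The downloader executable reports its own version on stdout when invoked with '--version'.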
-    p = subprocess.Popen([downloader_filepath, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    output, error = p.communicate()
-    if p.returncode != 0:
-        raise Exception(f"Downloader version command failed ({p.returncode:d}) {output} {error}")
-    version = output.strip()
-    return version
-
-
-def get_ly_version(src_directory):
-    sys.path.append(os.path.join(src_directory, 'dev'))
-    import waf_branch_spec
-    return waf_branch_spec.LUMBERYARD_VERSION
-
-
-def get_ly_build(src_directory):
-    sys.path.append(os.path.join(src_directory, 'dev'))
-    import waf_branch_spec
-    return waf_branch_spec.LUMBERYARD_BUILD
-
-
-# creates a file with the given name at the root of the repo containing all of the
-#   additive binary info
-def create_bootstrap_config(filename, path, url, checksum, size):
-    zip_info = { URL_KEY: url, CHECKSUM_KEY: checksum, SIZE_KEY: size }
-    out_file_path = os.path.join(path, filename)
-    with open(out_file_path, 'w') as out_file:
-        json.dump(zip_info, out_file, sort_keys=True, indent=4, separators=(',', ': '))
-
-
-# returns True if needed to create a new branch during this operation
-def checkout_git_branch(repo, ly_version):
-    if hasattr(repo.heads, ly_version):
-        print(f'Performing checkout of local branch {ly_version}')
-        repo.git.checkout(ly_version)
-    elif hasattr(repo.remote("origin").refs, ly_version):
-        print(f'Performing checkout of remote branch {ly_version}')
-        remote_ref = repo.remote("origin").refs[ly_version]
-        repo.create_head(ly_version, remote_ref) \
-            .set_tracking_branch(remote_ref) \
-            .checkout()
-    else:
-        print(f"Branch '{ly_version}' was not found. Searching for closest relative.")
-        ly_version_split = ly_version.split('.')
-
-        remote_heads = []
-        for head in repo.remote('origin').refs:
-            origin_prefix = 'origin/'
-            ref_name = head.name[len(origin_prefix):]
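-            # Only consider remote branches that share this version's 'major.minor' prefix.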
-            product_major_version_pattern = f"{ly_version_split[0]}.{ly_version_split[1]}"
-            if ref_name.startswith(product_major_version_pattern):
-                remote_heads.append(ref_name)
-
-        # Did we get any matches?
-        if len(remote_heads) > 0:
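-            # Treat the last matching remote head as the closest parent branch to base the new version branch on.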
-            identified_parent_branch_name = remote_heads[len(remote_heads)-1]
-            repo.git.checkout(identified_parent_branch_name)
-
-            # Magic Git voodoo code to show commits belonging to checked out branch, containing the word 'Promoted'.
-            promoted_commits_string = subprocess.check_output("git log"
-                                                              " --decorate=full"
-                                                              " --simplify-by-decoration"
-                                                              " --pretty=oneline"
-                                                              " HEAD"
-                                                              " | sed -r -e \"s#^[^\(]*\(([^\)]*)\).*$#\\1#\" -e 's#,#\\n#g'"
-                                                              " | grep 'tag:'"
-                                                              " | sed -r -e 's#[[:space:]]*tag:[[:space:]]*##'"
-                                                              " | grep 'Promoted'", shell=True)
-            promoted_commits_array = promoted_commits_string.splitlines()
-
-            # Due to output order of subprocess command, the first element is the last promoted commit in this branch.
-            identified_promoted_commit_tag_name = promoted_commits_array[0]
-            print(subprocess.check_output(["git", "checkout", "-b", ly_version, identified_promoted_commit_tag_name]))
-        # We found no matches, we are adding a new version to master.
-        # No need to checkout master because every checkout for this logic branch has failed; we are already on master.
-        else:
-            return True
-    return False
-
-
-def validate_downloader(args, clone_directory, binary_downloader_filename):
-    p4_downloader_filepath = args.binDownloader
-    git_downloader_filepath = os.path.join(clone_directory, binary_downloader_filename)
-
-    if os.path.exists(git_downloader_filepath):
-        print("A binary downloader already exists in the Git repo.")
-        p4_downloader_version = StrictVersion(get_downloader_version(p4_downloader_filepath).decode())
-        git_downloader_version = StrictVersion(get_downloader_version(git_downloader_filepath).decode())
-
-        if p4_downloader_version > git_downloader_version:
-            print(f"The binary downloader in Git is outdated: v{git_downloader_version}")
-            print(f"Updating to a newer version: v{p4_downloader_version}")
-            shutil.copy(p4_downloader_filepath, git_downloader_filepath)
-            # The downloader binary might be read-only, especially if taken directly from a Perforce-managed
-            # directory while not checked-out. This may cause permission issues, prompts, or errors on future
-            # scripts/automation without authority to modify read-only files. Setting the file to read/write.
-            os.chmod(git_downloader_filepath, stat.S_IWRITE)
-        elif p4_downloader_version == git_downloader_version:
-            print("No changes detected in the binary downloader.")
-        else:
-            raise Exception(f"The binary downloader in the Git repo is newer (v{git_downloader_version}) than the internal one(v{p4_downloader_version}).\n"
-                            "Has the Git repo become compromised?")
-    else:
-        print("No binary downloader exists in the Git repository. Adding.")
-        shutil.copy(p4_downloader_filepath, git_downloader_filepath)
-
-
-def checkout_git_repo (args):
-    print_status_message("Generating empty git repo...")
-    GitOpsCodeCommit.init_git_repo(args.gitURL, args.awsProfile, os.path.curdir)
-    repo = Repo(os.path.curdir)
-
-    print_status_message("Fetching git repo from remote...")
-    repo.remote("origin").fetch()
-
-    if not hasattr(repo.heads, args.gitBranch) and not hasattr(repo.remote().refs, args.gitBranch):
-        # We only reach here if gitBranch (presumably 'master', but can be anything) is missing from local & remote.
-        # This happens when staging to an empty repo. In such case, we create a local branch which will push at the end.
-        # Orphan checkout is only useful if gitBranch is other than 'master', otherwise, this call has no effect.
-        repo.git.checkout('--orphan', args.gitBranch)
-    else:
-        repo.git.checkout(args.gitBranch)
-
-    return repo
-
-
-def checkout_version_branch(src_directory, repo):
-    # We want to attach the Lumberyard version number to the commit message.
-    # Obtain version & build-number from the package contents via module import.
-    # To successfully import package contents, we must append to our Python 'sys' path.
-    ly_version = get_ly_version(src_directory)
-    print(f'Parsed Lumberyard version from source: {ly_version}')
-
-    # Checkout whatever branch we should check this commit in to, or make one if there is not already an appropriate branch
-    creating_new_version_branch = checkout_git_branch(repo, ly_version)
-
-    return ly_version, creating_new_version_branch
-
-
-def stage_files_for_commit(args, clone_directory, src_directory, repo):
-    # Performing an accurate commit requires detecting modification, deletion, and addition to the repo files.
-    # In order to automatically detect this, we will leverage Git's ability to pick up on changes. To do this, we will
-    # delete all local files from the repo to then add the new source files. Git should know which files differ.
-    print_status_message("Collecting files for staging...")
-    binary_downloader_filename = os.path.basename(args.binDownloader)
-    excludes = [
-        os.path.join(clone_directory, ".git"),
-        os.path.join(clone_directory, binary_downloader_filename)
-    ]
-    clean_replace_repo_contents(src_directory, clone_directory, excludes)
-
-    # During the Git staging process, we may or may not include the binary downloader as part of the commit.
-    print_status_message("Validating binary downloader...")
-    validate_downloader(args, clone_directory, binary_downloader_filename)
-
-    # Add all into Git staging to determine what the historical changes are.
-    # Force the add to bypass .gitignore rules. This ensures files that would otherwise be ignored by mistake
-    # are still added to the repository instead of being lost/deleted during this staging procedure.
-    print_status_message("Staging Git files...")
-    repo.git.add("--all", "--force")
-
-
-def commit_to_local_repo(args, repo, ly_version, src_directory, bin_directory_size_in_bytes, creating_new_version_branch):
-    bin_directory_size_in_gigabytes = bytes_to_gigabytes(bin_directory_size_in_bytes)
-
-    print_status_message("Generating Git commit...")
-    # Generate a commit message.  This can be expanded with development
-    # highlights. The highlights could be fed in via python arguments in form
-    # of a URL to scrape, text file, or raw arguments.
-    git_commit_message_args = [ly_version,
-                               args.zipDescriptor,
-                               locale.format_string("%.2f", bin_directory_size_in_gigabytes, grouping=True),
-                               bin_directory_size_in_bytes,]
-    git_commit_message = textwrap.dedent(
-        """Lumberyard Release {0}
-
-        {1} Uncompressed Size: {2}GB ({3} bytes)
-        """.format(*git_commit_message_args))
-
-    print(f"Generating commit with the following message:\n{git_commit_message}")
-    repo.index.commit(git_commit_message)
-
-    # For CI builds, we want to tag the commit with the Perforce change list (CL) number.
-    repo.create_tag('CL' + str(get_ly_build(src_directory)))
-
-    if creating_new_version_branch:
-        repo.git.checkout('-b', ly_version)
-        # TODO:
-        # Need to figure out how to set upstream without pushing so that we may isolate all
-        # pushes to a single block of code in this file (for easier debugging and maintenance).
-        if args.performPush:
-            repo.git.push('-u', 'origin', ly_version)
-
-
-def push_repo_to_remote(args, repo):
-    if args.performPush:
-        print_status_message("Pushing Git commit to remote...")
-        repo.git.push('--all')
-        repo.git.push('--tags')
-
-    else:
-        print_status_message("'--performPush' flag not present. Skipping Git push procedure...")
-
-
-def clean_up_repo_and_tempfiles(args, repo, abs_gen_root):
-    if not args.keep:
-        print_status_message("Cleaning up temp files...")
-        repo.close()
-
-        # Give OS time to release any handles on files/paths (we see you, Windows)
-        time.sleep(0.1)
-
-        # Although we created multiple directories, the files in genRoot are all
-        # temp files. We can safely delete the parent directory instead of each
-        # individually created directory/file.
-        shutil.rmtree(abs_gen_root, ignore_errors=False, onerror=handle_remove_readonly)
-    else:
-        print_status_message("'--keep' flag detected. Skipping cleanup procedure...")
-
-
-# Tests for any invalid input. Any error results into application termination.
-def validate_args(args):
-    # Test for cloudfront url secure protocol
-    if args.performUpload == True or args.zipOnly == False:
-        if args.cloudfrontURL == None:
-            abort_operation("Need to specify --cloudfrontURL if generating a commit or uploading the zip.")
-    if args.performUpload == True or args.cloudfrontURL is not None:
-        if not args.cloudfrontURL.startswith("https://"):
-            abort_operation("Incorrect cloudfront protocol. Ensure cloudfront URL starts with 'https://'",
-                            ExitCodes.INVALID_ARGUMENT)
-        args.cloudfrontURL = appendTrailingSlashToUrl(args.cloudfrontURL)
-
-        # Check to see if a default aws profile is available
-        try:
-            boto3.Session(profile_name=args.awsProfile)
-        except botocore.exceptions.ProfileNotFound:
-            abort_operation("AWS credentials files are missing. "
-                            "Ensure AWS CLI is installed and configured with your IAM credentials.",
-                            ExitCodes.INVALID_ARGUMENT)
-
-    # Ensure Lumberyard package exists
-    ly_package_filepath = os.path.abspath(args.packagePath)
-    if os.path.exists(ly_package_filepath) is False:
-        abort_operation(f"'{ly_package_filepath}' does not exist.",
-                        ExitCodes.INVALID_ARGUMENT)
-    if os.path.isfile(ly_package_filepath) is False:
-        abort_operation(f"'{ly_package_filepath}' is not a valid file. Did you specify a directoy or symlink?",
-                        ExitCodes.INVALID_ARGUMENT)
-
-    if args.performPush == True and args.zipOnly == True:
-        abort_operation("Cannot specify both 'zipOnly' and 'performPush'. Please specify just one.")
-
-    if args.zipOnly == False:
-        # Verify Git is installed
-        if spawn.find_executable("git") is None:
-            abort_operation("Cannot find Git in your environment path. Ensure Git is installed on your machine.")
-
-        if args.gitURL is None:
-            abort_operation('You must specify "--gitURL" with a valid URL to a git repo in order to perform any git operations with this script.')
-
-        # Ensure repo URL does not point to a public GitHub repo
-        if "github.com" in args.gitURL.lower():
-            abort_operation("Cannot stage to GitHub directly. Please use a git repo not on GitHub.")
-
-        # Ensure bin downloader is a valid file
-        if os.path.exists(args.binDownloader) is False:
-            abort_operation(f"'--binDownloader' filepath does not exist:\n{args.binDownloader}")
-
-        # Ensure readme is a valid file
-        if os.path.exists(args.gitReadme) is False:
-            abort_operation(f"'--gitReadme' filepath does not exist:\n{args.gitReadme}")
-
-        # Ensure contributions is a valid file
-        if os.path.exists(args.gitGuidelines) is False:
-            abort_operation(f"'--gitGuidelines' filepath does not exist:\n{args.gitGuidelines}")
-
-        # Ensure gitignore is a valid file
-        if os.path.exists(args.gitIgnore) is False:
-            abort_operation(f"'--gitIgnore' filepath does not exist:\n{args.gitIgnore}")
-
-        # Ensure gitBugTemplate is a valid file
-        if os.path.exists(args.gitBugTemplate) is False:
-            abort_operation(f"'--gitBugTemplate' filepath does not exist:\n{args.gitBugTemplate}")
-
-        # Ensure gitFeatureTemplate is a valid file
-        if os.path.exists(args.gitFeatureTemplate) is False:
-            abort_operation(f"'--gitFeatureTemplate' filepath does not exist:\n{args.gitFeatureTemplate}")
-
-        # Ensure gitQuestionTemplate is a valid file
-        if os.path.exists(args.gitQuestionTemplate) is False:
-            abort_operation(f"'--gitQuestionTemplate' filepath does not exist:\n{args.gitQuestionTemplate}")
-
-
-# Returns True if a Lumberyard package is signed.
-def is_lumberyard_package_signed(unpacked_directory_root):
-    # List of binaries obtained from 'InstallerAutomation.py'
-    binaries_to_scan = [
-        os.path.join(unpacked_directory_root, "dev", "Bin64vc141", "Editor.exe"),
-        os.path.join(unpacked_directory_root, "dev", "Bin64vc142", "Editor.exe")
-    ]
-
-    for bin_filename in binaries_to_scan:
-        if SignTool.signtoolVerifySign(bin_filename, True) is False:
-            return False
-    return True
-
-
-def main():
-    args = parse_script_arguments()
-    validate_args(args)
-
-    initial_cwd = os.getcwd()
-    locale.setlocale(locale.LC_NUMERIC, 'english')
-
-    # Cache the absolute path of genRoot (aka, the workspace)
-    abs_gen_root = os.path.abspath(args.genRoot)
-
-    # Where the package shall be entirely extracted to.
-    # Contents are 1:1 with zip.
-    bin_directory = os.path.join(abs_gen_root, "PackageExtract")
-
-    # Where the source files will be once split (moved) from the extracted
-    # package directory.
-    src_directory = os.path.join(abs_gen_root, "Src")
-
-    # Directory for the local git clone of the repository.
-    clone_directory = os.path.join(abs_gen_root, "Repo")
-
-    # We want to split the package into source files and binary files.
-    # To begin this process, we must extract the contents from the zip.
-    print_status_message("Creating genRoot directories...")
-    ensure_directory_is_usable(clone_directory, args.clean)
-    ensure_directory_is_usable(src_directory, args.clean)
-    ensure_directory_is_usable(bin_directory, args.clean)
-
-    package_zip_filepath = os.path.abspath(args.packagePath)
-
-    if args.clean or not os.path.exists(bin_directory):
-        print_status_message(f"Extracting files from {package_zip_filepath}")
-        subprocess.call(["ant",
-                         "ExtractPackage",
-                         "-DZipfile=" + package_zip_filepath,
-                         "-DExtractDir=" + bin_directory],
-                        shell=True)
-    else:
-        print_status_message("Path already exists. Reusing contents")
-
-    print_status_message("Verifying structure of package contents")
-    empty_directories = get_empty_subdirectories(bin_directory)
-    if len(empty_directories) > 0:
-        stringList = '\n'.join(empty_directories)
-        print(f"Package contains empty directories. Deleting:\n{stringList}")
-        for x in empty_directories:
-            shutil.rmtree(x)
-    else:
-        print("Structure is valid. No empty directories found.")
-
-    if args.allowUnsignedPackages:
-        print_status_message("'--allowUnsignedPackages' flag detected. Skipping signature check.")
-    else:
-        print_status_message("Scanning for signed Lumberyard package")
-        if not is_lumberyard_package_signed(bin_directory):
-            abort_operation("The provided package is not signed. Retry with a signed package, or ensure "
-                            "'--allowUnsignedPackages' flag is specified.", ExitCodes.UNSIGNED_PACKAGE)
-        elif not SignTool.signtoolVerifySign(args.binDownloader, True):
-            abort_operation("The provided package is signed, but the binary downloader is not. Sign the downloader, or "
-                            "ensure '--allowUnsignedPackages' flag is specified.", ExitCodes.UNSIGNED_PACKAGE)
-        else:
-            print("Package is signed.")
-
-    # We now meet the requirements for the next step: Splitting.
-    # For this, we invoke an ant script.
-    if args.clean:
-        print_status_message("Splitting source and binary files...")
-        subprocess.call(["ant",
-                         "SplitZip",
-                         "-DExtractBuild=" + bin_directory,
-                         "-DGitSrc=" + src_directory],
-                        shell=True)
-
-        # The Github readme, guidelines and templates are stored separately so let's make sure they're included in the distribution.
-        github_readme_filename = os.path.basename(args.gitReadme)
-        shutil.copy(args.gitReadme, os.path.join(src_directory, github_readme_filename))
-
-        github_contributions_filename = os.path.basename(args.gitGuidelines)
-        shutil.copy(args.gitGuidelines, os.path.join(src_directory, github_contributions_filename))
-
-        github_gitignore_filename = os.path.basename(args.gitIgnore)
-        shutil.copy(args.gitIgnore, os.path.join(src_directory, github_gitignore_filename))
-
-        template_path = os.path.join(src_directory, ".github", "ISSUE_TEMPLATE")
-        if not os.path.exists(template_path):
-            os.makedirs(template_path)
-
-        github_bug_filename = os.path.join(template_path, os.path.basename(args.gitBugTemplate))
-        shutil.copy(args.gitBugTemplate, github_bug_filename)
-
-        github_feature_filename = os.path.join(template_path, os.path.basename(args.gitFeatureTemplate))
-        shutil.copy(args.gitFeatureTemplate, github_feature_filename)
-
-        github_question_filename = os.path.join(template_path, os.path.basename(args.gitQuestionTemplate))
-        shutil.copy(args.gitQuestionTemplate, github_question_filename)
-
-    # Generate the file containing the list of hashes of all of the content that will go into the zip
-    print_status_message("Generating file containing list of file hashes...")
-    generate_hashes_file(bin_directory)
-
-
-    # We have split our Source from Binaries. We can now identify the size of
-    # the binaries zip when uncompressed. We will need this for the commit message.
-    print_status_message("Measuring unpacked binaries size...")
-    bin_directory_size_in_bytes = get_directory_size_in_bytes(bin_directory)
-    bin_directory_size_in_megabytes = bytes_to_megabytes(bin_directory_size_in_bytes)
-    bin_directory_size_in_gigabytes = bytes_to_gigabytes(bin_directory_size_in_bytes)
-
-    print(textwrap.dedent(f"""
-        Total size in Bytes: {locale.format_string("%d", bin_directory_size_in_bytes, grouping=True)}
-                  Megabytes: {locale.format_string("%.4f", bin_directory_size_in_megabytes, grouping=True)}
-                  Gigabytes: {locale.format_string("%.2f", bin_directory_size_in_gigabytes, grouping=True)}
-        """))
-
-    # Generate the expected filepath for the binaries zip.
-    if args.clean:
-        timestamp = time.time()
-        bin_zip_filename = os.path.basename(package_zip_filepath)
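-        # Resulting name: <package name>_<MMDDYY_HHMMSS>-<binZipSuffix>.zip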
-        bin_zip_filepath = "{0}_{1}-{2}.zip".format(
-            os.path.join(abs_gen_root, os.path.splitext(bin_zip_filename)[0]),
-            datetime.datetime.fromtimestamp(timestamp).strftime('%m%d%y_%H%M%S'),
-            args.binZipSuffix)
-    else:
-        # Find the last generated binary zip.
-        glob_pattern = f"{abs_gen_root}\\*-{args.binZipSuffix}.zip"
-        glob_list_result = glob.glob(glob_pattern)
-        glob_list_length = len(glob_list_result)
-        if glob_list_length > 0:
-            bin_zip_filepath = glob_list_result[glob_list_length-1]
-        else:
-            raise Exception("No pre-existing binary zip to reuse. Run with '--clean' flag to generate new binary zip.")
-
-
-    # Let's zip up the Binaries for submitting to S3.
-    # We may be resuming from a previous run. We may not want to start over
-    # from scratch as this takes a while, thus, we check before processing...
-    if should_generate_resource(bin_zip_filepath, args.clean):
-        print_status_message(f"Zipping binary files into '{bin_zip_filepath}'")
-        subprocess.call(["ant",
-                         "ZipBinaries",
-                         "-DZipDest=" + bin_zip_filepath,
-                         "-DZipSrc=" + bin_directory],
-                        shell=True)
-    else:
-        print_status_message("Skipping binary files zipping operation.")
-
-    # We generate a checksum for the zip file. The downloader will use this to
-    # ensure the contents have not been tampered with.
-    print_status_message("Generating bin zip checksum.")
-    bin_zip_file_checksum = LyChecksum.getChecksumForSingleFile(bin_zip_filepath)
-
-    target_cloudfront_url = "N/A"
-    if args.performUpload:
-        print_status_message("Uploading to S3...")
-        target_bucket_path = LyCloudfrontOps.uploadFileToCloudfrontURL(bin_zip_filepath,
-                                                                       args.cloudfrontURL,
-                                                                       args.uploadProfile,
-                                                                       False)
-        # Bucket path and cloudfront url both contain the bucket folder name in their paths.
-        # We'll trim the cloudfront url down to just the cloudfront distribution link.
-        # We then append the path to generate a qualified url for download.
-        parsed_url = urlparse(args.cloudfrontURL)
-        target_bucket_url = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
-        target_cloudfront_url = urljoin(target_bucket_url, target_bucket_path)
-        print(f"Uploaded file to: {target_cloudfront_url}")
-        if not args.keep:
-            os.remove(bin_zip_filepath)
-    else:
-        print_status_message("'--performUpload' flag not present. Skipping S3 upload procedure...")
-
-    # generate the JSON file that contains information on the git binary. Can
-    #   only do that if we have the url to put into the file.
-    if args.cloudfrontURL:
-        create_bootstrap_config(BOOTSTRAP_CONFIG_FILENAME,
-            src_directory,
-            urljoin(args.cloudfrontURL,os.path.basename(bin_zip_filepath)),
-            bin_zip_file_checksum.hexdigest(),
-            bin_directory_size_in_bytes)
-
-    if args.zipOnly == False:
-        # We may now begin the git phase.
-        # To perform all subsequent git operations, we should do them from the repo directory.
-        os.chdir(clone_directory)
-
-        repo = checkout_git_repo(args)
-        ly_version, create_new_branch = checkout_version_branch(src_directory, repo)
-        stage_files_for_commit(args, clone_directory, src_directory, repo)
-        commit_to_local_repo(args, repo, ly_version, src_directory, bin_directory_size_in_bytes, create_new_branch)
-        push_repo_to_remote(args, repo)
-
-        # Let's return to the original working directory.
-        os.chdir(initial_cwd)
-        clean_up_repo_and_tempfiles(args, repo, abs_gen_root)
-
-    else:
-        print_status_message("'--zipOnly' flag present. Skipping Git commit generation...")
-
-    print_status_message("Lumberyard to Git operation completed successfully.")
-
-
-if __name__ == "__main__":
-    main()
-    sys.exit()

+ 0 - 146
Tools/build/JenkinsScripts/distribution/git_release/build.xml

@@ -1,146 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<project name="package">
-    <target name="SplitZip">
-        <!--The fileset below is the filter for all source files. This task moves
-            all filtered files (aka source) into a separate directory (${GitSrc}), 
-            outside the Lumberyard build zip-extracted location (${ExtractBuild})-->
-        <move todir="${GitSrc}" includeEmptyDirs="yes" verbose="false">
-            <fileset dir="${ExtractBuild}" casesensitive="no">
-                <exclude name="/dev/Gems/GameEffectSystem/**"/>
-                <exclude name="/dev/Gems/Oculus/**"/>
-                <exclude name="/dev/Gems/OpenVR/**"/>
-                <exclude name="/dev/Gems/PBSreferenceMaterials/**"/>
-                <exclude name="/dev/Gems/Substance/**"/>
-                <exclude name="/dev/MultiplayerProject/**"/>
-                <exclude name="/dev/MultiplayerSample/**"/>
-                <exclude name="/dev/SamplesProject/**"/>
-                <exclude name="/dev/Bin64vc142/**"/>
-                <exclude name="/dev/Bin64vc141/**"/>
-                <exclude name="/dev/Bin64/**"/>
-                <!-- BEGIN LYAJAV-395  Prevent packaging of the following redistributables that used to be in Bin64 -->
-                <exclude name="/dev/Tools/Redistributables/ANGLE/**"/>
-                <exclude name="/dev/Tools/Redistributables/D3DCompiler/**"/>
-                <exclude name="/dev/Tools/Redistributables/DbgHelp/**"/>
-                <exclude name="/dev/Tools/Redistributables/FFMpeg/**"/>
-                <exclude name="/dev/Tools/Redistributables/LuaCompiler/**"/>
-                <exclude name="/dev/Tools/Redistributables/MSVC90/**"/>
-                <exclude name="/dev/Tools/Redistributables/OpenGL32/**"/>
-                <exclude name="/dev/Tools/Redistributables/SSLEAY/**"/>
-                <!-- END LYAJAV-395 -->
-                <exclude name="/3rdParty/**"/>
-
-                <include name="**/*.args"/>
-                <include name="**/*.asp"/>
-                <include name="**/*.attribute_layout"/>
-                <include name="**/*.bat"/>
-                <include name="**/*.c"/>
-                <include name="**/*.cbc"/>
-                <include name="**/*.cfg"/>
-                <include name="**/*.chrparams"/>
-                <include name="**/*.cmake"/>
-                <include name="**/*.cmd"/>
-                <include name="**/*.conf"/>
-                <include name="**/*.coffee"/>
-                <include name="**/*.cpp"/>
-                <include name="**/*.cs"/>
-                <include name="**/*.csproj"/>
-                <include name="**/*.decTest"/>
-                <include name="**/*.def"/>
-                <include name="**/*.ent"/>
-                <include name="**/*.ent_template"/>
-                <include name="**/*.env"/>
-                <include name="**/*.exportsettings"/>
-                <include name="**/*.filters"/>
-                <include name="**/*.h"/>
-                <include name="**/*.hpp"/>
-                <include name="**/*.html"/>
-                <include name="**/*.hxx"/>
-                <include name="**/*.ignore"/>
-                <include name="**/*.import"/>
-                <include name="**/*.ini"/>
-                <include name="**/*.inl"/>
-                <include name="**/*.java"/>
-                <include name="**/*.js"/>
-                <include name="**/*.json"/>
-                <include name="**/*.lua"/>
-                <include name="**/*.lua_template"/>
-                <include name="**/*.lyr"/>
-                <include name="**/*.m"/>
-                <include name="**/*.manifest"/>
-                <include name="**/*.md"/>
-                <include name="**/*.mel"/>
-                <include name="**/*.mm"/>
-                <include name="**/*.mk"/>
-                <include name="**/*.ms"/>
-                <include name="**/*.mtl"/>
-                <include name="**/*.njsproj"/>
-                <include name="**/*.npmignore"/>
-                <include name="**/*.p4ignore"/>
-                <include name="**/*.plist"/>
-                <include name="**/*.prefab"/>
-                <include name="**/*.pro"/>
-                <include name="**/*.props"/>
-                <include name="**/*.py"/>
-                <include name="**/*.pyproj"/>
-                <include name="**/*.pys"/>
-                <include name="**/*.pyw"/>
-                <include name="**/*.qml"/>
-                <include name="**/*.qmldir"/>
-                <include name="**/*.qmltypes"/>
-                <include name="**/*.qrc"/>
-                <include name="**/*.qss"/>
-                <include name="**/*.rc"/>
-                <include name="**/*.sct"/>
-                <include name="**/*.scss"/>
-                <include name="**/*.sh"/>
-                <include name="**/*.slice"/>
-                <include name="**/*.sln"/>
-                <include name="**/*.spec"/>
-                <include name="**/*.sql"/>
-                <include name="**/*.targets"/>
-                <include name="**/*.tcl"/>
-                <include name="**/*.tip"/>
-                <include name="**/*.ts"/>
-                <include name="**/*.txt"/>
-                <include name="**/*.TXT"/>
-                <include name="**/*.ui"/>
-                <include name="**/*.uicanvas"/>
-                <include name="**/*.vbs"/>
-                <include name="**/*.vcproj"/>
-                <include name="**/*.vcxproj"/>
-                <include name="**/*.vssscc"/>
-                <include name="**/*.waf_files"/>
-                <include name="**/*.xbm"/>
-                <include name="**/*.xml"/>
-                <include name="**/*.xproj"/>
-                <include name="**/ChangeLog"/>
-                <include name="**/configure"/>
-                <include name="**/DEVEL"/>
-                <include name="**/lmbr_waf"/>
-                <include name="**/makefile"/>
-                <include name="**/makefile.appletv"/>
-                <include name="**/wscript"/>
-                <include name="**/README"/>
-                <include name="**/TODO"/>
-                <include name="**/waf"/>
-            </fileset>
-        </move>
-
-        <!--An Ant trick to delete all empty directories from the extract location, where the binaries reside.
-            The fileset excludes every file, so the delete task has no files to remove; combined with
-            includeemptydirs="true" it deletes only the empty directories (which is what we are really after).-->
-        <delete includeemptydirs="true">
-          <fileset dir="${ExtractBuild}" excludes="**/*" />
-        </delete>
-    </target>
-    <target name="ZipBinaries">
-        <zip destfile="${ZipDest}"
-               basedir="${ZipSrc}"/>
-    </target>
-    <target name="ExtractPackage">
-        <unzip src="${Zipfile}"
-               dest="${ExtractDir}"
-               overwrite="false"/>
-    </target>
-</project>

+ 0 - 1136
Tools/build/JenkinsScripts/distribution/git_release/git_bootstrap.py

@@ -1,1136 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import argparse
-import datetime
-import hashlib
-import json
-import math
-import os
-import Queue
-import re
-import shutil
-import ssl
-import subprocess
-import sys
-import threading
-import time
-import urllib2
-import urlparse
-import zipfile
-from collections import deque
-from distutils import dir_util, file_util, spawn
-from distutils.errors import DistutilsFileError
-
-importDir = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(os.path.join(importDir, ".."))  # Required for AWS_PyTools
-from AWS_PyTools import LyChecksum
-from GitStaging import get_directory_size_in_bytes, URL_KEY, CHECKSUM_KEY, SIZE_KEY, BOOTSTRAP_CONFIG_FILENAME
-
-FETCH_CHUNK_SIZE = 1000000
-CHUNK_FILE_SIZE = 100000000    # 100 million bytes per chunk file
-WORKING_DIR_NAME = "_temp"
-DOWNLOAD_DIR_NAME = "d"
-UNPACK_DIR_NAME = "u"
-
-DOWNLOADER_THREAD_COUNT = 20
-
-# The downloader is intended to be an executable. Typically, executables should have their version baked into the binary.
-# Windows has two variations of versions for a binary file: FileVersion, ProductVersion. Furthermore, we need to import
-# Windows-specific apis to read Windows executable binary versions. Other operating systems have their own versioning scheme.
-# To simplify maintenance, we will simply track the version in the source code itself.
-DOWNLOADER_RELEASE_VERSION = "1.2"
-
-FILESIZE_UNIT_ONE_INCREMENT = 1024
-FILESIZE_UNIT_TWO_INCREMENT = FILESIZE_UNIT_ONE_INCREMENT * FILESIZE_UNIT_ONE_INCREMENT
-FILESIZE_UNIT_THREE_INCREMENT = FILESIZE_UNIT_TWO_INCREMENT * FILESIZE_UNIT_ONE_INCREMENT
-
-HASH_FILE_NAME = "filehashes.json"
-DEFAULT_HASH_FILE_URL = "https://d3dn1rjl3s1m7l.cloudfront.net/default-hash-file/" + HASH_FILE_NAME
-
-TRY_AGAIN_STRING = "Please try again or contact Lumberyard support if you continue to experience issues."
-
-# Returns the size of the file copied
-def safe_file_copy(src_basepath, dst_basepath, cur_file):
-    src_file_path = os.path.join(src_basepath, cur_file)
-    dst_file_path = os.path.join(dst_basepath, cur_file)
-
-    dir_util.mkpath(os.path.dirname(dst_file_path))
-    dst_name, copied = file_util.copy_file(src_file_path, dst_file_path, verbose=0)
-
-    if not copied:
-        raise Exception("Failed to copy {} to {}.".format(src_file_path, dst_file_path))
-
-    return os.path.getsize(dst_name)
-
-
-def _get_input_replace_file(filename):
-
-    def _print_invalid_input_given(response):
-        print 'Your response of "{0}" is not a valid response. Please enter one of the options mentioned.\n'.format(response)
-
-    valid_replace_responses = ['y', 'yes', 'yes all']
-    valid_keep_responses = ['n', 'no', 'no all']
-
-    response_given = None
-    while not response_given:
-        print 'A new version of {0} has been downloaded, but a change to the local file has been detected.'.format(filename)
-        print 'Would you like to replace the file on disk with the new version? ({0})'.format("/".join(valid_replace_responses + valid_keep_responses))
-        print 'Answering "n" will keep the local file with your modifications.'
-        print 'Answering "yes all"/"no all" will assume this answer for all subsequent prompts.'
-        response = raw_input("Replace the file on disk with the new version?  ({0}) ".format("/".join(valid_replace_responses + valid_keep_responses)))
-        print ""
-        normalized_input = None
-        try:
-            normalized_input = response.lower()
-            if normalized_input not in valid_replace_responses and \
-                normalized_input not in valid_keep_responses:
-                _print_invalid_input_given(response)
-            else:
-                valid_response = True
-                response_given = normalized_input
-        except Exception:
-            _print_invalid_input_given(response)
-
-    # we know this is valid input. if it is not a replace response, then it must be a keep
-    return response_given in valid_replace_responses, 'a' in response_given
-
-
-def find_files_to_prompt(args, changed_files, dst_basepath, old_file_hashes):
-    num_files_to_prompt = 0
-    for key in changed_files:
-        # get path to file that is currently on disk
-        existing_file_path = os.path.join(dst_basepath, key)
-        if os.path.exists(existing_file_path):
-            # get the hash of the file on disk
-            file_hash = LyChecksum.getChecksumForSingleFile(existing_file_path, 'rU').hexdigest()
-            # if disk is same as old, replace
-            if file_hash == old_file_hashes[key]:
-                continue
-            # otherwise, ask if keep, replace
-            else:
-                # no blanket --yes/--no answer was given, so this file will need a prompt
-                if not (args.yes or args.no):
-                    num_files_to_prompt += 1
-
-    return num_files_to_prompt
-
-
-def partition_moves_and_skips(args, changed_files, dst_basepath, old_file_hashes):
-    changed_files_to_move = set()
-    changed_files_to_skip = set()
-
-    for key in changed_files:
-        should_move_file = False
-        # get path to file that is currently on disk
-        existing_file_path = os.path.join(dst_basepath, key)
-        if os.path.exists(existing_file_path):
-            # get the hash of the file on disk
-            file_hash = LyChecksum.getChecksumForSingleFile(existing_file_path, 'rU').hexdigest()
-            # if disk is same as old, replace
-            if file_hash == old_file_hashes[key]:
-                should_move_file = True
-            # otherwise, ask if keep, replace
-            else:
-                # assume the answer is to replace
-                if args.yes:
-                    should_move_file = True
-                # assume the answer is to keep
-                elif args.no:
-                    should_move_file = False
-                else:
-                    should_move_file, use_as_assumption = _get_input_replace_file(existing_file_path)
-                    if use_as_assumption and should_move_file:
-                        args.yes = True
-                        print "Marking all subsequent files as files to replace."
-                    elif use_as_assumption and not should_move_file:
-                        args.no = True
-                        print "Marking all subsequent files as files to keep."
-
-        # it was deleted on disk, so it should be safe to move over
-        else:
-            should_move_file = True
-
-        if should_move_file:
-            changed_files_to_move.add(key)
-        else:
-            changed_files_to_skip.add(key)
-
-    return changed_files_to_move, changed_files_to_skip
-
-
-def load_hashlist_from_json(path):
-    file_path = os.path.join(path, HASH_FILE_NAME)
-    hash_list = {}
-    if not os.path.exists(file_path):
-        raise Exception("No hashfile exists at {0}.".format(file_path))
-    with open(file_path, 'rU') as hashfile:
-        hash_list = json.load(hashfile)
-    return hash_list
-
-
-def copy_directory_contents(args, src_basepath, dst_basepath, uncompressed_size):
-    # read in new hashlist
-    new_file_hashes = load_hashlist_from_json(src_basepath)
-
-    # read in old hashlist. We check to make sure it is still on disk before we get here.
-    old_file_hashes = load_hashlist_from_json(dst_basepath)
-
-    num_files_in_new = len(new_file_hashes.keys())
-    print "There are {0} files in the new zip file.\n".format(num_files_in_new)
-
-    old_file_hashes_keys = set(old_file_hashes.keys())
-    new_file_hashes_keys = set(new_file_hashes.keys())
-
-    changed_files = old_file_hashes_keys & new_file_hashes_keys # '&' operator finds intersection between sets
-    deleted_files = set()
-    added_files = set()
-    missing_files = set()
-    identical_hashes = set()
-    changed_files_to_move = set()
-    changed_files_to_skip = set()
-
-    identical_files_size_total = 0
-
-    # let's get rid of files that have the same hash, as we don't care about them
-    #   skip if the same
-    for key in changed_files:
-        # if the file doesn't exist on disk, treat it as an add, regardless of whether the filelists have diff hashes
-        if not os.path.exists(os.path.join(dst_basepath, key)):
-            missing_files.add(key)
-        # if the file is on disk, and the hashes in the filelists are the same, there is no action to take, so record the progress
-        elif old_file_hashes[key] == new_file_hashes[key]:
-            identical_files_size_total += os.path.getsize(os.path.join(src_basepath, key))
-            del old_file_hashes[key]
-            del new_file_hashes[key]
-            identical_hashes.add(key)
-
-    # now that we cleared all of the identical hashes, if a file doesn't
-    #   exist in the intersection, it is an add or delete, depending on
-    #   the source hash list
-    deleted_files = old_file_hashes_keys.difference(changed_files)
-    added_files = missing_files.union(new_file_hashes_keys.difference(changed_files))
-
-    # can't remove from the set being iterated over, so get the difference between
-    #   identical hashes and changed hashes and save it back to the changed set
-    changed_files = changed_files.difference(identical_hashes.union(missing_files))
-
-    total_keys = len(old_file_hashes_keys | new_file_hashes_keys)
-    keys_across_all_sets = len(changed_files | deleted_files | added_files | missing_files | identical_hashes)
-    if total_keys != keys_across_all_sets:
-        raise Exception("Not all keys caught in the resulting sets.")
-
-    print "Finding files with conflicts."
-    # figure out how many files there are to prompt about
-    num_files_to_prompt = find_files_to_prompt(args, changed_files, dst_basepath, old_file_hashes)
-    print "There are {0} files with conflicts that need to be asked about.\n".format(num_files_to_prompt)
-
-    # split the files into moves and skips, and ask customers about files with any conflicts
-    changed_files_to_move, changed_files_to_skip = partition_moves_and_skips(args, changed_files, dst_basepath, old_file_hashes)
-
-
-    # find the total size for all the skipped files
-    skipped_files_size_total = 0
-    for key in changed_files_to_skip:
-        skipped_files_size_total += os.path.getsize(os.path.join(src_basepath, key))
-
-    move_progress_meter = ProgressMeter()
-    move_progress_meter.action_label = "Moving"
-    move_progress_meter.target = float(uncompressed_size)
-    move_progress_meter.report_eta = False
-    move_progress_meter.report_speed = False
-    move_progress_meter.start()
-
-    # initialize the meter with the size of the files not being moved either due to being skipped, or being identical
-    move_progress_meter.record_progress(identical_files_size_total + skipped_files_size_total)
-
-    # if in new but not old, keep - it was added
-    # also move files that were changed that should be moved
-    num_files_moved = 0
-    for key in added_files.union(changed_files_to_move):
-        dest_file_size = safe_file_copy(src_basepath, dst_basepath, key)
-        move_progress_meter.record_progress(dest_file_size)
-        num_files_moved += 1
-
-    #   if in old but not new, it was deleted. compare against disk
-    num_files_deleted = 0
-    for key in deleted_files:
-        # get path to file that is currently on disk
-        existing_file_path = os.path.join(dst_basepath, key)
-        if os.path.exists(existing_file_path):
-            # get the hash of the file on disk
-            file_hash = LyChecksum.getChecksumForSingleFile(existing_file_path, 'rU').hexdigest()
-            # if disk is same as old, deleted, otherwise, we keep the file that is there.
-            # not tracked against the progress, as removes are not counted
-            #   against the total (the uncompressed size of the zip)
-            if file_hash == old_file_hashes[key]:
-                os.remove(existing_file_path)
-                num_files_deleted += 1
-
-    # move new hashfile over
-    dest_file_size = safe_file_copy(src_basepath, dst_basepath, HASH_FILE_NAME)
-    move_progress_meter.record_progress(dest_file_size)
-
-    move_progress_meter.stop()
-
-    print "{0}/{1} new files were moved".format(num_files_moved, num_files_in_new)
-    print "{0}/{1} files were removed".format(num_files_deleted, len(deleted_files))
-
-def get_default_hashlist(args, dst_basepath, working_dir_path):
-    #   acquire default hashlist
-    default_hashlist_url = DEFAULT_HASH_FILE_URL
-    if args.overrideDefaultHashfileURL is not None:
-        default_hashlist_url = args.overrideDefaultHashfileURL
-
-    with Downloader(DOWNLOADER_THREAD_COUNT) as downloader:
-        dest = os.path.join(working_dir_path, HASH_FILE_NAME)
-        print "Downloading files from url {0} to {1}"\
-            .format(default_hashlist_url, dest)
-        try:
-            files = downloader.download_file(default_hashlist_url, dest, 0, True, True)
-        finally:
-            downloader.close()
-    if not files:
-        raise Exception("Failed to finish downloading {0} after a few retries."
-                        .format(HASH_FILE_NAME))
-    # now that we have the hashlist, move it to the root of the local repo
-    safe_file_copy(working_dir_path, dst_basepath, HASH_FILE_NAME)
-    os.remove(dest)
-
-
-def is_url(potential_url):
-    return potential_url.startswith('https')
-
-
-def create_ssl_context():
-    ciphers_to_remove = ["RC4", "DES", "PSK", "MD5", "IDEA", "SRP", "DH", "DSS", "SEED", "3DES"]
-    cipher_string = ssl._DEFAULT_CIPHERS + ":"
-    for idx in range(len(ciphers_to_remove)):
-        # create the cipher string to permanently remove all of these ciphers,
-        #   based on the format documented at
-        #   https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
-        cipher_string += "!{}".format(ciphers_to_remove[idx])
-        if idx < len(ciphers_to_remove) - 1:
-            cipher_string += ":"    # ":" is the delimiter
-
-    ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
-    ssl_context.set_ciphers(cipher_string)
-
-    ssl_context.verify_mode = ssl.CERT_REQUIRED
-    # I can't find a way to load CRL
-    # ssl_context.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
-
-    return ssl_context
-
-
-#
-# Disk space
-#
-def get_free_disk_space(dir_name):
-    # Get the remaining space on the drive that the given directory is on
-    import platform
-    import ctypes
-    if platform.system() == 'Windows':
-        free_bytes = ctypes.c_ulonglong(0)
-        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(dir_name), None, None, ctypes.pointer(free_bytes))
-        return free_bytes.value
-    else:
-        st = os.statvfs(dir_name)
-        return st.f_bavail * st.f_frsize
-
-
-#
-# Checksum
-#
-def get_checksum_for_multi_file(multi_file):
-    block_size = 65536
-    fileset_hash = hashlib.sha512()
-    buf = multi_file.read(block_size)
-    while len(buf) > 0:
-        fileset_hash.update(buf)
-        buf = multi_file.read(block_size)
-    return fileset_hash
-
-
-def get_zip_info_from_json(zip_descriptor):
-    try:
-        url = zip_descriptor[URL_KEY]
-
-        checksum = zip_descriptor[CHECKSUM_KEY]
-        if not LyChecksum.is_valid_hash_sha512(checksum):
-            raise Exception("The checksum found in the config file is not a valid SHA512 checksum.")
-
-        size = zip_descriptor[SIZE_KEY]
-        if not size > 0:
-            raise Exception("The uncompressed size mentioned in the config file is "
-                            "a value less than, or equal to zero.")
-    except KeyError as missingKey:
-        print "There is a key, value pair missing from the bootstrap configuration file."
-        print "Error: {0}".format(missingKey)
-        raise missingKey
-    except Exception:
-        raise
-    return url, checksum, size
-
-
-def get_info_from_bootstrap_config(config_filepath):
-    zip_descriptor = {}
-    if not os.path.exists(config_filepath):
-        raise Exception("Could not find bootstrap config file at the root of the repository ({0}). "
-                        "Please sync this file from the repository again."
-                        .format(config_filepath))
-    with open(config_filepath, 'rU') as config_file:
-        zip_descriptor = json.load(config_file)
-    try:
-        url, checksum, size = get_zip_info_from_json(zip_descriptor)
-    except Exception:
-        raise
-
-    return url, checksum, size
-
-
-#
-# Args
-#
-def create_args():
-    parser = argparse.ArgumentParser(description="Downloads required files relevant to the repository HEAD "
-                                                 "to complete Lumberyard setup via Git.")
-    parser.add_argument('--rootDir',
-                        default=os.path.dirname(os.path.abspath(__file__)),
-                        help="The location of the root of the repository.")
-    parser.add_argument('--pathToGit',
-                        default=spawn.find_executable("git"),
-                        help="The location of the git executable. Git is assumed to be in your path if "
-                             "this argument is not provided.")
-    parser.add_argument('-k', '--keep',
-                        default=False,
-                        action='store_true',
-                        help='Keep downloaded files around after download finishes. (default False)')
-    parser.add_argument('-c', '--clean',
-                        default=False,
-                        action='store_true',
-                        help='Remove any temp files before proceeding. (default False)')
-    parser.add_argument('-v', '--verbose',
-                        default=False,
-                        action='store_true',
-                        help='Enables logging messages. (default False)')
-    parser.add_argument('--version',
-                        default=False,
-                        action='store_true',
-                        help='Print application version')
-    parser.add_argument('-s', '--skipWarning',
-                        default=False,
-                        action='store_true',
-                        help='Skip all warnings produced. (default False)')
-    # If specified, download the hashfile from the given location
-    parser.add_argument('--overrideDefaultHashfileURL',
-                        default=None,
-                        help=argparse.SUPPRESS)
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument('-y', "--yes",
-                        default=False,
-                        action='store_true',
-                        help='Will automatically answer "yes" to all files being asked to be overwritten. Only specify one of either --yes or --no. (default False)')
-    group.add_argument('-n', "--no",
-                        default=False,
-                        action='store_true',
-                        help='Will automatically answer "no" to all files being asked to be overwritten. Only specify one of either --yes or --no. (default False)')
-
-    args, unknown = parser.parse_known_args()
-    return args
-
-
-def validate_args(args):
-    if args.version:
-        print DOWNLOADER_RELEASE_VERSION
-        sys.exit(0)
-
-    assert (os.path.exists(args.rootDir)), "The root directory specified (%r) does not exist." % args.rootDir
-
-    # check to make sure git exists either from the path or user specified location
-    if args.pathToGit is None:
-        raise Exception("Cannot find Git in your environment path. This script requires Git to be installed.")
-    else:
-        if os.path.isfile(args.pathToGit) is False:
-            raise Exception("The path to Git provided does not exist.")
-
-
-class ProgressMeter:
-    def __init__(self):
-        self.event = None
-        self.worker = None
-
-        self.lock = threading.Lock()
-        self.startTime = 0
-        self.rateSamples = deque()
-
-        self.action_label = ""
-        self.target = 0
-        self.progress = 0
-
-        self.report_eta = True
-        self.report_speed = True
-        self.report_target = True
-
-        self.report_bar = True
-        self.report_bar_width = 10
-
-        self.prev_line_length = 0
-
-        self.spinner_frames = ["|", "/", "-", "\\"]
-        self.curr_spinner_frame = 0
-
-    @staticmethod
-    def meter_worker(meter, event):
-        while not event.is_set():
-            try:
-                meter.report_progress()
-                time.sleep(0.25)
-            except Exception:
-                pass
-
-    def add_target(self, i):
-        self.lock.acquire()
-        try:
-            self.target += i
-        finally:
-            self.lock.release()
-
-    def record_progress(self, i):
-        self.lock.acquire()
-        try:
-            self.progress += i
-        finally:
-            self.lock.release()
-
-    def reset(self):
-        self.startTime = 0
-        self.rateSamples = deque()
-
-        self.action_label = ""
-        self.target = 0
-        self.progress = 0
-
-        self.report_eta = True
-        self.report_speed = True
-        self.report_target = True
-
-        self.report_bar = True
-        self.report_bar_width = 10
-
-        self.prev_line_length = 0
-        self.curr_spinner_frame = 0
-
-    def start(self):
-        self.event = threading.Event()
-        self.worker = threading.Thread(target=self.meter_worker, args=(self, self.event))
-        self.worker.setDaemon(True)
-        self.worker.start()
-        self.startTime = time.clock()
-
-    # Set up so the class can be used in a with statement and cleans up automatically
-    def __enter__(self):
-        self.start()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        # Make sure the thread stops
-        self.stop()
-
-    def __del__(self):
-        if self.event:
-            self.event.set()
-
-    def stop(self):
-        self.report_progress()        # Final progress report, to show completion
-        self.event.set()
-        print ""                      # Set a new line from all other print operations
-
-    def build_report_str(self, percent_complete, rate, eta):
-        # Build output report string
-        output_str = "{}".format(self.action_label)
-
-        if self.report_target is True:
-            output_str += " {:4.2f} GB".format(float(self.target) / FILESIZE_UNIT_THREE_INCREMENT)
-
-        if self.report_speed is True:
-            output_str += " @ {:5.2f} MB/s".format(rate / FILESIZE_UNIT_TWO_INCREMENT)
-
-        if self.report_bar is True:
-            percent_per_width = 100.0 / self.report_bar_width
-            current_bar_width = percent_complete * 100.0 / percent_per_width
-            current_bar_width = int(math.floor(current_bar_width))
-            remaining_width = self.report_bar_width - current_bar_width
-
-            curr_spinner_icon = ""
-            if remaining_width != 0:
-                curr_spinner_icon = self.spinner_frames[self.curr_spinner_frame]
-
-            output_str += " [" + ("=" * current_bar_width) + curr_spinner_icon + (" " * (remaining_width - 1)) + "]"
-
-        output_str += " {:.0%} complete.".format(percent_complete)
-
-        if self.report_eta is True:
-            output_str += " ETA {}.".format(str(datetime.timedelta(seconds=eta)))
-
-        return output_str
-
-    def report_progress(self):
-        self.lock.acquire()
-        try:
-            if self.target == 0:
-                percent_complete = 1.0
-            else:
-                percent_complete = self.progress * 1.0 / self.target
-
-            self.rateSamples.append([self.progress, time.clock()])
-            # We only keep 40 samples, about 10 seconds worth
-            if len(self.rateSamples) > 40:
-                self.rateSamples.popleft()
-            if len(self.rateSamples) < 2:
-                rate = 0.0
-            else:
-                # Calculate rate from oldest sample and newest sample.
-                span = float(self.rateSamples[-1][0] - self.rateSamples[0][0])
-                duration = self.rateSamples[-1][1] - self.rateSamples[0][1]
-                rate = span / duration
-
-            if percent_complete == 1.0:
-                eta = 0
-            elif rate == 0.0:
-                eta = 100000
-            else:
-                eta = int((self.target - self.progress) / rate)
-
-            self.curr_spinner_frame = (self.curr_spinner_frame + 1) % len(self.spinner_frames)
-            output_str = self.build_report_str(percent_complete, rate, eta)
-
-            # Calculate the delta of prev and curr line length to clear
-            curr_line_length = len(output_str)
-            line_len_delta = max(self.prev_line_length - curr_line_length, 0)
-
-            # Extra spaces added to the end of the string to clear the unused buffer of previous write
-            sys.stdout.write("\r" + output_str + " " * line_len_delta) # \r placed at the beginning to play nice with PyCharm.
-            sys.stdout.flush()
-            self.prev_line_length = curr_line_length
-
-        except Exception as e:
-            print "Exception: ", e
-            sys.stdout.flush()
-        finally:
-            self.lock.release()
-
-
-class Downloader:
-    meter = ProgressMeter()
-    download_queue = Queue.Queue()
-    max_worker_threads = 1
-    max_retries = 3
-    timeout = 5     # in seconds.
-    event = None
-
-    def __init__(self, max_threads=1, max_retries=3):
-        self.max_worker_threads = max_threads
-        self.retries = max_retries
-        self.event = threading.Event()
-
-        # preallocate the worker threads.
-        for i in range(self.max_worker_threads):
-            worker = threading.Thread(target=self.download_chunk_file, args=(self.download_queue, self.event))
-            worker.daemon = True
-            worker.start()
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        # Make sure the threads stop
-        self.event.set()
-
-    def __del__(self):
-        self.event.set()
-
-    def close(self):
-        self.event.set()
-
-    def download_chunk(self):
-        pass
-
-    def download_chunk_segments(self, start, end, file_path, url, exit_event):
-        # Set up so that we can resume a download that was interrupted
-        try:
-            existing_size = os.path.getsize(file_path)
-        except os.error:
-            # The chunk file does not exist yet, so start the download from byte zero.
-            existing_size = 0
-
-        # offset by the size of the already downloaded file so we can resume
-        start = start + existing_size
-
-        # if the existing size of the file matches the expected size, then we already have the file, so skip it.
-        if existing_size != min((end - start)+1, CHUNK_FILE_SIZE):
-            segments = int(math.ceil(float((end-start)+1)/float(FETCH_CHUNK_SIZE)))
-
-            with open(file_path, 'ab') as chunk_file:
-                for segment in range(segments):
-                    # check for the exit event
-                    if exit_event.is_set():
-                        break
-
-                    segment_start = start + (segment * FETCH_CHUNK_SIZE)
-                    segment_end = min(end, (segment_start + FETCH_CHUNK_SIZE) - 1)
-                    byte_range = '{}-{}'.format(segment_start, segment_end)
-                    chunk_content_read_size = 10000
-                    try:
-                        request_result = urllib2.urlopen(
-                            urllib2.Request(url, headers={'Range': 'bytes=%s' % byte_range}), timeout=self.timeout)
-                        # Result codes 206 and 200 are both considered successes
-                        if not (request_result.getcode() == 206 or request_result.getcode() == 200):
-                            raise Exception("URL Request did not succeed. Error code: {}"
-                                            .format(request_result.getcode()))
-                        while True:
-                            data = request_result.read(chunk_content_read_size)
-                            if exit_event.is_set() or not data:
-                                break
-                            self.meter.record_progress(len(data))
-                            chunk_file.write(data)
-                            chunk_file.flush()
-                    except Exception:
-                        raise
-
-    # Helper thread worker for Downloader class
-    def download_chunk_file(self, queue, exit_event):
-        while not exit_event.is_set():
-            try:
-                job = queue.get(timeout=1)
-                try:
-                    start = job['start']
-                    end = job['end']
-                    file_path = job['file']
-                    url = job['url']
-                    for i in range(self.max_retries):
-                        if exit_event.is_set():
-                            break
-                        try:
-                            self.download_chunk_segments(start, end, file_path, url, exit_event)
-                        except Exception:
-                            # if the try throws, we retry, so ignore
-                            pass
-                        else:
-                            break
-                    else:
-                        raise Exception("GET Request for {} failed after retries. Site down or network disconnected?"
-                                        .format(file_path))
-                finally:
-                    queue.task_done()
-            except Exception:
-                # No jobs in the queue. Don't error, but don't block on it. Otherwise,
-                # the daemon thread can't quit when the event is set
-                pass
-
-    def simple_download(self, url, dest):
-        self.meter.reset()
-        self.meter.action_label = "Downloading"
-        self.meter.start()
-        request_result = urllib2.urlopen(urllib2.Request(url), timeout=self.timeout)
-        if request_result.getcode() != 200:
-            raise ValueError('HEAD Request failed.', request_result.getcode())
-        with open(dest, 'wb') as download_file:
-            data = request_result.read()
-            if data:
-                self.meter.record_progress(len(data))
-                download_file.write(data)
-                download_file.flush()
-        self.meter.stop()
-        self.meter.reset()
-
-    def download_file(self, url, dest, expected_uncompressed_size, force_simple=False, suppress_suffix=False):
-        start_time = time.clock()
-
-        # ssl tests
-        ssl_context = create_ssl_context()
-        for i in range(self.max_retries):
-            try:
-                request_result = urllib2.urlopen(urllib2.Request(url), timeout=10, context=ssl_context)
-                # should not hard code this... pass this to an error handling function to figure out what to do
-                if request_result.getcode() != 200:
-                    raise ValueError('HEAD Request failed.', request_result.getcode())
-
-            except ssl.SSLError as ssl_error:
-                raise Exception("SSL ERROR: Type: {0}, Library: {1}, Reason: {2}."
-                                .format(type(ssl_error), ssl_error.library, ssl_error.reason))
-
-            except ssl.CertificateError:
-                raise
-
-            except urllib2.HTTPError:
-                raise
-
-            except urllib2.URLError as e:
-                if isinstance(e.reason, ssl.SSLError):
-                    # raise the SSLError exception we encountered and stop downloading
-                    raise e.reason
-                pass    # we'll ignore the other URLErrors for now, it'll be caught in the else statement below
-
-            except Exception as e:
-                import traceback
-                print "Generic exception caught: " + traceback.format_exc()
-                print str(e)
-                pass    # we'll ignore the error now. Might want to put this into a "most recent error" var for later
-
-            else:
-                break   # we got the result, so no need to loop further
-
-        else:
-            # we went through the loop without getting a result. figure out what the errors were and report it upwards
-            raise Exception('HEAD Request failed after retries. Site down or network disconnected?')
-
-        file_size = int(request_result.headers.getheader('content-length'))
-        # check disk to see if there is enough space for the compressed file and the uncompressed file
-        remaining_disk_space = get_free_disk_space(os.path.dirname(dest))
-        operation_required_size = file_size + expected_uncompressed_size
-        if operation_required_size > remaining_disk_space:
-            raise Exception("There is not enough space on disk ({}) to perform the operation. "
-                            "Please make sure that {}GB of free space is available then try again."
-                            .format(dest, operation_required_size
-                                    / FILESIZE_UNIT_THREE_INCREMENT))
-
-        # We may be re-running the script from a previous attempt where we have already partially downloaded some files.
-        # Calculate the actual amount to be downloaded.
-        dest_directory = os.path.dirname(os.path.abspath(dest))
-        dest_byte_size = get_directory_size_in_bytes(os.path.abspath(dest_directory))
-        self.meter.add_target(file_size)
-        self.meter.record_progress(dest_byte_size)
-
-        ranges_available = request_result.headers.getheader('accept-ranges')
-        if ranges_available != 'bytes' or force_simple is True:
-            # download without using ranges
-            download_dest = dest
-            if not suppress_suffix:
-                download_dest += ".000"
-            self.simple_download(url, download_dest)
-            return download_dest
-        else:
-            # We have byte ranges, so we can download in chunks in
-            # parallel. We download into multiple files, which we
-            # will recombine with the file inputs function to pass
-            # into the unzip function later.
-            # This allows a clean resume with parallel gets from
-            # different parts of the overall range.
-
-            chunk_files = int(math.ceil(float(file_size) / float(CHUNK_FILE_SIZE)))
-            # break into a collection of <chunkFiles> files
-            file_list = ["{}.{:04d}".format(dest, x) for x in range(chunk_files)]
-            files = [{'start': x * CHUNK_FILE_SIZE,
-                      'end': min(((x+1) * CHUNK_FILE_SIZE) - 1, file_size - 1),
-                      'file': "{}.{:04d}".format(dest, x),
-                      'url': url} for x in range(chunk_files)]
-
-            for entry in files:
-                self.download_queue.put(entry)
-
-            self.meter.action_label = "Downloading"
-            self.meter.start()
-
-            while self.download_queue.unfinished_tasks:
-                time.sleep(0.1)
-
-            # double check all tasks are completed
-            self.download_queue.join()
-
-            self.meter.stop()
-
-            if self.meter.progress < self.meter.target:
-                print_str = "Download failed. Check network and retry. Elapsed time {}"\
-                    .format(str(datetime.timedelta(seconds=time.clock() - start_time)))
-                return_list = []
-            else:
-                print_str = "Finished. Elapsed time {}"\
-                    .format(str(datetime.timedelta(seconds=time.clock()-start_time)))
-                return_list = file_list
-
-            print print_str
-            sys.stdout.flush()
-            return return_list
-
-
-# Class to treat a collection of chunk files as a single larger file.
-# We use this to unzip the chunk files as a single file.
-# This is a minimal implementation, as required by the zipFile handle.
-# This essentially supports only seeking and reading.
-# Minimal error processing is present here. Probably needs some more
-# to deal with ill formed input files. Right now we just assume
-# errors thrown by the underlying system will be the right ones.
-class MultiFile:
-    fileList = []
-    fileSizes = []
-    fileOffsets = []
-    fileSize = 0
-    current_file = 0
-
-    def __init__(self, files, mode):
-        self.fileList = files
-        self.mode = mode
-        # Rebind the bookkeeping attributes per instance so the class-level
-        # defaults are not shared (and mutated) across MultiFile instances.
-        self.fileSizes = []
-        self.fileOffsets = []
-        self.fileSize = 0
-        self.current_file = 0
-        for f in files:
-            self.fileSizes.append(os.path.getsize(f))
-            self.fileOffsets.append(self.fileSize)
-            self.fileSize += self.fileSizes[-1]
-        try:
-            self.cfp = open(self.fileList[0], self.mode)
-        except Exception:
-            raise
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        pass
-
-    def seek(self, offset, w=0):
-        cursor = self.tell()
-        if w == os.SEEK_SET:
-            cursor = offset
-        elif w == os.SEEK_CUR:
-            cursor += offset
-        elif w == os.SEEK_END:
-            cursor = self.fileSize + offset
-
-        # Determine which file we are in now, and do the local seek
-        current_pos = cursor
-        local_curr_file = 0
-        for i in range(len(self.fileSizes) - 1):
-            if current_pos < self.fileSizes[i]:
-                local_curr_file = i
-                break
-            current_pos -= self.fileSizes[i]
-        else:
-            local_curr_file = len(self.fileSizes) - 1
-
-        if self.current_file != local_curr_file:
-            self.current_file = local_curr_file
-            self.cfp.close()
-            self.cfp = open(self.fileList[self.current_file], self.mode)
-        self.cfp.seek(current_pos, 0)
-
-    def close(self):
-        self.cfp.close()
-
-    def tell(self):
-        cpos = self.cfp.tell()
-        cursor = self.fileOffsets[self.current_file] + cpos
-        return cursor
-
-    def read(self, size=None):
-        if size is None:
-            size = self.fileSize - self.tell()
-        block = self.cfp.read(size)
-        remaining = size-len(block)
-        # Keep reading if there is size to read remaining, and we are
-        # not yet already reading the last file (and may have gotten EOF)
-        while remaining > 0 and self.current_file < len(self.fileList)-1:
-            # Switch to next file
-            self.cfp.close()
-            self.current_file += 1
-            self.cfp = open(self.fileList[self.current_file], self.mode)
-            nblock = self.cfp.read(remaining)
-            block += nblock
-            remaining -= len(nblock)
-        return block
-
-
-def main():
-    try:
-        args = create_args()
-        validate_args(args)
-        abs_root_dir = os.path.abspath(args.rootDir)
-        remove_downloaded_files = False
-        script_succeed = False
-
-        if args.skipWarning is False:
-            print "Now completing your Lumberyard setup."
-            print "This downloads essential content not included in the Git repository."
-            print "If you've made any changes, please back them up before running this."
-            print "Press Enter to continue (Ctrl+C to cancel at anytime)..."
-            sys.stdout.flush()
-
-            # blocks until user presses the Enter key
-            raw_input()
-
-        # As of 1.8, the longest file path relative to root of the zip is 151,
-        # giving 104 chars before the windows path limit. Set the max working dir
-        # length to 60 to have some wiggle room.
-        max_working_dir_len = 60
-        # working dir should be rootDir/working_dir_name. if that is too long, try
-        #   using %TEMP%/working_dir_name
-        working_dir_path = os.path.join(abs_root_dir, WORKING_DIR_NAME)
-        if len(working_dir_path) > max_working_dir_len:
-            # switch to using default temp dir
-            working_dir_path = os.path.join(os.path.expandvars("%TEMP%"), WORKING_DIR_NAME)
-        unpack_dir_path = os.path.join(working_dir_path, UNPACK_DIR_NAME)
-
-        # Remove any pre-downloaded files, if necessary.
-        if args.clean and os.path.exists(working_dir_path):
-            shutil.rmtree(working_dir_path)
-
-        if not os.path.exists(working_dir_path):
-            os.makedirs(working_dir_path)
-
-        # check for old hashlist
-        old_hash_file_path = os.path.join(abs_root_dir, HASH_FILE_NAME)
-        if not os.path.exists(old_hash_file_path):
-            get_default_hashlist(args, abs_root_dir, working_dir_path)
-
-        try:
-            try:
-                bootstrap_config_file = os.path.join(abs_root_dir, BOOTSTRAP_CONFIG_FILENAME)
-                download_url, expected_checksum, uncompressed_size = get_info_from_bootstrap_config(bootstrap_config_file)
-                download_file_name = os.path.basename(urlparse.urlparse(download_url)[2])
-            except Exception:
-                raise
-
-            # check remaining disk space of destination against the uncompressed size
-            remaining_disk_space = get_free_disk_space(abs_root_dir)
-            if not uncompressed_size < remaining_disk_space:
-                raise Exception("There is not enough space on disk ({}) for the extra files. "
-                                "Please make sure that {}GB of free space is available then try again."
-                                .format(abs_root_dir, uncompressed_size / FILESIZE_UNIT_THREE_INCREMENT))
-
-            # now check against the disk where we are doing the work
-            remaining_disk_space = get_free_disk_space(working_dir_path)
-            if not uncompressed_size < remaining_disk_space:
-                raise Exception("There is not enough space on disk ({}) to perform the operation. "
-                                "Please make sure that {}GB of free space is available then try again."
-                                .format(working_dir_path, uncompressed_size / FILESIZE_UNIT_THREE_INCREMENT))
-
-            # download the file, with 20 threads!
-            try:
-                with Downloader(DOWNLOADER_THREAD_COUNT) as downloader:
-                    download_dir_path = os.path.join(working_dir_path, DOWNLOAD_DIR_NAME)
-                    if not os.path.exists(download_dir_path):
-                        os.mkdir(download_dir_path)
-                    dest = os.path.join(download_dir_path, download_file_name)
-
-                    print "Downloading files from url {0} to {1}"\
-                        .format(download_url, dest)
-                    files = downloader.download_file(download_url, dest, uncompressed_size)
-            except Exception:
-                downloader.close()
-                raise
-
-            # if the download failed...
-            if not files:
-                raise Exception("Failed to finish downloading {0} after a few retries."
-                                .format(download_file_name))
-
-            # make the downloaded parts a single file
-            multi_file_zip = MultiFile(files, 'rb')
-
-            # check downloaded file against checksum
-            print "Checking downloaded contents' checksum."
-            downloaded_file_checksum = get_checksum_for_multi_file(multi_file_zip)
-            readable_checksum = downloaded_file_checksum.hexdigest()
-            if readable_checksum != expected_checksum:
-                remove_downloaded_files = True
-                raise Exception("The checksum of the downloaded file does not match the expected checksum. ")
-
-            # check if unpack directory exists. clear it if it does.
-            delete_existing_attempts = 0
-            delete_success = False
-            delete_attempts_max = 3
-            if os.path.exists(unpack_dir_path):
-                while not delete_success and delete_existing_attempts < delete_attempts_max:
-                    try:
-                        shutil.rmtree(unpack_dir_path)
-                    except (shutil.Error, WindowsError, DistutilsFileError) as removeError:
-                        delete_existing_attempts += 1
-                        if delete_existing_attempts >= delete_attempts_max:
-                            raise removeError
-                        print ("{0}: {1}").format(type(removeError).__name__, removeError)
-                        print ("Failed to remove files that already existed at {} before unpacking. Please ensure the files"
-                               " are deletable by closing related applications (such as Asset Processor, "
-                               "and the Lumberyard Editor), then try running this program again.").format(unpack_dir_path)
-                        raw_input("Press ENTER to retry...")
-                    except Exception:
-                        raise
-                    else:
-                        delete_success = True
-            os.mkdir(unpack_dir_path)
-
-            # unpack file to temp directory.
-            zip_file = zipfile.ZipFile(multi_file_zip, allowZip64=True)
-            try:
-                print "Extracting all files from {0} to {1}".format(download_file_name, unpack_dir_path)
-
-                extract_progress_meter = ProgressMeter()
-                extract_progress_meter.action_label = "Extracting"
-                extract_progress_meter.target = float(uncompressed_size)
-                extract_progress_meter.report_eta = False
-                extract_progress_meter.report_speed = False
-
-                extract_progress_meter.start()
-
-                zip_file_info = zip_file.infolist()
-
-                for file_path in zip_file_info:
-                    zip_file.extract(file_path, path=unpack_dir_path)
-                    extract_progress_meter.record_progress(file_path.file_size)
-
-                extract_progress_meter.stop()
-
-            except Exception:
-                raise Exception("Failed to extract files from {0}. ".format(files))
-            finally:
-                zip_file.close()
-                multi_file_zip.close()
-
-            num_unpacked_files = 0
-            for root, dirs, files in os.walk(unpack_dir_path):
-                num_unpacked_files += len(files)
-
-            # move temp to
-            print "Moving zip contents to final location."
-            copy_directory_contents(args, unpack_dir_path, abs_root_dir, uncompressed_size)
-
-        except (shutil.Error, WindowsError, DistutilsFileError) as removeError:
-            print ("{0}: {1}").format(type(removeError).__name__, removeError)
-            print ("Failed to remove files that already existed at {} before unpacking. Please ensure the files are"
-                   " deletable by closing related applications (such as Asset Processor, and the Lumberyard"
-                   " Editor), then try running this program again.").format(abs_root_dir)
-            script_succeed = False
-
-        except Exception as e:
-            print ("Failed to finish acquiring needed files: {} " + TRY_AGAIN_STRING).format(e)
-            script_succeed = False
-
-        else:
-            remove_downloaded_files = True
-            script_succeed = True
-
-        finally:
-            # clean up temp dir
-            if not args.keep:
-
-                if remove_downloaded_files and os.path.exists(working_dir_path):
-                    # printing a new line to separate this from the other log output
-                    print ("\nCleaning up temp files")
-                    shutil.rmtree(working_dir_path)
-                elif os.path.exists(unpack_dir_path):
-                    # printing a new line to separate this from the other log output
-                    print ("\nCleaning up temp files")
-                    shutil.rmtree(unpack_dir_path)
-
-    except KeyboardInterrupt:
-        print ("\nOperation aborted. Please perform manual cleanup, or re-run git_bootstrap.exe.\n\n")
-        sys.stdout.flush()
-        sys.exit(0)
-
-if __name__ == "__main__":
-    main()

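For context on what this removal takes away: the script above fetched the release archive with resumable HTTP byte-range requests split across chunk files. Below is a minimal, hypothetical Python 3 sketch of that general idea (placeholder URL handling, simplified errors, no threading or progress meter); it is an illustration of the technique, not the removed implementation.

```python
import math
import urllib.request

CHUNK_SIZE = 100_000_000  # bytes per chunk file; mirrors the removed CHUNK_FILE_SIZE constant

def plan_chunks(url):
    # Ask the server for the total size and whether it honors Range requests.
    head = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(head, timeout=10) as resp:
        size = int(resp.headers["Content-Length"])
        ranged = resp.headers.get("Accept-Ranges") == "bytes"
    if not ranged:
        return size, [(0, size - 1)]  # fall back to one sequential download
    count = int(math.ceil(size / CHUNK_SIZE))
    return size, [(i * CHUNK_SIZE, min((i + 1) * CHUNK_SIZE, size) - 1)
                  for i in range(count)]

def fetch_chunk(url, start, end, dest_path):
    # Each chunk is requested with a Range header and written to its own file,
    # so an interrupted run only needs to re-fetch the chunks that are missing.
    req = urllib.request.Request(url, headers={"Range": "bytes={}-{}".format(start, end)})
    with urllib.request.urlopen(req, timeout=10) as resp:
        with open(dest_path, "wb") as out:
            out.write(resp.read())
```

The removed script layered retries, checksum verification, a progress meter, and a pool of worker threads pulling chunk jobs from a queue on top of this basic idea.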
+ 0 - 208
Tools/build/JenkinsScripts/distribution/git_release/git_bootstrap_test.py

@@ -1,208 +0,0 @@
-#
-# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-# its licensors.
-#
-# For complete copyright and license terms please see the LICENSE at the root of this
-# distribution (the "License"). All use of this software is governed by the License,
-# or, if provided, by the license below or the license accompanying this file. Do not
-# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#
-
-import bin_download
-import contextlib
-import io
-import os
-import shutil
-import ssl
-import sys
-import unittest
-
-
-@contextlib.contextmanager
-def no_stdout():
-    save_stdout = sys.stdout
-    sys.stdout = io.BytesIO()
-
-    try:
-        yield
-    finally:
-        sys.stdout = save_stdout
-
-
-class BadSslTestCase(unittest.TestCase):
-    def setUp(self):
-        self.working_dir_path = os.path.join(os.path.expandvars("%TEMP%"), "_temp")
-        self.download_file_name = "test_download.test"
-        self.destination = os.path.join(self.working_dir_path, self.download_file_name)
-        self.uncompressed_size = 1000000
-
-        if not os.path.exists(self.destination):
-            os.makedirs(self.destination)
-
-    def tearDown(self):
-        if os.path.exists(self.destination):
-            shutil.rmtree(self.destination)
-
-    def download_file(self, download_url):
-        try:
-            with bin_download.Downloader(20) as downloader:
-                with no_stdout():
-                    downloader.download_file(download_url, self.destination, self.uncompressed_size)
-
-        except ssl.SSLError:
-            raise
-
-        except ssl.CertificateError:
-            raise
-
-        except Exception:
-            print "\tFATAL ERROR: Unhandled exception encountered."
-            raise
-
-        return True
-
-    def test_cloudfront_download(self):
-        self.assertTrue(self.download_file("https://s3-us-west-2.amazonaws.com/lumberyard-download-artifacts-bucket/"
-                                           "3rdParty/squish-ccr/20150601_lmbr_v1/filelist.1.0.0.common.json"))
-
-    def test_expired(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://expired.badssl.com/")
-
-    def test_wrong_host(self):
-        self.assertRaises(ssl.CertificateError, self.download_file, "https://wrong.host.badssl.com/")
-
-    def test_self_signed(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://self-signed.badssl.com/")
-
-    def test_untrusted_root(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://untrusted-root.badssl.com/")
-
-    def test_incomplete_chain(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://incomplete-chain.badssl.com/")
-
-    def test_sha256(self):
-        self.assertTrue(self.download_file("https://sha256.badssl.com/"))
-
-    def test_1000_sans(self):
-        self.assertTrue(self.download_file("https://1000-sans.badssl.com/"))
-
-    def test_ecc256(self):
-        self.assertTrue(self.download_file("https://ecc256.badssl.com/"))
-
-    def test_ecc384(self):
-        self.assertTrue(self.download_file("https://ecc384.badssl.com/"))
-
-    def test_cbc(self):
-        # cbc is supposed to be secure in TLS1_1 and TLS1_2
-        self.assertTrue(self.download_file("https://cbc.badssl.com/"))
-
-    def test_rc4_md5(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://rc4-md5.badssl.com/")
-
-    def test_rc4(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://rc4.badssl.com/")
-
-    def test_3des(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://3des.badssl.com/")
-
-    def test_null(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://null.badssl.com/")
-
-    def test_mozilla_intermediate(self):
-        self.assertTrue(self.download_file("https://mozilla-intermediate.badssl.com/"))
-
-    def test_mozilla_modern(self):
-        self.assertTrue(self.download_file("https://mozilla-modern.badssl.com/"))
-
-    def test_dh480(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://dh480.badssl.com/")
-
-    def test_dh512(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://dh512.badssl.com/")
-
-    def test_dh_small(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://dh-small.badssl.com/")
-
-    def test_dh_composite(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://dh-composite.badssl.com/")
-
-    def test_static_rsa(self):
-        # Static RSA is still supported in TLS1_2 but is probably going to be removed in TLS1_3
-        self.assertTrue(self.download_file("https://static-rsa.badssl.com/"))
-
-    def test_hsts(self):
-        self.assertTrue(self.download_file("https://hsts.badssl.com/"))
-
-    def test_upgrade(self):
-        self.assertTrue(self.download_file("https://upgrade.badssl.com/"))
-
-    def test_preloaded_hsts(self):
-        self.assertTrue(self.download_file("https://preloaded-hsts.badssl.com/"))
-
-    def test_subdomain_preloaded_hsts(self):
-        self.assertRaises(ssl.CertificateError, self.download_file, "https://subdomain.preloaded-hsts.badssl.com/")
-
-    def test_https_everywhere(self):
-        self.assertTrue(self.download_file("https://https-everywhere.badssl.com/"))
-
-    def test_http(self):
-        self.assertTrue(self.download_file("https://http.badssl.com/"))
-
-    def test_spoofed_favicon(self):
-        self.assertTrue(self.download_file("https://spoofed-favicon.badssl.com/"))
-
-    def test_long_dashes(self):
-        self.assertTrue(self.download_file(
-            "https://long-extended-subdomain-name-containing-many-letters-and-dashes.badssl.com/"))
-
-    def test_long_without_dashes(self):
-        self.assertTrue(self.download_file(
-            "https://longextendedsubdomainnamewithoutdashesinordertotestwordwrapping.badssl.com/"))
-
-    def test_superfish(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://superfish.badssl.com/")
-
-    def test_edellroot(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://edellroot.badssl.com/")
-
-    def test_dsdtestprovider(self):
-        self.assertRaises(ssl.SSLError, self.download_file, "https://dsdtestprovider.badssl.com/")
-
-    # I can't successfully get CRL checking to work.
-    # This is a new test on badssl.com that is failing
-    # as well, since I don't think Qt has support for it.
-    # "https://revoked.badssl.com/"
-
-    # Excessive message size error
-    # "https://10000-sans.badssl.com/"
-
-    # This check is platform specific.
-    # 8192-bit RSA keys were not supported in OSX between 2006 and 2015.
-    # "https://rsa8192.badssl.com/"
-
-    # We don't download web pages
-    # "https://mixed-script.badssl.com/"
-    # "https://very.badssl.com/"
-    # mixed HTTP content in site
-    # "https://mixed.badssl.com/"
-    # implicit favicon redirects to HTTP
-    # "https://mixed-favicon.badssl.com/"
-    # "http://http-password.badssl.com/"
-    # "http://http-login.badssl.com/"
-    # "http://http-dynamic-login.badssl.com/"
-    # "http://http-credit-card.badssl.com/"
-
-    # We're not yet sure how to reject mozilla old SSL certs.
-    # "https://mozilla-old.badssl.com/"
-
-    # For some reason SSLv3 is used in these cases and we are blocking the use of SSLv3
-    # "https://dh1024.badssl.com/"
-    # "https://dh2048.badssl.com/"
-
-    # This test is failing in Setup Assistant as well
-    # "https://pinning-test.badssl.com/"
-
-
-if __name__ == "__main__":
-    unittest.main()
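
The tests above rely on a `download_file` helper defined earlier in the removed file (outside this hunk), and the comments suggest the code being exercised may be Qt-based. Purely as an illustration of the contract those assertions assume, here is a minimal Python sketch: fetch the URL over HTTPS with certificate and hostname verification enabled, so misconfigured badssl.com endpoints surface as `ssl.SSLError`/`ssl.CertificateError` and healthy ones return True. The helper below is an assumption, not the original implementation.

```python
import http.client
import ssl
from urllib.parse import urlparse

def download_file(url, timeout=10):
    """Return True if the URL downloads cleanly over a strictly verified TLS connection."""
    parsed = urlparse(url)
    context = ssl.create_default_context()            # verifies certificates and hostnames
    context.minimum_version = ssl.TLSVersion.TLSv1_2  # refuse SSLv3 and legacy TLS
    conn = http.client.HTTPSConnection(parsed.hostname, parsed.port or 443,
                                       timeout=timeout, context=context)
    try:
        conn.request("GET", parsed.path or "/")       # handshake failures raise ssl.SSLError here
        return conn.getresponse().status == 200
    finally:
        conn.close()
```

With a helper like this, the assertTrue tests only pass when the handshake, certificate chain, and hostname checks all succeed, and the assertRaises tests see the TLS error directly.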

+ 0 - 26
Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,26 +0,0 @@
----
-name: Bug Report
-about: Create a bug report to help us improve.
-
----
-
-**Describe the bug**
-Please provide a concise description of the bug.
-
-**Steps to reproduce**
-Please provide steps to reproduce the bug. The more detail you provide, the more likely we'll be able to reproduce it. 
-
-**Expected behavior**
-Please provide a concise description of what you expected to happen.
-
-**Screenshots/Logs**
-Please include any relevant screenshots and log files from your game project directory (e.g., C:\Amazon\lumberyard\1.15.0.0\dev\Cache\YOURPROJECT\pc\user\log). Note that you are posting to a public forum so please remove any sensitive information from your log files such as project name & path, IP address, credentials etc. 
-
-**Lumberyard version**
-State the version of Lumberyard in which you discovered this bug (e.g., v1.14.0.1 or v1.15.0.0 etc.).
-
-**[OPTIONAL] What is your role in game development?**
-Are you a game designer, engineer, artist, producer, something else?
-
-**[OPTIONAL] Tell us about your project or studio.**
-Briefly tell us about your project or studio.

+ 0 - 17
Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/feature_request.md

@@ -1,17 +0,0 @@
----
-name: Feature Request
-about: Suggest a feature you'd like to see developed. 
-
----
-
-**Describe your feature request**
-Please provide a concise description of the feature you'd like to see in Lumberyard.
-
-**Describe workarounds or alternatives you've considered**
-Please provide a concise description of workarounds or alternative solutions you've considered.
-
-**[OPTIONAL] What is your role in game development?**
-Are you a game designer, engineer, artist, producer, something else?
-
-**[OPTIONAL] Tell us about your project or studio.**
-Briefly tell us about your project or studio.

+ 0 - 20
Tools/build/JenkinsScripts/distribution/git_release/inject/.github/ISSUE_TEMPLATE/question.md

@@ -1,20 +0,0 @@
----
-name: Question
-about: Ask a question to the community.
-
----
-
-**What is your question?**
-Please ask your question here. 
-
-**Which part of the engine are you asking about?**
-Please indicate the component of the engine that your question relates to (e.g., Script Canvas, PhysX, Animation etc.).
-
-**Which version of Lumberyard are you using?**
-State the Lumberyard version that you're using (e.g., v1.14.0.1 or v1.15.0.0 etc.).
-
-**[OPTIONAL] What is your role in game development?**
-Are you a game designer, engineer, artist, producer, something else?
-
-**[OPTIONAL] Tell us about your project or studio.**
-Briefly tell us about your project or studio.

+ 0 - 45
Tools/build/JenkinsScripts/distribution/git_release/inject/CONTRIBUTING.md

@@ -1,45 +0,0 @@
-# Contribution Guidelines
-Thank you for visiting our contribution guidelines! An active and healthy development community is what makes a good game engine an exceptional game engine. As we focus on developing new features and resolving bugs with every version of Lumberyard, we want to hear from you. We are interested in seeing how you're using the engine and what improvements you're making while you work on your own game projects. This is why, in addition to our [GameDev Forums](https://gamedev.amazon.com/forums/index.html), [Tutorials](https://www.youtube.com/amazongamedev) and [Documentation](https://aws.amazon.com/documentation/lumberyard/), we provide you with the opportunity to share your features and improvements with your fellow developers. After you modify the core engine code, simply submit a pull request.
-
-To make it easy for you to contribute to our game engine, the Lumberyard development team adheres to the following coding conventions. We believe that these guidelines keep the engine code consistent and easy to understand so that you can spend less time interpreting code and more time coding. We look forward to your contributions!
-
-## Compiler Compatibility:
--	Use the C++11 standard whenever possible.
--	Stick to the C++11 features that are commonly supported by Microsoft Visual Studio 2013/2015 (refer to https://msdn.microsoft.com/en-us/library/hh567368.aspx).
-
-## Formatting:
--	Lumberyard recommends using the Uncrustify code beautifier to keep C++ code consistent with the engine code. Refer to http://uncrustify.sourceforge.net/.
--	Apply indentation in a consistent manner:
-	-	Files should start without any indentation.
-	-	Use a single additional level of indentation for each nested block of code.
-	-	Indent all lines of a block by the same amount.
-	-	Make lines a reasonable length.
--	Indent preprocessor statements in a similar way to regular code.
--	When positioning curly braces, open braces on a new line and keep them flush with the outer block's indentation.
--	Always use curly braces for flow control statements.
--	Each line of code should only include a single statement.
--	Names of classes, functions, types, and files should use CamelCase and describe their purpose.
--	All header files must include the directive, "#pragma once".
--	Use forward declarations to minimize header file dependencies. Compile times are a concern so please put in the effort to minimize include chains.
--	The following syntax should be used when including header files: #include <Package/SubdirectoryChain/Header.h>
-This rule helps disambiguate files from different packages that have the same name. <Object.h> might appear relatively often, but <AZRender/Object.h> is far less likely to.
-
-## Classes:
--	You should define a default constructor if your class defines member variables and has no other constructors. Unless you have a very specifically targeted optimization, you should initialize all variables to a known state even if the variable state is invalid.
--	Do not assume any specific properties based on the choice of struct vs class; always use <type_traits> to check the actual properties
--	Public declarations come before private declarations. Methods should be declared before data members.
--	All methods that do not modify internal state should be const. All function parameters passed by pointer or reference should be marked const unless they are output parameters.
--	Use the override keyword wherever possible and omit the keyword virtual when using override.
--	Use the final keyword where its use can be justified.
-
-## Scoping:
--	All of your code should be in at least a namespace named after the package and conform to the naming convention specified earlier in this document.
--	Place a function's variable declarations in the narrowest possible scope and always initialize variables in their declaration.
--	Static member or global variables that are concrete class objects are completely forbidden. If you must have a global object it should be a pointer, and it must be constructed and destroyed via appropriate functions.
-
-## Commenting Code:
-Clear and concise communication is essential in keeping the code readable for everyone. Since comments are the main method for communication, please follow these guidelines for commenting the code:
--	Use /// for comments.
--	Use /**..*/ for block comments.
--	Use @param, etc. for commands.
--	Full sentences with good grammar are preferable to abbreviated notes.

+ 0 - 57
Tools/build/JenkinsScripts/distribution/git_release/inject/README.md

@@ -1,57 +0,0 @@
-![lmbr](http://d2tinsms4add52.cloudfront.net/github/readme_header.jpg)
-
-# Amazon Lumberyard
-Amazon Lumberyard is a free, AAA game engine that gives you the tools you need to create high quality games. Deeply integrated with AWS and Twitch, Amazon Lumberyard includes full source code, allowing you to customize your project at any level.
-
-For more information, visit: https://aws.amazon.com/lumberyard/
-
-## Acquiring Lumberyard source
-Each release of Lumberyard exists as a separate branch in GitHub. You can get Lumberyard from GitHub using the following steps:
-
-### Fork the repository
-Forking creates a copy of the Lumberyard repository in your GitHub account. Your fork becomes the remote repository into which you can push changes.
-
-### Create a branch
-The GitHub workflow assumes your master branch is always deployable. Create a branch for your local project or fixes.
-
-For more information about branching, see the [GitHub documentation](https://guides.github.com/introduction/flow/).
-
-### Clone the repository
-Cloning the repository copies your fork onto your computer. To clone the repository, click the "Clone or download" button on the GitHub website, and copy the resultant URL to the clipboard. In a command line window, type ```git clone [URL]```, where ```[URL]``` is the URL that you copied in the previous step.
-
-For more information about cloning a repository, see the [GitHub documentation](https://help.github.com/articles/cloning-a-repository/).
-
-
-### Downloading additive files
-Once the repository exists locally on your machine, manually execute ```git_bootstrap.exe``` found at the root of the repository. This application will perform a download operation for __Lumberyard binaries that are required prior to using or building the engine__. This program uses AWS services to download the binaries. Monitor the health of AWS services on the [AWS Service Health Dashboard](https://status.aws.amazon.com/).
-
-### Running the Setup Assistant
-```git_bootstrap.exe``` will launch the Setup Assistant when it completes. Setup Assistant lets you configure your environment and launch the Lumberyard Editor.
-
-## Contributing code to Lumberyard
-You can submit changes or fixes to Lumberyard using pull requests. When you submit a pull request, the Lumberyard support team is notified and evaluates the code you submitted. You may be contacted to provide further detail or clarification while the support team evaluates your submitted code.
-
-### Best practices for submitting pull requests
-Before submitting a pull request to a Lumberyard branch, please merge the latest changes from that branch into your project. We only accept pull requests on the latest version of a branch.
-
-For more information about working with pull requests, see the [GitHub documentation](https://help.github.com/articles/cloning-a-repository/).
-
-## Purpose of Lumberyard on GitHub
-Lumberyard on GitHub provides a way for you to view and acquire the engine source code, and contribute by submitting pull requests. Lumberyard does not endorse any particular source control system for your personal use.
-
-## Lumberyard Documentation
-Full Lumberyard documentation can be found here:
-https://aws.amazon.com/documentation/lumberyard/
-We also have tutorials available at https://www.youtube.com/amazongamedev
-
-## License
-Your use of Lumberyard is governed by the AWS Customer Agreement at https://aws.amazon.com/agreement/ and Lumberyard Service Terms at https://aws.amazon.com/serviceterms/#57._Amazon_Lumberyard_Engine.
-
-For complete copyright and license terms please see the LICENSE.txt file at the root of this distribution (the "License").  As a reminder, here are some key pieces to keep in mind when submitting changes/fixes and creating your own forks:
--	If you submit a change/fix, we can use it without restriction, and other Lumberyard users can use it under the License.
--	Only share forks in this GitHub repo (i.e., forks must be parented to https://github.com/aws/lumberyard).
--	Your forks are governed by the License, and you must include the License.txt file with your fork.  Please also add a note at the top explaining your modifications.
--	If you use someone else’s fork from this repo, your use is subject to the License.    
--	Your fork may not enable the use of third-party compute, storage or database services.  
--	It's fine to connect to third-party platform services like Steamworks, Apple GameCenter, console platform services, etc.  
-To learn more, please see our FAQs https://aws.amazon.com/lumberyard/faq/#licensing.

+ 0 - 167
Tools/build/JenkinsScripts/distribution/inject_signed_binaries.py

@@ -1,167 +0,0 @@
-"""
-All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-its licensors.
-
-For complete copyright and license terms please see the LICENSE at the root of this
-distribution (the "License"). All use of this software is governed by the License,
-or, if provided, by the license below or the license accompanying this file. Do not
-remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-
-Description: 
-    Release automation script that injects signed binary files into package zips 
-    then generates new MD5 checksum files for the modified packages. 
-"""
-import argparse
-import os
-import subprocess
-import sys
-
-from AWS_PyTools.LyChecksum import getMD5ChecksumForSingleFile
-from Installer.InstallerAutomation import defaultFilesToSign
-from Installer.SignTool import signtoolVerifySign
-
-DEFAULT_FILES_TO_INJECT = defaultFilesToSign
-DEFAULT_PLATFORMS = ['pc', 'provo', 'consoles']
-DEFAULT_WORKING_DIR = '%TEMP%/installerAuto'
-DEFAULT_PATH_7ZIP = 'C:/7z.exe'
-
-DEFAULT_VERSION = os.environ.get('VERSION')
-DEFAULT_P4_CL = os.environ.get('P4_CL')
-DEFAULT_BUILD_NUMBER = os.environ.get('BUILD_NUMBER')
-
-
-class InjectionError(Exception): pass
-
-
-def create_md5_file(file_path, checksum):
-    """Write the provided checksum to an .MD5 file and return the file path."""
-    md5_file = '{}.MD5'.format(file_path)
-    with open(md5_file, 'w') as f:
-        f.write(checksum)
-    return md5_file
-
-
-def format_package_name(package_version, changelist, platform, build_number):
-    """Return a string of the package name in the standard format"""
-    package_name = 'lumberyard-{0}-{1}-{2}-{3}.zip'.format(package_version, changelist, platform, build_number)
-    return package_name
-
-
-def verify_signed_files(binary_files, working_dir, verbose):
-    """Verifies that the binary files in the workspace are signed
-
-    Uses signtoolVerifySign, imported from Installer/SignTool.py, which returns
-    True when a file is signed; unsigned files raise an InjectionError.
-
-    Returns:
-        The list of relative paths for the verified signed files
-
-    """
-    signed_binary_files = []
-    for b in binary_files:
-        binary_file_path = os.path.join(working_dir, b)
-        signed = signtoolVerifySign(binary_file_path, verbose)
-        if not signed:
-            raise InjectionError('Unsigned binary file found in workspace: {0}'.format(b))
-        signed_binary_files.append(b)
-    return signed_binary_files
-
-
-def print_result(description, list):
-    """Prints the result with formatting"""
-    list_new_line = '\n'.join(map(str, list))
-    result = '\n'.join([description, list_new_line])
-    print(result)
-
-
-def generate_md5_checksums(updated_zips):
-    """Using a list of file paths, generate MD5 checksums.
-
-    Uses getMD5ChecksumForSingleFile, imported from AWS_PyTools/LyChecksum.py, to
-    generate each checksum. A .MD5 file is then created for each one.
-
-    Returns:
-        The list of paths for the generated .MD5 files.
-
-    """
-    md5_files = []
-    for z in updated_zips:
-        checksum = getMD5ChecksumForSingleFile(z).hexdigest()
-        md5_file = create_md5_file(z, checksum)
-        md5_files.append(md5_file)
-    return md5_files
-
-
-def inject_binaries(args):
-    """Inject signed binary files into package zips.
-
-    Verify that the binary files are signed.
-    Write the signed binary list to a file, one path per line, to supply to 7-Zip.
-
-    Run a 7-Zip command to inject the signed binaries into each package zip.
-    Command line syntax: 7z.exe a -spf2 <zip_file_path> @<file_list>
-
-    Returns:
-        The list of file paths for the updated package zips. 
-
-    """
-    signed_binary_files = verify_signed_files(args.files_to_inject, args.working_dir, args.verbose)
-
-    list_file_path = os.path.join(args.working_dir, 'list_file.txt')
-    with open(list_file_path, 'w') as list_file:
-        list_file.write('\n'.join(signed_binary_files))
-
-    updated_zips = []
-    for p in args.platforms:
-        package_name = format_package_name(args.package_version, args.changelist, p, args.build_number)
-        package_path = os.path.join(args.working_dir, package_name)
-        try:
-            subprocess.check_call([args.path_7zip, 'a', '-spf2', package_path, '@{0}'.format(list_file_path)], 
-                                    cwd=args.working_dir)
-            updated_zips.append(package_path)
-        except subprocess.CalledProcessError as e:
-            raise InjectionError('Error using 7z to inject signed binaries: {0}'.format(e))
-    return updated_zips
-
-
-def parse_args():
-    """Setup arguments. Defaults to using build parameters and binary list also used by CODESIGN_Windows."""
-    parser = argparse.ArgumentParser(
-        description='Inject signed binary files into package zips then generate new MD5 checksums')
-    parser.add_argument('-w', '--working-dir', default=DEFAULT_WORKING_DIR, 
-        help='Directory where the binary files and package zips are located.')
-    parser.add_argument('-p', '--package-version', default=DEFAULT_VERSION, 
-        help='<Major>.<Minor> version of the target packages.')
-    parser.add_argument('-c', '--changelist', default=DEFAULT_P4_CL, 
-        help='Perforce changelist for the target packages')
-    parser.add_argument('-b', '--build-number', default=DEFAULT_BUILD_NUMBER, 
-        help='Build number for the target packages')
-    parser.add_argument('--files-to-inject', nargs='+', default=DEFAULT_FILES_TO_INJECT, 
-        help='List of binaries to inject into the package zips. Defaults to the list used to sign the files.')
-    parser.add_argument('--platforms', nargs='+', default=DEFAULT_PLATFORMS, 
-        help='Specifies which platform packages to inject.')
-    parser.add_argument('--path-7zip', default=DEFAULT_PATH_7ZIP, 
-        help='Path for the 7zip executable.')
-    parser.add_argument('--verbose', action='store_true', 
-        help='Verbose output on codesigning commands.')
-    args = parser.parse_args()
-    return args
-
-
-def main():
-    try:
-        args = parse_args()
-        updated_zips = inject_binaries(args)
-        md5_files = generate_md5_checksums(updated_zips)
-
-        print_result('Package zips updated with signed binaries:', updated_zips)
-        print_result('Generated MD5 checksum files:', md5_files)
-    except InjectionError as e:
-        raise SystemExit('Injection Error: {}'.format(e))
-
-    sys.exit(0)
-    
-
-if __name__ == "__main__":
-    main()
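
For reference, the 7-Zip step described in the docstring above boils down to one `a -spf2` call per platform package. A minimal sketch under stated assumptions: the working directory is the script's default, and the package name is a hypothetical example of what format_package_name produces from the Jenkins build parameters.

```python
import os
import subprocess

working_dir = os.path.expandvars('%TEMP%/installerAuto')  # illustrative default working dir
package_zip = 'lumberyard-1.28-12345-pc-678.zip'          # hypothetical name: version-changelist-platform-build

# -spf2 preserves the relative paths listed in list_file.txt when adding files,
# so each signed binary lands back at its original location inside the zip.
subprocess.check_call(
    ['C:/7z.exe', 'a', '-spf2', package_zip, '@list_file.txt'],
    cwd=working_dir)
```

The real script loops this call over each platform package and then regenerates the .MD5 checksum files for the modified zips.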

+ 0 - 59
Tools/build/JenkinsScripts/distribution/ly_dep_version_tool.py

@@ -1,59 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-from optparse import OptionParser
-import os, json, re, sys
-
-dirDescriptor = ".package.dir"
-def main():
-    parser = OptionParser()
-    parser.add_option(  "-s", "--source",
-                        dest="source",
-                        help="Specify the boot-strap tool's metadata file to be parsed (i.e. C:/dev/SetupAssistantConfig.json)",
-                        default="../../../../SetupAssistantConfig.json")
-    parser.add_option(  "-o", "--outputfile",
-                        dest="output",
-                        help="Specify the output file of this tool.  If it exists, it will be over-written.",
-                        default="./3rdparty_versions.txt")
-    (options, args) = parser.parse_args()
-
-    if not os.path.isfile(options.source):
-        print 'invalid sourcefile "{}"'.format(options.source)
-        return 2
-
-    ant_property_list = ''
-
-    with open(options.source, 'r') as source:
-        source_json = json.load(source)
-
-        sdks_list = source_json['SDKs']
-
-        for sdk_object in sdks_list:
-            identifier = sdk_object['identifier'].encode('ascii')
-
-            if identifier:
-                subdir = sdk_object.get('source')
-
-                ant_property_list += '{}={}\n'.format(identifier + dirDescriptor, subdir)
-                # Wwise LTX is distributed differently than other SDKs, and exists in two locations.
-                if identifier == "wwiseLtx":
-                    justVersionDir = re.sub("Wwise/", "", subdir)
-                    ant_property_list += '{}={}\n'.format("wwiseLtx.tool" + dirDescriptor, justVersionDir)
-
-    with open(options.output, 'w') as output:
-        output.write(ant_property_list)
-
-    print '\nANT Properties:'
-    print ant_property_list.strip()
-
-if __name__ == "__main__":
-    main()
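
To make the transform above concrete, here is a small illustration (the SDK identifier's source path is made up, not taken from a real SetupAssistantConfig.json): each SDK entry becomes an ANT property of the form `<identifier>.package.dir=<source>`, with an extra `wwiseLtx.tool.package.dir` entry for the Wwise LTX special case.

```python
import json
import re

dir_descriptor = ".package.dir"
sample = '{"SDKs": [{"identifier": "wwiseLtx", "source": "Wwise/LTX_2019"}]}'  # hypothetical entry

for sdk in json.loads(sample)["SDKs"]:
    identifier = sdk["identifier"]
    subdir = sdk.get("source")
    print('{}{}={}'.format(identifier, dir_descriptor, subdir))
    if identifier == "wwiseLtx":
        # Wwise LTX also exists one level up, without the "Wwise/" prefix.
        print('wwiseLtx.tool{}={}'.format(dir_descriptor, re.sub("Wwise/", "", subdir)))

# Expected output:
#   wwiseLtx.package.dir=Wwise/LTX_2019
#   wwiseLtx.tool.package.dir=LTX_2019
```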

+ 0 - 24
Tools/build/JenkinsScripts/distribution/modify_lylauncherconfig.py

@@ -1,24 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import fileinput
-import os
-import stat
-
-os.chmod('SetupAssistantConfig.ini', stat.S_IWRITE)
-for line in fileinput.input('SetupAssistantConfig.ini', inplace=1):
-    # Below is an example of how to modify the value for 'compileandroid'.  It's commented out to demonstrate how
-    # entries in this file can be altered in the future.
-    #if line.startswith(';compileandroid'):
-    #    print('compileandroid="enabled"  ; compile runtime for android')
-    #else:
-    print line,

+ 0 - 44
Tools/build/JenkinsScripts/distribution/package_source_assets.bat

@@ -1,44 +0,0 @@
-REM 
-REM 
-REM  All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
-REM  its licensors.
-REM 
-REM  For complete copyright and license terms please see the LICENSE at the root of this
-REM  distribution (the "License"). All use of this software is governed by the License,
-REM  or, if provided, by the license below or the license accompanying this file. Do not
-REM  remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
-REM  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-REM 
-
-@ECHO WARNING This batch file, %0, is deprecated.  See ant script build.xml
-EXIT 0
-
-REM TBD: once the build.xml script gets past CB5 QA, remove the below lines
-REM Until then, keep this to compare the build.xml behavior in CB5 to the CB4 behavior below
-@echo #1
-IF EXIST .\Bin64vc141\rc\rc.exe (
-    SET BINFOLDER=Bin64vc141
-) ELSE (
-    ECHO Cannot find rc.exe
-    EXIT /b 1
-)
-
-.\%BINFOLDER%\rc\rc.exe /job=.\%BINFOLDER%\rc\RCJob_Build_RPGSample_paks.xml > BuildRPGSamplePaks.log
-del TempRC\RPGsample /s /q
-del Build\RPGSample /s /q
-
-@echo #2
-@echo Move (not copy) these files into another folder, zip it up so it retains the same folder structure. That way someone could just extract the .zip file and have everything go to the right place
-xcopy RPGSample\*.dds SourceAssets\RPGSample /s /i
-xcopy RPGSample\*.tif SourceAssets\RPGSample /s /i
-xcopy RPGSample\*.psd SourceAssets\RPGSample /s /i
-
-del RPGSample\*.dds /s /q /f
-del RPGSample\*.tif /s /q /f
-del RPGSample\*.psd /s /q /f
-
-
-@echo #3 We'll deliver the packaged engine and the assets that were moved in #2 separately, so they can choose to download the source art or not (an extra 15GB or so)
-
-@echo If they choose to extract the source art, they'll want to run 
-@echo .\%BINFOLDER%\rc\rc.exe /job=.\Bin64\rc\RCJob_Compile_RPGSample_Textures.xml

+ 0 - 47
Tools/build/JenkinsScripts/distribution/release_automation_tool.py

@@ -1,47 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-from Installer import InstallerAutomation
-from ThirdParty import thirdparty_bucket_fetch
-import os
-
-def main():
-    # make sure that current working directory is the directory that this
-    #   script lives in
-    abspath = os.path.abspath(__file__)
-    dname = os.path.dirname(abspath)
-    os.chdir(dname)
-    
-    # parse InstallerAutomation args from execution of this script
-    os.chdir("Installer")
-    installerArgs = InstallerAutomation.createArgs()
-    InstallerAutomation.validateArgs(installerArgs)
-    os.chdir("..")
-
-    # If we succeed InstallerAutomation validation (we would have asserted otherwise),
-    #   then parse thirdparty_bucket_fetch args.
-    os.chdir("ThirdParty")
-    ladPackageArgs = thirdparty_bucket_fetch.parse_args()
-    ladPackageParams = thirdparty_bucket_fetch.PromoterParams(ladPackageArgs)
-    os.chdir("..")
-
-    # if we succeeded that one too, then it is safe to run the scripts themselves
-    os.chdir("Installer")
-    InstallerAutomation.run(installerArgs)
-
-    os.chdir("../ThirdParty")
-    thirdparty_bucket_fetch.run(ladPackageArgs, ladPackageParams)
-
-    os.chdir("..")
-
-if __name__ == "__main__":
-    main()

+ 0 - 379
Tools/build/JenkinsScripts/distribution/s3multiput.py

@@ -1,379 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-#!/usr/bin/python
-# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-#
- 
-# multipart portions copyright Fabian Topfstedt
-# https://gist.github.com/924094
- 
- 
-import math
-import mimetypes
-from multiprocessing import Pool
-import getopt, sys, os
- 
-import boto
-from boto.exception import S3ResponseError
- 
-from boto.s3.connection import S3Connection
-from filechunkio import FileChunkIO
- 
-import time
- 
-usage_string = """
-SYNOPSIS
-    s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-          -b/--bucket <bucket_name> [-c/--callback <num_cb>]
-          [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
-          [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>] 
-          [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
-
-    Where
-        access_key - Your AWS Access Key ID.  If not supplied, boto will
-                     use the value of the environment variable
-                     AWS_ACCESS_KEY_ID
-        secret_key - Your AWS Secret Access Key.  If not supplied, boto
-                     will use the value of the environment variable
-                     AWS_SECRET_ACCESS_KEY
-        bucket_name - The name of the S3 bucket the file(s) should be
-                      copied to.
-        path - A path to a directory or file that represents the items
-               to be uploaded.  If the path points to an individual file,
-               that file will be uploaded to the specified bucket.  If the
-               path points to a directory, it will recursively traverse
-               the directory and upload all files to the specified bucket.
-        debug_level - 0 means no debug output (default), 1 means normal
-                      debug output from boto, and 2 means boto debug output
-                      plus request/response output from httplib
-        ignore_dirs - a comma-separated list of directory names that will
-                      be ignored and not uploaded to S3.
-        num_cb - The number of progress callbacks to display.  The default
-                 is zero which means no callbacks.  If you supplied a value
-                 of "-c 10" for example, the progress callback would be
-                 called 10 times for each file transferred.
-        prefix - A file path prefix that will be stripped from the full
-                 path of the file when determining the key name in S3.
-                 For example, if the full path of a file is:
-                     /home/foo/bar/fie.baz
-                 and the prefix is specified as "-p /home/foo/" the
-                 resulting key name in S3 will be:
-                     /bar/fie.baz
-                 The prefix must end in a trailing separator and if it
-                 does not then one will be added.
-        key_prefix - A prefix to be added to the S3 key name, after any 
-                     stripping of the file path is done based on the 
-                     "-p/--prefix" option.
-        reduced - Use Reduced Redundancy storage
-        grant - A canned ACL policy that will be granted on each file
-                transferred to S3.  The value of provided must be one
-                of the "canned" ACL policies supported by S3:
-                private|public-read|public-read-write|authenticated-read
-        no_overwrite - No files will be overwritten on S3, if the file/key
-                       exists on s3 it will be kept. This is useful for
-                       resuming interrupted transfers. Note this is not a
-                       sync, even if the file has been updated locally if
-                       the key exists on s3 the file on s3 will not be
-                       updated.
-
-     If the -n option is provided, no files will be transferred to S3 but
-     informational messages will be printed about what would happen.
-"""
-def usage():
-    print usage_string
-    sys.exit()
- 
-def submit_cb(bytes_so_far, total_bytes):
-    print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
- 
-_last_cb_end = None # XXX blargh!
-def init_throttle():
-    global _last_cb_end
-    _last_cb_end = time.time()
- 
-def throttle_cb(bytes_so_far, total_bytes):
-    global _last_cb_end
-    # print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
- 
-    d = time.time() - _last_cb_end
-    time.sleep(1.0 - d)
-    _last_cb_end = time.time()
- 
-def get_key_name(fullpath, prefix, key_prefix):
-    key_name = fullpath[len(prefix):]
-    l = key_name.split(os.sep)
-    return key_prefix + '/'.join(l)
- 
-def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
-    source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=10):
-    """
-    Uploads a part with retries.
-    """
-    if debug == 1:
-        print "_upload_part(%s, %s, %s)" % (source_path, offset, bytes)
-    def _upload(retries_left=amount_of_retries):
-        try:
-            if debug == 1:
-                print 'Start uploading part #%d ...' % part_num
-            conn = S3Connection(aws_key, aws_secret)
-            conn.debug = debug
-            bucket = conn.get_bucket(bucketname)
-            for mp in bucket.get_all_multipart_uploads():
-                if mp.id == multipart_id:
-                    with FileChunkIO(source_path, 'r', offset=offset,
-                        bytes=bytes) as fp:
-                        mp.upload_part_from_file(fp=fp, part_num=part_num, cb=cb, num_cb=num_cb)
-                    break
-        except Exception, exc:
-            if retries_left:
-                _upload(retries_left=retries_left - 1)
-            else:
-                print 'Failed uploading part #%d' % part_num
-                raise exc
-        else:
-            if debug == 1:
-                print '... Uploaded part #%d' % part_num
- 
-    _upload()
- 
-def upload(bucketname, aws_key, aws_secret, source_path, keyname,
-    reduced, debug, cb, num_cb,
-    acl='private', headers={}, guess_mimetype=True, parallel_processes=4, throttle_kbps=None):
-    """
-    Parallel multipart upload.
-    """
-    conn = S3Connection(aws_key, aws_secret)
-    conn.debug = debug
-    bucket = conn.get_bucket(bucketname)
- 
-    if guess_mimetype:
-        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
-        headers.update({'Content-Type': mtype})
- 
-    mp = bucket.initiate_multipart_upload(keyname, headers=headers, reduced_redundancy=reduced)
- 
-    source_size = os.stat(source_path).st_size
-    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
-        5242880)
-    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
- 
-    if parallel_processes == 0:
-        print "doing serial upload"
-        if throttle_kbps:
-            print "throttling to %d kbps" % throttle_kbps
- 
-        for i in range(chunk_amount):
-            offset = i * bytes_per_chunk
-            remaining_bytes = source_size - offset
-            bytes = min([bytes_per_chunk, remaining_bytes])
-            part_num = i + 1
- 
-            if throttle_kbps:
-                chunks = bytes / (throttle_kbps * 1024)
-                print "uploading %d bytes in %d chunks" % (bytes, chunks)
-                num_cb = chunks
-                cb = throttle_cb
-                init_throttle()
- 
-            _upload_part(bucketname, aws_key, aws_secret, mp.id, part_num,
-                source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=2)
-    else:
-        pool = Pool(processes=parallel_processes)
-        for i in range(chunk_amount):
-            offset = i * bytes_per_chunk
-            remaining_bytes = source_size - offset
-            bytes = min([bytes_per_chunk, remaining_bytes])
-            part_num = i + 1
-            pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
-                part_num, source_path, offset, bytes, debug, cb, num_cb])
-        pool.close()
-        pool.join()
- 
-    if len(mp.get_all_parts()) == chunk_amount:
-        mp.complete_upload()
-        key = bucket.get_key(keyname)
-        #key.set_acl(acl)
-    else:
-        mp.cancel_upload()
- 
- 
-def main():
- 
-    # default values
-    aws_access_key_id     = None
-    aws_secret_access_key = None
-    bucket_name = ''
-    ignore_dirs = []
-    total  = 0
-    debug  = 0
-    cb     = None
-    num_cb = 0
-    quiet  = False
-    no_op  = False
-    prefix = '/'
-    key_prefix = ''
-    grant  = None
-    no_overwrite = False
-    reduced = False
-    num_workers = 4
-    throttle_kbps=None
- 
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
-                                   ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
-                                    'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet', 'secret_key=', 
-                                    'no_overwrite', 'reduced', 'throttle=', 'num_workers='])
-    except getopt.GetoptError, e:
-        print e
-        usage()
- 
-    # parse opts
-    for o, a in opts:
-        if o in ('-h', '--help'):
-            usage()
-        if o in ('-a', '--access_key'):
-            aws_access_key_id = a
-        if o in ('-b', '--bucket'):
-            bucket_name = a
-        if o in ('-c', '--callback'):
-            num_cb = int(a)
-            cb = submit_cb
-        if o in ('-d', '--debug'):
-            debug = int(a)
-        if o in ('-g', '--grant'):
-            grant = a
-        if o in ('-i', '--ignore'):
-            ignore_dirs = a.split(',')
-        if o in ('-n', '--no_op'):
-            no_op = True
-        if o in ('-w', '--no_overwrite'):
-            no_overwrite = True
-        if o in ('-p', '--prefix'):
-            prefix = a
-            if prefix[-1] != os.sep:
-                prefix = prefix + os.sep
-        if o in ('-k', '--key_prefix'):
-            key_prefix = a
-        if o in ('-q', '--quiet'):
-            quiet = True
-        if o in ('-s', '--secret_key'):
-            aws_secret_access_key = a
-        if o in ('-r', '--reduced'):
-            reduced = True
-        if o == '--throttle': # XXX this will interfere with cb params
-            throttle_kbps = int(a)
-        if o == '--num_workers':
-            num_workers = int(a)
- 
-    if len(args) != 1:
-        usage()
- 
-    path = os.path.expanduser(args[0])
-    path = os.path.expandvars(path)
-    path = os.path.abspath(path)
- 
-    if not bucket_name:
-        print "bucket name is required!"
-        usage()
- 
-    c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
-                        aws_secret_access_key=aws_secret_access_key)
-    c.debug = debug
-    b = c.get_bucket(bucket_name)
- 
-    # upload a directory of files recursively
-    if os.path.isdir(path):
-        if no_overwrite:
-            if not quiet:
-                print 'Getting list of existing keys to check against'
-            keys = []
-            for key in b.list(get_key_name(path, prefix, key_prefix)):
-                keys.append(key.name)
-        for root, dirs, files in os.walk(path):
-            for ignore in ignore_dirs:
-                if ignore in dirs:
-                    dirs.remove(ignore)
-            for file in files:
-                fullpath = os.path.join(root, file)
-                key_name = get_key_name(fullpath, prefix, key_prefix)
-                copy_file = True
-                if no_overwrite:
-                    if key_name in keys:
-                        copy_file = False
-                        if not quiet:
-                            print 'Skipping %s as it exists in s3' % file
- 
-                if copy_file:
-                    if not quiet:
-                        print 'Copying %s to %s/%s' % (file, bucket_name, key_name)
- 
-                    if not no_op:
-                        if os.stat(fullpath).st_size == 0:
-                            # 0-byte files don't work and also don't need multipart upload
-                            k = b.new_key(key_name)
-                            k.set_contents_from_filename(fullpath, cb=cb, num_cb=num_cb,
-                                                         policy=grant, reduced_redundancy=reduced)
-                        else:
-                            upload(bucket_name, aws_access_key_id,
-                                   aws_secret_access_key, fullpath, key_name,
-                                   reduced, debug, cb, num_cb, grant or 'private',
-                                   parallel_processes=num_workers, throttle_kbps=throttle_kbps)
-                total += 1
- 
-    # upload a single file
-    elif os.path.isfile(path):
-        key_name = get_key_name(os.path.abspath(path), prefix, key_prefix)
-        copy_file = True
-        if no_overwrite:
-            if b.get_key(key_name):
-                copy_file = False
-                if not quiet:
-                    print 'Skipping %s as it exists in s3' % path
- 
-        if copy_file:
-            if not quiet:
-                print 'Copying %s to %s/%s' % (path, bucket_name, key_name)
- 
-            if not no_op:
-                if os.stat(path).st_size == 0:
-                    # 0-byte files don't work and also don't need multipart upload
-                    k = b.new_key(key_name)
-                    k.set_contents_from_filename(path, cb=cb, num_cb=num_cb, policy=grant,
-                                                 reduced_redundancy=reduced)
-                else:
-                    upload(bucket_name, aws_access_key_id,
-                           aws_secret_access_key, path, key_name,
-                           reduced, debug, cb, num_cb, grant or 'private',
-                           parallel_processes=num_workers, throttle_kbps=throttle_kbps)
- 
-if __name__ == "__main__":
-    main()
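
The serial-upload throttling in the script above is easy to miss: `num_cb` is set so that one progress callback fires per `throttle_kbps * 1024` bytes, and `throttle_cb` then sleeps out the remainder of one second between callbacks, which caps the transfer rate at roughly `throttle_kbps` KB/s. A small self-contained sketch of the same idea (the callback factory is an illustration, not the original module-global version):

```python
import time

def make_throttle_cb():
    """Return a boto-style progress callback that paces itself to ~1 call per second."""
    state = {"last": time.time()}

    def throttle_cb(bytes_so_far, total_bytes):
        elapsed = time.time() - state["last"]
        if elapsed < 1.0:
            time.sleep(1.0 - elapsed)   # pad the callback interval up to one second
        state["last"] = time.time()

    return throttle_cb

# If num_cb = total_bytes // (throttle_kbps * 1024), each callback corresponds to
# roughly throttle_kbps KB uploaded, so pacing callbacks to one per second gives
# an effective rate near throttle_kbps KB/s.
```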

+ 0 - 450
Tools/build/JenkinsScripts/distribution/s3put.py

@@ -1,450 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-#!C:\Python27\python.exe
-# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-#
-import getopt
-import sys
-import os
-import boto
-
-from boto.compat import six
-
-try:
-    # multipart portions copyright Fabian Topfstedt
-    # https://gist.github.com/924094
-
-    import math
-    import mimetypes
-    from multiprocessing import Pool
-    from boto.s3.connection import S3Connection
-    from filechunkio import FileChunkIO
-    multipart_capable = True
-    usage_flag_multipart_capable = """ [--multipart]"""
-    usage_string_multipart_capable = """
-        multipart - Upload files as multiple parts. This needs filechunkio.
-                    Requires ListBucket, ListMultipartUploadParts,
-                    ListBucketMultipartUploads and PutObject permissions."""
-except ImportError as err:
-    multipart_capable = False
-    usage_flag_multipart_capable = ""
-    if six.PY2:
-        attribute = 'message'
-    else:
-        attribute = 'msg'
-    usage_string_multipart_capable = '\n\n     "' + \
-        getattr(err, attribute)[len('No module named '):] + \
-        '" is missing for multipart support '
-
-
-DEFAULT_REGION = 'us-east-1'
-
-usage_string = """
-SYNOPSIS
-    s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-          -b/--bucket <bucket_name> [-c/--callback <num_cb>]
-          [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
-          [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
-          [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
-          [--header] [--region <name>] [--host <s3_host>]""" + \
-          usage_flag_multipart_capable + """ path [path...]
-
-    Where
-        access_key - Your AWS Access Key ID.  If not supplied, boto will
-                     use the value of the environment variable
-                     AWS_ACCESS_KEY_ID
-        secret_key - Your AWS Secret Access Key.  If not supplied, boto
-                     will use the value of the environment variable
-                     AWS_SECRET_ACCESS_KEY
-        bucket_name - The name of the S3 bucket the file(s) should be
-                      copied to.
-        path - A path to a directory or file that represents the items
-               to be uploaded.  If the path points to an individual file,
-               that file will be uploaded to the specified bucket.  If the
-               path points to a directory, it will recursively traverse
-               the directory and upload all files to the specified bucket.
-        debug_level - 0 means no debug output (default), 1 means normal
-                      debug output from boto, and 2 means boto debug output
-                      plus request/response output from httplib
-        ignore_dirs - a comma-separated list of directory names that will
-                      be ignored and not uploaded to S3.
-        num_cb - The number of progress callbacks to display.  The default
-                 is zero which means no callbacks.  If you supplied a value
-                 of "-c 10" for example, the progress callback would be
-                 called 10 times for each file transferred.
-        prefix - A file path prefix that will be stripped from the full
-                 path of the file when determining the key name in S3.
-                 For example, if the full path of a file is:
-                     /home/foo/bar/fie.baz
-                 and the prefix is specified as "-p /home/foo/" the
-                 resulting key name in S3 will be:
-                     /bar/fie.baz
-                 The prefix must end in a trailing separator and if it
-                 does not then one will be added.
-        key_prefix - A prefix to be added to the S3 key name, after any
-                     stripping of the file path is done based on the
-                     "-p/--prefix" option.
-        reduced - Use Reduced Redundancy storage
-        grant - A canned ACL policy that will be granted on each file
-                transferred to S3.  The value of provided must be one
-                of the "canned" ACL policies supported by S3:
-                private|public-read|public-read-write|authenticated-read
-        no_overwrite - No files will be overwritten on S3, if the file/key
-                       exists on s3 it will be kept. This is useful for
-                       resuming interrupted transfers. Note this is not a
-                       sync, even if the file has been updated locally if
-                       the key exists on s3 the file on s3 will not be
-                       updated.
-        header - key=value pairs of extra header(s) to pass along in the
-                 request
-        region - Manually set a region for buckets that are not in the US
-                 classic region. Normally the region is autodetected, but
-                 setting this yourself is more efficient.
-        host - Hostname override, for using an endpoint other then AWS S3
-""" + usage_string_multipart_capable + """
-
-
-     If the -n option is provided, no files will be transferred to S3 but
-     informational messages will be printed about what would happen.
-"""
-
-
-def usage(status=1):
-    print(usage_string)
-    sys.exit(status)
-
-
-def submit_cb(bytes_so_far, total_bytes):
-    print('%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes))
-
-
-def get_key_name(fullpath, prefix, key_prefix):
-    if fullpath.startswith(prefix):
-        key_name = fullpath[len(prefix):]
-    else:
-        key_name = fullpath
-    l = key_name.split(os.sep)
-    return key_prefix + '/'.join(l)
-
-
-def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
-                 source_path, offset, bytes, debug, cb, num_cb,
-                 amount_of_retries=10):
-    """
-    Uploads a part with retries.
-    """
-    if debug == 1:
-        print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
-
-    def _upload(retries_left=amount_of_retries):
-        try:
-            if debug == 1:
-                print('Start uploading part #%d ...' % part_num)
-            conn = S3Connection(aws_key, aws_secret)
-            conn.debug = debug
-            bucket = conn.get_bucket(bucketname)
-            for mp in bucket.get_all_multipart_uploads():
-                if mp.id == multipart_id:
-                    with FileChunkIO(source_path, 'r', offset=offset,
-                                     bytes=bytes) as fp:
-                        mp.upload_part_from_file(fp=fp, part_num=part_num,
-                                                 cb=cb, num_cb=num_cb)
-                    break
-        except Exception as exc:
-            if retries_left:
-                _upload(retries_left=retries_left - 1)
-            else:
-                print('Failed uploading part #%d' % part_num)
-                raise exc
-        else:
-            if debug == 1:
-                print('... Uploaded part #%d' % part_num)
-
-    _upload()
-
-def check_valid_region(conn, region):
-    if conn is None:
-        print('Invalid region (%s)' % region)
-        sys.exit(1)
-
-def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
-                     reduced, debug, cb, num_cb, acl='private', headers={},
-                     guess_mimetype=True, parallel_processes=4,
-                     region=DEFAULT_REGION):
-    """
-    Parallel multipart upload.
-    """
-    conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
-                                     aws_secret_access_key=aws_secret)
-    check_valid_region(conn, region)
-    conn.debug = debug
-    bucket = conn.get_bucket(bucketname)
-
-    if guess_mimetype:
-        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
-        headers.update({'Content-Type': mtype})
-
-    mp = bucket.initiate_multipart_upload(keyname, headers=headers,
-                                          reduced_redundancy=reduced)
-
-    source_size = os.stat(source_path).st_size
-    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
-                          5242880)
-    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
-
-    pool = Pool(processes=parallel_processes)
-    for i in range(chunk_amount):
-        offset = i * bytes_per_chunk
-        remaining_bytes = source_size - offset
-        bytes = min([bytes_per_chunk, remaining_bytes])
-        part_num = i + 1
-        pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
-                                        part_num, source_path, offset, bytes,
-                                        debug, cb, num_cb])
-    pool.close()
-    pool.join()
-
-    if len(mp.get_all_parts()) == chunk_amount:
-        mp.complete_upload()
-        key = bucket.get_key(keyname)
-        key.set_acl(acl)
-    else:
-        mp.cancel_upload()
-
-
-def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
-    """
-    Single upload.
-    """
-    k = bucket.new_key(key_name)
-    k.set_contents_from_filename(fullpath, *kargs, **kwargs)
-
-
-def expand_path(path):
-    path = os.path.expanduser(path)
-    path = os.path.expandvars(path)
-    return os.path.abspath(path)
-
-
-def main():
-
-    # default values
-    aws_access_key_id = None
-    aws_secret_access_key = None
-    bucket_name = ''
-    ignore_dirs = []
-    debug = 0
-    cb = None
-    num_cb = 0
-    quiet = False
-    no_op = False
-    prefix = '/'
-    key_prefix = ''
-    grant = None
-    no_overwrite = False
-    reduced = False
-    headers = {}
-    host = None
-    multipart_requested = False
-    region = None
-
-    try:
-        opts, args = getopt.getopt(
-            sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
-            ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
-             'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
-             'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
-             'host=', 'region='])
-    except:
-        usage(1)
-
-    # parse opts
-    for o, a in opts:
-        if o in ('-h', '--help'):
-            usage(0)
-        if o in ('-a', '--access_key'):
-            aws_access_key_id = a
-        if o in ('-b', '--bucket'):
-            bucket_name = a
-        if o in ('-c', '--callback'):
-            num_cb = int(a)
-            cb = submit_cb
-        if o in ('-d', '--debug'):
-            debug = int(a)
-        if o in ('-g', '--grant'):
-            grant = a
-        if o in ('-i', '--ignore'):
-            ignore_dirs = a.split(',')
-        if o in ('-n', '--no_op'):
-            no_op = True
-        if o in ('-w', '--no_overwrite'):
-            no_overwrite = True
-        if o in ('-p', '--prefix'):
-            prefix = a
-            if prefix[-1] != os.sep:
-                prefix = prefix + os.sep
-            prefix = expand_path(prefix)
-        if o in ('-k', '--key_prefix'):
-            key_prefix = a
-        if o in ('-q', '--quiet'):
-            quiet = True
-        if o in ('-s', '--secret_key'):
-            aws_secret_access_key = a
-        if o in ('-r', '--reduced'):
-            reduced = True
-        if o == '--header':
-            (k, v) = a.split("=", 1)
-            headers[k] = v
-        if o == '--host':
-            host = a
-        if o == '--multipart':
-            if multipart_capable:
-                multipart_requested = True
-            else:
-                print("multipart upload requested but not capable")
-                sys.exit(4)
-        if o == '--region':
-            regions = boto.s3.regions()
-            for region_info in regions:
-                if region_info.name == a:
-                    region = a
-                    break
-            else:
-                raise ValueError('Invalid region %s specified' % a)
-
-    if len(args) < 1:
-        usage(2)
-
-    if not bucket_name:
-        print("bucket name is required!")
-        usage(3)
-
-    connect_args = {
-        'aws_access_key_id': aws_access_key_id,
-        'aws_secret_access_key': aws_secret_access_key
-    }
-
-    if host:
-        connect_args['host'] = host
-
-    c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
-    check_valid_region(c, region or DEFAULT_REGION)
-    c.debug = debug
-    b = c.get_bucket(bucket_name, validate=False)
-
-    # Attempt to determine location and warn if no --host or --region
-    # arguments were passed. Then try to automagically figure out
-    # what should have been passed and fix it.
-    if host is None and region is None:
-        try:
-            location = b.get_location()
-
-            # Classic region will be '', any other will have a name
-            if location:
-                print('Bucket exists in %s but no host or region given!' % location)
-
-                # Override for EU, which is really Ireland according to the docs
-                if location == 'EU':
-                    location = 'eu-west-1'
-
-                print('Automatically setting region to %s' % location)
-
-                # Here we create a new connection, and then take the existing
-                # bucket and set it to use the new connection
-                c = boto.s3.connect_to_region(location, **connect_args)
-                c.debug = debug
-                b.connection = c
-        except Exception as e:
-            if debug > 0:
-                print(e)
-            print('Could not get bucket region info, skipping...')
-
-    existing_keys_to_check_against = []
-    files_to_check_for_upload = []
-
-    for path in args:
-        path = expand_path(path)
-        # upload a directory of files recursively
-        if os.path.isdir(path):
-            if no_overwrite:
-                if not quiet:
-                    print('Getting list of existing keys to check against')
-                for key in b.list(get_key_name(path, prefix, key_prefix)):
-                    existing_keys_to_check_against.append(key.name)
-            for root, dirs, files in os.walk(path):
-                for ignore in ignore_dirs:
-                    if ignore in dirs:
-                        dirs.remove(ignore)
-                for path in files:
-                    if path.startswith("."):
-                        continue
-                    files_to_check_for_upload.append(os.path.join(root, path))
-
-        # upload a single file
-        elif os.path.isfile(path):
-            fullpath = os.path.abspath(path)
-            key_name = get_key_name(fullpath, prefix, key_prefix)
-            files_to_check_for_upload.append(fullpath)
-            existing_keys_to_check_against.append(key_name)
-
-        # we are trying to upload something unknown
-        else:
-            print("I don't know what %s is, so i can't upload it" % path)
-
-    for fullpath in files_to_check_for_upload:
-        key_name = get_key_name(fullpath, prefix, key_prefix)
-
-        if no_overwrite and key_name in existing_keys_to_check_against:
-            if b.get_key(key_name):
-                if not quiet:
-                    print('Skipping %s as it exists in s3' % fullpath)
-                continue
-
-        if not quiet:
-            print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))
-
-        if not no_op:
-            # 0-byte files don't work and also don't need multipart upload
-            if os.stat(fullpath).st_size != 0 and multipart_capable and \
-                    multipart_requested:
-                multipart_upload(bucket_name, aws_access_key_id,
-                                 aws_secret_access_key, fullpath, key_name,
-                                 reduced, debug, cb, num_cb,
-                                 grant or 'private', headers,
-                                 region=region or DEFAULT_REGION)
-            else:
-                singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
-                                  policy=grant, reduced_redundancy=reduced,
-                                  headers=headers)
-
-if __name__ == "__main__":
-    main()

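For context on what the removed uploader did: it resolved the bucket's region when neither --host nor --region was given, skipped keys that already exist in the bucket when --no_overwrite was set, and fell back to single-part uploads for empty files or when multipart support was unavailable. Below is a minimal sketch of the same behavior, assuming the newer boto3 library rather than the legacy boto the script imports; the bucket, prefix, and function names are placeholders, not part of the removed code.

    # Minimal sketch (assumes boto3 is installed); illustrative only.
    import os
    import boto3
    from boto3.s3.transfer import TransferConfig

    def upload_tree(local_root, bucket, key_prefix='', skip_existing=False):
        """Upload every file under local_root to s3://bucket/key_prefix/..."""
        s3 = boto3.client('s3')
        # get_bucket_location returns None for us-east-1 and 'EU' for legacy eu-west-1 buckets.
        location = s3.get_bucket_location(Bucket=bucket)['LocationConstraint']
        region = {'EU': 'eu-west-1', None: 'us-east-1'}.get(location, location)
        s3 = boto3.client('s3', region_name=region)
        # boto3 switches to multipart automatically above this threshold (8 MB here).
        config = TransferConfig(multipart_threshold=8 * 1024 * 1024)
        existing = set()
        if skip_existing:
            paginator = s3.get_paginator('list_objects_v2')
            for page in paginator.paginate(Bucket=bucket, Prefix=key_prefix):
                existing.update(obj['Key'] for obj in page.get('Contents', []))
        for root, _, files in os.walk(local_root):
            for name in files:
                full = os.path.join(root, name)
                key = key_prefix + os.path.relpath(full, local_root).replace(os.sep, '/')
                if key in existing:
                    continue  # mirror the --no_overwrite behavior
                s3.upload_file(full, bucket, key, Config=config)

Note that boto3's upload_file handles the multipart switch internally once a file crosses the configured threshold, which replaces the explicit multipart_upload/singlepart_upload split in the removed script.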
+ 0 - 66
Tools/build/JenkinsScripts/distribution/update_version_strings.py

@@ -1,66 +0,0 @@
-"""
-
- All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
- its licensors.
-
- For complete copyright and license terms please see the LICENSE at the root of this
- distribution (the "License"). All use of this software is governed by the License,
- or, if provided, by the license below or the license accompanying this file. Do not
- remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-"""
-
-import argparse
-import datetime
-import os
-import re
-import shutil
-import stat
-import sys
-
-# Files requiring embedded version string
-waf_default_settings = "./dev/_WAF_/default_settings.json"
-
-def update_version_strings(args):
-        current_version = fetch_current_version(args)
-        print 'Storing version: ' + current_version + ' to ' + waf_default_settings
-
-        # preserve the developer's work and make it re-runnable
-        suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
-        tempfn = waf_default_settings + '-' + suffix
-
-        shutil.move(waf_default_settings, tempfn)  # preserve file attribs with a move, not a copy
-        shutil.copyfile(tempfn, waf_default_settings)  # make a writable copy for ourselves
-        print 'Original json settings saved to ' + tempfn
-
-        with open(waf_default_settings, "r+") as settings_file:
-            fc = settings_file.read()  # default_settings.json is currently small enough to slurp
-            fc = re.sub(r'(\"Build\s+Options"\s*:\s*\[[^\]]+\"default_value.+\")((\d+\.){2,4}\d+)\"', r'\g<1>' + current_version + '"', fc, flags=re.IGNORECASE | re.MULTILINE)
-            settings_file.seek(0)  # rewind to the start before rewriting
-            settings_file.truncate()
-            settings_file.write(fc)
-        return
-
-def fetch_current_version(args):
-    changelist_number = int(args.changelist_number)
-    # Below is how Lumberyard fits a changelist > 64K into a windows compatible version string that maxes out at 64K
-    upper_word = (changelist_number >> 16) & 0xFFFF
-    lower_word = changelist_number & 0xFFFF
-    return (args.major + '.' + args.minor + '.' + str(upper_word) + '.' + str(lower_word))
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('changelist_number', help='changelist number to embed into dlls')
-    parser.add_argument('major', help='major version number')
-    parser.add_argument('minor', help='minor version number')
-    args = parser.parse_args()
-    # Failure to set the version string is treated as fatal: report it and stop the build.
-    try:
-        update_version_strings(args)
-    except Exception:
-        print "FATAL ERROR: Unable to set version string in " + waf_default_settings + ": check for file not found, not writable, or default_value entry not found"
-        sys.exit(1)
-
-if __name__ == '__main__':
-    main()

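The only non-obvious step in the removed script is the changelist packing: a changelist number larger than 65535 will not fit in a single field of a Windows version resource, so it is split across the last two fields and can be reassembled as upper_word * 65536 + lower_word. A quick worked example, where the changelist number and the 1.28 major/minor values are made up for illustration:

    # Illustration only; 200000 and 1.28 are arbitrary example values.
    changelist = 200000
    upper_word = (changelist >> 16) & 0xFFFF    # 200000 // 65536 == 3
    lower_word = changelist & 0xFFFF            # 200000 - 3 * 65536 == 3392
    print('1.28.%d.%d' % (upper_word, lower_word))   # -> 1.28.3.3392
    print(upper_word * 65536 + lower_word)           # -> 200000, the original changelist

Each field stays under the 65535 limit imposed on Windows file version resources, and the original changelist remains recoverable from the last two fields.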
Some files were not shown because too many files have changed