author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
258,388 | 17.06.2020 11:21:59 | 25,200 | 61c487950a3ed84d54e8e94d2911e6392181e057 | Request new AFL++ experiment | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-06-17\n+ fuzzers:\n+ - aflplusplus\n+ - aflplusplus_optimal\n+ - aflplusplus_optimal_shmem\n+ - aflplusplus_qemu\n+ - aflplusplus_shmem\n+\n- experiment: 2020-06-12\nfuzzers:\n- aflcc\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request new AFL++ experiment (#454) |
258,399 | 17.06.2020 17:29:04 | 18,000 | 560f472e604954b2d1eda0d49c95d796cd77260f | local experiment support
Related to and | [
{
"change_type": "MODIFY",
"old_path": "experiment/conftest.py",
"new_path": "experiment/conftest.py",
"diff": "@@ -27,3 +27,14 @@ def experiment_config():\nwith open(config_filepath) as file_handle:\nreturn yaml.load(file_handle, yaml.SafeLoader)\n+\n+\[email protected]\n+def local_experiment_config():\n+ \"\"\"Fixture that returns the loaded yaml configuration\n+ test_data/local_experiment-config.yaml.\"\"\"\n+ config_filepath = os.path.join(os.path.dirname(__file__), 'test_data',\n+ 'local-experiment-config.yaml')\n+\n+ with open(config_filepath) as file_handle:\n+ return yaml.load(file_handle, yaml.SafeLoader)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -572,14 +572,14 @@ def measure_snapshot_coverage(fuzzer: str, benchmark: str, trial_num: int,\ndef set_up_coverage_binaries(pool, experiment):\n\"\"\"Set up coverage binaries for all benchmarks in |experiment|.\"\"\"\n- benchmarks = [\n+ # Use set comprehension to select distinct benchmarks.\n+ benchmarks = {\ntrial.benchmark for trial in db_utils.query(models.Trial).distinct(\nmodels.Trial.benchmark).filter(\nmodels.Trial.experiment == experiment)\n- ]\n+ }\ncoverage_binaries_dir = build_utils.get_coverage_binaries_dir()\n- if not os.path.exists(coverage_binaries_dir):\n- os.makedirs(coverage_binaries_dir)\n+ filesystem.create_directory(coverage_binaries_dir)\npool.map(set_up_coverage_binary, benchmarks)\n@@ -588,8 +588,7 @@ def set_up_coverage_binary(benchmark):\ninitialize_logs()\ncoverage_binaries_dir = build_utils.get_coverage_binaries_dir()\nbenchmark_coverage_binary_dir = coverage_binaries_dir / benchmark\n- if not os.path.exists(benchmark_coverage_binary_dir):\n- os.mkdir(benchmark_coverage_binary_dir)\n+ filesystem.create_directory(benchmark_coverage_binary_dir)\narchive_name = 'coverage-build-%s.tar.gz' % benchmark\narchive_filestore_path = exp_path.filestore(coverage_binaries_dir /\narchive_name)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/resources/runner-startup-script-template.sh",
"new_path": "experiment/resources/runner-startup-script-template.sh",
"diff": "@@ -28,7 +28,7 @@ do\necho 'Error pulling image, retrying...'\ndone{% endif %}\n-docker run {% if local_experiment %}-v {{host_gcloud_config}}:/root/.config/gcloud {% endif %}\\\n+docker run \\\n--privileged --cpus=1 --rm \\\n-e INSTANCE_NAME={{instance_name}} \\\n-e FUZZER={{fuzzer}} \\\n@@ -36,11 +36,11 @@ docker run {% if local_experiment %}-v {{host_gcloud_config}}:/root/.config/gclo\n-e EXPERIMENT={{experiment}} \\\n-e TRIAL_ID={{trial_id}} \\\n-e MAX_TOTAL_TIME={{max_total_time}} \\\n--e CLOUD_PROJECT={{cloud_project}} \\\n--e CLOUD_COMPUTE_ZONE={{cloud_compute_zone}} \\\n--e EXPERIMENT_FILESTORE={{experiment_filestore}} \\\n--e REPORT_FILESTORE={{report_filestore}} \\\n+-e CLOUD_PROJECT={{cloud_project}} {% if not local_experiment %}-e CLOUD_COMPUTE_ZONE={{cloud_compute_zone}} {% endif %}\\\n+-e EXPERIMENT_FILESTORE={{experiment_filestore}} {% if local_experiment %}-v {{experiment_filestore}}:{{experiment_filestore}} {% endif %}\\\n+-e REPORT_FILESTORE={{report_filestore}} {% if local_experiment %}-v {{report_filestore}}:{{report_filestore}} {% endif %}\\\n-e FUZZ_TARGET={{fuzz_target}} \\\n+-e LOCAL_EXPERIMENT={{local_experiment}} \\\n{{additional_env}} {% if not local_experiment %}--name=runner-container {% endif %}\\\n--cap-add SYS_NICE --cap-add SYS_PTRACE \\\n{{docker_image_url}} 2>&1 | tee /tmp/runner-log.txt\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "@@ -350,9 +350,16 @@ class LocalDispatcher:\nexperiment=self.config['experiment'])\nset_cloud_project_arg = 'CLOUD_PROJECT={cloud_project}'.format(\ncloud_project=self.config['cloud_project'])\n+ shared_experiment_filestore_arg = '{0}:{0}'.format(\n+ self.config['experiment_filestore'])\nset_experiment_filestore_arg = (\n'EXPERIMENT_FILESTORE={experiment_filestore}'.format(\nexperiment_filestore=self.config['experiment_filestore']))\n+ shared_report_filestore_arg = '{0}:{0}'.format(\n+ self.config['report_filestore'])\n+ set_report_filestore_arg = (\n+ 'REPORT_FILESTORE={report_filestore}'.format(\n+ report_filestore=self.config['report_filestore']))\ndocker_image_url = '{base_docker_tag}/dispatcher-image'.format(\nbase_docker_tag=base_docker_tag)\ncommand = [\n@@ -364,6 +371,10 @@ class LocalDispatcher:\n'/var/run/docker.sock:/var/run/docker.sock',\n'-v',\nshared_volume_volume_arg,\n+ '-v',\n+ shared_experiment_filestore_arg,\n+ '-v',\n+ shared_report_filestore_arg,\n'-e',\nshared_volume_env_arg,\n'-e',\n@@ -377,6 +388,8 @@ class LocalDispatcher:\n'-e',\nset_experiment_filestore_arg,\n'-e',\n+ set_report_filestore_arg,\n+ '-e',\n'LOCAL_EXPERIMENT=True',\n'--cap-add=SYS_PTRACE',\n'--cap-add=SYS_NICE',\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/scheduler.py",
"new_path": "experiment/scheduler.py",
"diff": "@@ -130,7 +130,8 @@ def end_expired_trials(experiment_config: dict):\nif not expired_instances:\nreturn\n- if not delete_instances(expired_instances, experiment_config):\n+ if not experiment_utils.is_local_experiment() and not delete_instances(\n+ expired_instances, experiment_config):\n# If we failed to delete some instances, then don't update the status\n# of expired trials in database as we don't know which instances were\n# successfully deleted. Wait for next iteration of end_expired_trials.\n@@ -585,21 +586,26 @@ def schedule_loop(experiment_config: dict):\n# Create the thread pool once and reuse it to avoid leaking threads and\n# other issues.\nlogger.info('Starting scheduler.')\n- gce.initialize()\nnum_trials = len(\nget_experiment_trials(experiment_config['experiment']).all())\n- trial_instance_manager = TrialInstanceManager(num_trials, experiment_config)\n+ local_experiment = experiment_utils.is_local_experiment()\n+ if not local_experiment:\n+ gce.initialize()\n+ trial_instance_manager = TrialInstanceManager(num_trials,\n+ experiment_config)\nexperiment = experiment_config['experiment']\nwith multiprocessing.Pool() as pool:\nhandle_preempted = False\nwhile not all_trials_ended(experiment):\ntry:\n- if not handle_preempted and not any_pending_trials(experiment):\n- # Only start handling preempted instances once every initial\n- # trial was started. This ensures that .\n+ schedule(experiment_config, pool)\n+ if not local_experiment:\n+ if not handle_preempted and not any_pending_trials(\n+ experiment):\n+ # Only start handling preempted instances once every\n+ # initial trial was started. This ensures that.\nhandle_preempted = True\n- schedule(experiment_config, pool)\nif handle_preempted:\ntrial_instance_manager.handle_preempted_trials()\nexcept Exception: # pylint: disable=broad-except\n@@ -732,7 +738,6 @@ def render_startup_script_template(instance_name: str, fuzzer: str,\n'trial_id': trial_id,\n'max_total_time': experiment_config['max_total_time'],\n'cloud_project': experiment_config['cloud_project'],\n- 'cloud_compute_zone': experiment_config['cloud_compute_zone'],\n'experiment_filestore': experiment_config['experiment_filestore'],\n'report_filestore': experiment_config['report_filestore'],\n'fuzz_target': fuzz_target,\n@@ -740,8 +745,9 @@ def render_startup_script_template(instance_name: str, fuzzer: str,\n'additional_env': additional_env,\n'local_experiment': local_experiment\n}\n- if local_experiment:\n- kwargs['host_gcloud_config'] = os.environ['HOST_GCLOUD_CONFIG']\n+\n+ if not local_experiment:\n+ kwargs['cloud_compute_zone'] = experiment_config['cloud_compute_zone']\nreturn template.render(**kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/stop_experiment.py",
"new_path": "experiment/stop_experiment.py",
"diff": "@@ -26,9 +26,14 @@ logger = logs.Logger('stop_experiment') # pylint: disable=invalid-name\ndef stop_experiment(experiment_name, experiment_config_filename):\n\"\"\"Stop the experiment specified by |experiment_config_filename|.\"\"\"\n+ experiment_config = yaml_utils.read(experiment_config_filename)\n+\n+ if experiment_config.get('local_experiment', False):\n+ raise NotImplementedError(\n+ 'Local experiment stop logic is not implemented.')\n+\ninstances = gcloud.list_instances()\n- experiment_config = yaml_utils.read(experiment_config_filename)\ncloud_compute_zone = experiment_config['cloud_compute_zone']\ntrial_prefix = 'r-' + experiment_name\nexperiment_instances = [\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/test_data/local-experiment-config.yaml",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+#\n+# TODO: remove `cloud_project` by `docker_registry`.\n+\n+experiment: test-experiment\n+trials: 4\n+max_total_time: 86400\n+cloud_project: fuzzbench\n+experiment_filestore: /tmp/experiment-data\n+report_filestore: /tmp/web-reports\n+local_experiment: true\n+benchmarks: \"benchmark-1,benchmark-2\"\n+git_hash: \"git-hash\"\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "@@ -106,11 +106,11 @@ docker run \\\\\n-e EXPERIMENT=test-experiment \\\\\n-e TRIAL_ID=9 \\\\\n-e MAX_TOTAL_TIME=86400 \\\\\n--e CLOUD_PROJECT=fuzzbench \\\\\n--e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n+-e CLOUD_PROJECT=fuzzbench -e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n-e EXPERIMENT_FILESTORE=gs://experiment-data \\\\\n-e REPORT_FILESTORE=gs://web-reports \\\\\n-e FUZZ_TARGET={oss_fuzz_target} \\\\\n+-e LOCAL_EXPERIMENT=False \\\\\n-e C1=custom -e C2=custom2 --name=runner-container \\\\\n--cap-add SYS_NICE --cap-add SYS_PTRACE \\\\\n{docker_image_url} 2>&1 | tee /tmp/runner-log.txt'''\n@@ -127,16 +127,16 @@ docker run \\\\\n'gcr.io/fuzzbench/runners/fuzzer-a/bloaty_fuzz_target', 'fuzz_target')])\ndef test_create_trial_instance_local_experiment(benchmark, expected_image,\nexpected_target,\n- experiment_config, environ):\n+ local_experiment_config,\n+ environ):\n\"\"\"Test that create_trial_instance invokes create_instance and creates a\nstartup script for the instance, as we expect it to when running a\nlocal_experiment.\"\"\"\nos.environ['LOCAL_EXPERIMENT'] = str(True)\n- os.environ['HOST_GCLOUD_CONFIG'] = '~/.config/gcloud'\nexpected_startup_script = '''## Start docker.\n-docker run -v ~/.config/gcloud:/root/.config/gcloud \\\\\n+docker run \\\\\n--privileged --cpus=1 --rm \\\\\n-e INSTANCE_NAME=r-test-experiment-9 \\\\\n-e FUZZER=variant \\\\\n@@ -145,16 +145,16 @@ docker run -v ~/.config/gcloud:/root/.config/gcloud \\\\\n-e TRIAL_ID=9 \\\\\n-e MAX_TOTAL_TIME=86400 \\\\\n-e CLOUD_PROJECT=fuzzbench \\\\\n--e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n--e EXPERIMENT_FILESTORE=gs://experiment-data \\\\\n--e REPORT_FILESTORE=gs://web-reports \\\\\n+-e EXPERIMENT_FILESTORE=/tmp/experiment-data -v /tmp/experiment-data:/tmp/experiment-data \\\\\n+-e REPORT_FILESTORE=/tmp/web-reports -v /tmp/web-reports:/tmp/web-reports \\\\\n-e FUZZ_TARGET={oss_fuzz_target} \\\\\n+-e LOCAL_EXPERIMENT=True \\\\\n-e C1=custom -e C2=custom2 \\\\\n--cap-add SYS_NICE --cap-add SYS_PTRACE \\\\\n{docker_image_url} 2>&1 | tee /tmp/runner-log.txt'''\n_test_create_trial_instance(benchmark, expected_image, expected_target,\n- expected_startup_script, experiment_config,\n- False)\n+ expected_startup_script,\n+ local_experiment_config, False)\[email protected]('common.gcloud.create_instance')\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | local experiment support (#444)
Related to #251 and #8. |
258,388 | 17.06.2020 19:27:34 | 25,200 | 97efaa5bf342a06ddcf84eb0baa7809b1137e49c | Request another AFL++ experiment | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-06-18\n+ fuzzers:\n+ - aflplusplus\n+ - aflplusplus_optimal\n+ - aflplusplus_optimal_shmem\n+ - aflplusplus_qemu\n+ - aflplusplus_shmem\n+\n- experiment: 2020-06-17\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request another AFL++ experiment (#460) |
258,399 | 18.06.2020 12:24:32 | 18,000 | c40344d85cb128a639eeacfc5b6e884d1f3e4a3a | [NFC] `handle_preempted` is never True when running locally
Related to | [
{
"change_type": "MODIFY",
"old_path": "experiment/scheduler.py",
"new_path": "experiment/scheduler.py",
"diff": "@@ -598,14 +598,16 @@ def schedule_loop(experiment_config: dict):\nhandle_preempted = False\nwhile not all_trials_ended(experiment):\ntry:\n- schedule(experiment_config, pool)\n- if not local_experiment:\n- if not handle_preempted and not any_pending_trials(\n- experiment):\n- # Only start handling preempted instances once every\n- # initial trial was started. This ensures that.\n+ if (not local_experiment and not handle_preempted and\n+ not any_pending_trials(experiment)):\n+ # This ensures that:\n+ # 1. handle_preempted will not becomes True when running\n+ # locally.\n+ # 2. Only start handling preempted instances once every\n+ # initial trial was started.\nhandle_preempted = True\n+ schedule(experiment_config, pool)\nif handle_preempted:\ntrial_instance_manager.handle_preempted_trials()\nexcept Exception: # pylint: disable=broad-except\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [NFC] `handle_preempted` is never True when running locally (#464)
Related to #444 |
258,399 | 19.06.2020 16:54:31 | 18,000 | c3ae4db58f60c182efe9e97204247575b0b44661 | Add `expect_zero` back for filestore_utils.cp
Fix | [
{
"change_type": "MODIFY",
"old_path": "common/filestore_utils.py",
"new_path": "common/filestore_utils.py",
"diff": "@@ -37,12 +37,15 @@ def get_impl():\nreturn local_filestore\n-def cp(source, destination, recursive=False, parallel=False): # pylint: disable=invalid-name\n- \"\"\"Copies |source| to |destination|.\"\"\"\n+def cp(source, destination, recursive=False, parallel=False, expect_zero=True): # pylint: disable=invalid-name\n+ \"\"\"Copies |source| to |destination|. If |expect_zero| is True then it can\n+ raise subprocess.CalledProcessError. |parallel| is only effective for\n+ gsutil.\"\"\"\nreturn get_impl().cp(source,\ndestination,\nrecursive=recursive,\n- parallel=parallel)\n+ parallel=parallel,\n+ expect_zero=expect_zero)\ndef ls(path, must_exist=True): # pylint: disable=invalid-name\n@@ -67,7 +70,8 @@ def rsync( # pylint: disable=too-many-arguments\ngsutil_options=None,\noptions=None,\nparallel=False):\n- \"\"\"Syncs |source| and |destination| folders.\"\"\"\n+ \"\"\"Syncs |source| and |destination| folders. |gsutil_options| and |parallel|\n+ are only used in gsutil.\"\"\"\nreturn get_impl().rsync(source,\ndestination,\ndelete,\n"
},
{
"change_type": "MODIFY",
"old_path": "common/gsutil.py",
"new_path": "common/gsutil.py",
"diff": "@@ -28,25 +28,28 @@ def gsutil_command(arguments, expect_zero=True, parallel=False):\nreturn new_process.execute(command + arguments, expect_zero=expect_zero)\n-def cp(source, destination, recursive=False, parallel=False): # pylint: disable=invalid-name\n- \"\"\"Executes gsutil's \"cp\" command with |cp_arguments|.\"\"\"\n+def cp(source, destination, recursive=False, parallel=False, expect_zero=True): # pylint: disable=invalid-name\n+ \"\"\"Executes gsutil's \"cp\" command to copy |source| to |destination|. Uses -r\n+ if |recursive|. If |expect_zero| is True and the command fails then this\n+ function will raise a subprocess.CalledError.\"\"\"\ncommand = ['cp']\nif recursive:\ncommand.append('-r')\ncommand.extend([source, destination])\n- return gsutil_command(command, parallel=parallel)\n+ return gsutil_command(command, parallel=parallel, expect_zero=expect_zero)\ndef ls(path, must_exist=True): # pylint: disable=invalid-name\n- \"\"\"Executes gsutil's \"ls\" command on |path|.\"\"\"\n+ \"\"\"Executes gsutil's \"ls\" command on |path|. If |must_exist| is True and the\n+ command fails then this function will raise a subprocess.CalledError.\"\"\"\ncommand = ['ls', path]\nprocess_result = gsutil_command(command, expect_zero=must_exist)\nreturn process_result\ndef rm(path, recursive=True, force=False, parallel=False): # pylint: disable=invalid-name\n- \"\"\"Executes gsutil's rm command with |rm_arguments| and returns the result.\n+ \"\"\"Executes gsutil's rm command on |path| and returns the result.\nUses -r if |recursive|. If |force|, then uses -f and will not except if\nreturn code is nonzero.\"\"\"\ncommand = ['rm', path]\n"
},
{
"change_type": "MODIFY",
"old_path": "common/local_filestore.py",
"new_path": "common/local_filestore.py",
"diff": "@@ -23,7 +23,8 @@ def cp( # pylint: disable=invalid-name\nsource,\ndestination,\nrecursive=False,\n- parallel=False): # pylint: disable=unused-argument\n+ parallel=False, # pylint: disable=unused-argument\n+ expect_zero=True):\n\"\"\"Executes \"cp\" command from |source| to |destination|.\"\"\"\n# Create intermediate folders for `cp` command to behave like `gsutil.cp`.\nfilesystem.create_directory(os.path.dirname(destination))\n@@ -32,7 +33,7 @@ def cp( # pylint: disable=invalid-name\nif recursive:\ncommand.append('-r')\ncommand.extend([source, destination])\n- return new_process.execute(command, expect_zero=True)\n+ return new_process.execute(command, expect_zero=expect_zero)\ndef ls(path, must_exist=True): # pylint: disable=invalid-name\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -21,7 +21,6 @@ import os\nimport pathlib\nimport posixpath\nimport sys\n-import subprocess\nimport tarfile\nimport time\nfrom typing import List, Set\n@@ -400,12 +399,10 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\ndef copy_unchanged_cycles_file():\nunchanged_cycles_filestore_path = exp_path.filestore(\nself.unchanged_cycles_path)\n- try:\n- filestore_utils.cp(unchanged_cycles_filestore_path,\n- self.unchanged_cycles_path)\n- return True\n- except subprocess.CalledProcessError:\n- return False\n+ result = filestore_utils.cp(unchanged_cycles_filestore_path,\n+ self.unchanged_cycles_path,\n+ expect_zero=False)\n+ return result.retcode == 0\nif not os.path.exists(self.unchanged_cycles_path):\nif not copy_unchanged_cycles_file():\n@@ -540,9 +537,9 @@ def measure_snapshot_coverage(fuzzer: str, benchmark: str, trial_num: int,\nif not os.path.exists(corpus_archive_dir):\nos.makedirs(corpus_archive_dir)\n- try:\n- filestore_utils.cp(corpus_archive_src, corpus_archive_dst)\n- except subprocess.CalledProcessError:\n+ if filestore_utils.cp(corpus_archive_src,\n+ corpus_archive_dst,\n+ expect_zero=False).retcode:\nsnapshot_logger.warning('Corpus not found for cycle: %d.', cycle)\nreturn None\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_measurer.py",
"new_path": "experiment/test_measurer.py",
"diff": "import os\nimport shutil\n-import subprocess\nfrom unittest import mock\nimport queue\n@@ -200,7 +199,7 @@ def test_is_cycle_unchanged_no_file(mocked_cp, fs, experiment):\n# Make sure we log if there is no unchanged-cycles file.\nsnapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\nSNAPSHOT_LOGGER)\n- mocked_cp.side_effect = subprocess.CalledProcessError(1, ['fakecommand'])\n+ mocked_cp.return_value = new_process.ProcessResult(1, '', False)\nassert not snapshot_measurer.is_cycle_unchanged(0)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add `expect_zero` back for filestore_utils.cp (#466)
Fix #449. |
258,399 | 19.06.2020 17:18:02 | 18,000 | 7805093436f36ddaef155da8b045da2b57ec02b2 | remove shared_volume
Fix: `coverage_binaries` will now be stored under `experiment_filestore/experiment_name/`. | [
{
"change_type": "MODIFY",
"old_path": "experiment/build/local_build.py",
"new_path": "experiment/build/local_build.py",
"diff": "\"\"\"Module for building things on Google Cloud Build for use in trials.\"\"\"\nimport os\n-import posixpath\nfrom typing import Tuple\nfrom common import benchmark_utils\nfrom common import environment\n-from common import experiment_path as exp_path\n-from common import filestore_utils\n+from common import experiment_utils\nfrom common import logs\nfrom common import new_process\nfrom common import utils\n-from experiment.build import build_utils\nlogger = logs.Logger('builder') # pylint: disable=invalid-name\n@@ -43,8 +40,8 @@ def build_base_images() -> Tuple[int, str]:\ndef get_shared_coverage_binaries_dir():\n\"\"\"Returns the shared coverage binaries directory.\"\"\"\n- shared_volume = os.environ['SHARED_VOLUME']\n- return os.path.join(shared_volume, 'coverage-binaries')\n+ experiment_filestore_path = experiment_utils.get_experiment_filestore_path()\n+ return os.path.join(experiment_filestore_path, 'coverage-binaries')\ndef make_shared_coverage_binaries_dir():\n@@ -77,16 +74,10 @@ def copy_coverage_binaries(benchmark):\nshared_coverage_binaries_dir, coverage_build_archive)\ncommand = 'cd /out; tar -czvf {} *'.format(\ncoverage_build_archive_shared_dir_path)\n- new_process.execute([\n+ return new_process.execute([\n'docker', 'run', '-v', mount_arg, builder_image_url, '/bin/bash', '-c',\ncommand\n])\n- coverage_binaries_dir = build_utils.get_coverage_binaries_dir()\n- coverage_build_archive_gcs_path = posixpath.join(\n- exp_path.filestore(coverage_binaries_dir), coverage_build_archive)\n-\n- return filestore_utils.cp(coverage_build_archive_shared_dir_path,\n- coverage_build_archive_gcs_path)\ndef build_fuzzer_benchmark(fuzzer: str, benchmark: str) -> bool:\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "@@ -334,13 +334,11 @@ class LocalDispatcher:\ndef start(self):\n\"\"\"Start the experiment on the dispatcher.\"\"\"\n- shared_volume_dir = os.path.abspath('shared-volume')\n- if not os.path.exists(shared_volume_dir):\n- os.mkdir(shared_volume_dir)\n- shared_volume_volume_arg = '{0}:{0}'.format(shared_volume_dir)\n- shared_volume_env_arg = 'SHARED_VOLUME={}'.format(shared_volume_dir)\n+ experiment_filestore_path = os.path.abspath(\n+ self.config['experiment_filestore'])\n+ filesystem.create_directory(experiment_filestore_path)\nsql_database_arg = 'SQL_DATABASE_URL=sqlite:///{}'.format(\n- os.path.join(shared_volume_dir, 'local.db'))\n+ os.path.join(experiment_filestore_path, 'local.db'))\nbase_docker_tag = experiment_utils.get_base_docker_tag(\nself.config['cloud_project'])\n@@ -370,14 +368,10 @@ class LocalDispatcher:\n'-v',\n'/var/run/docker.sock:/var/run/docker.sock',\n'-v',\n- shared_volume_volume_arg,\n- '-v',\nshared_experiment_filestore_arg,\n'-v',\nshared_report_filestore_arg,\n'-e',\n- shared_volume_env_arg,\n- '-e',\nset_instance_name_arg,\n'-e',\nset_experiment_arg,\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | remove shared_volume (#462)
Fix #457. `coverage_binaries` will now be stored under `experiment_filestore/experiment_name/`. |
258,399 | 22.06.2020 11:17:53 | 18,000 | bb9076c9f1cb80f5ab55287cc42c7ed4a1df8c56 | [NFC] minor comment fix | [
{
"change_type": "MODIFY",
"old_path": "common/filestore_utils.py",
"new_path": "common/filestore_utils.py",
"diff": "@@ -39,8 +39,8 @@ def get_impl():\ndef cp(source, destination, recursive=False, parallel=False, expect_zero=True): # pylint: disable=invalid-name\n\"\"\"Copies |source| to |destination|. If |expect_zero| is True then it can\n- raise subprocess.CalledProcessError. |parallel| is only effective for\n- gsutil.\"\"\"\n+ raise subprocess.CalledProcessError. |parallel| is only used by the gsutil\n+ implementation.\"\"\"\nreturn get_impl().cp(source,\ndestination,\nrecursive=recursive,\n@@ -71,7 +71,7 @@ def rsync( # pylint: disable=too-many-arguments\noptions=None,\nparallel=False):\n\"\"\"Syncs |source| and |destination| folders. |gsutil_options| and |parallel|\n- are only used in gsutil.\"\"\"\n+ are only used by the gsutil implementation.\"\"\"\nreturn get_impl().rsync(source,\ndestination,\ndelete,\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [NFC] #466 minor comment fix (#475) |
258,399 | 22.06.2020 13:41:35 | 18,000 | ca24c304439466eab8331b1ba5bd5aaad76a58e8 | SQL query optimization
Remove set construction. | [
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -570,11 +570,12 @@ def measure_snapshot_coverage(fuzzer: str, benchmark: str, trial_num: int,\ndef set_up_coverage_binaries(pool, experiment):\n\"\"\"Set up coverage binaries for all benchmarks in |experiment|.\"\"\"\n# Use set comprehension to select distinct benchmarks.\n- benchmarks = {\n- trial.benchmark for trial in db_utils.query(models.Trial).distinct(\n- models.Trial.benchmark).filter(\n- models.Trial.experiment == experiment)\n- }\n+ benchmarks = [\n+ benchmark_tuple[0]\n+ for benchmark_tuple in db_utils.query(models.Trial.benchmark).distinct(\n+ ).filter(models.Trial.experiment == experiment)\n+ ]\n+\ncoverage_binaries_dir = build_utils.get_coverage_binaries_dir()\nfilesystem.create_directory(coverage_binaries_dir)\npool.map(set_up_coverage_binary, benchmarks)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | SQL query optimization (#469)
Remove set construction. |
258,388 | 22.06.2020 13:37:46 | 25,200 | 7d8527cbded8d0280cbed461058a7b89beb935cd | [woff2] Fix seeds
Use seeds from OSS-Fuzz instead of getting them manually
(and incorrectly).
Fixes | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/woff2-2016-05-06/build.sh",
"new_path": "benchmarks/woff2-2016-05-06/build.sh",
"diff": "@@ -22,10 +22,13 @@ apt-get update && \\\nautoconf \\\nlibtool\n+# Get seeds.\n+get_git_revision https://github.com/google/oss-fuzz.git e8ffee4077b59e35824a2e97aa214ee95d39ed13 oss-fuzz\n+mkdir -p $OUT/seeds\n+cp oss-fuzz/projects/woff2/corpus/* $OUT/seeds\n+\nget_git_revision https://github.com/google/woff2.git 9476664fd6931ea6ec532c94b816d8fbbe3aed90 SRC\nget_git_revision https://github.com/google/brotli.git 3a9032ba8733532a6cd6727970bade7f7c0e2f52 BROTLI\n-get_git_revision https://github.com/FontFaceKit/roboto.git 0e41bf923e2599d651084eece345701e55a8bfde $OUT/seeds\n-rm -rf $OUT/seeds/.git # Remove unneeded .git folder.\nrm -f *.o\nfor f in font.cc normalize.cc transform.cc woff2_common.cc woff2_dec.cc woff2_enc.cc glyph.cc table_tags.cc variable_length.cc woff2_out.cc; do\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [woff2] Fix seeds (#477)
Use seeds from OSS-Fuzz instead of getting them manually
(and incorrectly).
Fixes #463 |
258,399 | 24.06.2020 13:24:22 | 18,000 | b16c35cb5e819c8d025a6857e7266b457ae5b889 | remove `cloud_project` requirement for local run; add `docker_registry` as a required option for both settings
Fix related to and | [
{
"change_type": "MODIFY",
"old_path": "common/benchmark_utils.py",
"new_path": "common/benchmark_utils.py",
"diff": "import os\nimport re\n-from common import experiment_utils\nfrom common import fuzzer_utils\nfrom common import logs\nfrom common import oss_fuzz\n@@ -45,21 +44,18 @@ def get_fuzz_target(benchmark):\nreturn fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME\n-def get_runner_image_url(benchmark, fuzzer, cloud_project):\n+def get_runner_image_url(benchmark, fuzzer, docker_registry):\n\"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\nfuzzer.\"\"\"\n- base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n- fuzzer=fuzzer,\n- benchmark=benchmark)\n+ return '{docker_registry}/runners/{fuzzer}/{benchmark}'.format(\n+ docker_registry=docker_registry, fuzzer=fuzzer, benchmark=benchmark)\n-def get_builder_image_url(benchmark, fuzzer, cloud_project):\n+def get_builder_image_url(benchmark, fuzzer, docker_registry):\n\"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\nfuzzer.\"\"\"\n- base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n- base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n+ return '{docker_registry}/builders/{fuzzer}/{benchmark}'.format(\n+ docker_registry=docker_registry, fuzzer=fuzzer, benchmark=benchmark)\ndef get_oss_fuzz_builder_hash(benchmark):\n"
},
{
"change_type": "MODIFY",
"old_path": "common/test_benchmark_utils.py",
"new_path": "common/test_benchmark_utils.py",
"diff": "@@ -19,7 +19,7 @@ from common import conftest\n# pylint: disable=invalid-name,unused-argument\n-CLOUD_PROJECT = 'fuzzbench'\n+DOCKER_REGISTRY = 'gcr.io/fuzzbench'\nOTHER_BENCHMARK = 'benchmark'\n@@ -62,7 +62,7 @@ def test_get_fuzz_target(benchmark, expected_fuzz_target, oss_fuzz_benchmark):\ndef test_get_runner_image_url(benchmark, expected_url, oss_fuzz_benchmark):\n\"\"\"Test that we can get the runner image url of a benchmark.\"\"\"\nassert benchmark_utils.get_runner_image_url(benchmark, 'fuzzer',\n- CLOUD_PROJECT) == expected_url\n+ DOCKER_REGISTRY) == expected_url\ndef test_get_builder_hash_oss_fuzz_benchmark(oss_fuzz_benchmark):\n"
},
{
"change_type": "MODIFY",
"old_path": "conftest.py",
"new_path": "conftest.py",
"diff": "@@ -87,6 +87,8 @@ def use_local_filestore(experiment): # pylint: disable=redefined-outer-name,unu\n\"\"\"Mock a local filestore usage experiment.\"\"\"\nos.environ['EXPERIMENT_FILESTORE'] = '/experiment-data'\nos.environ['REPORT_FILESTORE'] = '/experiment-report'\n+ os.environ['LOCAL_EXPERIMENT'] = 'true'\n+ os.environ['DOCKER_REGISTRY'] = 'gcr.io/fuzzbench'\[email protected]\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/running-your-own-experiment/running_an_experiment.md",
"new_path": "docs/running-your-own-experiment/running_an_experiment.md",
"diff": "@@ -56,6 +56,10 @@ trials: 5\n# 1 day = 24 * 60 * 60 = 86400\nmax_total_time: 86400\n+# The docker registry for your fuzzbench experiments.\n+# If you use Google Cloud, this can be gcr.io/$PROJECT_NAME.\n+docker_registry: gcr.io/$PROJECT_NAME\n+\n# The name of your Google Cloud project.\ncloud_project: $PROJECT_NAME\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/local_build.py",
"new_path": "experiment/build/local_build.py",
"diff": "@@ -68,7 +68,7 @@ def copy_coverage_binaries(benchmark):\nshared_coverage_binaries_dir = get_shared_coverage_binaries_dir()\nmount_arg = '{0}:{0}'.format(shared_coverage_binaries_dir)\nbuilder_image_url = benchmark_utils.get_builder_image_url(\n- benchmark, 'coverage', environment.get('CLOUD_PROJECT'))\n+ benchmark, 'coverage', environment.get('DOCKER_REGISTRY'))\ncoverage_build_archive = 'coverage-build-{}.tar.gz'.format(benchmark)\ncoverage_build_archive_shared_dir_path = os.path.join(\nshared_coverage_binaries_dir, coverage_build_archive)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/resources/runner-startup-script-template.sh",
"new_path": "experiment/resources/runner-startup-script-template.sh",
"diff": "@@ -36,7 +36,7 @@ docker run \\\n-e EXPERIMENT={{experiment}} \\\n-e TRIAL_ID={{trial_id}} \\\n-e MAX_TOTAL_TIME={{max_total_time}} \\\n--e CLOUD_PROJECT={{cloud_project}} {% if not local_experiment %}-e CLOUD_COMPUTE_ZONE={{cloud_compute_zone}} {% endif %}\\\n+-e DOCKER_REGISTRY={{docker_registry}} {% if not local_experiment %}-e CLOUD_PROJECT={{cloud_project}} -e CLOUD_COMPUTE_ZONE={{cloud_compute_zone}} {% endif %}\\\n-e EXPERIMENT_FILESTORE={{experiment_filestore}} {% if local_experiment %}-v {{experiment_filestore}}:{{experiment_filestore}} {% endif %}\\\n-e REPORT_FILESTORE={{report_filestore}} {% if local_experiment %}-v {{report_filestore}}:{{report_filestore}} {% endif %}\\\n-e FUZZ_TARGET={{fuzz_target}} \\\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "@@ -66,10 +66,11 @@ def read_and_validate_experiment_config(config_filename: str) -> Dict:\nand returns it.\"\"\"\nconfig = yaml_utils.read(config_filename)\nfilestore_params = {'experiment_filestore', 'report_filestore'}\n- cloud_config = {'cloud_compute_zone'}\n- string_params = cloud_config.union(filestore_params)\n+ cloud_config = {'cloud_compute_zone', 'cloud_project'}\n+ docker_config = {'docker_registry'}\n+ string_params = cloud_config.union(filestore_params).union(docker_config)\nint_params = {'trials', 'max_total_time'}\n- required_params = int_params.union(filestore_params)\n+ required_params = int_params.union(filestore_params).union(docker_config)\nlocal_experiment = config.get('local_experiment', False)\nif not local_experiment:\n@@ -120,6 +121,8 @@ def read_and_validate_experiment_config(config_filename: str) -> Dict:\nif not valid:\nraise ValidationError('Config: %s is invalid.' % config_filename)\n+\n+ config['local_experiment'] = local_experiment\nreturn config\n@@ -340,16 +343,16 @@ class LocalDispatcher:\nsql_database_arg = 'SQL_DATABASE_URL=sqlite:///{}'.format(\nos.path.join(experiment_filestore_path, 'local.db'))\n- base_docker_tag = experiment_utils.get_base_docker_tag(\n- self.config['cloud_project'])\n+ docker_registry = self.config['docker_registry']\nset_instance_name_arg = 'INSTANCE_NAME={instance_name}'.format(\ninstance_name=self.instance_name)\nset_experiment_arg = 'EXPERIMENT={experiment}'.format(\nexperiment=self.config['experiment'])\n- set_cloud_project_arg = 'CLOUD_PROJECT={cloud_project}'.format(\n- cloud_project=self.config['cloud_project'])\nshared_experiment_filestore_arg = '{0}:{0}'.format(\nself.config['experiment_filestore'])\n+ # TODO: (#484) Use config in function args or set as environment\n+ # variables.\n+ set_docker_registry_arg = 'DOCKER_REGISTRY={}'.format(docker_registry)\nset_experiment_filestore_arg = (\n'EXPERIMENT_FILESTORE={experiment_filestore}'.format(\nexperiment_filestore=self.config['experiment_filestore']))\n@@ -358,8 +361,8 @@ class LocalDispatcher:\nset_report_filestore_arg = (\n'REPORT_FILESTORE={report_filestore}'.format(\nreport_filestore=self.config['report_filestore']))\n- docker_image_url = '{base_docker_tag}/dispatcher-image'.format(\n- base_docker_tag=base_docker_tag)\n+ docker_image_url = '{docker_registry}/dispatcher-image'.format(\n+ docker_registry=docker_registry)\ncommand = [\n'docker',\n'run',\n@@ -376,14 +379,14 @@ class LocalDispatcher:\n'-e',\nset_experiment_arg,\n'-e',\n- set_cloud_project_arg,\n- '-e',\nsql_database_arg,\n'-e',\nset_experiment_filestore_arg,\n'-e',\nset_report_filestore_arg,\n'-e',\n+ set_docker_registry_arg,\n+ '-e',\n'LOCAL_EXPERIMENT=True',\n'--cap-add=SYS_PTRACE',\n'--cap-add=SYS_NICE',\n@@ -424,8 +427,7 @@ class GoogleCloudDispatcher(BaseDispatcher):\ngcloud.robust_begin_gcloud_ssh(self.instance_name,\nself.config['cloud_compute_zone'])\n- base_docker_tag = experiment_utils.get_base_docker_tag(\n- self.config['cloud_project'])\n+ docker_registry = self.config['docker_registry']\ncloud_sql_instance_connection_name = (\nself.config['cloud_sql_instance_connection_name'])\n@@ -435,6 +437,7 @@ class GoogleCloudDispatcher(BaseDispatcher):\n'-e INSTANCE_NAME=\"{instance_name}\" '\n'-e EXPERIMENT=\"{experiment}\" '\n'-e CLOUD_PROJECT=\"{cloud_project}\" '\n+ '-e DOCKER_REGISTRY=\"{docker_registry}\" '\n'-e EXPERIMENT_FILESTORE=\"{experiment_filestore}\" '\n'-e POSTGRES_PASSWORD=\"{postgres_password}\" '\n'-e CLOUD_SQL_INSTANCE_CONNECTION_NAME='\n@@ -442,7 +445,7 @@ class 
GoogleCloudDispatcher(BaseDispatcher):\n'--cap-add=SYS_PTRACE --cap-add=SYS_NICE '\n'-v /var/run/docker.sock:/var/run/docker.sock '\n'--name=dispatcher-container '\n- '{base_docker_tag}/dispatcher-image '\n+ '{docker_registry}/dispatcher-image '\n'/work/startup-dispatcher.sh'\n).format(\ninstance_name=self.instance_name,\n@@ -455,7 +458,7 @@ class GoogleCloudDispatcher(BaseDispatcher):\nexperiment_filestore=self.config['experiment_filestore'],\ncloud_sql_instance_connection_name=(\ncloud_sql_instance_connection_name),\n- base_docker_tag=base_docker_tag,\n+ docker_registry=docker_registry,\n)\nreturn gcloud.ssh(self.instance_name,\ncommand=command,\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/scheduler.py",
"new_path": "experiment/scheduler.py",
"diff": "@@ -718,7 +718,8 @@ def render_startup_script_template(instance_name: str, fuzzer: str,\nprovided and return the result.\"\"\"\nfuzzer_config = fuzzer_config_utils.get_by_variant_name(fuzzer)\ndocker_image_url = benchmark_utils.get_runner_image_url(\n- benchmark, fuzzer_config['fuzzer'], experiment_config['cloud_project'])\n+ benchmark, fuzzer_config['fuzzer'],\n+ experiment_config['docker_registry'])\nfuzz_target = benchmark_utils.get_fuzz_target(benchmark)\n# Convert additional environment variables from configuration to arguments\n@@ -739,17 +740,18 @@ def render_startup_script_template(instance_name: str, fuzzer: str,\n'fuzzer': fuzzer,\n'trial_id': trial_id,\n'max_total_time': experiment_config['max_total_time'],\n- 'cloud_project': experiment_config['cloud_project'],\n'experiment_filestore': experiment_config['experiment_filestore'],\n'report_filestore': experiment_config['report_filestore'],\n'fuzz_target': fuzz_target,\n'docker_image_url': docker_image_url,\n'additional_env': additional_env,\n+ 'docker_registry': experiment_config['docker_registry'],\n'local_experiment': local_experiment\n}\nif not local_experiment:\nkwargs['cloud_compute_zone'] = experiment_config['cloud_compute_zone']\n+ kwargs['cloud_project'] = experiment_config['cloud_project']\nreturn template.render(**kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_data/experiment-config.yaml",
"new_path": "experiment/test_data/experiment-config.yaml",
"diff": "@@ -16,6 +16,7 @@ experiment: test-experiment\ntrials: 4\nmax_total_time: 86400\ncloud_project: fuzzbench\n+docker_registry: gcr.io/fuzzbench\ncloud_compute_zone: us-central1-a\nexperiment_filestore: gs://experiment-data\nreport_filestore: gs://web-reports\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_data/local-experiment-config.yaml",
"new_path": "experiment/test_data/local-experiment-config.yaml",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n-# TODO: remove `cloud_project` by `docker_registry`.\nexperiment: test-experiment\ntrials: 4\nmax_total_time: 86400\n-cloud_project: fuzzbench\n+docker_registry: gcr.io/fuzzbench\nexperiment_filestore: /tmp/experiment-data\nreport_filestore: /tmp/web-reports\nlocal_experiment: true\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_run_experiment.py",
"new_path": "experiment/test_run_experiment.py",
"diff": "@@ -52,6 +52,8 @@ class TestReadAndValdiateExperimentConfig(unittest.TestCase):\n'experiment_filestore': 'gs://bucket',\n'report_filestore': 'gs://web-bucket',\n'experiment': 'experiment-name',\n+ 'docker_registry': 'gcr.io/fuzzbench',\n+ 'cloud_project': 'fuzzbench',\n'cloud_compute_zone': 'us-central1-a',\n'trials': 10,\n'max_total_time': 1000,\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "@@ -106,7 +106,7 @@ docker run \\\\\n-e EXPERIMENT=test-experiment \\\\\n-e TRIAL_ID=9 \\\\\n-e MAX_TOTAL_TIME=86400 \\\\\n--e CLOUD_PROJECT=fuzzbench -e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n+-e DOCKER_REGISTRY=gcr.io/fuzzbench -e CLOUD_PROJECT=fuzzbench -e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n-e EXPERIMENT_FILESTORE=gs://experiment-data \\\\\n-e REPORT_FILESTORE=gs://web-reports \\\\\n-e FUZZ_TARGET={oss_fuzz_target} \\\\\n@@ -144,7 +144,7 @@ docker run \\\\\n-e EXPERIMENT=test-experiment \\\\\n-e TRIAL_ID=9 \\\\\n-e MAX_TOTAL_TIME=86400 \\\\\n--e CLOUD_PROJECT=fuzzbench \\\\\n+-e DOCKER_REGISTRY=gcr.io/fuzzbench \\\\\n-e EXPERIMENT_FILESTORE=/tmp/experiment-data -v /tmp/experiment-data:/tmp/experiment-data \\\\\n-e REPORT_FILESTORE=/tmp/web-reports -v /tmp/web-reports:/tmp/web-reports \\\\\n-e FUZZ_TARGET={oss_fuzz_target} \\\\\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | remove `cloud_project` requirement for local run; add `docker_registry` as a required option for both settings (#461)
Fix #445, related to #251 and #444. |
258,399 | 24.06.2020 15:08:25 | 18,000 | c08d2a131d510ce4e20a213003bb2de637d2314d | update service/experiment-config.yaml | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-config.yaml",
"new_path": "service/experiment-config.yaml",
"diff": "trials: 20\nmax_total_time: 82800 # 23 hours, the default time for preemptible experiments.\ncloud_project: fuzzbench\n+docker_registry: gcr.io/fuzzbench\ncloud_compute_zone: us-central1-a\nexperiment_filestore: gs://fuzzbench-data\nreport_filestore: gs://www.fuzzbench.com/reports\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | update service/experiment-config.yaml (#485) |
258,388 | 25.06.2020 10:36:21 | 25,200 | f50e978cec3e060a7f446fe9d5d09a5c46f177c1 | [docs] Improve docs-serve and fix link
[docs] Improve docs-serve and fix link
1. Make sure dependencies are installed when using docs-serve
2. Fix link to customizing reports from report reference page. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -42,7 +42,10 @@ lint: install-dependencies\ntypecheck: install-dependencies\nsource ${VENV_ACTIVATE} && python3 presubmit.py typecheck\n-docs-serve:\n+install-docs-dependencies: docs/Gemfile.lock\n+ cd docs && bundle install\n+\n+docs-serve: install-docs-dependencies\ncd docs && bundle exec jekyll serve --livereload\nclear-cache:\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [docs] Improve docs-serve and fix link (#487)
[docs] Improve docs-serve and fix link
1. Make sure dependencies are installed when using docs-serve
2. Fix link to customizing reports from report reference page. |
258,388 | 26.06.2020 17:59:42 | 25,200 | bec1bdd7046fdda2a7fbc90dbbbbcd2d124e1995 | Request experiment for AFL++ and entropic | [
{
"change_type": "MODIFY",
"old_path": "presubmit.py",
"new_path": "presubmit.py",
"diff": "@@ -260,8 +260,9 @@ def validate_experiment_requests(paths: List[Path]):\nautomatic_run_experiment.REQUESTED_EXPERIMENTS_PATH)\nreturn False\n+ # Only validate the latest request.\nresult = automatic_run_experiment.validate_experiment_requests(\n- experiment_requests)\n+ experiment_requests[:1])\nif not result:\nprint('%s is not valid.' %\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-06-26\n+ fuzzers:\n+ - aflplusplus\n+ - aflplusplus_optimal\n+ - aflplusplus_optimal_shmem\n+ - aflplusplus_shmem\n+ - entropic\n+\n- experiment: 2020-06-18\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request experiment for AFL++ and entropic (#490) |
258,388 | 30.06.2020 14:48:46 | 25,200 | a4378dc95968c55758a3a6aa19979ad1b04b97ce | Request experiment qemu mode fuzzers and aflplusplus_optimal_shmem | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-06-30\n+ fuzzers:\n+ - aflplusplus_qemu\n+ - afl_qemu\n+ - honggfuzz_qemu\n+ - aflplusplus_optimal_shmem\n+\n- experiment: 2020-06-26\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request experiment qemu mode fuzzers and aflplusplus_optimal_shmem (#495) |
258,388 | 01.07.2020 10:16:57 | 25,200 | 71f5082fc9740d4a217633e7588f250aa7f1d4e3 | [NFC] Cleanup (mostly comments) in fuzzer integrations.
Clean up (mostly comments) in fuzzer integrations.
Integrations get copied so stale comments and other small nits
multiply. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -32,7 +32,7 @@ jobs:\n- libfuzzer_nocmp\n- manul\n- mopt\n- # Greybox fuzzers\n+ # Binary-only (greybox) fuzzers.\n- eclipser\n- afl_qemu\n- aflplusplus_qemu\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/afl/fuzzer.py",
"new_path": "fuzzers/afl/fuzzer.py",
"diff": "@@ -19,8 +19,6 @@ import os\nfrom fuzzers import utils\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef prepare_build_environment():\n\"\"\"Set environment variables used to build targets for AFL-based\n@@ -74,7 +72,7 @@ def run_afl_fuzz(input_corpus,\n# FIXME: Currently AFL will exit if it encounters a crashing input in seed\n# corpus (usually timeouts). Add a way to skip/delete such inputs and\n# re-run AFL.\n- print('[run_fuzzer] Running target with afl-fuzz')\n+ print('[run_afl_fuzz] Running target with afl-fuzz')\ncommand = [\n'./afl-fuzz',\n'-i',\n@@ -100,7 +98,7 @@ def run_afl_fuzz(input_corpus,\n# performs.\n'2147483647'\n]\n- print('[run_fuzzer] Running command: ' + ' '.join(command))\n+ print('[run_afl_fuzz] Running command: ' + ' '.join(command))\noutput_stream = subprocess.DEVNULL if hide_output else None\nsubprocess.check_call(command, stdout=output_stream, stderr=output_stream)\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/afl_qemu/fuzzer.py",
"new_path": "fuzzers/afl_qemu/fuzzer.py",
"diff": "# limitations under the License.\n\"\"\"Integration code for AFL qemu fuzzer.\"\"\"\n-# as aflplusplus has the build for qemu already in there we include this\n+# As aflplusplus has the build for qemu already in there we include this.\nfrom fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n@@ -24,7 +24,7 @@ def build():\ndef fuzz(input_corpus, output_corpus, target_binary):\n\"\"\"Run fuzzer.\"\"\"\n- # necessary fuzzer options\n+ # Necessary fuzzer options.\nflags = ['-Q']\naflplusplus_fuzzer.fuzz(input_corpus,\noutput_corpus,\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflcc/fuzzer.py",
"new_path": "fuzzers/aflcc/fuzzer.py",
"diff": "@@ -303,7 +303,7 @@ def build():\npost_build(fuzz_target)\n-def run_fuzz(input_corpus,\n+def run_fuzzer(input_corpus,\noutput_corpus,\ntarget_binary,\nadditional_flags=None,\n@@ -345,10 +345,10 @@ def fuzz(input_corpus, output_corpus, target_binary):\n\"\"\"Run fuzzer.\"\"\"\nprepare_fuzz_environment(input_corpus)\n- # Note: dictionary automatically added by run_fuzz().\n+ # Note: dictionary automatically added by run_fuzzer().\n# Use a dictionary for original afl as well.\n- print('[run_fuzzer] Running AFL for original binary')\n+ print('[fuzz] Running AFL for original binary')\nsrc_file = '{target}-normalized-none-nopt.dict'.format(target=target_binary)\ndst_file = '{target}-original.dict'.format(target=target_binary)\nshutil.copy(src_file, dst_file)\n@@ -356,7 +356,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\n# to be non-optimized to prevent AFL from aborting.\nos.system('sed -i \\'s/OPTIMIZED/NORMAL/g\\' {dict}'.format(dict=dst_file))\nafl_fuzz_thread1 = threading.Thread(\n- target=run_fuzz,\n+ target=run_fuzzer,\nargs=(input_corpus, output_corpus,\n'{target}-original'.format(target=target_binary),\n['-S', 'slave-original']))\n@@ -364,14 +364,14 @@ def fuzz(input_corpus, output_corpus, target_binary):\nprint('[run_fuzzer] Running AFL for normalized and optimized dictionary')\nafl_fuzz_thread2 = threading.Thread(\n- target=run_fuzz,\n+ target=run_fuzzer,\nargs=(input_corpus, output_corpus,\n'{target}-normalized-none-nopt'.format(target=target_binary),\n['-S', 'slave-normalized-nopt']))\nafl_fuzz_thread2.start()\nprint('[run_fuzzer] Running AFL for FBSP and optimized dictionary')\n- run_fuzz(input_corpus,\n+ run_fuzzer(input_corpus,\noutput_corpus,\n'{target}-no-collision-all-opt'.format(target=target_binary),\n['-S', 'slave-no-collision-all-opt'],\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflplusplus/fuzzer.py",
"new_path": "fuzzers/aflplusplus/fuzzer.py",
"diff": "@@ -20,8 +20,6 @@ import shutil\nfrom fuzzers.afl import fuzzer as afl_fuzzer\nfrom fuzzers import utils\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef get_cmplog_build_directory(target_directory):\n\"\"\"Return path to CmpLog target directory.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflplusplus_optimal/fuzzer.py",
"new_path": "fuzzers/aflplusplus_optimal/fuzzer.py",
"diff": "@@ -26,8 +26,6 @@ import glob\nfrom fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build(): # pylint: disable=too-many-branches,too-many-statements\n\"\"\"Build benchmark.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflplusplus_optimal_shmem/fuzzer.py",
"new_path": "fuzzers/aflplusplus_optimal_shmem/fuzzer.py",
"diff": "@@ -26,8 +26,6 @@ import glob\nfrom fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build(): # pylint: disable=too-many-branches,too-many-statements\n\"\"\"Build benchmark.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflplusplus_qemu/fuzzer.py",
"new_path": "fuzzers/aflplusplus_qemu/fuzzer.py",
"diff": "@@ -18,8 +18,6 @@ import subprocess\nfrom fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n@@ -28,7 +26,7 @@ def build():\ndef fuzz(input_corpus, output_corpus, target_binary):\n\"\"\"Run fuzzer.\"\"\"\n- # get LLVMFuzzerTestOneInput address\n+ # Get LLVMFuzzerTestOneInput address.\nnm_proc = subprocess.run([\n'sh', '-c',\n'nm \\'' + target_binary + '\\' | grep \\'T afl_qemu_driver_stdin_input\\''\n@@ -36,8 +34,9 @@ def fuzz(input_corpus, output_corpus, target_binary):\nstdout=subprocess.PIPE,\ncheck=True)\ntarget_func = \"0x\" + nm_proc.stdout.split()[0].decode(\"utf-8\")\n- print('[run_fuzzer] afl_qemu_driver_stdin_input() address =', target_func)\n- # fuzzer options\n+ print('[fuzz] afl_qemu_driver_stdin_input() address =', target_func)\n+\n+ # Set fuzzer options.\nflags = ['-Q', '-L', '0'] # MOpt flags\nos.environ['AFL_COMPCOV_LEVEL'] = '3' # float compcov\nos.environ['AFL_QEMU_PERSISTENT_ADDR'] = target_func\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflplusplus_shmem/fuzzer.py",
"new_path": "fuzzers/aflplusplus_shmem/fuzzer.py",
"diff": "from fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/aflsmart/fuzzer.py",
"new_path": "fuzzers/aflsmart/fuzzer.py",
"diff": "@@ -19,8 +19,6 @@ import glob\nfrom fuzzers.afl import fuzzer as afl_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/ankou/fuzzer.py",
"new_path": "fuzzers/ankou/fuzzer.py",
"diff": "@@ -20,8 +20,6 @@ import os\nfrom fuzzers import utils\nfrom fuzzers.afl import fuzzer as afl_fuzzer\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n@@ -37,12 +35,12 @@ def fuzz(input_corpus, output_corpus, target_binary):\n\"\"\"Run Ankou on target.\"\"\"\nafl_fuzzer.prepare_fuzz_environment(input_corpus)\n- print('[run_fuzzer] Running target with Ankou')\n+ print('[fuzz] Running target with Ankou')\ncommand = [\n'./Ankou', '-app', target_binary, '-i', input_corpus, '-o',\noutput_corpus\n]\n# \"-dict\" option may not work for format mismatching.\n- print('[run_fuzzer] Running command: ' + ' '.join(command))\n+ print('[fuzz] Running command: ' + ' '.join(command))\nsubprocess.check_call(command)\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/eclipser/fuzzer.py",
"new_path": "fuzzers/eclipser/fuzzer.py",
"diff": "@@ -41,7 +41,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\nif not os.path.exists(encoded_temp_corpus):\nos.mkdir(encoded_temp_corpus)\n- print('[run_fuzzer] Running target with Eclipser')\n+ print('[fuzz] Running target with Eclipser')\ncommand = [\n'dotnet',\n'/Eclipser/build/Eclipser.dll',\n@@ -69,7 +69,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\n]\nif os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\ncommand += ['-i', input_corpus]\n- print('[run_fuzzer] Running command: ' + ' '.join(command))\n+ print('[fuzz] Running command: ' + ' '.join(command))\nsubprocess.Popen(command)\nprocess = Process(target=copy_corpus_directory,\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/honggfuzz/fuzzer.py",
"new_path": "fuzzers/honggfuzz/fuzzer.py",
"diff": "@@ -19,8 +19,6 @@ import subprocess\nfrom fuzzers import utils\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n@@ -43,7 +41,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\nif not os.path.exists(output_corpus):\nos.makedirs(output_corpus)\n- print('[run_fuzzer] Running target with honggfuzz')\n+ print('[fuzz] Running target with honggfuzz')\ncommand = [\n'./honggfuzz',\n'--persistent',\n@@ -60,5 +58,5 @@ def fuzz(input_corpus, output_corpus, target_binary):\ncommand.extend(['--dict', dictionary_path])\ncommand.extend(['--', target_binary])\n- print('[run_fuzzer] Running command: ' + ' '.join(command))\n+ print('[fuzz] Running command: ' + ' '.join(command))\nsubprocess.check_call(command)\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/honggfuzz_qemu/builder.Dockerfile",
"new_path": "fuzzers/honggfuzz_qemu/builder.Dockerfile",
"diff": "ARG parent_image=gcr.io/fuzzbench/base-builder\nFROM $parent_image\n-# honggfuzz requires libfd and libunwid.\n+# Honggfuzz requires libbfd and libunwid.\nRUN apt-get update -y && \\\napt-get install -y \\\nlibbfd-dev \\\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/honggfuzz_qemu/fuzzer.py",
"new_path": "fuzzers/honggfuzz_qemu/fuzzer.py",
"diff": "@@ -19,8 +19,6 @@ import subprocess\nfrom fuzzers import utils\n-# OUT environment variable is the location of build directory (default is /out).\n-\ndef build():\n\"\"\"Build benchmark.\"\"\"\n@@ -46,7 +44,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\nif not os.path.exists(output_corpus):\nos.makedirs(output_corpus)\n- print('[run_fuzzer] Running target with honggfuzz')\n+ print('[fuzz] Running target with honggfuzz')\ncommand = [\n'./honggfuzz',\n'--rlimit_rss',\n@@ -62,5 +60,5 @@ def fuzz(input_corpus, output_corpus, target_binary):\ncommand.extend(['--dict', dictionary_path])\ncommand.extend(['--', './qemu-x86_64', target_binary])\n- print('[run_fuzzer] Running command: ' + ' '.join(command))\n+ print('[fuzz] Running command: ' + ' '.join(command))\nsubprocess.check_call(command)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [NFC] Cleanup (mostly comments) in fuzzer integrations. (#496)
Clean up (mostly comments) in fuzzer integrations.
Integrations get copied so stale comments and other small nits
multiply. |
258,388 | 09.07.2020 14:05:00 | 25,200 | 4e905184736152341d340fbeccf6c6b19e29c548 | Use a startup script for launching the dispatcher
Using ssh is hacky and error prone. Use a startup script instead.
This will also help with servicification as run_experiment.py will finish immediately after an experiment is started.
Fixes and | [
{
"change_type": "MODIFY",
"old_path": "common/gcloud.py",
"new_path": "common/gcloud.py",
"diff": "import enum\nimport subprocess\n-import time\nfrom typing import List\nfrom common import experiment_utils\n-from common import logs\nfrom common import new_process\n# Constants for dispatcher specs.\n@@ -35,33 +33,6 @@ RUNNER_BOOT_DISK_SIZE = '30GB'\nINSTANCE_BATCH_SIZE = 100\n-def ssh(instance: str, *args, **kwargs):\n- \"\"\"SSH into |instance|.\"\"\"\n- zone = kwargs.pop('zone', None)\n- command = kwargs.pop('command', None)\n- ssh_command = ['gcloud', 'beta', 'compute', 'ssh', instance]\n- if command:\n- ssh_command.append('--command=%s' % command)\n- if zone:\n- ssh_command.append('--zone=%s' % zone)\n- return new_process.execute(ssh_command, *args, **kwargs)\n-\n-\n-def robust_begin_gcloud_ssh(instance_name: str, zone: str):\n- \"\"\"Try to SSH into an instance, |instance_name| in |zone| that might not be\n- ready.\"\"\"\n- for _ in range(10):\n- result = ssh(instance_name,\n- zone=zone,\n- command='echo ping',\n- expect_zero=False)\n- if result.retcode == 0:\n- return\n- logs.info('GCP instance isn\\'t ready yet. Rerunning SSH momentarily.')\n- time.sleep(5)\n- raise Exception('Couldn\\'t SSH to instance.')\n-\n-\nclass InstanceType(enum.Enum):\n\"\"\"Types of instances we need for the experiment.\"\"\"\nDISPATCHER = 0\n"
},
{
"change_type": "MODIFY",
"old_path": "common/test_gcloud.py",
"new_path": "common/test_gcloud.py",
"diff": "from unittest import mock\n-import pytest\n-\nfrom common import gcloud\nfrom common import new_process\nfrom test_libs import utils as test_utils\n@@ -26,49 +24,6 @@ ZONE = 'zone-a'\nCONFIG = {'cloud_compute_zone': ZONE, 'service_account': 'blah'}\n-def test_ssh():\n- \"\"\"Tests that ssh works as expected.\"\"\"\n- with test_utils.mock_popen_ctx_mgr() as mocked_popen:\n- gcloud.ssh(INSTANCE_NAME)\n- assert mocked_popen.commands == [[\n- 'gcloud', 'beta', 'compute', 'ssh', INSTANCE_NAME\n- ]]\n-\n-\[email protected](('kwargs_for_ssh', 'expected_argument'),\n- [\n- ({'command': 'ls'}, '--command=ls'),\n- ({'zone': ZONE}, '--zone=' + ZONE),\n- ]) # yapf: disable\n-def test_ssh_optional_arg(kwargs_for_ssh, expected_argument):\n- \"\"\"Tests that ssh works as expected when given an optional argument.\"\"\"\n- with test_utils.mock_popen_ctx_mgr() as mocked_popen:\n- gcloud.ssh(INSTANCE_NAME, **kwargs_for_ssh)\n- assert expected_argument in mocked_popen.commands[0]\n-\n-\[email protected]('time.sleep')\[email protected]('common.gcloud.ssh')\n-def test_robust_begin_gcloud_ssh_fail(_, mocked_ssh):\n- \"\"\"Tests that ssh works as expected.\"\"\"\n- with pytest.raises(Exception) as exception:\n- gcloud.robust_begin_gcloud_ssh(INSTANCE_NAME, ZONE)\n- assert mocked_ssh.call_count == 10\n- assert exception.value == 'Couldn\\'t SSH to instance.'\n-\n-\[email protected]('time.sleep')\[email protected]('common.gcloud.ssh')\n-def test_robust_begin_gcloud_ssh_pass(mocked_ssh, _):\n- \"\"\"Tests robust_begin_gcloud_ssh works as intended on google cloud.\"\"\"\n- mocked_ssh.return_value = new_process.ProcessResult(0, None, False)\n- gcloud.robust_begin_gcloud_ssh(INSTANCE_NAME, ZONE)\n- mocked_ssh.assert_called_with('instance-a',\n- command='echo ping',\n- expect_zero=False,\n- zone='zone-a')\n-\n-\ndef test_create_instance():\n\"\"\"Tests create_instance creates an instance.\"\"\"\nwith test_utils.mock_popen_ctx_mgr(returncode=1) as mocked_popen:\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -34,6 +34,7 @@ from experiment.build import builder\nfrom experiment import measurer\nfrom experiment import reporter\nfrom experiment import scheduler\n+from experiment import stop_experiment\nLOOP_WAIT_SECONDS = 5 * 60\n@@ -116,7 +117,7 @@ def dispatcher_main():\n# reason.\nmultiprocessing.set_start_method('spawn')\ndb_utils.initialize()\n- if os.getenv('LOCAL_EXPERIMENT'):\n+ if experiment_utils.is_local_experiment():\nmodels.Base.metadata.create_all(db_utils.engine)\nexperiment_config_file_path = os.path.join(fuzzer_config_utils.get_dir(),\n@@ -172,6 +173,17 @@ def main():\nexcept Exception as error:\nlogs.error('Error conducting experiment.')\nraise error\n+ experiment_config_file_path = os.path.join(fuzzer_config_utils.get_dir(),\n+ 'experiment.yaml')\n+\n+ if experiment_utils.is_local_experiment():\n+ return 0\n+\n+ if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),\n+ experiment_config_file_path):\n+ return 0\n+\n+ return 1\nif __name__ == '__main__':\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/resources/dispatcher-startup-script-template.sh",
"diff": "+#!/bin/bash\n+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope\n+docker run --rm \\\n+ -e INSTANCE_NAME={{instance_name}} -e EXPERIMENT={{experiment}} \\\n+ -e CLOUD_PROJECT={{cloud_project}} \\\n+ -e EXPERIMENT_FILESTORE={{experiment_filestore}} \\\n+ -e POSTGRES_PASSWORD={{postgres_password}} \\\n+ -e CLOUD_SQL_INSTANCE_CONNECTION_NAME={{cloud_sql_instance_connection_name}} \\\n+ --cap-add=SYS_PTRACE --cap-add=SYS_NICE \\\n+ -v /var/run/docker.sock:/var/run/docker.sock --name=dispatcher-container \\\n+ {{docker_registry}}/dispatcher-image /work/startup-dispatcher.sh\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/resources/runner-startup-script-template.sh",
"new_path": "experiment/resources/runner-startup-script-template.sh",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-## Configure the host.\n+# Configure the host.\n# Make everything ptrace-able.\necho 0 > /proc/sys/kernel/yama/ptrace_scope\n@@ -21,7 +21,7 @@ echo 0 > /proc/sys/kernel/yama/ptrace_scope\n# Do not notify external programs about core dumps.\necho core >/proc/sys/kernel/core_pattern\n-## Start docker.\n+# Start docker.\n{% if not local_experiment %}\nwhile ! docker pull {{docker_image_url}}\ndo\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "it needs to begin an experiment.\"\"\"\nimport argparse\n-import multiprocessing\nimport os\nimport re\nimport subprocess\nimport sys\nimport tarfile\n+import tempfile\nfrom typing import Dict, List\n+\n+import jinja2\nimport yaml\nfrom common import benchmark_utils\n@@ -35,7 +37,6 @@ from common import logs\nfrom common import new_process\nfrom common import utils\nfrom common import yaml_utils\n-from experiment import stop_experiment\nfrom src_analysis import experiment_changes\nBENCHMARKS_DIR = os.path.join(utils.ROOT_DIR, 'benchmarks')\n@@ -60,6 +61,8 @@ FILTER_SOURCE_REGEX = re.compile(r'('\nCONFIG_DIR = 'config'\n+RESOURCES_DIR = os.path.join(utils.ROOT_DIR, 'experiment', 'resources')\n+\ndef read_and_validate_experiment_config(config_filename: str) -> Dict:\n\"\"\"Reads |config_filename|, validates it, finds as many errors as possible,\n@@ -267,11 +270,8 @@ def start_dispatcher(config: Dict, config_dir: str):\n\"\"\"Start the dispatcher instance and run the dispatcher code on it.\"\"\"\ndispatcher = get_dispatcher(config)\n# Is dispatcher code being run manually (useful for debugging)?\n- manual_experiment = os.getenv('MANUAL_EXPERIMENT')\n- if not manual_experiment:\n- dispatcher.create_async()\ncopy_resources_to_bucket(config_dir, config)\n- if not manual_experiment:\n+ if not os.getenv('MANUAL_EXPERIMENT'):\ndispatcher.start()\n@@ -312,11 +312,6 @@ class BaseDispatcher:\nself.config = config\nself.instance_name = experiment_utils.get_dispatcher_instance_name(\nconfig['experiment'])\n- self.process = None\n-\n- def create_async(self):\n- \"\"\"Creates the dispatcher asynchronously.\"\"\"\n- raise NotImplementedError\ndef start(self):\n\"\"\"Start the experiment on the dispatcher.\"\"\"\n@@ -332,11 +327,10 @@ class LocalDispatcher:\nconfig['experiment'])\nself.process = None\n- def create_async(self):\n- \"\"\"Noop in local experiments.\"\"\"\n-\ndef start(self):\n\"\"\"Start the experiment on the dispatcher.\"\"\"\n+ container_name = 'dispatcher-container'\n+ logs.info('Started dispatcher with container name: %s', container_name)\nexperiment_filestore_path = os.path.abspath(\nself.config['experiment_filestore'])\nfilesystem.create_directory(experiment_filestore_path)\n@@ -390,7 +384,7 @@ class LocalDispatcher:\n'LOCAL_EXPERIMENT=True',\n'--cap-add=SYS_PTRACE',\n'--cap-add=SYS_NICE',\n- '--name=dispatcher-container',\n+ '--name=%s' % container_name,\ndocker_image_url,\n'/bin/bash',\n'-c',\n@@ -410,59 +404,47 @@ class LocalDispatcher:\nclass GoogleCloudDispatcher(BaseDispatcher):\n\"\"\"Class representing the dispatcher instance on Google Cloud.\"\"\"\n- def create_async(self):\n- \"\"\"Creates the instance asynchronously.\"\"\"\n- self.process = multiprocessing.Process(\n- target=gcloud.create_instance,\n- args=(self.instance_name, gcloud.InstanceType.DISPATCHER,\n- self.config))\n- self.process.start()\n-\ndef start(self):\n\"\"\"Start the experiment on the dispatcher.\"\"\"\n- # TODO(metzman): Replace this workflow with a startup script so we don't\n- # need to SSH into the dispatcher.\n- self.process.join() # Wait for dispatcher instance.\n- # Check that we can SSH into the instance.\n- gcloud.robust_begin_gcloud_ssh(self.instance_name,\n- self.config['cloud_compute_zone'])\n-\n- docker_registry = self.config['docker_registry']\n+ logs.info('Started dispatcher with instance name: %s',\n+ self.instance_name)\n+ with tempfile.NamedTemporaryFile(dir=os.getcwd(),\n+ mode='w') as startup_script:\n+ self.write_startup_script(startup_script)\n+ 
gcloud.create_instance(self.instance_name,\n+ gcloud.InstanceType.DISPATCHER,\n+ self.config,\n+ startup_script=startup_script.name)\n+\n+ def _render_startup_script(self):\n+ \"\"\"Renders the startup script template and returns the result as a\n+ string.\"\"\"\n+ jinja_env = jinja2.Environment(\n+ undefined=jinja2.StrictUndefined,\n+ loader=jinja2.FileSystemLoader(RESOURCES_DIR),\n+ )\n+ template = jinja_env.get_template(\n+ 'dispatcher-startup-script-template.sh')\ncloud_sql_instance_connection_name = (\nself.config['cloud_sql_instance_connection_name'])\n- command = (\n- 'echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope && '\n- 'docker run --rm '\n- '-e INSTANCE_NAME=\"{instance_name}\" '\n- '-e EXPERIMENT=\"{experiment}\" '\n- '-e CLOUD_PROJECT=\"{cloud_project}\" '\n- '-e DOCKER_REGISTRY=\"{docker_registry}\" '\n- '-e EXPERIMENT_FILESTORE=\"{experiment_filestore}\" '\n- '-e POSTGRES_PASSWORD=\"{postgres_password}\" '\n- '-e CLOUD_SQL_INSTANCE_CONNECTION_NAME='\n- '\"{cloud_sql_instance_connection_name}\" '\n- '--cap-add=SYS_PTRACE --cap-add=SYS_NICE '\n- '-v /var/run/docker.sock:/var/run/docker.sock '\n- '--name=dispatcher-container '\n- '{docker_registry}/dispatcher-image '\n- '/work/startup-dispatcher.sh'\n- ).format(\n- instance_name=self.instance_name,\n- postgres_password=os.environ['POSTGRES_PASSWORD'],\n- experiment=self.config['experiment'],\n- # TODO(metzman): Create a function that sets env vars based on\n- # the contents of a dictionary, and use it instead of hardcoding\n- # the configs we use.\n- cloud_project=self.config['cloud_project'],\n- experiment_filestore=self.config['experiment_filestore'],\n- cloud_sql_instance_connection_name=(\n- cloud_sql_instance_connection_name),\n- docker_registry=docker_registry,\n- )\n- return gcloud.ssh(self.instance_name,\n- command=command,\n- zone=self.config['cloud_compute_zone'])\n+ kwargs = {\n+ 'instance_name': self.instance_name,\n+ 'postgres_password': os.environ['POSTGRES_PASSWORD'],\n+ 'experiment': self.config['experiment'],\n+ 'cloud_project': self.config['cloud_project'],\n+ 'experiment_filestore': self.config['experiment_filestore'],\n+ 'cloud_sql_instance_connection_name':\n+ (cloud_sql_instance_connection_name),\n+ 'docker_registry': self.config['docker_registry'],\n+ }\n+ return template.render(**kwargs)\n+\n+ def write_startup_script(self, startup_script_file):\n+ \"\"\"Get the startup script to start the experiment on the dispatcher.\"\"\"\n+ startup_script = self._render_startup_script()\n+ startup_script_file.write(startup_script)\n+ startup_script_file.flush()\ndef get_dispatcher(config: Dict) -> BaseDispatcher:\n@@ -530,9 +512,6 @@ def main():\nstart_experiment(args.experiment_name, args.experiment_config,\nargs.benchmarks, fuzzer_configs)\n- if not os.getenv('MANUAL_EXPERIMENT'):\n- stop_experiment.stop_experiment(args.experiment_name,\n- args.experiment_config)\nreturn 0\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/stop_experiment.py",
"new_path": "experiment/stop_experiment.py",
"diff": "@@ -48,14 +48,14 @@ def stop_experiment(experiment_name, experiment_config_filename):\nif not experiment_instances:\nlogger.warning('No experiment instances found, no work to do.')\n- return 0\n+ return True\nif not gcloud.delete_instances(experiment_instances, cloud_compute_zone):\nlogger.error('Failed to stop experiment instances.')\n- return 1\n+ return False\nlogger.info('Successfully stopped experiment.')\n- return 0\n+ return True\ndef main():\n@@ -64,7 +64,7 @@ def main():\nprint(\"Usage {0} <experiment-name> <experiment-config.yaml>\")\nreturn 1\nlogs.initialize()\n- return stop_experiment(sys.argv[1], sys.argv[2])\n+ return 0 if stop_experiment(sys.argv[1], sys.argv[2]) else 1\nif __name__ == '__main__':\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "@@ -91,7 +91,7 @@ def test_create_trial_instance(benchmark, expected_image, expected_target,\nexperiment_config):\n\"\"\"Test that create_trial_instance invokes create_instance\nand creates a startup script for the instance, as we expect it to.\"\"\"\n- expected_startup_script = '''## Start docker.\n+ expected_startup_script = '''# Start docker.\nwhile ! docker pull {docker_image_url}\ndo\n@@ -133,7 +133,7 @@ def test_create_trial_instance_local_experiment(benchmark, expected_image,\nstartup script for the instance, as we expect it to when running a\nlocal_experiment.\"\"\"\nos.environ['LOCAL_EXPERIMENT'] = str(True)\n- expected_startup_script = '''## Start docker.\n+ expected_startup_script = '''# Start docker.\ndocker run \\\\\n@@ -191,7 +191,7 @@ def _test_create_trial_instance( # pylint: disable=too-many-locals\nwith open(expected_startup_script_path) as file_handle:\ncontent = file_handle.read()\n- check_from = '## Start docker.'\n+ check_from = '# Start docker.'\nassert check_from in content\nscript_for_docker = content[content.find(check_from):]\nassert script_for_docker == expected_startup_script.format(\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Use a startup script for launching the dispatcher (#489)
Using ssh is hacky and error prone. Use a startup script instead.
This will also help with servicification as run_experiment.py will finish immediately after an experiment is started.
Fixes #361 and #486 |
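The mechanism this commit introduces — rendering an instance startup script from a Jinja2 template instead of piping commands over SSH — can be sketched in isolation. This mirrors _render_startup_script in the diff; the Jinja2 calls are real APIs, while the values passed to render below are placeholders rather than the service's actual configuration.

```python
import jinja2

RESOURCES_DIR = 'experiment/resources'  # where the .sh templates live


def render_startup_script(template_name, **kwargs):
    """Renders a startup-script template to a string.

    StrictUndefined makes rendering fail loudly on a missing template
    variable instead of silently emitting an empty string.
    """
    env = jinja2.Environment(undefined=jinja2.StrictUndefined,
                             loader=jinja2.FileSystemLoader(RESOURCES_DIR))
    return env.get_template(template_name).render(**kwargs)


script = render_startup_script(
    'dispatcher-startup-script-template.sh',
    instance_name='d-test-experiment',  # placeholder values from here down
    experiment='test-experiment',
    cloud_project='my-project',
    experiment_filestore='gs://my-bucket',
    postgres_password='***',
    cloud_sql_instance_connection_name='my-project:us-central1:db=tcp:5432',
    docker_registry='gcr.io/my-project')
```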
258,388 | 09.07.2020 19:09:17 | 25,200 | bf11656eef7832b4d773d81532172e2312e70001 | Request new aflplusplus experiment
Request new aflplusplus experiment | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-07-09\n+ fuzzers:\n+ - aflplusplus_lto_dict\n+ - aflplusplus_lto\n+ - aflplusplus_ltoinstrim\n+ - aflplusplus_ctx\n+ - aflplusplus_ngram2\n+ - aflplusplus_optimal\n+ - aflplusplus_qemu\n+ - aflplusplus\n+\n- experiment: 2020-06-30\nfuzzers:\n- aflplusplus_qemu\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request new aflplusplus experiment (#511)
Request new aflplusplus experiment |
258,388 | 10.07.2020 12:11:43 | 25,200 | aba7f1dad6141f1ca53e7be54f994e893514cc28 | Install dependencies in separate step.
This should make output of failures easier to read. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/presubmit.yml",
"new_path": ".github/workflows/presubmit.yml",
"diff": "@@ -10,10 +10,16 @@ jobs:\n- run: | # Needed for presubmit to work.\ngit fetch origin master --depth 1\ngit symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master\n+\n- name: Setup Python environment\nuses: actions/[email protected]\nwith:\npython-version: 3.7\n+\n+ - name: Install dependencies\n+ run: |\n+ make install-dependencies\n+\n- name: Run presubmit checks\nrun: |\nFUZZBENCH_TEST_INTEGRATION=1 make presubmit\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Install dependencies in separate step. (#517)
This should make output of failures easier to read. |
258,388 | 10.07.2020 12:17:32 | 25,200 | d46a990810dd1105386f9c3e3388d4d374c8b031 | Don't stop experiment in automatic_run_experiment
Don't stop experiment in automatic_run_experiment
After landed
run_experiment will exit after starting the dispatcher, not after
the experiment terminates. Stopping the experiment immediately after
starting the experiment isn't what we want. | [
{
"change_type": "MODIFY",
"old_path": "service/automatic_run_experiment.py",
"new_path": "service/automatic_run_experiment.py",
"diff": "@@ -29,7 +29,6 @@ from common import yaml_utils\nfrom database import models\nfrom database import utils as db_utils\nfrom experiment import run_experiment\n-from experiment import stop_experiment\nlogger = logs.Logger('automatic_run_experiment') # pylint: disable=invalid-name\n@@ -215,7 +214,6 @@ def _run_experiment(experiment_name, fuzzer_configs, dry_run=False):\nreturn\nrun_experiment.start_experiment(experiment_name, EXPERIMENT_CONFIG_FILE,\nBENCHMARKS, fuzzer_configs)\n- stop_experiment.stop_experiment(experiment_name, EXPERIMENT_CONFIG_FILE)\ndef main():\n"
},
{
"change_type": "MODIFY",
"old_path": "service/test_automatic_run_experiment.py",
"new_path": "service/test_automatic_run_experiment.py",
"diff": "@@ -57,10 +57,8 @@ def test_run_requested_experiment_pause_service(\[email protected]('experiment.run_experiment.start_experiment')\[email protected]('experiment.stop_experiment.stop_experiment')\[email protected]('service.automatic_run_experiment._get_requested_experiments')\ndef test_run_requested_experiment(mocked_get_requested_experiments,\n- mocked_stop_experiment,\nmocked_start_experiment, db):\n\"\"\"Tests that run_requested_experiment starts and stops the experiment\nproperly.\"\"\"\n@@ -114,9 +112,6 @@ def test_run_requested_experiment(mocked_get_requested_experiments,\nstart_experiment_call_args[0][0][3].sort(key=sort_key)\nassert start_experiment_call_args == expected_calls\n- mocked_stop_experiment.assert_called_with(expected_experiment_name,\n- expected_config_file)\n-\[email protected](\n('name', 'expected_result'), [('02000-1-1', False), ('2020-1-1', False),\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Don't stop experiment in automatic_run_experiment (#514)
Don't stop experiment in automatic_run_experiment
After https://github.com/google/fuzzbench/pull/489 landed
run_experiment will exit after starting the dispatcher, not after
the experiment terminates. Stopping the experiment immediately after
starting the experiment isn't what we want. |
258,388 | 13.07.2020 12:24:05 | 25,200 | 60eee494f2d7d4142ee7a19df7891a3cef3921ce | Remove some blockers for Python3.8 support.
Remove some blockers for Python3.8 support.
1. Only use pytype on 3.7 since it doesn't work on 3.8+
2. Skip a test that hangs if not on Python3.7.
Also, mark a test as "slow" instead of "long" and register the marker
to avoid getting the warnings. | [
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_builder.py",
"new_path": "experiment/build/test_builder.py",
"diff": "import itertools\nimport os\n+import sys\nfrom unittest import mock\nimport pytest\n@@ -62,7 +63,8 @@ def get_benchmarks_or_fuzzers(benchmarks_or_fuzzers_directory, filename,\n]\n-# This test seems to hang on Python3.8+\[email protected](sys.version_info.minor > 7,\n+ reason='Test can hang on versions greater than 3.7')\[email protected]('experiment.build.builder.build_measurer')\[email protected]('time.sleep')\[email protected]('build_measurer_return_value', [True, False])\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_run_experiment.py",
"new_path": "experiment/test_run_experiment.py",
"diff": "@@ -226,7 +226,7 @@ def test_validate_experiment_name_invalid(experiment_name):\n# This test takes up to a minute to complete.\[email protected]\[email protected]\ndef test_copy_resources_to_bucket(tmp_path):\n\"\"\"Tests that copy_resources_to_bucket copies the correct resources.\"\"\"\n# Do this so that Ctrl-C doesn't pollute the repo.\n"
},
{
"change_type": "MODIFY",
"old_path": "presubmit.py",
"new_path": "presubmit.py",
"diff": "@@ -210,6 +210,15 @@ def lint(paths: List[Path]) -> bool:\ndef pytype(paths: List[Path]) -> bool:\n\"\"\"Run pytype on |path| if it is a python file. Return False if it fails\ntype checking.\"\"\"\n+ # Pytype isn't supported on Python3.8+. See\n+ # https://github.com/google/pytype/issues/440.\n+ assert sys.version_info.major == 3, \"Need Python3.\"\n+ if sys.version_info.minor > 7:\n+ logs.error(\n+ 'Python version is: \"%s\". You should be using 3.7. '\n+ 'Not running pytype.', sys.version)\n+ return True\n+\npaths = [path for path in paths if is_python(path)]\nif not paths:\nreturn True\n"
},
{
"change_type": "MODIFY",
"old_path": "pytest.ini",
"new_path": "pytest.ini",
"diff": "[pytest]\nnorecursedirs = docs/_site/* docs/vendor/* third_party/* .venv/*\n+\n+markers =\n+ slow: marks tests as slow (deselect with '-m \"not slow\"')\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Remove some blockers for Python3.8 support. (#516)
Remove some blockers for Python3.8 support.
1. Only use pytype on 3.7 since it doesn't work on 3.8+
2. Skip a test that hangs if not on Python3.7.
Also, mark a test as "slow" instead of "long" and register the marker
to avoid getting the warnings. |
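For reference, the two pytest mechanisms this commit leans on — a custom marker registered in pytest.ini and a version-gated skip — look like this in isolation (the test names and bodies are made up):

```python
import sys

import pytest


# 'slow' must be declared under `markers =` in pytest.ini, as the diff
# does; otherwise pytest warns about the unknown marker.
@pytest.mark.slow
def test_expensive_copy():
    assert True  # placeholder body


@pytest.mark.skipif(sys.version_info.minor > 7,
                    reason='Hangs on Python 3.8+')
def test_py37_only_behavior():
    assert True  # placeholder body
```

Slow tests can then be deselected with `pytest -m "not slow"`, exactly as the registered marker's description advertises.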
258,388 | 14.07.2020 08:44:19 | 25,200 | 5e742da97d1544e93b155c12f0ae2a7a6cbc6452 | [service] Use automatic report merging | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-config.yaml",
"new_path": "service/experiment-config.yaml",
"diff": "@@ -11,3 +11,10 @@ experiment_filestore: gs://fuzzbench-data\nreport_filestore: gs://www.fuzzbench.com/reports\ncloud_sql_instance_connection_name: \"fuzzbench:us-central1:postgres-experiment-db=tcp:5432\"\npreemptible_runners: true\n+\n+# This experiment should generate a report that is combined with other public\n+# \"production\" experiments.\n+merge_with_nonprivate: true\n+\n+# This experiment should be merged with other reports in later experiments.\n+private: false\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [service] Use automatic report merging (#525) |
258,388 | 14.07.2020 08:44:38 | 25,200 | 008421a5c2d4cb4815b06fc6eb94715ca522107d | [experiment-requests] New AFL++ variant experiment | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-07-13\n+ fuzzers:\n+ - aflplusplus_ctx_default\n+ - aflplusplus_ctx_nosingle\n+ - aflplusplus_ctx_nozero\n+ - aflplusplus_ctx_nozerosingle\n+ - aflplusplus_ngram4\n+ - aflplusplus_ngram6\n+ - aflplusplus_ngram8\n+\n- experiment: 2020-07-09\nfuzzers:\n- aflplusplus_lto_dict\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [experiment-requests] New AFL++ variant experiment (#526) |
258,388 | 14.07.2020 10:03:48 | 25,200 | c983645e2d3f2e72183d0d54afc8dfd8bb189c16 | [service] Fix issues with bash script | [
{
"change_type": "MODIFY",
"old_path": "service/run.bash",
"new_path": "service/run.bash",
"diff": "@@ -22,6 +22,8 @@ expriment_working_dir=/tmp/fuzzbench-automatic-experiment-working-dir\nrepo_path=/tmp/fuzzbench-automatic-experiment-repo\nrm -rf $repo_path $expriment_working_dir\n+mkdir $expriment_working_dir\n+\ngit clone https://github.com/google/fuzzbench.git $repo_path\ncd $repo_path\n@@ -30,6 +32,6 @@ source .venv/bin/activate\nexport PYTHONPATH=$repo_path\ncd $expriment_working_dir\n-python3 service/automatic_run_experiment.py diff\n+python3 service/automatic_run_experiment.py\nrm -rf $repo_path\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [service] Fix issues with bash script (#528) |
258,388 | 14.07.2020 12:16:16 | 25,200 | 88d5aba08bbaea0b015143232039db2c733269e7 | [benchmarks][libxml2-v2.9.2] Fix broken build
Fix broken build by ignoring git's unhelpful conversion of CRLF to LF. | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/libxml2-v2.9.2/build.sh",
"new_path": "benchmarks/libxml2-v2.9.2/build.sh",
"diff": "@@ -33,7 +33,15 @@ build_lib() {\n)\n}\n-get_git_tag https://gitlab.gnome.org/GNOME/libxml2.git v2.9.2 SRC\n+git clone https://gitlab.gnome.org/GNOME/libxml2.git SRC\n+cd SRC\n+\n+# Git is converting CRLF to LF automatically and causing issues when checking\n+# out the branch. So use -f to ignore the complaint about lost changes that we\n+# don't even want.\n+git checkout -f v2.9.2\n+cd -\n+\nbuild_lib\n$CXX $CXXFLAGS -std=c++11 $SCRIPT_DIR/target.cc -I BUILD/include BUILD/.libs/libxml2.a $FUZZER_LIB -o $FUZZ_TARGET\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [benchmarks][libxml2-v2.9.2] Fix broken build (#531)
Fix broken build by ignoring git's unhelpful conversion of CRLF to LF. |
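The fix itself lives in build.sh, but the idea transfers directly; here is the same clone-then-force-checkout flow as a Python sketch (the repository URL and tag are taken from the diff, the helper name is an invention):

```python
import subprocess


def clone_at_tag(url, dest, tag):
    """Clones |url| into |dest| and force-checks-out |tag|.

    'checkout -f' throws away the CRLF->LF edits that git's line-ending
    conversion can leave in the working tree, which otherwise make git
    refuse the checkout.
    """
    subprocess.check_call(['git', 'clone', url, dest])
    subprocess.check_call(['git', '-C', dest, 'checkout', '-f', tag])


clone_at_tag('https://gitlab.gnome.org/GNOME/libxml2.git', 'SRC', 'v2.9.2')
```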
258,388 | 14.07.2020 12:17:11 | 25,200 | 581a41edfdd75d83983a608e0f71c463efc69f97 | [service] Cleanup run bash script and add setup script for initialization | [
{
"change_type": "MODIFY",
"old_path": "service/run.bash",
"new_path": "service/run.bash",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-# Use this script to clone a new copy of fuzzbench and run a diff experiment.\n+# Use this script to clone a new copy of fuzzbench and run a requested\n+# experiment.\n# Use a seperate working directory to run the experiment so we don't pollute\n# the source code with the config directory created by run_experiment.py\n@@ -30,8 +31,7 @@ cd $repo_path\nmake install-dependencies\nsource .venv/bin/activate\nexport PYTHONPATH=$repo_path\n-cd $expriment_working_dir\n+cd $expriment_working_dir\npython3 $repo_path/service/automatic_run_experiment.py\nrm -rf $repo_path\n-\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "service/setup.bash",
"diff": "+#!/bin/bash -ex\n+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Use this script once to setup a machine for running the fuzzbench service.\n+\n+# Install a supported python version.\n+export PYTHON_VERSION=3.7.6\n+\n+sudo apt-get update -y && sudo apt-get install -y \\\n+ build-essential \\\n+ rsync \\\n+ curl \\\n+ zlib1g-dev \\\n+ libncurses5-dev \\\n+ libgdbm-dev \\\n+ libnss3-dev \\\n+ libssl-dev \\\n+ libreadline-dev \\\n+ libffi-dev \\\n+ virtualenv \\\n+ libbz2-dev \\\n+ liblzma-dev \\\n+ libsqlite3-dev\n+\n+cd /tmp/ && \\\n+ curl -O https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tar.xz && \\\n+ tar -xvf Python-$PYTHON_VERSION.tar.xz && \\\n+ cd Python-$PYTHON_VERSION && \\\n+ ./configure --enable-loadable-sqlite-extensions --enable-optimizations && \\\n+ sudo make -j install && \\\n+ sudo rm -r /tmp/Python-$PYTHON_VERSION.tar.xz /tmp/Python-$PYTHON_VERSION\n+\n+# Download and run the cloud_sql_proxy\n+export cloud_sql_proxy_path=/tmp/cloud_sql_proxy\n+wget https://dl.google.com/cloudsql/cloud_sql_proxy.linux.amd64 -O \\\n+ $cloud_sql_proxy_path\n+chmod +x $cloud_sql_proxy_path\n+# This is a hardcoded value that only works for the official fuzzbench service.\n+$cloud_sql_proxy_path -instances=fuzzbench:us-central1:postgres-experiment-db=tcp:5432 &\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [service] Cleanup run bash script and add setup script for initialization (#533) |
258,388 | 14.07.2020 13:58:38 | 25,200 | 64de6d7bf32296fc3309c28437026bcd807f4483 | [service] Fix nits | [
{
"change_type": "MODIFY",
"old_path": "service/run.bash",
"new_path": "service/run.bash",
"diff": "# Use a seperate working directory to run the experiment so we don't pollute\n# the source code with the config directory created by run_experiment.py\n-expriment_working_dir=/tmp/fuzzbench-automatic-experiment-working-dir\n+experiment_working_dir=/tmp/fuzzbench-automatic-experiment-working-dir\nrepo_path=/tmp/fuzzbench-automatic-experiment-repo\n-rm -rf $repo_path $expriment_working_dir\n+rm -rf $repo_path $experiment_working_dir\n-mkdir $expriment_working_dir\n+mkdir $experiment_working_dir\ngit clone https://github.com/google/fuzzbench.git $repo_path\ncd $repo_path\n@@ -31,7 +31,7 @@ cd $repo_path\nmake install-dependencies\nsource .venv/bin/activate\nexport PYTHONPATH=$repo_path\n+cd $experiment_working_dir\n-cd $expriment_working_dir\npython3 $repo_path/service/automatic_run_experiment.py\nrm -rf $repo_path\n"
},
{
"change_type": "MODIFY",
"old_path": "service/setup.bash",
"new_path": "service/setup.bash",
"diff": "@@ -42,10 +42,11 @@ cd /tmp/ && \\\nsudo make -j install && \\\nsudo rm -r /tmp/Python-$PYTHON_VERSION.tar.xz /tmp/Python-$PYTHON_VERSION\n-# Download and run the cloud_sql_proxy\n+# Download and run the cloud_sql_proxy.\nexport cloud_sql_proxy_path=/tmp/cloud_sql_proxy\nwget https://dl.google.com/cloudsql/cloud_sql_proxy.linux.amd64 -O \\\n$cloud_sql_proxy_path\nchmod +x $cloud_sql_proxy_path\n+\n# This is a hardcoded value that only works for the official fuzzbench service.\n$cloud_sql_proxy_path -instances=fuzzbench:us-central1:postgres-experiment-db=tcp:5432 &\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [service] Fix nits (#534) |
258,396 | 17.07.2020 03:33:58 | 0 | 140e69107d725611290a05d6369f159337d61bdf | Use libFuzzer that supports interceptors for memcmp-like functions without ASan | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer/builder.Dockerfile",
"new_path": "fuzzers/libfuzzer/builder.Dockerfile",
"diff": "@@ -17,7 +17,7 @@ FROM $parent_image\nRUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\ncd /llvm-project/ && \\\n- git checkout d8981ce5b9f8caa567613b2bf5aa3095e0156130 && \\\n+ git checkout 12d1124c49beec0fb79d36944960e5bf0f236d4c && \\\ncd compiler-rt/lib/fuzzer && \\\n(for f in *.cpp; do \\\nclang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Use libFuzzer that supports interceptors for memcmp-like functions without ASan (#549) |
258,388 | 20.07.2020 12:49:18 | 25,200 | 7cb2023a669ecb178b414fef3bc2b5201f973c98 | [libFuzzer] Request experiment for libFuzzer changes in | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-07-20\n+ fuzzers:\n+ - libfuzzer\n+\n- experiment: 2020-07-13\nfuzzers:\n- aflplusplus_ctx_default\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [libFuzzer] Request experiment for libFuzzer changes in #549 (#552) |
258,388 | 20.07.2020 13:44:07 | 25,200 | b7443c6364049884fd6a2d4d53ac003252494960 | Make experiments private by default.
For the most part all nonprivate experiments are run by the service
which explicitly makes them nonprivate. So private should be the
default since most other experiments are not ones I want to
be merged into other experiments. | [
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -59,7 +59,7 @@ def _initialize_experiment_in_db(experiment_config: dict,\ndb_utils.get_or_create(models.Experiment,\nname=experiment_config['experiment'],\ngit_hash=experiment_config['git_hash'],\n- private=experiment_config.get('private', False))\n+ private=experiment_config.get('private', True))\n])\n# TODO(metzman): Consider doing this without sqlalchemy. This can get\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Make experiments private by default. (#554)
For the most part all nonprivate experiments are run by the service
which explicitly makes them nonprivate. So private should be the
default since most other experiments are not ones I want to
be merged into other experiments. |
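The entire change is the dict.get default flipping from False to True; a quick sketch of what that means for a config that omits the key:

```python
experiment_config = {'experiment': '2020-07-20'}  # no 'private' key set

# Before this commit: an unspecified experiment was treated as public.
assert experiment_config.get('private', False) is False
# After this commit: an unspecified experiment is private by default.
assert experiment_config.get('private', True) is True
```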
258,388 | 20.07.2020 15:29:24 | 25,200 | d9cda8ba5ee7ff5563d11c2b9ca613df138f6f4f | [service] Make service ready for external users
1. Document service for external users.
2. Document service for FuzzBench maintainers.
3. Include a copy of the service's crontab.
4. Output stdout/stderr of cron job to a file. | [
{
"change_type": "MODIFY",
"old_path": "docs/faq.md",
"new_path": "docs/faq.md",
"diff": "---\nlayout: default\ntitle: FAQ\n-has_children: true\n+has_children: false\nnav_order: 6\npermalink: /faq/\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/getting-started/adding_a_new_fuzzer.md",
"new_path": "docs/getting-started/adding_a_new_fuzzer.md",
"diff": "@@ -280,10 +280,32 @@ make build-$FUZZER_NAME-all\n* Run `make presubmit` to lint your code and ensure all tests are passing.\n-* Run `make clear-cache` to clear docker containers' caches. Next time you build a project, the container will be built from scratch.\n-\n-* Add your fuzzer to the list in `.github/workflows/ci.yml` to enable building\n- it on continous integration.\n+* Run `make clear-cache` to clear docker containers' caches. Next time you build\n+ a project, the container will be built from scratch.\n+\n+## Requesting an experiment\n+\n+The FuzzBench service automatically runs experiments that are requested by users\n+twice a day at 6:00 AM PT (13:00 UTC) and 6:00 PM PT (01:00 UTC). If you want\n+the FuzzBench service to run an experiment on specific fuzzers (such as the one\n+you are adding): add an experiment request to\n+[service/experiment-requests.yaml](https://github.com/google/fuzzbench/blob/master/service/experiment-requests.yaml).\n+`service/experiment-requests.yaml` explains how to do this. At the end of the\n+experiment, FuzzBench will generate a report comparing your fuzzer to the latest\n+versions of other fuzzers, so you only need to include fuzzers that you've\n+modified in a meaningful way (i.e. fuzzers whose results are likely affected by\n+your\n+change). This report, and a real-time report of your experiment can be viewed at\n+`https://www.fuzzbench.com/reports/$YOUR_EXPERIMENT_NAME`. Note that real-time\n+reports may not appear until a few hours after the experiment starts since every\n+fuzzer-benchmark pair in the experiment must build in order for fuzzing to\n+start.\n+\n+## Submitting your integration\n+\n+* Add your fuzzer to the list in `.github/workflows/ci.yml` so that our\n+ continuous integration will test that your fuzzer can build and briefly run on\n+ all benchmarks once you've submitted a pull request.\n* Submit the integration in a\n[GitHub pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request).\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/internal-documentation/internal_documentation.md",
"diff": "+---\n+layout: default\n+title: Internal Documentation\n+has_children: true\n+nav_order: 7\n+permalink: /internal-documentation/\n+---\n+\n+# Internal documentation\n+\n+This section contains internal documentation that is meant for FuzzBench\n+maintainers. It is not useful for end users of FuzzBench.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/internal-documentation/service.md",
"diff": "+---\n+layout: default\n+title: The FuzzBench Service\n+parent: Internal Documentation\n+nav_order: 1\n+permalink: /internal-documentation/service/\n+---\n+\n+# # The FuzzBench service\n+{: .no_toc}\n+\n+**Note:** This document and most of the `service/` directory is only intended\n+for use by FuzzBench maintainers. It will contain hardcoded values and\n+references to things that don't make sense for other users.\n+\n+- TOC\n+{:toc}\n+\n+## Overview\n+\n+This document discusses the FuzzBench service. The service works as follows:\n+When a user wants a new experiment they add the experiment to\n+`experiment-requests.yaml`. Twice a day at 6 AM PT (13:00 UTC) and 6 PM PT\n+(01:00 UTC) a cron job on the `service` instance will execute the script\n+`run.bash`. `run.bash` will clone FuzzBench and then execute\n+`automatic_run_experiment.py` which starts newly requested experiments.\n+\n+## Setting up an instance to run an experiment\n+\n+This shouldn't be necessary, but here are instructions in case the current\n+instance is lost.\n+1. Run `setup.bash`. This will build and install a supported Python version,\n+ download the `cloud_sql_proxy` and run it so that we have a connection to the\n+ db.\n+\n+1. Install the cron job. An example you can use is in the\n+ [crontab file](https://github.com/google/fuzzbench/tree/master/service/crontab).\n+ Note that you must fill in `POSTGRES_PASSWORD` and `$HOME`.\n+\n+1. Verify that the service is running. One way you can debug this is by looking\n+ at the stdout/stderr of `run.bash` which is saved in\n+ `/tmp/fuzzbench-service.log`. If something isn't working you should probably\n+ verify that `run.bash` works on its own. Note that `run.bash` is executed\n+ from a checkout of FuzzBench that isn't automatically updated. So if you need\n+ to update you must do so with `git pull --rebase`.\n+\n+## Automatic merging\n+\n+Experiments that are run using the service will be marked as nonprivate and on\n+completion automatically merge using clobbering.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "service/crontab",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Example crontab file that can be used to run the service. Note that the path\n+# needs to be set because cron jobs will typically use different paths than\n+# users (it breaks without this). Though the SQL database can only be used by\n+# Google accounts that have permission, we don't include the password here.\n+\n+# m h dom mon dow command\n+00 1,13 * * * export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/opt/puppetlabs/bin; export POSTGRES_PASSWORD=\"\"; $HOME/fuzzbench/service/run.bash\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "service/run.bash",
"new_path": "service/run.bash",
"diff": "# Use this script to clone a new copy of fuzzbench and run a requested\n# experiment.\n+# Append stdout and stderr to /tmp/fuzzbench-service.logs. Redirecting the\n+# stdout/stderr of this script in the cron command didn't work for some reason.\n+exec &>> /tmp/fuzzbench-service.logs\n+\n# Use a seperate working directory to run the experiment so we don't pollute\n# the source code with the config directory created by run_experiment.py\nexperiment_working_dir=/tmp/fuzzbench-automatic-experiment-working-dir\n@@ -31,7 +35,7 @@ cd $repo_path\nmake install-dependencies\nsource .venv/bin/activate\nexport PYTHONPATH=$repo_path\n-cd $experiment_working_dir\n+cd $experiment_working_dir\npython3 $repo_path/service/automatic_run_experiment.py\nrm -rf $repo_path\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [service] Make service ready for external users (#553)
1. Document service for external users.
2. Document service for FuzzBench maintainers.
3. Include a copy of the service's crontab.
4. Output stdout/stderr of cron job to a file. |
258,388 | 21.07.2020 10:17:03 | 25,200 | 5db871f65c312d4b2cedf16bd7ac2c6311987b26 | [GCB] Fix image tagging
Don't add two tags in a single command. One is sufficient and two
is an error. | [
{
"change_type": "MODIFY",
"old_path": "docker/gcb/coverage.yaml",
"new_path": "docker/gcb/coverage.yaml",
"diff": "@@ -31,10 +31,10 @@ steps:\n# Use two tags so that the image builds properly and we can push it to the\n# correct location.\n'--tag',\n- 'gcr.io/fuzzbench/builders/coverage:${_EXPERIMENT}:${_EXPERIMENT}',\n+ 'gcr.io/fuzzbench/builders/coverage:${_EXPERIMENT}',\n'--tag',\n- '${_REPO}/builders/coverage:${_EXPERIMENT}:${_EXPERIMENT}',\n+ '${_REPO}/builders/coverage:${_EXPERIMENT}',\n'--cache-from',\n'${_REPO}/builders/coverage',\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [GCB] Fix image tagging (#561)
Don't add two tags in a single command. One is sufficient and two
is an error. |
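The underlying rule: a Docker image reference is name[:tag] with a single colon before the tag, so 'coverage:${_EXPERIMENT}:${_EXPERIMENT}' was malformed; applying several tags to one build means repeating the --tag flag, once per reference. A sketch of that invocation from Python (the Dockerfile path and registry names are placeholders):

```python
import subprocess


def build_with_tags(dockerfile, context, tags):
    """Builds one image and applies each tag via its own --tag flag."""
    command = ['docker', 'build', '--file', dockerfile]
    for tag in tags:
        command += ['--tag', tag]  # one name:tag reference per flag
    command.append(context)
    subprocess.check_call(command)


build_with_tags('docker/coverage/Dockerfile', '.', [
    'gcr.io/fuzzbench/builders/coverage:my-experiment',
    'gcr.io/my-repo/builders/coverage:my-experiment',
])
```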
258,399 | 22.07.2020 12:30:48 | 18,000 | 0198c184f30738109ce436b2394ddb68519de16c | Job dependency test | [
{
"change_type": "RENAME",
"old_path": "fuzzbench/fake_jobs.py",
"new_path": "fuzzbench/jobs.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-\"\"\"Fake jobs.\"\"\"\n+\"\"\"Defines all required jobs for one experiment.\"\"\"\n+import os\nimport subprocess\n-import time\n+BASE_TAG = 'gcr.io/fuzzbench'\n-def build_image(name):\n- \"\"\"Build a Docker image.\"\"\"\n- print('Building', name)\n- subprocess.run(['docker', '--version'], check=True)\n- time.sleep(3)\n+\n+def build_image(name: str):\n+ \"\"\"Builds a Docker image and returns whether it succeeds.\"\"\"\n+ image_tag = os.path.join(BASE_TAG, name)\n+ subprocess.run(['docker', 'pull', image_tag], check=True)\n+ subprocess.run(\n+ ['docker', 'build', '--tag', image_tag,\n+ os.path.join('docker', name)],\n+ check=True)\nreturn True\ndef run_trial():\n- \"\"\"Run a trial.\"\"\"\n+ \"\"\"Runs a trial.\"\"\"\nreturn True\ndef measure_corpus_snapshot():\n- \"\"\"Measure a corpus snapshot.\"\"\"\n+ \"\"\"Measures a corpus snapshot.\"\"\"\nreturn True\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/run_experiment.py",
"new_path": "fuzzbench/run_experiment.py",
"diff": "@@ -18,22 +18,37 @@ import time\nimport redis\nimport rq\n-from fuzzbench import fake_jobs\n+from fuzzbench import jobs\ndef run_experiment():\n\"\"\"Main experiment logic.\"\"\"\nprint('Initializing the job queue.')\nqueue = rq.Queue()\n- jobs = []\n- for i in range(6):\n- jobs.append(queue.enqueue(fake_jobs.build_image, 'something-%d' % i))\n+ jobs_list = []\n+ jobs_list.append(\n+ queue.enqueue(jobs.build_image,\n+ 'base-image',\n+ job_timeout=600,\n+ job_id='base-image'))\n+ jobs_list.append(\n+ queue.enqueue(jobs.build_image,\n+ 'base-builder',\n+ job_timeout=600,\n+ job_id='base-builder',\n+ depends_on='base-image'))\n+ jobs_list.append(\n+ queue.enqueue(jobs.build_image,\n+ 'base-runner',\n+ job_timeout=600,\n+ job_id='base-runner',\n+ depends_on='base-image'))\nwhile True:\nprint('Current status of jobs:')\n- for job in jobs:\n+ for job in jobs_list:\nprint(' %s%s : %s' % (job.func_name, job.args, job.get_status()))\n- if all([job.result is not None for job in jobs]):\n+ if all([job.result is not None for job in jobs_list]):\nbreak\ntime.sleep(3)\nprint('All done!')\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/test_e2e_run.py",
"new_path": "fuzzbench/test_e2e_run.py",
"diff": "@@ -18,17 +18,38 @@ test.\"\"\"\nimport os\nimport pytest\n+import redis\n+from rq.job import Job\n+\n+\[email protected](scope='class')\n+def redis_connection():\n+ \"\"\"Returns the default redis server connection.\"\"\"\n+ return redis.Redis(host='queue-server')\n# pylint: disable=no-self-use\[email protected]('E2E_INTEGRATION_TEST' not in os.environ,\nreason='Not running end-to-end test.')\[email protected]('redis_connection')\nclass TestEndToEndRunResults:\n\"\"\"Checks the result of a test experiment run.\"\"\"\n- def test_all_jobs_finished_sucessfully(self):\n- \"\"\"Fake test to be implemented later.\"\"\"\n- assert True\n+ def test_jobs_dependency(self, redis_connection): # pylint: disable=redefined-outer-name\n+ \"\"\"Tests that jobs dependency preserves during working.\"\"\"\n+ jobs = {\n+ name: Job.fetch(name, connection=redis_connection)\n+ for name in ['base-image', 'base-builder', 'base-runner']\n+ }\n+ assert jobs['base-image'].ended_at <= jobs['base-builder'].started_at\n+ assert jobs['base-image'].ended_at <= jobs['base-runner'].started_at\n+\n+ def test_all_jobs_finished_successfully(self, redis_connection): # pylint: disable=redefined-outer-name\n+ \"\"\"Tests all jobs finished successully.\"\"\"\n+ jobs = Job.fetch_many(['base-image', 'base-builder', 'base-runner'],\n+ connection=redis_connection)\n+ for job in jobs:\n+ assert job.get_status() == 'finished'\ndef test_measurement_jobs_were_started_before_trial_jobs_finished(self):\n\"\"\"Fake test to be implemented later.\"\"\"\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Job dependency test (#548) |
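To make the mechanism under test concrete: rq keeps a job enqueued with depends_on in the deferred state until its dependency finishes, and Job.fetch reads a job's state back by id. A standalone sketch, reusing the host name and function path from the diff:

```python
import redis
import rq
from rq.job import Job

connection = redis.Redis(host='queue-server')
queue = rq.Queue(connection=connection)

base = queue.enqueue('fuzzbench.jobs.build_image', 'base-image',
                     job_id='base-image', job_timeout=600)
# Deferred until 'base-image' finishes successfully.
builder = queue.enqueue('fuzzbench.jobs.build_image', 'base-builder',
                        job_id='base-builder', depends_on=base)

job = Job.fetch('base-builder', connection=connection)
print(job.get_status())  # 'deferred' while the dependency is running
```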
258,396 | 23.07.2020 01:04:10 | 0 | 17959e39a2b00b239e8c4e0edbeced31536e50cc | [libFuzzer] Add -fno-builtin flags to disable optimizing calls to memcmp-like functions | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer/fuzzer.py",
"new_path": "fuzzers/libfuzzer/fuzzer.py",
"diff": "@@ -25,6 +25,18 @@ def build():\n# /usr/lib/libFuzzer.a as the FUZZER_LIB for the main fuzzing binary. This\n# allows us to link against a version of LibFuzzer that we specify.\ncflags = ['-fsanitize=fuzzer-no-link']\n+\n+ # Can be removed once the patch https://reviews.llvm.org/D83987 lands\n+ # and appears in gcr.io/fuzzbench/base-builder\n+ cflags += ['-fno-builtin-memcmp']\n+ cflags += ['-fno-builtin-strncmp']\n+ cflags += ['-fno-builtin-strcmp']\n+ cflags += ['-fno-builtin-strncasecmp']\n+ cflags += ['-fno-builtin-strcasecmp']\n+ cflags += ['-fno-builtin-strstr']\n+ cflags += ['-fno-builtin-strcasestr']\n+ cflags += ['-fno-builtin-memmem']\n+\nutils.append_flags('CFLAGS', cflags)\nutils.append_flags('CXXFLAGS', cflags)\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-07-22\n+ fuzzers:\n+ - libfuzzer\n+\n- experiment: 2020-07-20\nfuzzers:\n- libfuzzer\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [libFuzzer] Add -fno-builtin flags to disable optimizing calls to memcmp-like functions (#571) |
258,388 | 24.07.2020 15:21:44 | 25,200 | 86a64bc4d523a5ff11cca529d34e3e5886913b71 | Eliminate OSS-Fuzz runner.
It's the same thing as benchmark-runner. | [
{
"change_type": "MODIFY",
"old_path": "docker/gcb/oss-fuzz-fuzzer.yaml",
"new_path": "docker/gcb/oss-fuzz-fuzzer.yaml",
"diff": "@@ -175,7 +175,7 @@ steps:\n'benchmark=${_BENCHMARK}',\n'--file',\n- 'docker/oss-fuzz-runner/Dockerfile',\n+ 'docker/benchmark-runner/Dockerfile',\n'.',\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -125,11 +125,11 @@ endif\n\"\"\"\nOSS_FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE = \"\"\"\n-build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n+build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\n-pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-oss-fuzz-runner\n+pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-runner\n-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n+run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\ndocker run \\\\\n--cpus=1 \\\\\n--cap-add SYS_NICE \\\\\n@@ -142,7 +142,7 @@ run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\n-it {base_tag}/runners/{fuzzer}/{benchmark}\n-test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n+test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\ndocker run \\\\\n--cap-add SYS_NICE \\\\\n--cap-add SYS_PTRACE \\\\\n@@ -156,7 +156,7 @@ test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n-e SNAPSHOT_PERIOD=10 \\\\\n{base_tag}/runners/{fuzzer}/{benchmark}\n-debug-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner\n+debug-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\ndocker run \\\\\n--cpus=1 \\\\\n--cap-add SYS_NICE \\\\\n@@ -209,16 +209,16 @@ ifeq (,$(filter {fuzzer},coverage coverage_source_based))\n.pull-{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner: pull-base-runner\ndocker pull {base_tag}/runners/{fuzzer}/{benchmark}-intermediate\n-.{fuzzer}-{benchmark}-oss-fuzz-runner: .{fuzzer}-{benchmark}-oss-fuzz-builder .{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner\n+.{fuzzer}-{benchmark}-runner: .{fuzzer}-{benchmark}-oss-fuzz-builder .{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner\ndocker build \\\\\n--tag {base_tag}/runners/{fuzzer}/{benchmark} \\\\\n--build-arg fuzzer={fuzzer} \\\\\n--build-arg benchmark={benchmark} \\\\\n$(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}) \\\\\n- --file docker/oss-fuzz-runner/Dockerfile \\\\\n+ --file docker/benchmark-runner/Dockerfile \\\\\n.\n-.pull-{fuzzer}-{benchmark}-oss-fuzz-runner: .pull-{fuzzer}-{benchmark}-oss-fuzz-builder .pull-{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner\n+.pull-{fuzzer}-{benchmark}-runner: .pull-{fuzzer}-{benchmark}-oss-fuzz-builder .pull-{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner\ndocker pull {base_tag}/runners/{fuzzer}/{benchmark}\n\"\"\" + OSS_FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE + \"\"\"\n"
},
{
"change_type": "DELETE",
"old_path": "docker/oss-fuzz-runner/Dockerfile",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-#\n-# Dockerfile for running a specific OSS-Fuzz project as a benchmark for a\n-# specific fuzzer.\n-#\n-# This Dockerfile adds essential files into the runner image, regardless of\n-# whether it was built from `benchmark-runner/Dockerfile` or a custom\n-# `runner.Dockerfile`.\n-#\n-# The benchmark/fuzzer pair is defined by build arguments. To specify them, pass\n-# the following arguments to docker build:\n-#\n-# $ docker build \\\n-# --build-arg benchmark=bloaty_fuzz_target \\\n-# --build-arg fuzzer=afl \\\n-# ...\n-\n-ARG fuzzer\n-ARG benchmark\n-\n-# We use Docker's multi-stage build feature to create a minimal runner image,\n-# separate from the sometimes bulky builder images.\n-\n-# We take the already built builder image for the given fuzzer/benchmark pair\n-# and refer to it as \"builder\", so we can copy the build artifacts from it.\n-FROM gcr.io/fuzzbench/builders/$fuzzer/$benchmark AS builder\n-\n-# We base the runner image from the intermediate runner image, defined by the\n-# runner.Dockerfile of each fuzzer.\n-FROM gcr.io/fuzzbench/runners/$fuzzer/$benchmark-intermediate\n-\n-# Set up the directory for the build artifacts.\n-ENV WORKDIR /out\n-RUN mkdir -p $WORKDIR\n-WORKDIR $WORKDIR\n-\n-# Copy over all the build artifacts (without * to preserve directory structure).\n-# This also copies seed and dictionary files if they are available.\n-COPY --from=builder /out/ ./\n-# Copy the fuzzer.py file.\n-COPY --from=builder /src/fuzzer.py .\n-# Copy the fuzzers directory.\n-COPY --from=builder /src/fuzzers fuzzers\n-# Create empty __init__.py to allow python deps to work.\n-RUN touch __init__.py\n-\n-# Define environment variables used when we run the fuzzer:\n-# - Directory to get starting seeds from.\n-ENV SEED_CORPUS_DIR=$WORKDIR/seeds\n-# - Where to place new test cases generated by the fuzzer.\n-ENV OUTPUT_CORPUS_DIR=$WORKDIR/corpus\n-\n-# Create the seeds directory if it doesn't exist.\n-RUN mkdir -p $SEED_CORPUS_DIR $OUTPUT_CORPUS_DIR\n-\n-# Copy the source code into the image. We do this here instead of in base-image\n-# because it is likely to change, particularly in development. 
If this were done\n-# earlier, build cycles would be intolerably slow.\n-ENV ROOT_DIR=/src\n-COPY common $ROOT_DIR/common\n-COPY experiment/runner.py $ROOT_DIR/experiment/runner.py\n-COPY docker/benchmark-runner $ROOT_DIR/docker/benchmark-runner\n-\n-ENV PYTHONPATH=$ROOT_DIR\n-\n-# |VIRTUALENV_DIR| is set so that python code can know the location of current\n-# virtualenv directory and strip it if needed to execute in system python\n-# environment.\n-ENV VIRTUALENV_DIR=$ROOT_DIR/.venv\n-RUN virtualenv --python=$(which python3) $VIRTUALENV_DIR\n-RUN /bin/bash -c \"source $VIRTUALENV_DIR/bin/activate && \\\n- pip3 install -r $ROOT_DIR/docker/benchmark-runner/requirements.txt\"\n-RUN chmod +x $ROOT_DIR/docker/benchmark-runner/startup-runner.sh\n-ENTRYPOINT $ROOT_DIR/docker/benchmark-runner/startup-runner.sh\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Eliminate OSS-Fuzz runner. (#582)
It's the same thing as benchmark-runner. |
258,396 | 27.07.2020 21:42:42 | 0 | d012101bcfc2141ff214fb617ec488f74d5a5632 | [libFuzzer] Update libFuzzer.a version | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer/builder.Dockerfile",
"new_path": "fuzzers/libfuzzer/builder.Dockerfile",
"diff": "@@ -17,7 +17,7 @@ FROM $parent_image\nRUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\ncd /llvm-project/ && \\\n- git checkout 12d1124c49beec0fb79d36944960e5bf0f236d4c && \\\n+ git checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\ncd compiler-rt/lib/fuzzer && \\\n(for f in *.cpp; do \\\nclang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer/fuzzer.py",
"new_path": "fuzzers/libfuzzer/fuzzer.py",
"diff": "@@ -26,8 +26,9 @@ def build():\n# allows us to link against a version of LibFuzzer that we specify.\ncflags = ['-fsanitize=fuzzer-no-link']\n- # Can be removed once the patch https://reviews.llvm.org/D83987 lands\n- # and appears in gcr.io/fuzzbench/base-builder\n+ # Can be removed once the patch https://reviews.llvm.org/D83987\n+ # appears in gcr.io/fuzzbench/base-builder\n+ cflags += ['-fno-builtin-bcmp']\ncflags += ['-fno-builtin-memcmp']\ncflags += ['-fno-builtin-strncmp']\ncflags += ['-fno-builtin-strcmp']\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [libFuzzer] Update libFuzzer.a version (#596) |
258,399 | 28.07.2020 15:00:22 | 18,000 | 6090e47d0319ffa4e820885592d653d9b2aa0487 | [NFC] update current jobs status reporting info | [
{
"change_type": "MODIFY",
"old_path": "fuzzbench/run_experiment.py",
"new_path": "fuzzbench/run_experiment.py",
"diff": "@@ -46,8 +46,14 @@ def run_experiment():\nwhile True:\nprint('Current status of jobs:')\n+ print('\\tqueued:\\t%d' % queue.count)\n+ print('\\tstarted:\\t%d' % queue.started_job_registry.count)\n+ print('\\tdeferred:\\t%d' % queue.deferred_job_registry.count)\n+ print('\\tfinished:\\t%d' % queue.finished_job_registry.count)\n+ print('\\tfailed:\\t%d' % queue.failed_job_registry.count)\nfor job in jobs_list:\n- print(' %s%s : %s' % (job.func_name, job.args, job.get_status()))\n+ print(' %s : %s\\t(%s)' % (job.func_name, job.get_status(), job.id))\n+\nif all([job.result is not None for job in jobs_list]):\nbreak\ntime.sleep(3)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [NFC] update current jobs status reporting info (#588) |
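The counts printed above come straight from rq's per-state registries. A condensed sketch of the same polling loop, assuming queue is an rq 1.x Queue (which exposes started_job_registry, deferred_job_registry, finished_job_registry and failed_job_registry) and jobs_list holds the enqueued Job handles:

import time

def wait_and_report(queue, jobs_list, poll_seconds=3):
    # Poll until every job has produced a result, printing counts.
    while True:
        print('Current status of jobs:')
        print('\tqueued:\t%d' % queue.count)
        print('\tstarted:\t%d' % queue.started_job_registry.count)
        print('\tdeferred:\t%d' % queue.deferred_job_registry.count)
        print('\tfinished:\t%d' % queue.finished_job_registry.count)
        print('\tfailed:\t%d' % queue.failed_job_registry.count)
        if all(job.result is not None for job in jobs_list):
            return
        time.sleep(poll_seconds)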
258,399 | 28.07.2020 16:12:07 | 18,000 | 94e3c0aeb82c3503b77fb8e5aaf4894e5c10131b | add stop logic: docker-compose down | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "# Running experiments locally.\n-run-experiment: export COMPOSE_PROJECT_NAME := fuzzbench\n-run-experiment: export COMPOSE_FILE := compose/fuzzbench.yaml\n+run-experiment stop-experiment: export COMPOSE_PROJECT_NAME := fuzzbench\n+run-experiment stop-experiment: export COMPOSE_FILE := compose/fuzzbench.yaml\nrun-experiment:\ndocker-compose up --build --scale worker=2 --detach\ndocker-compose logs --follow run-experiment\ndocker-compose down\n+# Running this is only necessary if `run-experiment` was interrupted and\n+# containers were not cleaned up.\n+stop-experiment:\n+ docker-compose down\n+\n# Development.\n-run-end-to-end-test: export COMPOSE_PROJECT_NAME := e2e-test\n-run-end-to-end-test: export COMPOSE_FILE := compose/fuzzbench.yaml:compose/e2e-test.yaml\n+run-end-to-end-test stop-end-to-end-test: export COMPOSE_PROJECT_NAME := e2e-test\n+run-end-to-end-test stop-end-to-end-test: export COMPOSE_FILE := compose/fuzzbench.yaml:compose/e2e-test.yaml\nrun-end-to-end-test:\ndocker-compose build\ndocker-compose up --detach queue-server\n@@ -32,6 +37,11 @@ run-end-to-end-test:\ndocker-compose run run-tests; STATUS=$$?; \\\ndocker-compose down; exit $$STATUS\n+# Running this is only necessary if `run-end-to-end-test` was interrupted and\n+# containers were not cleaned up.\n+stop-end-to-end-test:\n+ docker-compose down\n+\ninclude docker/build.mk\ninclude docker/generated.mk\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | add stop logic: docker-compose down (#600) |
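Because COMPOSE_PROJECT_NAME and COMPOSE_FILE are exported for both the run and stop targets, docker-compose down tears down exactly the containers the run target created. The same cleanup from a script, should make be unavailable (a sketch; project name and compose path as in the Makefile above):

import os
import subprocess

env = dict(os.environ,
           COMPOSE_PROJECT_NAME='fuzzbench',
           COMPOSE_FILE='compose/fuzzbench.yaml')
# Equivalent of `make stop-experiment`.
subprocess.run(['docker-compose', 'down'], env=env, check=False)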
258,396 | 28.07.2020 22:58:05 | 0 | 28d27f34449204712a6f57a8ddf64ef419996ef5 | Request 2020-07-28 experiment | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-07-28\n+ fuzzers:\n+ - afl\n+ - honggfuzz\n+ - libfuzzer\n+ - entropic\n+\n- experiment: 2020-07-27\nfuzzers:\n- afl\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Request 2020-07-28 experiment (#604) |
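Requests are prepended to service/experiment-requests.yaml, so the newest experiment is always the first list entry. A sketch of reading the latest request back, assuming PyYAML:

import yaml

with open('service/experiment-requests.yaml') as request_file:
    requests = yaml.safe_load(request_file)

latest = requests[0]
print(latest['experiment'], latest['fuzzers'])
# e.g. 2020-07-28 ['afl', 'honggfuzz', 'libfuzzer', 'entropic']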
258,371 | 29.07.2020 00:27:01 | 14,400 | 486c708f1d0f03de85e9e6a7c91360f97f3e8f24 | Store specific coverage data at the end of measuring. | [
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -133,18 +133,16 @@ def dispatcher_main():\nargs=(experiment.config,))\nscheduler_loop_thread.start()\n- max_total_time = experiment.config['max_total_time']\n- measurer_loop_process = multiprocessing.Process(\n- target=measurer.measure_loop,\n- args=(experiment.experiment_name, max_total_time))\n+ measurer_main_process = multiprocessing.Process(\n+ target=measurer.measure_main, args=(experiment.config,))\n- measurer_loop_process.start()\n+ measurer_main_process.start()\nis_complete = False\nwhile True:\ntime.sleep(LOOP_WAIT_SECONDS)\nif not scheduler_loop_thread.is_alive():\n- is_complete = not measurer_loop_process.is_alive()\n+ is_complete = not measurer_main_process.is_alive()\n# Generate periodic output reports.\nreporter.output_report(experiment.config, in_progress=not is_complete)\n@@ -155,7 +153,7 @@ def dispatcher_main():\nlogs.info('Dispatcher finished.')\nscheduler_loop_thread.join()\n- measurer_loop_process.join()\n+ measurer_main_process.join()\ndef main():\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -53,6 +53,7 @@ NUM_RETRIES = 3\nRETRY_DELAY = 3\nFAIL_WAIT_SECONDS = 30\nSNAPSHOT_QUEUE_GET_TIMEOUT = 1\n+COV_DIFF_QUEUE_GET_TIMEOUT = 1\nSNAPSHOTS_BATCH_SAVE_SIZE = 100\n@@ -67,13 +68,126 @@ def exists_in_experiment_filestore(path: pathlib.Path) -> bool:\nmust_exist=False).retcode == 0\n+def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n+ \"\"\"Return the key in coverage dict for a pair of fuzzer-benchmark.\"\"\"\n+ return fuzzer + ' ' + benchmark\n+\n+\n+def get_trial_ids(experiment: str, fuzzer: str, benchmark: str):\n+ \"\"\"Get ids of all finished trials for a pair of fuzzer and benchmark.\"\"\"\n+ trial_ids = [\n+ trial_id_tuple[0]\n+ for trial_id_tuple in db_utils.query(models.Trial.id).filter(\n+ models.Trial.experiment == experiment, models.Trial.fuzzer ==\n+ fuzzer, models.Trial.benchmark == benchmark,\n+ ~models.Trial.preempted)\n+ ]\n+ return trial_ids\n+\n+\n+def get_coverage_infomation(coverage_summary_file):\n+ \"\"\"Read the coverage information from |coverage_summary_file|\n+ and skip possible warnings in the file.\"\"\"\n+ with open(coverage_summary_file) as summary:\n+ return json.loads(summary.readlines()[-1])\n+\n+\n+def store_coverage_data(experiment_config: dict):\n+ \"\"\"Generate the specific coverage data and store in cloud bucket.\"\"\"\n+ logger.info('Start storing coverage data')\n+ with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:\n+ q = manager.Queue() # pytype: disable=attribute-error\n+ covered_regions = get_all_covered_regions(experiment_config, pool, q)\n+ json_src_dir = get_experiment_folders_dir()\n+ json_src = os.path.join(json_src_dir, 'covered_regions.json')\n+ with open(json_src, 'w') as src_file:\n+ json.dump(covered_regions, src_file)\n+ json_dst = exp_path.filestore(json_src)\n+ filestore_utils.cp(json_src, json_dst)\n+ logger.info('Finished storing coverage data')\n+\n+\n+def get_all_covered_regions(experiment_config: dict, pool, q) -> dict:\n+ \"\"\"Get regions covered for each pair for fuzzer and benchmark.\"\"\"\n+ logger.info('Measuring all fuzzer-benchmark pairs for final coverage data.')\n+\n+ benchmarks = experiment_config['benchmarks'].split(',')\n+ fuzzers = experiment_config['fuzzers'].split(',')\n+ experiment = experiment_config['experiment']\n+\n+ get_covered_region_args = [(experiment, fuzzer, benchmark, q)\n+ for fuzzer in fuzzers\n+ for benchmark in benchmarks]\n+\n+ result = pool.starmap_async(get_covered_region, get_covered_region_args)\n+\n+ # Poll the queue for covered region data and save them in a dict until the\n+ # pool is done processing each combination of fuzzers and benchmarks.\n+ all_covered_regions = {}\n+\n+ while True:\n+ try:\n+ covered_regions = q.get(timeout=COV_DIFF_QUEUE_GET_TIMEOUT)\n+ all_covered_regions.update(covered_regions)\n+ except queue.Empty:\n+ if result.ready():\n+ # If \"ready\" that means pool has finished. 
Since it is\n+ # finished and the queue is empty, we can stop checking\n+ # the queue for more covered regions.\n+ logger.debug(\n+ 'Finished call to map with get_all_covered_regions.')\n+ break\n+\n+ for key in all_covered_regions:\n+ all_covered_regions[key] = list(all_covered_regions[key])\n+ logger.info('Done measuring all coverage data.')\n+ return all_covered_regions\n+\n+\n+def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\n+ q: multiprocessing.Queue):\n+ \"\"\"Get the final covered region for a specific pair of fuzzer-benchmark.\"\"\"\n+ initialize_logs()\n+ logger.debug('Measuring covered region: fuzzer: %s, benchmark: %s.', fuzzer,\n+ benchmark)\n+ key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n+ covered_regions = {key: set()}\n+ trial_ids = get_trial_ids(experiment, fuzzer, benchmark)\n+ for trial_id in trial_ids:\n+ logger.info('Measuring covered region: trial_id = %d.', trial_id)\n+ snapshot_logger = logs.Logger('measurer',\n+ default_extras={\n+ 'fuzzer': fuzzer,\n+ 'benchmark': benchmark,\n+ 'trial_id': str(trial_id),\n+ })\n+ snapshot_measurer = SnapshotMeasurer(fuzzer, benchmark, trial_id,\n+ snapshot_logger)\n+ new_covered_regions = snapshot_measurer.get_current_covered_regions()\n+ covered_regions[key] = covered_regions[key].union(new_covered_regions)\n+ q.put(covered_regions)\n+ logger.debug('Done measuring covered region: fuzzer: %s, benchmark: %s.',\n+ fuzzer, benchmark)\n+\n+\n+def measure_main(experiment_config):\n+ \"\"\"Do the continuously measuring and the final measuring.\"\"\"\n+ initialize_logs()\n+ logger.info('Start measuring.')\n+\n+ # Start the measure loop first.\n+ experiment = experiment_config['experiment']\n+ max_total_time = experiment_config['max_total_time']\n+ measure_loop(experiment, max_total_time)\n+\n+ # Do the final measuring and store the coverage data.\n+ store_coverage_data(experiment_config)\n+ logger.info('Finished measuring.')\n+\n+\ndef measure_loop(experiment: str, max_total_time: int):\n\"\"\"Continuously measure trials for |experiment|.\"\"\"\n- logs.initialize(default_extras={\n- 'component': 'dispatcher',\n- 'subcomponent': 'measurer',\n- })\n- logs.info('Start measure_loop.')\n+ logger.info('Start measure_loop.')\nwith multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:\nset_up_coverage_binaries(pool, experiment)\n@@ -98,7 +212,7 @@ def measure_loop(experiment: str, max_total_time: int):\ntime.sleep(FAIL_WAIT_SECONDS)\n- logger.info('Finished measuring.')\n+ logger.info('Finished measure loop.')\ndef measure_all_trials(experiment: str, max_total_time: int, pool, q) -> bool: # pylint: disable=invalid-name\n@@ -367,33 +481,47 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\nself.UNIT_BLACKLIST[self.benchmark] = (\nself.UNIT_BLACKLIST[self.benchmark].union(set(crashing_units)))\n+ def get_current_covered_regions(self):\n+ \"\"\"Get the covered regions for the current trial.\"\"\"\n+ covered_regions = set()\n+ try:\n+ coverage_info = get_coverage_infomation(self.cov_summary_file)\n+ functions_data = coverage_info['data'][0]['functions']\n+ # The fourth number in the region-list indicates if the region\n+ # is hit.\n+ hit_index = 4\n+ # The last number in the region-list indicates what type of the\n+ # region it is; 'code_region' is used to obtain various code\n+ # coverage statistic and is represented by number 0.\n+ type_index = -1\n+ for function_data in functions_data:\n+ for region in function_data['regions']:\n+ if region[hit_index] != 0 and region[type_index] == 
0:\n+ covered_regions.add(tuple(region[:hit_index]))\n+ except Exception: # pylint: disable=broad-except\n+ self.logger.error(\n+ 'Coverage summary json file defective or missing.')\n+ return covered_regions\n+\ndef get_current_coverage(self) -> int:\n\"\"\"Get the current number of lines covered.\"\"\"\nif not os.path.exists(self.cov_summary_file):\nself.logger.warning('No coverage summary json file found.')\nreturn 0\ntry:\n- with open(self.cov_summary_file) as summary:\n- # Using the last line to skip the warning in bloaty\n- coverage_info = json.loads(summary.readlines()[-1])\n+ coverage_info = get_coverage_infomation(self.cov_summary_file)\ncoverage_data = coverage_info[\"data\"][0]\nsummary_data = coverage_data[\"totals\"]\nregions_coverage_data = summary_data[\"regions\"]\nregions_covered = regions_coverage_data[\"covered\"]\nreturn regions_covered\nexcept Exception: # pylint: disable=broad-except\n- self.logger.error('Coverage summary json file defective.')\n+ self.logger.error(\n+ 'Coverage summary json file defective or missing.')\nreturn 0\ndef generate_profdata(self, cycle: int):\n\"\"\"Generate .profdata file from .profraw file.\"\"\"\n- if not os.path.exists(self.profraw_file):\n- self.logger.error('No profraw file found for cycle: %d.', cycle)\n- return\n- if not os.path.getsize(self.profraw_file):\n- self.logger.error('Empty profraw file found for cycle: %d.', cycle)\n- return\n-\nif os.path.isfile(self.profdata_file):\n# If coverage profdata exists, then merge it with\n# existing available data.\n@@ -410,17 +538,9 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\ndef generate_summary(self, cycle: int):\n\"\"\"Transform the .profdata file into json form.\"\"\"\n- if not os.path.exists(self.profdata_file):\n- self.logger.error('No profdata file found for cycle: %d.', cycle)\n- return\n- if not os.path.getsize(self.profdata_file):\n- self.logger.error('Empty profdata file found for cycle: %d.', cycle)\n- return\n-\ncoverage_binary = get_coverage_binary(self.benchmark)\ncommand = [\n- 'llvm-cov', 'export', '-format=text', '-summary-only',\n- coverage_binary,\n+ 'llvm-cov', 'export', '-format=text', coverage_binary,\n'-instr-profile=%s' % self.profdata_file\n]\nwith open(self.cov_summary_file, 'w') as output_file:\n@@ -432,6 +552,25 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\n'Coverage summary json file generation failed for \\\ncycle: %d.', cycle)\n+ def generate_coverage_information(self, cycle: int):\n+ \"\"\"Generate the .profdata file and then transform it into\n+ json summary.\"\"\"\n+ if not os.path.exists(self.profraw_file):\n+ self.logger.error('No profraw file found for cycle: %d.', cycle)\n+ return\n+ if not os.path.getsize(self.profraw_file):\n+ self.logger.error('Empty profraw file found for cycle: %d.', cycle)\n+ return\n+ self.generate_profdata(cycle)\n+\n+ if not os.path.exists(self.profdata_file):\n+ self.logger.error('No profdata file found for cycle: %d.', cycle)\n+ return\n+ if not os.path.getsize(self.profdata_file):\n+ self.logger.error('Empty profdata file found for cycle: %d.', cycle)\n+ return\n+ self.generate_summary(cycle)\n+\ndef is_cycle_unchanged(self, cycle: int) -> bool:\n\"\"\"Returns True if |cycle| is unchanged according to the\nunchanged-cycles file. 
This file is written to by the trial's runner.\"\"\"\n@@ -592,8 +731,7 @@ def measure_snapshot_coverage(fuzzer: str, benchmark: str, trial_num: int,\nsnapshot_measurer.run_cov_new_units()\n# Generate profdata and transform it into json form.\n- snapshot_measurer.generate_profdata(cycle)\n- snapshot_measurer.generate_summary(cycle)\n+ snapshot_measurer.generate_coverage_information(cycle)\n# Get the coverage of the new corpus units.\nregions_covered = snapshot_measurer.get_current_coverage()\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_data/cov_summary.json",
"new_path": "experiment/test_data/cov_summary.json",
"diff": "-{\"version\":\"2.0.0\",\"type\":\"llvm.coverage.json.export\",\"data\":[{\"files\":[{\"filename\":\"/home/test/fuzzing/tutorial/libFuzzer/fuzz_me.cc\",\"summary\":{\"lines\":{\"count\":11,\"covered\":11,\"percent\":100},\"functions\":{\"count\":2,\"covered\":2,\"percent\":100},\"instantiations\":{\"count\":2,\"covered\":2,\"percent\":100},\"regions\":{\"count\":10,\"covered\":7,\"notcovered\":3,\"percent\":70}}}],\"totals\":{\"lines\":{\"count\":11,\"covered\":11,\"percent\":100},\"functions\":{\"count\":2,\"covered\":2,\"percent\":100},\"instantiations\":{\"count\":2,\"covered\":2,\"percent\":100},\"regions\":{\"count\":10,\"covered\":7,\"notcovered\":3,\"percent\":70}}}]}\n\\ No newline at end of file\n+{\"version\":\"2.0.0\",\"type\":\"llvm.coverage.json.export\",\"data\":[{\"files\":[{\"filename\":\"/home/test/fuzz_no_fuzzer.cc\",\"segments\":[[1,16,20,1,1],[1,17,20,1,1],[1,20,20,1,0],[1,24,2,1,1],[1,27,20,1,0],[1,28,0,0,0],[2,37,2,1,1],[3,24,22,1,1],[3,30,2,1,0],[3,32,20,1,1],[3,35,2,1,0],[3,36,20,1,0],[3,37,20,1,1],[3,39,20,1,1],[3,42,20,1,0],[3,48,2,1,0],[5,3,0,1,1],[6,2,0,0,0],[7,12,1,1,1],[11,3,0,1,1],[12,2,0,0,0]],\"expansions\":[{\"source_region\":[3,39,3,42,10,0,1,1],\"target_regions\":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],\"filenames\":[\"/home/test/fuzz_no_fuzzer.cc\",\"/home/test/fuzz_no_fuzzer.cc\"]},{\"source_region\":[3,39,3,42,10,0,1,1],\"target_regions\":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],\"filenames\":[\"/home/test/fuzz_no_fuzzer.cc\",\"/home/test/fuzz_no_fuzzer.cc\"]}],\"summary\":{\"lines\":{\"count\":11,\"covered\":9,\"percent\":81},\"functions\":{\"count\":2,\"covered\":2,\"percent\":100},\"instantiations\":{\"count\":3,\"covered\":3,\"percent\":100},\"regions\":{\"count\":10,\"covered\":8,\"notcovered\":2,\"percent\":80}}}],\"functions\":[{\"name\":\"main\",\"count\":1,\"regions\":[[7,12,12,2,1,0,0,0],[11,3,12,2,0,0,0,0]],\"filenames\":[\"/home/test/fuzz_no_fuzzer.cc\"]},{\"name\":\"_Z3fooIiEvT_\",\"count\":1,\"regions\":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],\"filenames\":[\"/home/test/fuzz_no_fuzzer.cc\",\"/home/test/fuzz_no_fuzzer.cc\"]},{\"name\":\"_Z3fooIfEvT_\",\"count\":1,\"regions\":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],\"filenames\":[\"/home/test/fuzz_no_fuzzer.cc\",\"/home/test/fuzz_no_fuzzer.cc\"]}],\"totals\":{\"lines\":{\"count\":11,\"covered\":9,\"percent\":81},\"functions\":{\"count\":2,\"covered\":2,\"percent\":100},\"instantiations\":{\"count\":3,\"covered\":3,\"percent\":100},\"regions\":{\"count\":10,\"covered\":8,\"notcovered\":2,\"percent\":80}}}]}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_measurer.py",
"new_path": "experiment/test_measurer.py",
"diff": "@@ -56,6 +56,17 @@ def db_experiment(experiment_config, db):\nyield\n+def test_get_current_covered_regions(fs, experiment):\n+ \"\"\"Tests that get_current_coverage reads the correct data from json file.\"\"\"\n+ snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n+ SNAPSHOT_LOGGER)\n+ json_cov_summary_file = get_test_data_path('cov_summary.json')\n+ fs.add_real_file(json_cov_summary_file, read_only=False)\n+ snapshot_measurer.cov_summary_file = json_cov_summary_file\n+ covered_regions = snapshot_measurer.get_current_covered_regions()\n+ assert len(covered_regions) == 8\n+\n+\ndef test_get_current_coverage(fs, experiment):\n\"\"\"Tests that get_current_coverage reads the correct data from json file.\"\"\"\nsnapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n@@ -64,7 +75,7 @@ def test_get_current_coverage(fs, experiment):\nfs.add_real_file(json_cov_summary_file, read_only=False)\nsnapshot_measurer.cov_summary_file = json_cov_summary_file\ncovered_regions = snapshot_measurer.get_current_coverage()\n- assert covered_regions == 7\n+ assert covered_regions == 8\ndef test_get_current_coverage_error(fs, experiment):\n@@ -150,7 +161,7 @@ def test_generate_summary(mocked_get_coverage_binary, mocked_execute,\nsnapshot_measurer.generate_summary(CYCLE)\nexpected = [\n- 'llvm-cov', 'export', '-format=text', '-summary-only',\n+ 'llvm-cov', 'export', '-format=text',\n'/work/coverage-binaries/benchmark-a/fuzz-target',\n'-instr-profile=/reports/data.profdata'\n]\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Store specific coverage data at the end of measuring. (#595) |
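The region bookkeeping above leans on llvm-cov's JSON export schema: each entry in a function's regions list is [LineStart, ColumnStart, LineEnd, ColumnEnd, ExecutionCount, FileID, ExpandedFileID, Kind], and Kind 0 marks a code region. A standalone sketch of the same extraction the measurer performs:

import json

def covered_regions(cov_summary_file):
    # Collect (LineStart, ColumnStart, LineEnd, ColumnEnd) tuples for
    # every executed code region in an llvm-cov JSON export.
    with open(cov_summary_file) as summary:
        # Reading only the last line skips warnings that some tools
        # prepend to the JSON output.
        coverage_info = json.loads(summary.readlines()[-1])
    regions = set()
    for function in coverage_info['data'][0]['functions']:
        for region in function['regions']:
            hit_count, kind = region[4], region[-1]
            if hit_count != 0 and kind == 0:
                regions.add(tuple(region[:4]))
    return regions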
258,388 | 30.07.2020 10:19:50 | 25,200 | fadaaafbc2f3dc6c3c25acfac28161c6ccc0ebed | [GCB] Fix parent_image outside of FuzzBench cloud project
GCB was broken on non-FuzzBench cloud projects because experiment
tagging meant that certain images were no longer tagged the way
they were passed as parent_image. | [
{
"change_type": "MODIFY",
"old_path": "docker/gcb/coverage.yaml",
"new_path": "docker/gcb/coverage.yaml",
"diff": "@@ -101,7 +101,7 @@ steps:\n'${_REPO}/builders/coverage/${_BENCHMARK}',\n'--build-arg',\n- 'parent_image=${_REPO}/builders/coverage/${_BENCHMARK}-intermediate',\n+ 'parent_image=gcr.io/fuzzbench/builders/coverage/${_BENCHMARK}-intermediate',\n'--build-arg',\n'fuzzer=coverage',\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/gcb/fuzzer.yaml",
"new_path": "docker/gcb/fuzzer.yaml",
"diff": "@@ -102,7 +102,7 @@ steps:\n'${_REPO}/builders/${_FUZZER}/${_BENCHMARK}',\n'--build-arg',\n- 'parent_image=${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate',\n+ 'parent_image=gcr.io/fuzzbench/builders/${_FUZZER}/${_BENCHMARK}-intermediate',\n'--build-arg',\n'fuzzer=${_FUZZER}',\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [GCB] Fix parent_image outside of FuzzBench cloud project (#612)
GCB was broken on non-FuzzBench cloud projects because experiment
tagging meant that certain images were no longer tagged the way
they were passed as parent_image. |
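The fix pins the -intermediate parent images to gcr.io/fuzzbench while the final images keep using ${_REPO}, because, per the commit message, experiment tagging changed how those intermediate images were tagged under a user's own registry. Cloud Build expands ${_VAR} substitutions itself; string.Template mimics that expansion closely enough for a local sanity check (an illustration, not part of the repo):

from string import Template

arg = Template(
    'parent_image=gcr.io/fuzzbench/builders/${_FUZZER}/${_BENCHMARK}-intermediate')
print(arg.substitute(_FUZZER='afl', _BENCHMARK='libpng'))
# parent_image=gcr.io/fuzzbench/builders/afl/libpng-intermediate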
258,399 | 30.07.2020 17:07:24 | 18,000 | 6dcf2696f446338746b59de2892cb60bc775f5d4 | change worker mode | [
{
"change_type": "MODIFY",
"old_path": "compose/fuzzbench.yaml",
"new_path": "compose/fuzzbench.yaml",
"diff": "@@ -14,7 +14,8 @@ services:\nimage: fuzzbench\nenvironment:\nRQ_REDIS_URL: redis://queue-server\n- command: rq worker --burst\n+ PYTHONPATH: .\n+ command: python3 fuzzbench/worker.py\nvolumes:\n# Allow access to the host's docker daemon.\n- /var/run/docker.sock:/var/run/docker.sock\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "tag: 'base-image'\ncontext: 'docker/base-image'\n-'base-builder':\n- tag: 'base-builder'\n- context: 'docker/base-builder'\n- depends_on:\n- - 'base-image'\n-\n'base-runner':\ntag: 'base-runner'\ncontext: 'docker/base-runner'\ncontext: 'fuzzers/coverage'\ndockerfile: 'fuzzers/coverage/builder.Dockerfile'\ndepends_on:\n- - 'base-builder'\n+ - 'base-image'\n'coverage-{benchmark}-builder':\ntag: 'builders/coverage/{benchmark}'\ncontext: 'fuzzers/{fuzzer}'\ndockerfile: 'fuzzers/{fuzzer}/builder.Dockerfile'\ndepends_on:\n- - 'base-builder'\n+ - 'base-image'\n'{fuzzer}-{benchmark}-builder':\ntag: 'builders/{fuzzer}/{benchmark}'\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_docker_images.py",
"new_path": "experiment/build/test_docker_images.py",
"diff": "@@ -23,7 +23,7 @@ def test_images_to_build_list():\nbenchmarks = ['libxml', 'libpng']\nall_images = docker_images.get_images_to_build(fuzzers, benchmarks)\nassert set(all_images.keys()) == set([\n- 'base-image', 'base-builder', 'base-runner', 'coverage-builder',\n+ 'base-image', 'base-runner', 'coverage-builder',\n'coverage-libxml-builder', 'coverage-libpng-builder', 'afl-builder',\n'afl-libxml-builder', 'afl-libxml-intermediate-runner',\n'afl-libxml-runner', 'afl-libpng-builder',\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/run_experiment.py",
"new_path": "fuzzbench/run_experiment.py",
"diff": "@@ -24,19 +24,14 @@ from fuzzbench import jobs\ndef run_experiment():\n\"\"\"Main experiment logic.\"\"\"\nprint('Initializing the job queue.')\n- queue = rq.Queue()\n+ # Create the queue for scheduling build jobs and run jobs.\n+ queue = rq.Queue('build_n_run_queue')\njobs_list = []\njobs_list.append(\nqueue.enqueue(jobs.build_image,\n'base-image',\njob_timeout=600,\njob_id='base-image'))\n- jobs_list.append(\n- queue.enqueue(jobs.build_image,\n- 'base-builder',\n- job_timeout=600,\n- job_id='base-builder',\n- depends_on='base-image'))\njobs_list.append(\nqueue.enqueue(jobs.build_image,\n'base-runner',\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/test_e2e_run.py",
"new_path": "fuzzbench/test_e2e_run.py",
"diff": "@@ -39,14 +39,13 @@ class TestEndToEndRunResults:\n\"\"\"Tests that jobs dependency preserves during working.\"\"\"\njobs = {\nname: Job.fetch(name, connection=redis_connection)\n- for name in ['base-image', 'base-builder', 'base-runner']\n+ for name in ['base-image', 'base-runner']\n}\n- assert jobs['base-image'].ended_at <= jobs['base-builder'].started_at\nassert jobs['base-image'].ended_at <= jobs['base-runner'].started_at\ndef test_all_jobs_finished_successfully(self, redis_connection): # pylint: disable=redefined-outer-name\n\"\"\"Tests all jobs finished successully.\"\"\"\n- jobs = Job.fetch_many(['base-image', 'base-builder', 'base-runner'],\n+ jobs = Job.fetch_many(['base-image', 'base-runner'],\nconnection=redis_connection)\nfor job in jobs:\nassert job.get_status() == 'finished'\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzbench/worker.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Self-defined worker module.\"\"\"\n+import time\n+\n+import redis\n+import rq\n+\n+\n+def main():\n+ \"\"\"Sets up Redis connection and starts the worker.\"\"\"\n+ redis_connection = redis.Redis(host=\"queue-server\")\n+ with rq.Connection(redis_connection):\n+ queue = rq.Queue('build_n_run_queue')\n+ worker = rq.Worker([queue])\n+\n+ while queue.count + queue.deferred_job_registry.count > 0:\n+ worker.work(burst=True)\n+ time.sleep(5)\n+\n+\n+if __name__ == '__main__':\n+ main()\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | change worker mode (#585) |
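The burst worker above drains the queue repeatedly until both the queue and its deferred-job registry are empty, which gives jobs enqueued with depends_on a chance to become runnable between bursts. The producing side, condensed from run_experiment.py; the Redis host, queue name and fuzzbench.jobs import match the compose file above (which puts the repo root on PYTHONPATH):

import redis
import rq

from fuzzbench import jobs

connection = redis.Redis(host='queue-server')
queue = rq.Queue('build_n_run_queue', connection=connection)
queue.enqueue(jobs.build_image, 'base-image',
              job_timeout=600, job_id='base-image')
# Stays deferred until base-image finishes; a later burst picks it up.
queue.enqueue(jobs.build_image, 'base-runner',
              job_timeout=600, job_id='base-runner',
              depends_on='base-image')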
258,370 | 31.07.2020 15:25:57 | 14,400 | 9f07002ccf8e7ef50a983d2d8921fcedbcd78a4a | Makefile build simplification using image definitions instead of text-template
Makefile build simplification using image definitions. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"new_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"diff": "@@ -77,6 +77,9 @@ def delete_docker_images():\nimage_ids = result.stdout.splitlines()\nsubprocess.run(['docker', 'rmi', '-f'] + image_ids, check=False)\n+ # Needed for BUILDKIT to clear build cache & avoid insufficient disk space.\n+ subprocess.run(['docker', 'builder', 'prune', '-f'], check=False)\n+\ndef make_builds(benchmarks, fuzzer):\n\"\"\"Use make to build each target in |build_targets|.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -42,7 +42,6 @@ run-end-to-end-test:\nstop-end-to-end-test:\ndocker-compose down\n-include docker/build.mk\ninclude docker/generated.mk\nSHELL := /bin/bash\n@@ -55,7 +54,7 @@ ${VENV_ACTIVATE}: requirements.txt\ninstall-dependencies: ${VENV_ACTIVATE}\ndocker/generated.mk: docker/generate_makefile.py $(wildcard fuzzers/*/variants.yaml) ${VENV_ACTIVATE}\n- source ${VENV_ACTIVATE} && python3 $< > $@\n+ source ${VENV_ACTIVATE} && PYTHONPATH=. python3 $< > $@\npresubmit: install-dependencies\nsource ${VENV_ACTIVATE} && python3 presubmit.py\n"
},
{
"change_type": "DELETE",
"old_path": "docker/build.mk",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-BENCHMARKS := $(notdir $(shell find benchmarks -type f -name benchmark.yaml | xargs dirname))\n-\n-BASE_TAG ?= gcr.io/fuzzbench\n-\n-# If we're running on a CI service, cache-from a remote image. Otherwise just\n-# use the local cache.\n-cache_from = $(if ${RUNNING_ON_CI},--cache-from $(1),)\n-\n-# For base-* images (and those that depend on it), use a remote cache by\n-# default, unless the developer sets DISABLE_REMOTE_CACHE_FOR_BASE.\n-cache_from_base = $(if ${DISABLE_REMOTE_CACHE_FOR_BASE},,--cache-from $(1))\n-\n-base-image:\n- docker build \\\n- --tag $(BASE_TAG)/base-image \\\n- $(call cache_from_base,${BASE_TAG}/base-image) \\\n- docker/base-image\n-\n-pull-base-image:\n- docker pull $(BASE_TAG)/base-image\n-\n-dispatcher-image: base-image\n- docker build \\\n- --tag $(BASE_TAG)/dispatcher-image \\\n- $(call cache_from,${BASE_TAG}/dispatcher-image) \\\n- docker/dispatcher-image\n-\n-define benchmark_template\n-$(1)-fuzz-target := $(shell cat benchmarks/$(1)/benchmark.yaml | \\\n- grep fuzz_target | cut -d ':' -f2 | tr -d ' ')\n-\n-# TODO: It would be better to call this benchmark builder. But that would be\n-# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n-# that and then rename this.\n-.$(1)-project-builder:\n- docker build \\\n- --tag $(BASE_TAG)/builders/benchmark/$(1) \\\n- --file benchmarks/$(1)/Dockerfile \\\n- $(call cache_from,${BASE_TAG}/builders/benchmarks/$(1)) \\\n- benchmarks/$(1)\n-\n-endef\n-# Instantiate the above template with all OSS-Fuzz benchmarks.\n-$(foreach benchmark,$(BENCHMARKS), \\\n- $(eval $(call benchmark_template,$(benchmark))))\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "import os\n-BASE_TAG = 'gcr.io/fuzzbench'\n-BENCHMARKS_DIR = os.path.join(os.path.dirname(__file__), os.pardir,\n- 'benchmarks')\n-FUZZERS_DIR = os.path.join(os.path.dirname(__file__), os.pardir, 'fuzzers')\n-\n-BOILERPLATE = \"\"\"\n-cache_from = $(if ${RUNNING_ON_CI},--cache-from {fuzzer},)\n-\"\"\"\n-\n-FUZZER_TEMPLATE = \"\"\"\n-.{fuzzer}-builder: base-image\n- docker build \\\\\n- --tag {base_tag}/builders/{fuzzer} \\\\\n- --file fuzzers/{fuzzer}/builder.Dockerfile \\\\\n- $(call cache_from,{base_tag}/builders/{fuzzer}) \\\\\n- fuzzers/{fuzzer}\n-\n-.pull-{fuzzer}-builder: pull-base-image\n- docker pull {base_tag}/builders/{fuzzer}\n-\"\"\"\n-\n-FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE = \"\"\"\n-build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\n-\n-pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-runner\n-\n-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\n- docker run \\\\\n- --cpus=1 \\\\\n- --cap-add SYS_NICE \\\\\n- --cap-add SYS_PTRACE \\\\\n- -e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\n- -e FORCE_LOCAL=1 \\\\\n- -e TRIAL_ID=1 \\\\\n- -e FUZZER={fuzzer} \\\\\n- -e BENCHMARK={benchmark} \\\\\n- -e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\n- -it {base_tag}/runners/{fuzzer}/{benchmark}\n-\n-test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\n- docker run \\\\\n- --cap-add SYS_NICE \\\\\n- --cap-add SYS_PTRACE \\\\\n- -e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\n- -e FORCE_LOCAL=1 \\\\\n- -e TRIAL_ID=1 \\\\\n- -e FUZZER={fuzzer} \\\\\n- -e BENCHMARK={benchmark} \\\\\n- -e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\n- -e MAX_TOTAL_TIME=20 \\\\\n- -e SNAPSHOT_PERIOD=10 \\\\\n- {base_tag}/runners/{fuzzer}/{benchmark}\n-\n-debug-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\n- docker run \\\\\n- --cpus=1 \\\\\n- --cap-add SYS_NICE \\\\\n- --cap-add SYS_PTRACE \\\\\n- -e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\n- -e FORCE_LOCAL=1 \\\\\n- -e TRIAL_ID=1 \\\\\n- -e FUZZER={fuzzer} \\\\\n- -e BENCHMARK={benchmark} \\\\\n- -e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\n- --entrypoint \"/bin/bash\" \\\\\n- -it {base_tag}/runners/{fuzzer}/{benchmark}\n-\"\"\"\n-\n-FUZZER_BENCHMARK_TEMPLATE = \"\"\"\n-.{fuzzer}-{benchmark}-builder-intermediate: .{benchmark}-project-builder\n- docker build \\\\\n- --tag {base_tag}/builders/{fuzzer}/{benchmark}-intermediate \\\\\n- --file=fuzzers/{fuzzer}/builder.Dockerfile \\\\\n- --build-arg parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark} \\\\\n- $(call cache_from,{base_tag}/builders/{fuzzer}/{benchmark}-intermediate) \\\\\n- fuzzers/{fuzzer}\n-\n-.pull-{fuzzer}-{benchmark}-builder-intermediate:\n- docker pull {base_tag}/builders/{fuzzer}/{benchmark}-intermediate\n-\n-.{fuzzer}-{benchmark}-builder: .{fuzzer}-{benchmark}-builder-intermediate\n- docker build \\\\\n- --tag {base_tag}/builders/{fuzzer}/{benchmark} \\\\\n- --file=docker/benchmark-builder/Dockerfile \\\\\n- --build-arg parent_image={base_tag}/builders/{fuzzer}/{benchmark}-intermediate \\\\\n- --build-arg fuzzer={fuzzer} \\\\\n- --build-arg benchmark={benchmark} \\\\\n- $(call cache_from,{base_tag}/builders/{fuzzer}/{benchmark}) \\\\\n- .\n-\n-.pull-{fuzzer}-{benchmark}-builder: .pull-{fuzzer}-{benchmark}-builder-intermediate\n- docker pull {base_tag}/builders/{fuzzer}/{benchmark}\n-\n-ifneq ({fuzzer}, coverage)\n-\n-.{fuzzer}-{benchmark}-intermediate-runner: .{fuzzer}-{benchmark}-builder\n- docker build \\\\\n- --tag {base_tag}/runners/{fuzzer}/{benchmark}-intermediate \\\\\n- --file fuzzers/{fuzzer}/runner.Dockerfile \\\\\n- $(call 
cache_from,{base_tag}/runners/{fuzzer}/{benchmark}-intermediate) \\\\\n- fuzzers/{fuzzer}\n-\n-.pull-{fuzzer}-{benchmark}-intermediate-runner: pull-base-image\n- docker pull {base_tag}/runners/{fuzzer}/{benchmark}-intermediate\n-\n-.{fuzzer}-{benchmark}-runner: .{fuzzer}-{benchmark}-intermediate-runner\n- docker build \\\\\n- --tag {base_tag}/runners/{fuzzer}/{benchmark} \\\\\n- --build-arg fuzzer={fuzzer} \\\\\n- --build-arg benchmark={benchmark} \\\\\n- $(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}) \\\\\n- --file docker/benchmark-runner/Dockerfile \\\\\n- .\n-\n-.pull-{fuzzer}-{benchmark}-runner: .pull-{fuzzer}-{benchmark}-intermediate-runner\n- docker pull {base_tag}/runners/{fuzzer}/{benchmark}\n-\n-\"\"\" + FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE + \"\"\"\n-\n-else\n-\n-build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-builder\n-pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-builder\n-\n-endif\n-\"\"\"\n-\n-\n-def generate_fuzzer(fuzzer, benchmarks):\n- \"\"\"Output make rules for a single fuzzer.\"\"\"\n- # Generate build rules for the fuzzer itself.\n- print(FUZZER_TEMPLATE.format(fuzzer=fuzzer, base_tag=BASE_TAG))\n-\n- # Generate rules for fuzzer-benchmark pairs.\n+from common import yaml_utils\n+from common import benchmark_utils\n+from common import fuzzer_utils\n+from experiment.build import docker_images\n+\n+BASE_TAG = \"gcr.io/fuzzbench\"\n+BENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR\n+\n+\n+def _print_benchmark_fuzz_target(benchmarks):\n+ \"\"\"Prints benchmark variables from benchmark.yaml files.\"\"\"\nfor benchmark in benchmarks:\n- print(\n- FUZZER_BENCHMARK_TEMPLATE.format(fuzzer=fuzzer,\n+ benchmark_vars = yaml_utils.read(\n+ os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))\n+ print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])\n+ print()\n+\n+\n+def _print_makefile_run_template(image):\n+ fuzzer, benchmark = image['tag'].split('/')[1:]\n+\n+ for run_type in ('run', 'debug', 'test-run'):\n+ print(('{run_type}-{fuzzer}-{benchmark}: ' +\n+ '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,\nbenchmark=benchmark,\n- base_tag=BASE_TAG))\n+ fuzzer=fuzzer))\n+\n+ print('\\\n+\\tdocker run \\\\\\n\\\n+\\t--cpus=1 \\\\\\n\\\n+\\t--cap-add SYS_NICE \\\\\\n\\\n+\\t--cap-add SYS_PTRACE \\\\\\n\\\n+\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n+\\t-e FORCE_LOCAL=1 \\\\\\n\\\n+\\t-e TRIAL_ID=1 \\\\\\n\\\n+\\t-e FUZZER={fuzzer} \\\\\\n\\\n+\\t-e BENCHMARK={benchmark} \\\\\\n\\\n+\\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\\\n+'.format(fuzzer=fuzzer, benchmark=benchmark))\n+\n+ if run_type == 'test-run':\n+ print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n+ if run_type == 'debug':\n+ print('\\t-entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ else:\n+ print('\\t', end='')\n+\n+ print(os.path.join(BASE_TAG, image['tag']))\n+ print()\n+\n+\n+# TODO(tanq16): Add unit test.\n+def _print_rules_for_image(name, image):\n+ \"\"\"Print makefile section for given image to stdout.\"\"\"\n+ if not ('base' in name or 'dispatcher' in name):\n+ print('.', end='')\n+ print(name + ':', end='')\n+ if 'depends_on' in image:\n+ for dep in image['depends_on']:\n+ if 'base' in dep:\n+ print(' ' + dep, end='')\n+ else:\n+ print(' .' 
+ dep, end='')\n+ print()\n+ print('\\tdocker build \\\\')\n+ print('\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n+ print('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\')\n+ print('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n+ if 'build_arg' in image:\n+ for arg in image['build_arg']:\n+ print('\\t--build-arg ' + arg + ' \\\\')\n+ if 'dockerfile' in image:\n+ print('\\t--file ' + image['dockerfile'] + ' \\\\')\n+ print('\\t' + image['context'])\n+ print()\n+\n+ # Print run, debug, test-run rules if image is a runner.\n+ if 'runner' in name and not ('intermediate' in name or 'base' in name):\n+ _print_makefile_run_template(image)\n- # Generate rules for building/pulling all target/benchmark pairs.\n- all_build_targets = ' '.join(\n- ['build-{0}-{1}'.format(fuzzer, benchmark) for benchmark in benchmarks])\n- all_pull_targets = ' '.join(\n- ['pull-{0}-{1}'.format(fuzzer, benchmark) for benchmark in benchmarks])\n+\n+def main():\n+ \"\"\"Generates Makefile with docker image build rules.\"\"\"\n+ fuzzers = fuzzer_utils.get_fuzzer_names()\n+ benchmarks = benchmark_utils.get_all_benchmarks()\n+ buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n+\n+ print('export DOCKER_BUILDKIT := 1')\n+\n+ # Print oss-fuzz benchmarks property variables.\n+ _print_benchmark_fuzz_target(benchmarks)\n+\n+ for name, image in buildable_images.items():\n+ _print_rules_for_image(name, image)\n+\n+ # Print build targets for all fuzzer-benchmark pairs (including coverage).\n+ fuzzers.append('coverage')\n+ for fuzzer in fuzzers:\n+ image_type = \"runner\"\n+ if 'coverage' in fuzzer:\n+ image_type = \"builder\"\n+ for benchmark in benchmarks:\n+ print(('build-{fuzzer}-{benchmark}: ' +\n+ '.{fuzzer}-{benchmark}-{image_type}\\n').format(\n+ fuzzer=fuzzer,\n+ benchmark=benchmark,\n+ image_type=image_type))\n+ print()\n+\n+ # Print fuzzer-all benchmarks build targets.\n+ for fuzzer in fuzzers:\n+ all_build_targets = ' '.join([\n+ 'build-{0}-{1}'.format(fuzzer, benchmark)\n+ for benchmark in benchmarks\n+ ])\nprint('build-{fuzzer}-all: {all_targets}'.format(\nfuzzer=fuzzer, all_targets=all_build_targets))\n- print('pull-{fuzzer}-all: {all_targets}'.format(\n- fuzzer=fuzzer, all_targets=all_pull_targets))\n-\n-def main():\n- \"\"\"Main entry point.\"\"\"\n- # Output boilerplate used by other templates and generated rules.\n- print(BOILERPLATE)\n-\n- # Compute the list of benchmarks.\n- benchmarks = []\n- for benchmark in os.listdir(BENCHMARKS_DIR):\n- benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)\n- if not os.path.isdir(benchmark_path):\n- continue\n- if os.path.exists(os.path.join(benchmark_path, 'benchmark.yaml')):\n- benchmarks.append(benchmark)\n-\n- # Generate the build rules for fuzzer/benchmark pairs.\n- fuzzers = []\n- for fuzzer in os.listdir(FUZZERS_DIR):\n- # Skip non-directory files. These do not represent fuzzers.\n- fuzzer_dir = os.path.join(FUZZERS_DIR, fuzzer)\n- if not os.path.isdir(fuzzer_dir):\n- continue\n-\n- generate_fuzzer(fuzzer, benchmarks)\n- fuzzers.append(fuzzer)\n-\n- # Generate rules to build all known targets.\n+ # Print all targets build target.\nall_build_targets = ' '.join(\n['build-{0}-all'.format(name) for name in fuzzers])\n- all_pull_targets = ' '.join(\n- ['pull-{0}-all'.format(name) for name in fuzzers])\nprint('build-all: {all_targets}'.format(all_targets=all_build_targets))\n- print('pull-all: {all_targets}'.format(all_targets=all_pull_targets))\nif __name__ == '__main__':\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "tag: 'base-image'\ncontext: 'docker/base-image'\n-'coverage-builder':\n- tag: 'builders/coverage'\n+'dispatcher-image':\n+ tag: 'dispatcher-image'\n+ context: 'docker/dispatcher-image'\n+ depends_on:\n+ - 'base-image'\n+\n+'coverage-{benchmark}-builder-intermediate':\n+ tag: 'builders/coverage/{benchmark}-intermediate'\ncontext: 'fuzzers/coverage'\ndockerfile: 'fuzzers/coverage/builder.Dockerfile'\ndepends_on:\n- - 'base-image'\n+ - '{benchmark}-project-builder'\n+ build_arg:\n+ - 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\n'coverage-{benchmark}-builder':\ntag: 'builders/coverage/{benchmark}'\ncontext: '.'\ndockerfile: 'docker/benchmark-builder/Dockerfile'\nbuild_arg:\n+ - 'parent_image=gcr.io/fuzzbench/builders/coverage/{benchmark}-intermediate'\n- 'fuzzer=coverage'\n- 'benchmark={benchmark}'\ndepends_on:\n- - 'coverage-builder'\n+ - 'coverage-{benchmark}-builder-intermediate'\n+\n+# TODO: It would be better to call this benchmark builder. But that would be\n+# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n+# that and then rename this.\n+'{benchmark}-project-builder':\n+ tag: 'builders/benchmark/{benchmark}'\n+ context: 'benchmarks/{benchmark}'\n+ dockerfile: 'benchmarks/{benchmark}/Dockerfile'\n-'{fuzzer}-builder':\n- tag: 'builders/{fuzzer}'\n+'{fuzzer}-{benchmark}-builder-intermediate':\n+ tag: 'builders/{fuzzer}/{benchmark}-intermediate'\ncontext: 'fuzzers/{fuzzer}'\ndockerfile: 'fuzzers/{fuzzer}/builder.Dockerfile'\ndepends_on:\n- - 'base-image'\n+ - '{benchmark}-project-builder'\n+ build_arg:\n+ - 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\n'{fuzzer}-{benchmark}-builder':\ntag: 'builders/{fuzzer}/{benchmark}'\ncontext: '.'\ndockerfile: 'docker/benchmark-builder/Dockerfile'\nbuild_arg:\n+ - 'parent_image=gcr.io/fuzzbench/builders/{fuzzer}/{benchmark}-intermediate'\n- 'fuzzer={fuzzer}'\n- 'benchmark={benchmark}'\ndepends_on:\n- - '{fuzzer}-builder'\n+ - '{fuzzer}-{benchmark}-builder-intermediate'\n'{fuzzer}-{benchmark}-intermediate-runner':\ntag: 'runners/{fuzzer}/{benchmark}-intermediate'\n"
},
{
"change_type": "UNKNOWN",
"old_path": "docs/developing-fuzzbench/adding_a_new_benchmark.md",
"new_path": "docs/developing-fuzzbench/adding_a_new_benchmark.md",
"diff": ""
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/docker_images.py",
"new_path": "experiment/build/docker_images.py",
"diff": "# limitations under the License.\n\"\"\"Provides the set of buildable images and their dependencies.\"\"\"\n+import os\n+\nfrom common import yaml_utils\n+from common.utils import ROOT_DIR\ndef _substitute(template, fuzzer, benchmark):\n@@ -40,7 +43,9 @@ def _instantiate_image_obj(name_template, obj_template, fuzzer, benchmark):\ndef _get_image_type_templates():\n\"\"\"Loads the image types config that contains \"templates\" describing how to\nbuild them and their dependencies.\"\"\"\n- return yaml_utils.read('docker/image_types.yaml')\n+ yaml_file = os.path.join(ROOT_DIR, 'docker', 'image_types.yaml')\n+ all_templates = yaml_utils.read(yaml_file)\n+ return all_templates\ndef get_images_to_build(fuzzers, benchmarks):\n"
},
{
"change_type": "DELETE",
"old_path": "experiment/build/generate_makefile.py",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\"\"\"Generates Makefile containing docker image targets.\"\"\"\n-\n-import argparse\n-\n-from experiment.build import docker_images\n-\n-\n-# TODO(Tanq16): Add unit test for this.\n-def print_makefile(buildable_images, docker_registry):\n- \"\"\"Prints the generated makefile to stdout.\"\"\"\n- print('export DOCKER_BUILDKIT := 1')\n-\n- for name, image in buildable_images.items():\n- print(name + ':', end='')\n- if 'depends_on' in image:\n- for dep in image['depends_on']:\n- print(' ' + dep, end='')\n- print()\n- print('\\tdocker build \\\\')\n- print(' --tag ' + image['tag'] + ' \\\\')\n- print(' --cache-from ' + docker_registry + image['tag'] + ' \\\\')\n- if 'build_arg' in image:\n- for arg in image['build_arg']:\n- print(' --build-arg ' + arg + ' \\\\')\n- if 'dockerfile' in image:\n- print(' --file ' + image['dockerfile'] + ' \\\\')\n- print(' ' + image['context'])\n- print()\n-\n-\n-def main():\n- \"\"\"Generates Makefile with docker image build rules.\"\"\"\n- parser = argparse.ArgumentParser(\n- description='Makefile build rule generator.')\n- parser.add_argument('-r',\n- '--docker-registry',\n- default='gcr.io/fuzzbench/',\n- help='Docker registry to use as cache.')\n- args = parser.parse_args()\n-\n- # TODO(Tanq16): Create fuzzer/benchmark list dynamically.\n- fuzzers = ['afl', 'libfuzzer']\n- benchmarks = ['libxml', 'libpng']\n- buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n- print_makefile(buildable_images, args.docker_registry)\n-\n-\n-if __name__ == '__main__':\n- main()\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_docker_images.py",
"new_path": "experiment/build/test_docker_images.py",
"diff": "@@ -23,14 +23,18 @@ def test_images_to_build_list():\nbenchmarks = ['libxml', 'libpng']\nall_images = docker_images.get_images_to_build(fuzzers, benchmarks)\nassert set(all_images.keys()) == set([\n- 'base-image', 'coverage-builder', 'coverage-libxml-builder',\n- 'coverage-libpng-builder', 'afl-builder', 'afl-libxml-builder',\n- 'afl-libxml-intermediate-runner', 'afl-libxml-runner',\n- 'afl-libpng-builder', 'afl-libpng-intermediate-runner',\n- 'afl-libpng-runner', 'libfuzzer-builder', 'libfuzzer-libxml-builder',\n+ 'base-image', 'dispatcher-image', 'libxml-project-builder',\n+ 'libpng-project-builder', 'afl-libxml-builder-intermediate',\n+ 'afl-libxml-intermediate-runner', 'afl-libxml-builder',\n+ 'coverage-libxml-builder', 'afl-libpng-builder',\n+ 'afl-libpng-intermediate-runner', 'afl-libpng-builder-intermediate',\n+ 'afl-libpng-runner', 'libfuzzer-libxml-builder-intermediate',\n+ 'libfuzzer-libxml-builder', 'libfuzzer-libpng-builder-intermediate',\n'libfuzzer-libxml-intermediate-runner', 'libfuzzer-libxml-runner',\n'libfuzzer-libpng-builder', 'libfuzzer-libpng-intermediate-runner',\n- 'libfuzzer-libpng-runner'\n+ 'libfuzzer-libpng-runner', 'coverage-libxml-builder-intermediate',\n+ 'coverage-libpng-builder', 'coverage-libxml-builder-intermediate',\n+ 'afl-libxml-runner', 'coverage-libpng-builder-intermediate'\n])\n"
},
{
"change_type": "MODIFY",
"old_path": "src_analysis/test_change_utils.py",
"new_path": "src_analysis/test_change_utils.py",
"diff": "@@ -23,7 +23,7 @@ def test_get_changed_fuzzers_for_ci():\n\"\"\"Tests that get_changed_fuzzers_for_ci returns all fuzzers when a file\nthat affects all fuzzer build was changed.\"\"\"\nchanged_fuzzers = change_utils.get_changed_fuzzers_for_ci(\n- [os.path.join(utils.ROOT_DIR, 'docker', 'build.mk')])\n+ [os.path.join(utils.ROOT_DIR, 'docker', 'image_types.yaml')])\nassert changed_fuzzers == fuzzer_utils.get_fuzzer_names()\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Makefile build simplification using image definitions instead of text-template (#567)
Makefile build simplification using image definitions. |
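The generator's core move is template substitution over docker/image_types.yaml: every {fuzzer} and {benchmark} placeholder in an image name, tag, context or build arg is expanded for each pair, and one make rule is printed per concrete image. A reduced sketch of that instantiation step (the real docker_images module also expands depends_on, dockerfile and build_arg fields):

def substitute(template, fuzzer, benchmark):
    return template.format(fuzzer=fuzzer, benchmark=benchmark)

def instantiate_images(templates, fuzzers, benchmarks):
    images = {}
    for fuzzer in fuzzers:
        for benchmark in benchmarks:
            for name, image in templates.items():
                images[substitute(name, fuzzer, benchmark)] = {
                    'tag': substitute(image['tag'], fuzzer, benchmark),
                    'context': substitute(image['context'], fuzzer, benchmark),
                }
    return images

templates = {
    '{fuzzer}-{benchmark}-builder': {
        'tag': 'builders/{fuzzer}/{benchmark}',
        'context': '.',
    },
}
print(instantiate_images(templates, ['afl'], ['libpng']))
# {'afl-libpng-builder': {'tag': 'builders/afl/libpng', 'context': '.'}}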
258,396 | 31.07.2020 14:44:02 | 25,200 | 8b738d4e2a5a55ff9c5f11294546500e366e6384 | Add temporary libfuzzer/entropic variants for evaluating the impact of keeping all seed inputs. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -35,7 +35,9 @@ jobs:\n# temporary variants.\n- aflplusplus_havoc\n- libfuzzer_interceptors\n+ - libfuzzer_keepseed\n- entropic_interceptors\n+ - entropic_keepseed\nbenchmark_type:\n- oss-fuzz\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_keepseed/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project && \\\n+ git checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch.diff && \\\n+ cd /llvm-project/compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /libEntropic.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_keepseed/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.entropic_interceptors import fuzzer as entropic_fuzzer\n+from fuzzers.libfuzzer_interceptors import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ entropic_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus,\n+ output_corpus,\n+ target_binary,\n+ extra_flags=['-entropic=1', '-keep_seed=1'])\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_keepseed/patch.diff",
"diff": "+commit aac9771fa16e3fc00725d4bbd662d71186a09532\n+Author: Dokyung Song <[email protected]>\n+Date: Fri Jul 31 00:07:20 2020 +0000\n+\n+ [libFuzzer] Optionally keep initial seed inputs regardless of whether they discover new features or not.\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+index 54d1e09ec6d..80398a9d7ce 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+@@ -33,6 +33,7 @@ struct InputInfo {\n+ // Stats.\n+ size_t NumExecutedMutations = 0;\n+ size_t NumSuccessfullMutations = 0;\n++ bool SeedInput = false;\n+ bool MayDeleteFile = false;\n+ bool Reduced = false;\n+ bool HasFocusFunction = false;\n+@@ -131,9 +132,11 @@ class InputCorpus {\n+\n+ EntropicOptions Entropic;\n+\n++ bool KeepSeed = false;\n++\n+ public:\n+- InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic)\n+- : Entropic(Entropic), OutputCorpus(OutputCorpus) {\n++ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic, bool KeepSeed)\n++ : Entropic(Entropic), OutputCorpus(OutputCorpus), KeepSeed(KeepSeed) {\n+ memset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));\n+ memset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));\n+ }\n+@@ -177,7 +180,7 @@ public:\n+ bool empty() const { return Inputs.empty(); }\n+ const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\n+ InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n+- bool HasFocusFunction,\n++ bool HasFocusFunction, bool SeedInput,\n+ const Vector<uint32_t> &FeatureSet,\n+ const DataFlowTrace &DFT, const InputInfo *BaseII) {\n+ assert(!U.empty());\n+@@ -187,6 +190,7 @@ public:\n+ InputInfo &II = *Inputs.back();\n+ II.U = U;\n+ II.NumFeatures = NumFeatures;\n++ II.SeedInput = SeedInput;\n+ II.MayDeleteFile = MayDeleteFile;\n+ II.UniqFeatureSet = FeatureSet;\n+ II.HasFocusFunction = HasFocusFunction;\n+@@ -471,7 +475,7 @@ private:\n+\n+ for (size_t i = 0; i < N; i++) {\n+\n+- if (Inputs[i]->NumFeatures == 0) {\n++ if (Inputs[i]->NumFeatures == 0 && !(Inputs[i]->SeedInput && KeepSeed)) {\n+ // If the seed doesn't represent any features, assign zero energy.\n+ Weights[i] = 0.;\n+ } else if (Inputs[i]->NumExecutedMutations / kMaxMutationFactor >\n+@@ -491,7 +495,7 @@ private:\n+\n+ if (VanillaSchedule) {\n+ for (size_t i = 0; i < N; i++)\n+- Weights[i] = Inputs[i]->NumFeatures\n++ Weights[i] = (Inputs[i]->NumFeatures || (KeepSeed && Inputs[i]->SeedInput))\n+ ? (i + 1) * (Inputs[i]->HasFocusFunction ? 
1000 : 1)\n+ : 0.;\n+ }\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+index 00a33a413d2..ef7991c1e27 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+@@ -649,6 +649,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Options.Verbosity = Flags.verbosity;\n+ Options.MaxLen = Flags.max_len;\n+ Options.LenControl = Flags.len_control;\n++ Options.KeepSeed = Flags.keep_seed;\n+ Options.UnitTimeoutSec = Flags.timeout;\n+ Options.ErrorExitCode = Flags.error_exitcode;\n+ Options.TimeoutExitCode = Flags.timeout_exitcode;\n+@@ -753,7 +754,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+\n+ Random Rand(Seed);\n+ auto *MD = new MutationDispatcher(Rand, Options);\n+- auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic);\n++ auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\n+ auto *F = new Fuzzer(Callback, *Corpus, *MD, Options);\n+\n+ for (auto &U: Dictionary)\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+index 832224a705d..0dac7e705a3 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n++++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+@@ -23,6 +23,8 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\n+ FUZZER_FLAG_STRING(seed_inputs, \"A comma-separated list of input files \"\n+ \"to use as an additional seed corpus. Alternatively, an \\\"@\\\" followed by \"\n+ \"the name of a file containing the comma-separated list.\")\n++FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep seed inputs for mutation even if \"\n++ \"they do not produce new coverage.\")\n+ FUZZER_FLAG_INT(cross_over, 1, \"If 1, cross over inputs.\")\n+ FUZZER_FLAG_INT(mutate_depth, 5,\n+ \"Apply this number of consecutive mutations to each input.\")\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+index d9e6b79443e..38fb82fc12d 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+@@ -309,11 +309,17 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\n+ else\n+ Env.MainCorpusDir = CorpusDirs[0];\n+\n+- auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n+- CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n+- {}, &Env.Cov,\n+- CFPath, false);\n+- RemoveFile(CFPath);\n++ if (Options.KeepSeed) {\n++ for (auto &File : SeedFiles)\n++ Env.Files.push_back(File.File);\n++ }\n++ else {\n++ auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n++ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n++ {}, &Env.Cov,\n++ CFPath, false);\n++ RemoveFile(CFPath);\n++ }\n+ Printf(\"INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\\n\", NumJobs,\n+ Env.Files.size(), Env.TempDir.c_str());\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerInternal.h b/compiler-rt/lib/fuzzer/FuzzerInternal.h\n+index 31096ce804b..e75807209f5 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerInternal.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerInternal.h\n+@@ -119,6 +119,8 @@ private:\n+\n+ size_t LastCorpusUpdateRun = 0;\n+\n++ bool IsExecutingSeedCorpora = false;\n++\n+ bool HasMoreMallocsThanFrees = false;\n+ size_t NumberOfLeakDetectionAttempts = 0;\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+index 02db6d27b0a..a9af25a3070 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n++++ 
b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+@@ -487,10 +487,11 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ *FoundUniqFeatures = FoundUniqFeaturesOfII;\n+ PrintPulseAndReportSlowInput(Data, Size);\n+ size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;\n+- if (NumNewFeatures) {\n++ if (NumNewFeatures || (Options.KeepSeed && IsExecutingSeedCorpora)) {\n+ TPC.UpdateObservedPCs();\n+ auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+ MayDeleteFile, TPC.ObservedFocusFunction(),\n++ IsExecutingSeedCorpora,\n+ UniqFeatureSetTmp, DFT, II);\n+ WriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\n+ NewII->UniqFeatureSet);\n+@@ -764,6 +765,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ assert(CorporaFiles.front().Size <= CorporaFiles.back().Size);\n+ }\n+\n++ IsExecutingSeedCorpora = true;\n++\n+ // Load and execute inputs one by one.\n+ for (auto &SF : CorporaFiles) {\n+ auto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);\n+@@ -773,6 +776,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ TryDetectingAMemoryLeak(U.data(), U.size(),\n+ /*DuringInitialCorpusExecution*/ true);\n+ }\n++\n++ IsExecutingSeedCorpora = false;\n+ }\n+\n+ PrintStats(\"INITED\");\n+@@ -785,6 +790,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ Corpus.NumInputsThatTouchFocusFunction());\n+ }\n+\n++ Printf(\"INFO: corpus size = %d\\n\", Corpus.size());\n++\n+ if (Corpus.empty() && Options.MaxNumberOfRuns) {\n+ Printf(\"ERROR: no interesting inputs were found. \"\n+ \"Is the code instrumented for coverage? Exiting.\\n\");\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+index 9d975bd61fe..ccd0b3dcb56 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+@@ -18,6 +18,7 @@ struct FuzzingOptions {\n+ int Verbosity = 1;\n+ size_t MaxLen = 0;\n+ size_t LenControl = 1000;\n++ bool KeepSeed = false;\n+ int UnitTimeoutSec = 300;\n+ int TimeoutExitCode = 70;\n+ int OOMExitCode = 71;\n+diff --git a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n+index 0e9435ab8fc..dfc642ab6d0 100644\n+--- a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n++++ b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n+@@ -593,7 +593,8 @@ TEST(Corpus, Distribution) {\n+ DataFlowTrace DFT;\n+ Random Rand(0);\n+ struct EntropicOptions Entropic = {false, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ size_t N = 10;\n+ size_t TriesPerUnit = 1<<16;\n+ for (size_t i = 0; i < N; i++)\n+@@ -1057,7 +1058,8 @@ TEST(Entropic, UpdateFrequency) {\n+ size_t Index;\n+ // Create input corpus with default entropic configuration\n+ struct EntropicOptions Entropic = {true, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ std::unique_ptr<InputInfo> II(new InputInfo());\n+\n+ C->AddRareFeature(FeatIdx1);\n+@@ -1094,7 +1096,8 @@ double SubAndSquare(double X, double Y) {\n+ TEST(Entropic, ComputeEnergy) {\n+ const double Precision = 0.01;\n+ struct EntropicOptions Entropic = {true, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", 
Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ std::unique_ptr<InputInfo> II(new InputInfo());\n+ Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs = {{1, 3}, {2, 3}, {3, 3}};\n+ II->FeatureFreqs = FeatureFreqs;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_keepseed/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_keepseed/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project/ && \\\n+ git checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch.diff && \\\n+ cd compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /usr/lib/libFuzzer.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_keepseed/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.libfuzzer_interceptors import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ libfuzzer_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus,\n+ output_corpus,\n+ target_binary,\n+ extra_flags=['-keep_seed=1'])\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_keepseed/patch.diff",
"diff": "+commit aac9771fa16e3fc00725d4bbd662d71186a09532\n+Author: Dokyung Song <[email protected]>\n+Date: Fri Jul 31 00:07:20 2020 +0000\n+\n+ [libFuzzer] Optionally keep initial seed inputs regardless of whether they discover new features or not.\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+index 54d1e09ec6d..80398a9d7ce 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+@@ -33,6 +33,7 @@ struct InputInfo {\n+ // Stats.\n+ size_t NumExecutedMutations = 0;\n+ size_t NumSuccessfullMutations = 0;\n++ bool SeedInput = false;\n+ bool MayDeleteFile = false;\n+ bool Reduced = false;\n+ bool HasFocusFunction = false;\n+@@ -131,9 +132,11 @@ class InputCorpus {\n+\n+ EntropicOptions Entropic;\n+\n++ bool KeepSeed = false;\n++\n+ public:\n+- InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic)\n+- : Entropic(Entropic), OutputCorpus(OutputCorpus) {\n++ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic, bool KeepSeed)\n++ : Entropic(Entropic), OutputCorpus(OutputCorpus), KeepSeed(KeepSeed) {\n+ memset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));\n+ memset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));\n+ }\n+@@ -177,7 +180,7 @@ public:\n+ bool empty() const { return Inputs.empty(); }\n+ const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\n+ InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n+- bool HasFocusFunction,\n++ bool HasFocusFunction, bool SeedInput,\n+ const Vector<uint32_t> &FeatureSet,\n+ const DataFlowTrace &DFT, const InputInfo *BaseII) {\n+ assert(!U.empty());\n+@@ -187,6 +190,7 @@ public:\n+ InputInfo &II = *Inputs.back();\n+ II.U = U;\n+ II.NumFeatures = NumFeatures;\n++ II.SeedInput = SeedInput;\n+ II.MayDeleteFile = MayDeleteFile;\n+ II.UniqFeatureSet = FeatureSet;\n+ II.HasFocusFunction = HasFocusFunction;\n+@@ -471,7 +475,7 @@ private:\n+\n+ for (size_t i = 0; i < N; i++) {\n+\n+- if (Inputs[i]->NumFeatures == 0) {\n++ if (Inputs[i]->NumFeatures == 0 && !(Inputs[i]->SeedInput && KeepSeed)) {\n+ // If the seed doesn't represent any features, assign zero energy.\n+ Weights[i] = 0.;\n+ } else if (Inputs[i]->NumExecutedMutations / kMaxMutationFactor >\n+@@ -491,7 +495,7 @@ private:\n+\n+ if (VanillaSchedule) {\n+ for (size_t i = 0; i < N; i++)\n+- Weights[i] = Inputs[i]->NumFeatures\n++ Weights[i] = (Inputs[i]->NumFeatures || (KeepSeed && Inputs[i]->SeedInput))\n+ ? (i + 1) * (Inputs[i]->HasFocusFunction ? 
1000 : 1)\n+ : 0.;\n+ }\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+index 00a33a413d2..ef7991c1e27 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+@@ -649,6 +649,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Options.Verbosity = Flags.verbosity;\n+ Options.MaxLen = Flags.max_len;\n+ Options.LenControl = Flags.len_control;\n++ Options.KeepSeed = Flags.keep_seed;\n+ Options.UnitTimeoutSec = Flags.timeout;\n+ Options.ErrorExitCode = Flags.error_exitcode;\n+ Options.TimeoutExitCode = Flags.timeout_exitcode;\n+@@ -753,7 +754,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+\n+ Random Rand(Seed);\n+ auto *MD = new MutationDispatcher(Rand, Options);\n+- auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic);\n++ auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\n+ auto *F = new Fuzzer(Callback, *Corpus, *MD, Options);\n+\n+ for (auto &U: Dictionary)\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+index 832224a705d..0dac7e705a3 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n++++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+@@ -23,6 +23,8 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\n+ FUZZER_FLAG_STRING(seed_inputs, \"A comma-separated list of input files \"\n+ \"to use as an additional seed corpus. Alternatively, an \\\"@\\\" followed by \"\n+ \"the name of a file containing the comma-separated list.\")\n++FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep seed inputs for mutation even if \"\n++ \"they do not produce new coverage.\")\n+ FUZZER_FLAG_INT(cross_over, 1, \"If 1, cross over inputs.\")\n+ FUZZER_FLAG_INT(mutate_depth, 5,\n+ \"Apply this number of consecutive mutations to each input.\")\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+index d9e6b79443e..38fb82fc12d 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+@@ -309,11 +309,17 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\n+ else\n+ Env.MainCorpusDir = CorpusDirs[0];\n+\n+- auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n+- CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n+- {}, &Env.Cov,\n+- CFPath, false);\n+- RemoveFile(CFPath);\n++ if (Options.KeepSeed) {\n++ for (auto &File : SeedFiles)\n++ Env.Files.push_back(File.File);\n++ }\n++ else {\n++ auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n++ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n++ {}, &Env.Cov,\n++ CFPath, false);\n++ RemoveFile(CFPath);\n++ }\n+ Printf(\"INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\\n\", NumJobs,\n+ Env.Files.size(), Env.TempDir.c_str());\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerInternal.h b/compiler-rt/lib/fuzzer/FuzzerInternal.h\n+index 31096ce804b..e75807209f5 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerInternal.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerInternal.h\n+@@ -119,6 +119,8 @@ private:\n+\n+ size_t LastCorpusUpdateRun = 0;\n+\n++ bool IsExecutingSeedCorpora = false;\n++\n+ bool HasMoreMallocsThanFrees = false;\n+ size_t NumberOfLeakDetectionAttempts = 0;\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+index 02db6d27b0a..a9af25a3070 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n++++ 
b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+@@ -487,10 +487,11 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ *FoundUniqFeatures = FoundUniqFeaturesOfII;\n+ PrintPulseAndReportSlowInput(Data, Size);\n+ size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;\n+- if (NumNewFeatures) {\n++ if (NumNewFeatures || (Options.KeepSeed && IsExecutingSeedCorpora)) {\n+ TPC.UpdateObservedPCs();\n+ auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+ MayDeleteFile, TPC.ObservedFocusFunction(),\n++ IsExecutingSeedCorpora,\n+ UniqFeatureSetTmp, DFT, II);\n+ WriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\n+ NewII->UniqFeatureSet);\n+@@ -764,6 +765,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ assert(CorporaFiles.front().Size <= CorporaFiles.back().Size);\n+ }\n+\n++ IsExecutingSeedCorpora = true;\n++\n+ // Load and execute inputs one by one.\n+ for (auto &SF : CorporaFiles) {\n+ auto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);\n+@@ -773,6 +776,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ TryDetectingAMemoryLeak(U.data(), U.size(),\n+ /*DuringInitialCorpusExecution*/ true);\n+ }\n++\n++ IsExecutingSeedCorpora = false;\n+ }\n+\n+ PrintStats(\"INITED\");\n+@@ -785,6 +790,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ Corpus.NumInputsThatTouchFocusFunction());\n+ }\n+\n++ Printf(\"INFO: corpus size = %d\\n\", Corpus.size());\n++\n+ if (Corpus.empty() && Options.MaxNumberOfRuns) {\n+ Printf(\"ERROR: no interesting inputs were found. \"\n+ \"Is the code instrumented for coverage? Exiting.\\n\");\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+index 9d975bd61fe..ccd0b3dcb56 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+@@ -18,6 +18,7 @@ struct FuzzingOptions {\n+ int Verbosity = 1;\n+ size_t MaxLen = 0;\n+ size_t LenControl = 1000;\n++ bool KeepSeed = false;\n+ int UnitTimeoutSec = 300;\n+ int TimeoutExitCode = 70;\n+ int OOMExitCode = 71;\n+diff --git a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n+index 0e9435ab8fc..dfc642ab6d0 100644\n+--- a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n++++ b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n+@@ -593,7 +593,8 @@ TEST(Corpus, Distribution) {\n+ DataFlowTrace DFT;\n+ Random Rand(0);\n+ struct EntropicOptions Entropic = {false, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ size_t N = 10;\n+ size_t TriesPerUnit = 1<<16;\n+ for (size_t i = 0; i < N; i++)\n+@@ -1057,7 +1058,8 @@ TEST(Entropic, UpdateFrequency) {\n+ size_t Index;\n+ // Create input corpus with default entropic configuration\n+ struct EntropicOptions Entropic = {true, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ std::unique_ptr<InputInfo> II(new InputInfo());\n+\n+ C->AddRareFeature(FeatIdx1);\n+@@ -1094,7 +1096,8 @@ double SubAndSquare(double X, double Y) {\n+ TEST(Entropic, ComputeEnergy) {\n+ const double Precision = 0.01;\n+ struct EntropicOptions Entropic = {true, 0xFF, 100};\n+- std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", 
Entropic));\n++ bool KeepSeed = false;\n++ std::unique_ptr<InputCorpus> C(new InputCorpus(\"\", Entropic, KeepSeed));\n+ std::unique_ptr<InputInfo> II(new InputInfo());\n+ Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs = {{1, 3}, {2, 3}, {3, 3}};\n+ II->FeatureFreqs = FeatureFreqs;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_keepseed/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-01\n+ fuzzers:\n+ - afl\n+ - aflfast\n+ - aflplusplus\n+ - aflplusplus_optimal\n+ - aflplusplus_qemu\n+ - afl_qemu\n+ - aflsmart\n+ - eclipser\n+ - entropic\n+ - fairfuzz\n+ - fastcgs_lm\n+ - honggfuzz\n+ - honggfuzz_qemu\n+ - lafintel\n+ - libfuzzer\n+ - manul\n+ - mopt\n+ - libfuzzer_keepseed\n+ - entropic_keepseed\n+\n- experiment: 2020-07-30\nfuzzers:\n- libfuzzer\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add temporary libfuzzer/entropic variants for evaluating the impact of keeping all seed inputs. (#614) |
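The keep_seed patch above boils down to one scheduling change: a seed input retains non-zero selection weight even when it contributes no new features. A minimal Python sketch of the patched vanilla-schedule weighting, for illustration only (the dict fields are hypothetical stand-ins for the C++ InputInfo members):

def vanilla_schedule_weights(inputs, keep_seed):
    # Mirrors the patched loop in InputCorpus: weight is (i + 1), boosted
    # 1000x for inputs touching the focus function, and zero only when an
    # input has no features AND is not a seed kept by -keep_seed=1.
    weights = []
    for i, inp in enumerate(inputs):
        if inp['num_features'] or (keep_seed and inp['seed_input']):
            weights.append((i + 1) * (1000 if inp['has_focus_function'] else 1))
        else:
            weights.append(0.0)
    return weights

With keep_seed=True, a coverage-less seed such as {'num_features': 0, 'seed_input': True, 'has_focus_function': False} at index i keeps weight i + 1 instead of dropping to zero, which is exactly what keeps it in the mutation rotation.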
258,371 | 03.08.2020 19:25:45 | 14,400 | 96c0ccd7893e8fb070de56b17644df9f8bba1717 | Use -summary-only flag for coverage measuring | [
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -163,6 +163,7 @@ def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\n})\nsnapshot_measurer = SnapshotMeasurer(fuzzer, benchmark, trial_id,\nsnapshot_logger)\n+ snapshot_measurer.generate_summary(0, summary_only=False)\nnew_covered_regions = snapshot_measurer.get_current_covered_regions()\ncovered_regions[key] = covered_regions[key].union(new_covered_regions)\nq.put(covered_regions)\n@@ -536,21 +537,29 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\nself.logger.error(\n'Coverage profdata generation failed for cycle: %d.', cycle)\n- def generate_summary(self, cycle: int):\n+ def generate_summary(self, cycle: int, summary_only=True):\n\"\"\"Transform the .profdata file into json form.\"\"\"\ncoverage_binary = get_coverage_binary(self.benchmark)\ncommand = [\n'llvm-cov', 'export', '-format=text', coverage_binary,\n'-instr-profile=%s' % self.profdata_file\n]\n+\n+ if summary_only:\n+ command.append('-summary-only')\n+\nwith open(self.cov_summary_file, 'w') as output_file:\nresult = new_process.execute(command,\noutput_file=output_file,\nexpect_zero=False)\nif result.retcode != 0:\n+ if cycle != 0:\nself.logger.error(\n'Coverage summary json file generation failed for \\\ncycle: %d.', cycle)\n+ else:\n+ self.logger.error(\n+ 'Coverage summary json file generation failed in the end.')\ndef generate_coverage_information(self, cycle: int):\n\"\"\"Generate the .profdata file and then transform it into\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_measurer.py",
"new_path": "experiment/test_measurer.py",
"diff": "@@ -163,7 +163,7 @@ def test_generate_summary(mocked_get_coverage_binary, mocked_execute,\nexpected = [\n'llvm-cov', 'export', '-format=text',\n'/work/coverage-binaries/benchmark-a/fuzz-target',\n- '-instr-profile=/reports/data.profdata'\n+ '-instr-profile=/reports/data.profdata', '-summary-only'\n]\nassert (len(mocked_execute.call_args_list)) == 1\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Use -summary-only flag for coverage measuring (#622) |
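The -summary-only flag appended above is a standard llvm-cov option: it drops per-region detail from the exported JSON, which is much cheaper to generate and parse during per-cycle measurement. A hedged sketch of the equivalent standalone invocation (paths are illustrative, not FuzzBench's real layout):

import subprocess

def export_coverage(binary, profdata, out_path, summary_only=True):
    # llvm-cov export writes JSON to stdout; -summary-only keeps totals
    # and per-file summaries but omits the region/segment lists.
    command = ['llvm-cov', 'export', '-format=text', binary,
               '-instr-profile=%s' % profdata]
    if summary_only:
        command.append('-summary-only')
    with open(out_path, 'w') as output_file:
        return subprocess.run(command, stdout=output_file, check=False)

Calling export_coverage(..., summary_only=False) reproduces the full-region export that region measurement still needs, matching the generate_summary(0, summary_only=False) call added in the patch.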
258,388 | 04.08.2020 15:58:28 | 25,200 | da808b6b3c4798522f60e5683e8aba3cfa842c77 | Fix make debug- | [
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -58,7 +58,7 @@ def _print_makefile_run_template(image):\nif run_type == 'test-run':\nprint('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\nif run_type == 'debug':\n- print('\\t-entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\nelse:\nprint('\\t', end='')\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Fix make debug- (#628) |
258,370 | 05.08.2020 11:37:53 | 14,400 | 6058b6c0433a5ce82e4b4f20af65e28dc90700fb | Fix Make issue: Ctrl-C out of run-benchmark | [
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -59,6 +59,8 @@ def _print_makefile_run_template(image):\nprint('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\nif run_type == 'debug':\nprint('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ elif run_type == 'run':\n+ print('\\t-it ', end='')\nelse:\nprint('\\t', end='')\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Fix Make issue: Ctrl-C out of run-benchmark (#630) |
258,388 | 05.08.2020 08:42:14 | 25,200 | 43b38e82633717b6e1d5b5e04d811f33b3aabb01 | [CI] Cache Python packages
Transient pip install errors are a large source of spurious CI
failures. Most of these seem to be related to downloading packages.
Caching should therefore cut down on a lot of spurious CI failures.
Caching also speeds up CI by ~10 seconds. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -66,6 +66,20 @@ jobs:\nwith:\npython-version: 3.7\n+ # Copied from:\n+ # https://docs.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions\n+ - name: Cache pip\n+ uses: actions/cache@v2\n+ with:\n+ # This path is specific to Ubuntu.\n+ path: ~/.cache/pip\n+ # Look to see if there is a cache hit for the corresponding requirements\n+ # file.\n+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}\n+ restore-keys: |\n+ ${{ runner.os }}-pip-\n+ ${{ runner.os }}-\n+\n- name: Install dependencies\nrun: |\nmake install-dependencies\n"
},
{
"change_type": "MODIFY",
"old_path": ".github/workflows/presubmit.yml",
"new_path": ".github/workflows/presubmit.yml",
"diff": "@@ -16,6 +16,20 @@ jobs:\nwith:\npython-version: 3.7\n+ # Copied from:\n+ # https://docs.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions\n+ - name: Cache pip\n+ uses: actions/cache@v2\n+ with:\n+ # This path is specific to Ubuntu.\n+ path: ~/.cache/pip\n+ # Look to see if there is a cache hit for the corresponding requirements\n+ # file.\n+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}\n+ restore-keys: |\n+ ${{ runner.os }}-pip-\n+ ${{ runner.os }}-\n+\n- name: Install dependencies\nrun: |\nmake install-dependencies\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [CI] Cache Python packages (#631)
Transient pip install errors are a large source of spurious CI
failures. Most of these seem to be related to downloading packages.
Caching should therefore cut down on a lot of spurious CI failures.
Caching also speeds up CI by ~10 seconds. |
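The cache key in these workflow steps is runner.os plus a digest of requirements.txt, so the cache is invalidated exactly when pinned dependencies change and is otherwise restored via the prefixed restore-keys. A rough Python equivalent of the key derivation (assuming hashFiles uses SHA-256, which is what GitHub documents; the helper itself is hypothetical):

import hashlib

def pip_cache_key(runner_os, requirements_path='requirements.txt'):
    # Any edit to requirements.txt changes the digest, forcing a cache
    # miss that falls back to the '<os>-pip-' and '<os>-' restore keys.
    with open(requirements_path, 'rb') as handle:
        digest = hashlib.sha256(handle.read()).hexdigest()
    return '%s-pip-%s' % (runner_os, digest)

print(pip_cache_key('Linux'))  # e.g. Linux-pip-9f86d081...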
258,388 | 05.08.2020 19:20:07 | 25,200 | cdeebaa3c47299231b8fd4fa42f7d139841b3e72 | Support running experiments without seeds or dictionaries.
This adds two new flags to run_experiment: --no-seeds and --no-dictionaries.
When passed, they cause trials run as part of the experiment not to use seed corpus files or dictionaries (respectively) even if they are provided by the benchmark.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "experiment/resources/runner-startup-script-template.sh",
"new_path": "experiment/resources/runner-startup-script-template.sh",
"diff": "@@ -36,6 +36,8 @@ docker run \\\n-e EXPERIMENT={{experiment}} \\\n-e TRIAL_ID={{trial_id}} \\\n-e MAX_TOTAL_TIME={{max_total_time}} \\\n+-e NO_SEEDS={{no_seeds}} \\\n+-e NO_DICTIONARIES={{no_dictionaries}} \\\n-e DOCKER_REGISTRY={{docker_registry}} {% if not local_experiment %}-e CLOUD_PROJECT={{cloud_project}} -e CLOUD_COMPUTE_ZONE={{cloud_compute_zone}} {% endif %}\\\n-e EXPERIMENT_FILESTORE={{experiment_filestore}} {% if local_experiment %}-v {{experiment_filestore}}:{{experiment_filestore}} {% endif %}\\\n-e REPORT_FILESTORE={{report_filestore}} {% if local_experiment %}-v {{report_filestore}}:{{report_filestore}} {% endif %}\\\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "@@ -37,7 +37,6 @@ from common import logs\nfrom common import new_process\nfrom common import utils\nfrom common import yaml_utils\n-from src_analysis import experiment_changes\nBENCHMARKS_DIR = os.path.join(utils.ROOT_DIR, 'benchmarks')\nFUZZERS_DIR = os.path.join(utils.ROOT_DIR, 'fuzzers')\n@@ -199,8 +198,13 @@ def get_git_hash():\nreturn output.strip().decode('utf-8')\n-def start_experiment(experiment_name: str, config_filename: str,\n- benchmarks: List[str], fuzzers: List[str]):\n+def start_experiment( # pylint: disable=too-many-arguments\n+ experiment_name: str,\n+ config_filename: str,\n+ benchmarks: List[str],\n+ fuzzers: List[str],\n+ no_seeds=False,\n+ no_dictionaries=False):\n\"\"\"Start a fuzzer benchmarking experiment.\"\"\"\ncheck_no_local_changes()\n@@ -212,6 +216,8 @@ def start_experiment(experiment_name: str, config_filename: str,\nconfig['benchmarks'] = ','.join(benchmarks)\nconfig['experiment'] = experiment_name\nconfig['git_hash'] = get_git_hash()\n+ config['no_seeds'] = no_seeds\n+ config['no_dictionaries'] = no_dictionaries\nset_up_experiment_config_file(config)\n@@ -440,36 +446,35 @@ def main():\n'--experiment-name',\nhelp='Experiment name.',\nrequired=True)\n- fuzzers_group = parser.add_mutually_exclusive_group()\n- fuzzers_group.add_argument('-f',\n+ parser.add_argument('-f',\n'--fuzzers',\nhelp='Fuzzers to use.',\nnargs='+',\nrequired=False,\ndefault=None,\nchoices=all_fuzzers)\n- fuzzers_group.add_argument('-cf',\n- '--changed-fuzzers',\n- help=('Use fuzzers that have changed since the '\n- 'last experiment. The last experiment is '\n- 'determined by the database your '\n- 'experiment uses, not necessarily the '\n- 'fuzzbench service'),\n- action='store_true',\n- required=False)\n+ parser.add_argument('-ns',\n+ '--no-seeds',\n+ help='Should trials be conducted without seed corpora.',\n+ required=False,\n+ default=False,\n+ action='store_true')\n+ parser.add_argument('-nd',\n+ '--no-dictionaries',\n+ help='Should trials be conducted without dictionaries.',\n+ required=False,\n+ default=False,\n+ action='store_true')\nargs = parser.parse_args()\n-\n- if args.changed_fuzzers:\n- fuzzers = experiment_changes.get_fuzzers_changed_since_last()\n- if not fuzzers:\n- logs.error('No fuzzers changed since last experiment. Exiting.')\n- return 1\n- else:\nfuzzers = args.fuzzers or all_fuzzers\n- start_experiment(args.experiment_name, args.experiment_config,\n- args.benchmarks, fuzzers)\n+ start_experiment(args.experiment_name,\n+ args.experiment_config,\n+ args.benchmarks,\n+ fuzzers,\n+ no_seeds=args.no_seeds,\n+ no_dictionaries=args.no_dictionaries)\nreturn 0\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/runner.py",
"new_path": "experiment/runner.py",
"diff": "@@ -65,11 +65,20 @@ fuzzer_errored_out = False # pylint:disable=invalid-name\ndef _clean_seed_corpus(seed_corpus_dir):\n- \"\"\"Moves seed corpus files from sub-directories into the corpus directory\n- root. Also, deletes any files that exceed the 1 MB limit.\"\"\"\n+ \"\"\"Prepares |seed_corpus_dir| for the trial. This ensures that it can be\n+ used by AFL which is picky about the seed corpus. Moves seed corpus files\n+ from sub-directories into the corpus directory root. Also, deletes any files\n+ that exceed the 1 MB limit. If the NO_SEEDS env var is specified than the\n+ seed corpus files are deleted.\"\"\"\nif not os.path.exists(seed_corpus_dir):\nreturn\n+ if environment.get('NO_SEEDS'):\n+ logs.info('NO_SEEDS specified, deleting seed corpus files.')\n+ shutil.rmtree(seed_corpus_dir)\n+ os.mkdir(seed_corpus_dir)\n+ return\n+\nfailed_to_move_files = []\nfor root, _, files in os.walk(seed_corpus_dir):\nfor filename in files:\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/scheduler.py",
"new_path": "experiment/scheduler.py",
"diff": "@@ -733,7 +733,9 @@ def render_startup_script_template(instance_name: str, fuzzer: str,\n'fuzz_target': fuzz_target,\n'docker_image_url': docker_image_url,\n'docker_registry': experiment_config['docker_registry'],\n- 'local_experiment': local_experiment\n+ 'local_experiment': local_experiment,\n+ 'no_seeds': experiment_config['no_seeds'],\n+ 'no_dictionaries': experiment_config['no_dictionaries'],\n}\nif not local_experiment:\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_data/experiment-config.yaml",
"new_path": "experiment/test_data/experiment-config.yaml",
"diff": "@@ -24,3 +24,5 @@ cloud_sql_instance_connection_name: \"fuzzbench:us-central1:experiment-db=tcp:543\nbenchmarks: \"benchmark-1,benchmark-2\"\nfuzzers: \"fuzzer-a,fuzzer-b\"\ngit_hash: \"git-hash\"\n+no_seeds: false\n+no_dictionaries: false\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_runner.py",
"new_path": "experiment/test_runner.py",
"diff": "@@ -297,6 +297,18 @@ class TestIntegrationRunner:\nmocked_error.assert_not_called()\n+def test_clean_seed_corpus_no_seeds(fs):\n+ \"\"\"Test that seed corpus files are deleted if NO_SEEDS is set in the\n+ environment to 'True'.\"\"\"\n+ seed_corpus_dir = '/seeds'\n+ fs.create_dir(seed_corpus_dir)\n+ seed_file = os.path.join(seed_corpus_dir, 'a')\n+ fs.create_file(seed_file, contents='abc')\n+ runner._clean_seed_corpus(seed_corpus_dir) # pylint: disable=protected-access\n+ assert not os.path.exists(seed_file)\n+ assert os.path.exists(seed_corpus_dir)\n+\n+\ndef test_clean_seed_corpus(fs):\n\"\"\"Test that seed corpus files are moved to root directory and deletes files\nexceeding 1 MB limit.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "@@ -108,6 +108,8 @@ docker run \\\\\n-e EXPERIMENT=test-experiment \\\\\n-e TRIAL_ID=9 \\\\\n-e MAX_TOTAL_TIME=86400 \\\\\n+-e NO_SEEDS=False \\\\\n+-e NO_DICTIONARIES=False \\\\\n-e DOCKER_REGISTRY=gcr.io/fuzzbench -e CLOUD_PROJECT=fuzzbench -e CLOUD_COMPUTE_ZONE=us-central1-a \\\\\n-e EXPERIMENT_FILESTORE=gs://experiment-data \\\\\n-e REPORT_FILESTORE=gs://web-reports \\\\\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/test_utils.py",
"new_path": "fuzzers/test_utils.py",
"diff": "@@ -62,9 +62,9 @@ def test_dictionary_options_file_with_dict(fs):\nassert utils.get_dictionary_path('/fuzz-target') == '/fuzz.dict'\n-def test_dictionary_skip(fs, environ):\n- \"\"\"Test that None is return when SKIP_DICT is set.\"\"\"\n- os.environ['SKIP_DICT'] = '1'\n+def test_dictionary_no_dictionaries(fs, environ):\n+ \"\"\"Test that None is return when NO_DICTIONARIES is set.\"\"\"\n+ os.environ['NO_DICTIONARIES'] = '1'\nfs.create_file('/fuzz-target.dict', contents='A')\nassert utils.get_dictionary_path('/fuzz-target') is None\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/utils.py",
"new_path": "fuzzers/utils.py",
"diff": "@@ -117,7 +117,8 @@ def restore_directory(directory):\ndef get_dictionary_path(target_binary):\n\"\"\"Return dictionary path for a target binary.\"\"\"\n- if os.getenv('SKIP_DICT'):\n+ if os.getenv('NO_DICTIONARIES'):\n+ # Don't use dictionaries if experiment specifies not to.\nreturn None\ndictionary_path = target_binary + '.dict'\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Support running experiments without seeds or dictionaries. (#633)
This adds two new flags to run_experiment: --no-seeds and --no-dictionaries.
When passed, they cause trials run as part of the experiment not to use seed corpus files or dictionaries (respectively) even if they are provided by the benchmark.
Fixes #127 |
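Both switches flow from run_experiment arguments into NO_SEEDS and NO_DICTIONARIES container environment variables, and each consumer checks its own variable. A condensed, illustrative sketch of the two gates (trimmed from the patched helpers; note the real runner reads NO_SEEDS through common.environment.get, which parses 'True'/'False' strings, whereas os.getenv below treats any non-empty value as set):

import os
import shutil

def clean_seed_corpus(seed_corpus_dir):
    # With NO_SEEDS set, the trial starts from an empty corpus directory
    # even if the benchmark ships seeds.
    if os.getenv('NO_SEEDS'):
        shutil.rmtree(seed_corpus_dir)
        os.mkdir(seed_corpus_dir)

def get_dictionary_path(target_binary):
    # With NO_DICTIONARIES set, benchmark-provided dictionaries are
    # ignored; otherwise look for <target>.dict next to the binary.
    # (The real helper also consults the .options file; omitted here.)
    if os.getenv('NO_DICTIONARIES'):
        return None
    dictionary_path = target_binary + '.dict'
    return dictionary_path if os.path.exists(dictionary_path) else None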
258,388 | 06.08.2020 09:33:50 | 25,200 | 519113937a43d79b33ec14b1537f8babdd83e1c6 | Fix issue with presubmit in CI
Fixes the failures occurring here:
Basically, somehow the branch we are on became shallow, so make it unshallow in order to diff it against origin/master | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -60,7 +60,7 @@ jobs:\nsteps:\n- uses: actions/checkout@v2\n- run: | # Needed for git diff to work.\n- git fetch origin master --depth 1\n+ git fetch origin master --unshallow\ngit symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master\n- name: Clear unnecessary files\n"
},
{
"change_type": "MODIFY",
"old_path": ".github/workflows/presubmit.yml",
"new_path": ".github/workflows/presubmit.yml",
"diff": "@@ -8,7 +8,7 @@ jobs:\nsteps:\n- uses: actions/checkout@v2\n- run: | # Needed for presubmit to work.\n- git fetch origin master --depth 1\n+ git fetch origin master --unshallow\ngit symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master\n- name: Setup Python environment\n"
},
{
"change_type": "MODIFY",
"old_path": "src_analysis/diff_utils.py",
"new_path": "src_analysis/diff_utils.py",
"diff": "@@ -52,7 +52,7 @@ def get_changed_files(commit_name: str = 'origin...') -> List[str]:\npass\nraise DiffError((\n'\"%s\" failed.\\n'\n- 'Please run \"git fetch origin master && '\n+ 'Please run \"git fetch origin master --unshallow && '\n'git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master\" '\n'and try again.\\n'\n'Please file an issue if this doesn\\'t fix things.') %\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Fix issue with presubmit in CI (#636)
Fixes the failures occurring here: https://github.com/google/fuzzbench/pull/632/checks?check_run_id=954303324
Basically, somehow the branch we are on became shallow, so make it unshallow in order to diff it against origin/master |
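The remediation in the updated error message can also be scripted. A small, hypothetical helper that runs the same two git commands (not part of diff_utils; note git refuses --unshallow on an already-complete clone, so the first call is deliberately best-effort):

import subprocess

def unshallow_and_point_head():
    # Fetch full master history so 'git diff origin...' has a base,
    # tolerating the error git raises when the repo is already complete.
    subprocess.run(['git', 'fetch', 'origin', 'master', '--unshallow'],
                   check=False)
    # Make origin/HEAD resolve to origin/master, as presubmit expects.
    subprocess.run(['git', 'symbolic-ref', 'refs/remotes/origin/HEAD',
                    'refs/remotes/origin/master'], check=True)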
258,370 | 06.08.2020 13:44:19 | 14,400 | a0fb95b518cb727f58d21c8fe96592c7249a0ec3 | Use single source of truth for building images for Local and GCB build systems | [
{
"change_type": "MODIFY",
"old_path": "docker/gcb/base-images.yaml",
"new_path": "docker/gcb/base-images.yaml",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-\n-# The order of execution is as follows:\n-# 1. Pull base-image.\n-# 2. After pulling base-image completes: Build base-image.\n-# This is optimized to take advantage of concurrency and caching as much as\n-# possible. When nothing changes, this does build takes two minutes to complete\n-# instead of ten.'\n-# See https://cloud.google.com/cloud-build/docs/speeding-up-builds for a discussion\n-# on using caching.\n-# See https://cloud.google.com/cloud-build/docs/configuring-builds/configure-build-step-order\n-# for a discussion on paralellism/concurrency for build steps in GCB.\n-\n-steps:\n-\n-# Pull base-image to fill cache.\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/base-image || exit 0\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- # Use two tags so that the image builds properly and we can push it to the\n- # correct location.\n- '--tag',\n- 'gcr.io/fuzzbench/base-image',\n-\n- '--tag',\n- '${_REPO}/base-image:${_EXPERIMENT}',\n-\n- '--cache-from',\n- '${_REPO}/base-image',\n-\n- 'docker/base-image'\n- ]\n- id: 'base-image'\n+# This file is used by GCB to build base images everytime a commit is\n+# made to the master branch.\nimages:\n- - '${_REPO}/base-image:${_EXPERIMENT}'\n- - '${_REPO}/base-image'\n+- gcr.io/fuzzbench/base-image:test-experiment\n+- gcr.io/fuzzbench/base-image\n+steps:\n+- args:\n+ - build\n+ - --tag\n+ - gcr.io/fuzzbench/base-image\n+ - --tag\n+ - gcr.io/fuzzbench/base-image:test-experiment\n+ - --cache-from\n+ - gcr.io/fuzzbench/base-image\n+ - --build-arg\n+ - BUILDKIT_INLINE_CACHE=1\n+ - --file\n+ - docker/base-image/Dockerfile\n+ - docker/base-image\n+ env: DOCKER_BUILDKIT=1\n+ id: base-image\n+ name: gcr.io/cloud-builders/docker\n"
},
{
"change_type": "DELETE",
"old_path": "docker/gcb/coverage.yaml",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-steps:\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/benchmark/${_BENCHMARK} || exit 0\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- # Use two tags so that the image builds properly and we can push it to the\n- # correct location.\n- '--tag',\n- 'gcr.io/fuzzbench/builders/benchmark/${_BENCHMARK}',\n-\n- '--tag',\n- '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- '--file=benchmarks/${_BENCHMARK}/Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- 'benchmarks/${_BENCHMARK}',\n- ]\n- id: 'build-project-builder'\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/coverage/${_BENCHMARK}-intermediate || exit 0\n- id: 'pull-coverage-benchmark-builder-intermediate'\n- wait_for: ['-']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- # Use two tags so that the image builds properly and we can push it to the\n- # correct location.\n- '--tag',\n- 'gcr.io/fuzzbench/builders/coverage/${_BENCHMARK}-intermediate',\n-\n- '--tag',\n- '${_REPO}/builders/coverage/${_BENCHMARK}-intermediate:${_EXPERIMENT}',\n-\n- '--file=fuzzers/coverage/builder.Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/coverage/${_BENCHMARK}-intermediate',\n-\n- '--build-arg',\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/${_BENCHMARK}',\n-\n- 'fuzzers/coverage',\n- ]\n- id: 'build-coverage-benchmark-builder-intermediate'\n- wait_for: ['build-project-builder', 'pull-coverage-benchmark-builder-intermediate']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/coverage/${_BENCHMARK} || exit 0\n- id: 'pull-coverage-benchmark-builder'\n- wait_for: ['-']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- '--tag',\n- 'gcr.io/fuzzbench/builders/coverage/${_BENCHMARK}',\n-\n- '--tag',\n- '${_REPO}/builders/coverage/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- '--file=docker/benchmark-builder/Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/coverage/${_BENCHMARK}',\n-\n- '--build-arg',\n- 'parent_image=gcr.io/fuzzbench/builders/coverage/${_BENCHMARK}-intermediate',\n-\n- '--build-arg',\n- 'fuzzer=coverage',\n-\n- '--build-arg',\n- 'benchmark=${_BENCHMARK}',\n-\n- '.',\n- ]\n- wait_for: ['pull-coverage-benchmark-builder', 'build-coverage-benchmark-builder-intermediate']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'run',\n- '-v',\n- '/workspace/out:/host-out',\n- '${_REPO}/builders/coverage/${_BENCHMARK}:${_EXPERIMENT}',\n- '/bin/bash',\n- '-c',\n- 'cd /out; tar -czvf /host-out/coverage-build-${_BENCHMARK}.tar.gz *'\n- ]\n-\n-- name: 'gcr.io/cloud-builders/gsutil'\n- args: [\n- '-m',\n- 'cp',\n- '/workspace/out/coverage-build-${_BENCHMARK}.tar.gz',\n- 
'${_GCS_COVERAGE_BINARIES_DIR}/',\n- ]\n-\n-images:\n- - '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}'\n- - '${_REPO}/builders/benchmark/${_BENCHMARK}'\n- - '${_REPO}/builders/coverage/${_BENCHMARK}-intermediate:${_EXPERIMENT}'\n- - '${_REPO}/builders/coverage/${_BENCHMARK}-intermediate'\n- - '${_REPO}/builders/coverage/${_BENCHMARK}:${_EXPERIMENT}'\n- - '${_REPO}/builders/coverage/${_BENCHMARK}'\n"
},
{
"change_type": "DELETE",
"old_path": "docker/gcb/fuzzer.yaml",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-steps:\n-\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/benchmark/${_BENCHMARK} || exit 0\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- # Use two tags so that the image builds properly and we can push it to the\n- # correct location.\n- '--tag',\n- 'gcr.io/fuzzbench/builders/benchmark/${_BENCHMARK}',\n-\n- '--tag',\n- '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- '--file=benchmarks/${_BENCHMARK}/Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- 'benchmarks/${_BENCHMARK}',\n- ]\n- id: 'build-project-builder'\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate || exit 0\n- id: 'pull-fuzzer-benchmark-builder-intermediate'\n- wait_for: ['-']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- '--tag',\n- 'gcr.io/fuzzbench/builders/${_FUZZER}/${_BENCHMARK}-intermediate',\n-\n- '--tag',\n- '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate:${_EXPERIMENT}',\n-\n- '--file=fuzzers/${_FUZZER}/builder.Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate',\n-\n- # Use a hardcoded repo because the parent image is pinned by SHA. 
Users\n- # won't have it.\n- '--build-arg',\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/${_BENCHMARK}',\n-\n- 'fuzzers/${_FUZZER}',\n- ]\n- id: 'build-fuzzer-benchmark-builder-intermediate'\n- wait_for: ['pull-fuzzer-benchmark-builder-intermediate', 'build-project-builder']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/${_FUZZER}/${_BENCHMARK} || exit 0\n- id: 'pull-fuzzer-benchmark-builder'\n- wait_for: ['-']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- '--tag',\n- 'gcr.io/fuzzbench/builders/${_FUZZER}/${_BENCHMARK}',\n-\n- '--tag',\n- '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- '--file=docker/benchmark-builder/Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}',\n-\n- '--build-arg',\n- 'parent_image=gcr.io/fuzzbench/builders/${_FUZZER}/${_BENCHMARK}-intermediate',\n-\n- '--build-arg',\n- 'fuzzer=${_FUZZER}',\n-\n- '--build-arg',\n- 'benchmark=${_BENCHMARK}',\n-\n- '.',\n- ]\n- id: 'build-fuzzer-benchmark-builder'\n- wait_for: ['pull-fuzzer-benchmark-builder', 'build-fuzzer-benchmark-builder-intermediate']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate || exit 0\n- id: 'pull-fuzzer-benchmark-runner-intermediate'\n- wait_for: ['-']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- '--tag',\n- 'gcr.io/fuzzbench/runners/${_FUZZER}/${_BENCHMARK}-intermediate',\n-\n- '--tag',\n- '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}-intermediate:${_EXPERIMENT}',\n-\n- '--file',\n- 'fuzzers/${_FUZZER}/runner.Dockerfile',\n-\n- '--cache-from',\n- '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}-intermediate',\n-\n- 'fuzzers/${_FUZZER}',\n- ]\n- id: 'build-fuzzer-benchmark-runner-intermediate'\n- wait_for: ['pull-fuzzer-benchmark-runner-intermediate', 'build-fuzzer-benchmark-builder']\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- entrypoint: 'bash'\n- args:\n- - '-c'\n- - |\n- docker pull ${_REPO}/builders/${_FUZZER}/${_BENCHMARK} || exit 0\n- id: 'pull-fuzzer-benchmark-runner'\n- wait_for: ['-']\n-\n-\n-- name: 'gcr.io/cloud-builders/docker'\n- args: [\n- 'build',\n-\n- '--tag',\n- 'gcr.io/fuzzbench/runners/${_FUZZER}/${_BENCHMARK}',\n-\n- '--tag',\n- '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}:${_EXPERIMENT}',\n-\n- '--build-arg',\n- 'fuzzer=${_FUZZER}',\n-\n- '--cache-from',\n- '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}',\n-\n- '--build-arg',\n- 'benchmark=${_BENCHMARK}',\n-\n- '--file',\n- 'docker/benchmark-runner/Dockerfile',\n-\n- '.',\n- ]\n- wait_for: ['pull-fuzzer-benchmark-runner', 'build-fuzzer-benchmark-runner-intermediate']\n-\n-images:\n- - '${_REPO}/builders/benchmark/${_BENCHMARK}:${_EXPERIMENT}'\n- - '${_REPO}/builders/benchmark/${_BENCHMARK}'\n- - '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate:${_EXPERIMENT}'\n- - '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}-intermediate'\n- - '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}:${_EXPERIMENT}'\n- - '${_REPO}/builders/${_FUZZER}/${_BENCHMARK}'\n- - '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}-intermediate:${_EXPERIMENT}'\n- - '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}-intermediate'\n- - '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}:${_EXPERIMENT}'\n- - '${_REPO}/runners/${_FUZZER}/${_BENCHMARK}'\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -34,7 +34,8 @@ def _print_benchmark_fuzz_target(benchmarks):\ndef _print_makefile_run_template(image):\n- fuzzer, benchmark = image['tag'].split('/')[1:]\n+ fuzzer = image['fuzzer']\n+ benchmark = image['benchmark']\nfor run_type in ('run', 'debug', 'test-run'):\nprint(('{run_type}-{fuzzer}-{benchmark}: ' +\n@@ -68,8 +69,8 @@ def _print_makefile_run_template(image):\nprint()\n-# TODO(tanq16): Add unit test.\n-def _print_rules_for_image(name, image):\n+# TODO(Tanq16): Function must return a string as opposed to printing it.\n+def print_rules_for_image(name, image):\n\"\"\"Print makefile section for given image to stdout.\"\"\"\nif not ('base' in name or 'dispatcher' in name):\nprint('.', end='')\n@@ -110,7 +111,7 @@ def main():\n_print_benchmark_fuzz_target(benchmarks)\nfor name, image in buildable_images.items():\n- _print_rules_for_image(name, image)\n+ print_rules_for_image(name, image)\n# Print build targets for all fuzzer-benchmark pairs (including coverage).\nfuzzers.append('coverage')\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "# dependency graph.\n'base-image':\n- tag: 'base-image'\n+ dockerfile: 'docker/base-image/Dockerfile'\ncontext: 'docker/base-image'\n+ tag: 'base-image'\n+ type: 'base'\n'dispatcher-image':\n- tag: 'dispatcher-image'\n- context: 'docker/dispatcher-image'\ndepends_on:\n- 'base-image'\n+ dockerfile: 'docker/dispatcher-image/Dockerfile'\n+ context: 'docker/dispatcher-image'\n+ tag: 'dispatcher-image'\n+ type: 'dispatcher'\n'coverage-{benchmark}-builder-intermediate':\n- tag: 'builders/coverage/{benchmark}-intermediate'\n- context: 'fuzzers/coverage'\n- dockerfile: 'fuzzers/coverage/builder.Dockerfile'\n- depends_on:\n- - '{benchmark}-project-builder'\nbuild_arg:\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\n+ depends_on:\n+ - '{benchmark}-project-builder'\n+ dockerfile: 'fuzzers/coverage/builder.Dockerfile'\n+ context: 'fuzzers/coverage'\n+ tag: 'builders/coverage/{benchmark}-intermediate'\n+ type: 'coverage'\n'coverage-{benchmark}-builder':\n- tag: 'builders/coverage/{benchmark}'\n- context: '.'\n- dockerfile: 'docker/benchmark-builder/Dockerfile'\nbuild_arg:\n- - 'parent_image=gcr.io/fuzzbench/builders/coverage/{benchmark}-intermediate'\n- - 'fuzzer=coverage'\n- 'benchmark={benchmark}'\n+ - 'fuzzer=coverage'\n+ - 'parent_image=gcr.io/fuzzbench/builders/coverage/{benchmark}-intermediate'\ndepends_on:\n- 'coverage-{benchmark}-builder-intermediate'\n+ dockerfile: 'docker/benchmark-builder/Dockerfile'\n+ context: '.'\n+ tag: 'builders/coverage/{benchmark}'\n+ type: 'coverage'\n# TODO: It would be better to call this benchmark builder. But that would be\n# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n# that and then rename this.\n'{benchmark}-project-builder':\n- tag: 'builders/benchmark/{benchmark}'\n- context: 'benchmarks/{benchmark}'\ndockerfile: 'benchmarks/{benchmark}/Dockerfile'\n+ context: 'benchmarks/{benchmark}'\n+ tag: 'builders/benchmark/{benchmark}'\n+ type: 'builder'\n'{fuzzer}-{benchmark}-builder-intermediate':\n- tag: 'builders/{fuzzer}/{benchmark}-intermediate'\n- context: 'fuzzers/{fuzzer}'\n- dockerfile: 'fuzzers/{fuzzer}/builder.Dockerfile'\n- depends_on:\n- - '{benchmark}-project-builder'\nbuild_arg:\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\n+ depends_on:\n+ - '{benchmark}-project-builder'\n+ dockerfile: 'fuzzers/{fuzzer}/builder.Dockerfile'\n+ context: 'fuzzers/{fuzzer}'\n+ tag: 'builders/{fuzzer}/{benchmark}-intermediate'\n+ type: 'builder'\n'{fuzzer}-{benchmark}-builder':\n- tag: 'builders/{fuzzer}/{benchmark}'\n- context: '.'\n- dockerfile: 'docker/benchmark-builder/Dockerfile'\nbuild_arg:\n- - 'parent_image=gcr.io/fuzzbench/builders/{fuzzer}/{benchmark}-intermediate'\n- - 'fuzzer={fuzzer}'\n- 'benchmark={benchmark}'\n+ - 'fuzzer={fuzzer}'\n+ - 'parent_image=gcr.io/fuzzbench/builders/{fuzzer}/{benchmark}-intermediate'\ndepends_on:\n- '{fuzzer}-{benchmark}-builder-intermediate'\n+ dockerfile: 'docker/benchmark-builder/Dockerfile'\n+ context: '.'\n+ tag: 'builders/{fuzzer}/{benchmark}'\n+ type: 'builder'\n'{fuzzer}-{benchmark}-intermediate-runner':\n- tag: 'runners/{fuzzer}/{benchmark}-intermediate'\n- context: 'fuzzers/{fuzzer}'\n- dockerfile: 'fuzzers/{fuzzer}/runner.Dockerfile'\ndepends_on:\n- '{fuzzer}-{benchmark}-builder'\n+ dockerfile: 'fuzzers/{fuzzer}/runner.Dockerfile'\n+ context: 'fuzzers/{fuzzer}'\n+ tag: 'runners/{fuzzer}/{benchmark}-intermediate'\n+ type: 'runner'\n'{fuzzer}-{benchmark}-runner':\n- tag: 'runners/{fuzzer}/{benchmark}'\n- context: '.'\n- dockerfile: 
'docker/benchmark-runner/Dockerfile'\n+ benchmark: '{benchmark}'\nbuild_arg:\n- - 'fuzzer={fuzzer}'\n- 'benchmark={benchmark}'\n+ - 'fuzzer={fuzzer}'\ndepends_on:\n- '{fuzzer}-{benchmark}-intermediate-runner'\n+ dockerfile: 'docker/benchmark-runner/Dockerfile'\n+ fuzzer: '{fuzzer}'\n+ context: '.'\n+ tag: 'runners/{fuzzer}/{benchmark}'\n+ type: 'runner'\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docker/test_generate_makefile.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Tests for generate_makefile.py.\"\"\"\n+\n+from unittest.mock import call\n+from unittest.mock import patch\n+\n+from docker import generate_makefile\n+\n+\n+@patch('builtins.print')\n+def test_print_makefile_build(mocked_print):\n+ \"\"\"Tests result of a makefile generation for an image.\"\"\"\n+\n+ name = 'afl-zlib-builder-intermediate'\n+ image = {\n+ 'tag': 'builders/afl/zlib-intermediate',\n+ 'context': 'fuzzers/afl',\n+ 'dockerfile': 'fuzzers/afl/builder.Dockerfile',\n+ 'depends_on': ['zlib-project-builder'],\n+ 'build_arg': ['parent_image=gcr.io/fuzzbench/builders/benchmark/zlib']\n+ }\n+\n+ generate_makefile.print_rules_for_image(name, image)\n+ assert mocked_print.mock_calls == [\n+ call('.', end=''),\n+ call('afl-zlib-builder-intermediate:', end=''),\n+ call(' .zlib-project-builder', end=''),\n+ call(),\n+ call('\\tdocker build \\\\'),\n+ call('\\t--tag gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\'),\n+ call('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\'),\n+ call('\\t--cache-from gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\'\n+ ),\n+ call('\\t--build-arg parent_image=gcr.io/' +\n+ 'fuzzbench/builders/benchmark/zlib \\\\'),\n+ call('\\t--file fuzzers/afl/builder.Dockerfile \\\\'),\n+ call('\\tfuzzers/afl'),\n+ call()\n+ ]\n+\n+\n+@patch('builtins.print')\n+def test_print_makefile_runner_image(mocked_print):\n+ \"\"\"Tests result of a makefile generation for a runner image.\"\"\"\n+\n+ name = 'afl-zlib-runner'\n+ image = {\n+ 'tag': 'runners/afl/zlib',\n+ 'fuzzer': 'afl',\n+ 'benchmark': 'zlib',\n+ 'context': '.',\n+ 'dockerfile': 'docker/benchmark-runner/Dockerfile',\n+ 'build_arg': ['fuzzer=afl', 'benchmark=zlib'],\n+ 'depends_on': ['afl-zlib-builder', 'afl-zlib-intermediate-runner']\n+ }\n+\n+ generate_makefile.print_rules_for_image(name, image)\n+\n+ assert mocked_print.mock_calls == [\n+ call('.', end=''),\n+ call('afl-zlib-runner:', end=''),\n+ call(' .afl-zlib-builder', end=''),\n+ call(' .afl-zlib-intermediate-runner', end=''),\n+ call(),\n+ call('\\tdocker build \\\\'),\n+ call('\\t--tag gcr.io/fuzzbench/runners/afl/zlib \\\\'),\n+ call('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\'),\n+ call('\\t--cache-from gcr.io/fuzzbench/runners/afl/zlib \\\\'),\n+ call('\\t--build-arg fuzzer=afl \\\\'),\n+ call('\\t--build-arg benchmark=zlib \\\\'),\n+ call('\\t--file docker/benchmark-runner/Dockerfile \\\\'),\n+ call('\\t.'),\n+ call(),\n+ call('run-afl-zlib: .afl-zlib-runner'),\n+ call('\\\n+\\tdocker run \\\\\\n\\\n+\\t--cpus=1 \\\\\\n\\\n+\\t--cap-add SYS_NICE \\\\\\n\\\n+\\t--cap-add SYS_PTRACE \\\\\\n\\\n+\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n+\\t-e FORCE_LOCAL=1 \\\\\\n\\\n+\\t-e TRIAL_ID=1 \\\\\\n\\\n+\\t-e FUZZER=afl \\\\\\n\\\n+\\t-e BENCHMARK=zlib \\\\\\n\\\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+'),\n+ call('\\t-it ', end=''),\n+ call('gcr.io/fuzzbench/runners/afl/zlib'),\n+ call(),\n+ call('debug-afl-zlib: .afl-zlib-runner'),\n+ 
call('\\\n+\\tdocker run \\\\\\n\\\n+\\t--cpus=1 \\\\\\n\\\n+\\t--cap-add SYS_NICE \\\\\\n\\\n+\\t--cap-add SYS_PTRACE \\\\\\n\\\n+\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n+\\t-e FORCE_LOCAL=1 \\\\\\n\\\n+\\t-e TRIAL_ID=1 \\\\\\n\\\n+\\t-e FUZZER=afl \\\\\\n\\\n+\\t-e BENCHMARK=zlib \\\\\\n\\\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+'),\n+ call('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end=''),\n+ call('gcr.io/fuzzbench/runners/afl/zlib'),\n+ call(),\n+ call('test-run-afl-zlib: .afl-zlib-runner'),\n+ call('\\\n+\\tdocker run \\\\\\n\\\n+\\t--cpus=1 \\\\\\n\\\n+\\t--cap-add SYS_NICE \\\\\\n\\\n+\\t--cap-add SYS_PTRACE \\\\\\n\\\n+\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n+\\t-e FORCE_LOCAL=1 \\\\\\n\\\n+\\t-e TRIAL_ID=1 \\\\\\n\\\n+\\t-e FUZZER=afl \\\\\\n\\\n+\\t-e BENCHMARK=zlib \\\\\\n\\\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+'),\n+ call('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\'),\n+ call('\\t', end=''),\n+ call('gcr.io/fuzzbench/runners/afl/zlib'),\n+ call()\n+ ]\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/gcb_build.py",
"new_path": "experiment/build/gcb_build.py",
"diff": "# limitations under the License.\n\"\"\"Module for building things on Google Cloud Build for use in trials.\"\"\"\n-import os\n+import tempfile\nfrom typing import Dict\n-from common import experiment_path as exp_path\n-from common import experiment_utils\nfrom common import logs\nfrom common import new_process\nfrom common import utils\n+from common import yaml_utils\nfrom experiment.build import build_utils\n+from experiment.build import docker_images\n+from experiment.build import generate_cloudbuild\nBUILDER_STEP_IDS = [\n'build-fuzzer-builder',\n@@ -39,31 +40,40 @@ GCB_MACHINE_TYPE = 'n1-highcpu-8'\nlogger = logs.Logger('builder') # pylint: disable=invalid-name\n+def _get_buildable_images(fuzzer=None, benchmark=None):\n+ return docker_images.get_images_to_build([fuzzer], [benchmark])\n+\n+\ndef build_base_images():\n\"\"\"Build base images on GCB.\"\"\"\n- _build(get_build_config_file('base-images.yaml'), 'base-images')\n+ image_templates = {'base-image': _get_buildable_images()['base-image']}\n+ config = generate_cloudbuild.create_cloud_build_spec(image_templates,\n+ build_base_images=True)\n+ _build(config, 'base-images')\ndef build_coverage(benchmark):\n\"\"\"Build coverage image for benchmark on GCB.\"\"\"\n- coverage_binaries_dir = exp_path.filestore(\n- build_utils.get_coverage_binaries_dir())\n- substitutions = {\n- '_GCS_COVERAGE_BINARIES_DIR': coverage_binaries_dir,\n- '_BENCHMARK': benchmark,\n+ buildable_images = _get_buildable_images(benchmark=benchmark)\n+ image_templates = {\n+ image_name: image_specs\n+ for image_name, image_specs in buildable_images.items()\n+ if image_specs['type'] in 'coverage'\n}\n- config_file = get_build_config_file('coverage.yaml')\n+ config = generate_cloudbuild.create_cloud_build_spec(image_templates,\n+ benchmark=benchmark)\nconfig_name = 'benchmark-{benchmark}-coverage'.format(benchmark=benchmark)\n- _build(config_file, config_name, substitutions)\n+ _build(config, config_name)\n-def _build(config_file: str,\n+def _build(config: Dict,\nconfig_name: str,\n- substitutions: Dict[str, str] = None,\ntimeout_seconds: int = GCB_BUILD_TIMEOUT\n) -> new_process.ProcessResult:\n- \"\"\"Build each of |args| on gcb.\"\"\"\n- config_arg = '--config=%s' % config_file\n+ \"\"\"Submit build to GCB.\"\"\"\n+ with tempfile.NamedTemporaryFile() as config_file:\n+ yaml_utils.write(config_file.name, config)\n+ config_arg = '--config=%s' % config_file.name\nmachine_type_arg = '--machine-type=%s' % GCB_MACHINE_TYPE\n# Use \"s\" suffix to denote seconds.\n@@ -79,21 +89,6 @@ def _build(config_file: str,\nmachine_type_arg,\n]\n- if substitutions is None:\n- substitutions = {}\n-\n- assert '_REPO' not in substitutions\n- substitutions['_REPO'] = experiment_utils.get_base_docker_tag()\n-\n- assert '_EXPERIMENT' not in substitutions\n- substitutions['_EXPERIMENT'] = experiment_utils.get_experiment_name()\n-\n- substitutions = [\n- '%s=%s' % (key, value) for key, value in substitutions.items()\n- ]\n- substitutions = ','.join(substitutions)\n- command.append('--substitutions=%s' % substitutions)\n-\n# Don't write to stdout to make concurrent building faster. 
Otherwise\n# writing becomes the bottleneck.\nresult = new_process.execute(command,\n@@ -104,19 +99,16 @@ def _build(config_file: str,\nreturn result\n-def get_build_config_file(filename: str) -> str:\n- \"\"\"Return the path of the GCB build config file |filename|.\"\"\"\n- return os.path.join(utils.ROOT_DIR, 'docker', 'gcb', filename)\n-\n-\n-def build_fuzzer_benchmark(fuzzer: str, benchmark: str) -> bool:\n+def build_fuzzer_benchmark(fuzzer: str, benchmark: str):\n\"\"\"Builds |benchmark| for |fuzzer|.\"\"\"\n- substitutions = {\n- '_BENCHMARK': benchmark,\n- '_FUZZER': fuzzer,\n- }\n- config_file = get_build_config_file('fuzzer.yaml')\n+ image_templates = {}\n+ buildable_images = _get_buildable_images(fuzzer=fuzzer, benchmark=benchmark)\n+ for image_name, image_specs in buildable_images.items():\n+ if image_specs['type'] in ('base', 'coverage', 'dispatcher'):\n+ continue\n+ image_templates[image_name] = image_specs\n+ config = generate_cloudbuild.create_cloud_build_spec(image_templates)\nconfig_name = 'benchmark-{benchmark}-fuzzer-{fuzzer}'.format(\nbenchmark=benchmark, fuzzer=fuzzer)\n- _build(config_file, config_name, substitutions)\n+ _build(config, config_name)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/generate_cloudbuild.py",
"new_path": "experiment/build/generate_cloudbuild.py",
"diff": "# limitations under the License.\n\"\"\"Generates Cloud Build specification\"\"\"\n-import argparse\n-import json\n-import yaml\n-\n-from experiment.build import docker_images\n-\n-\n-# TODO(Tanq16): Add unit test for this.\n-def create_cloud_build_spec(buildable_images, docker_registry):\n- \"\"\"Returns Cloud Build specificatiion.\"\"\"\n-\n- cloud_build_spec = {}\n- cloud_build_spec['steps'] = []\n- cloud_build_spec['images'] = []\n-\n- for name, image in buildable_images.items():\n- step = {}\n- step['id'] = name\n- step['name'] = 'gcr.io/cloud-builders/docker'\n- step['args'] = []\n- step['args'] += ['--tag', image['tag']]\n- step['args'] += ['--cache-from', docker_registry + image['tag']]\n- step['args'] += ['--build-arg', 'BUILDKIT_INLINE_CACHE=1']\n- if 'build_arg' in image:\n- for build_arg in image['build_arg']:\n+import os\n+import posixpath\n+\n+from common import yaml_utils\n+from common import experiment_utils\n+from common import experiment_path as exp_path\n+from common.utils import ROOT_DIR\n+from experiment.build import build_utils\n+\n+DOCKER_REGISTRY = 'gcr.io/fuzzbench'\n+\n+\n+def get_experiment_tag_for_image(image_specs, tag_by_experiment=True):\n+ \"\"\"Returns the registry with the experiment tag for given image.\"\"\"\n+ tag = posixpath.join(experiment_utils.get_base_docker_tag(),\n+ image_specs['tag'])\n+ if tag_by_experiment:\n+ tag += ':' + experiment_utils.get_experiment_name()\n+ return tag\n+\n+\n+def coverage_steps(benchmark):\n+ \"\"\"Returns GCB run steps for coverage builds.\"\"\"\n+ coverage_binaries_dir = exp_path.filestore(\n+ build_utils.get_coverage_binaries_dir())\n+ steps = [{\n+ 'name':\n+ 'gcr.io/cloud-builders/docker',\n+ 'args': [\n+ 'run', '-v', '/workspace/out:/host-out',\n+ posixpath.join(experiment_utils.get_base_docker_tag(), 'builders',\n+ 'coverage', benchmark) + ':' +\n+ experiment_utils.get_experiment_name(), '/bin/bash', '-c',\n+ 'cd /out; tar -czvf /host-out/coverage-build-' + benchmark +\n+ '.tar.gz *'\n+ ]\n+ }]\n+ step = {'name': 'gcr.io/cloud-builders/gsutil'}\n+ step['args'] = [\n+ '-m', 'cp', '/workspace/out/coverage-build-' + benchmark + '.tar.gz',\n+ coverage_binaries_dir + '/'\n+ ]\n+ steps.append(step)\n+ return steps\n+\n+\n+def create_cloud_build_spec(image_templates,\n+ benchmark='',\n+ build_base_images=False):\n+ \"\"\"Generates Cloud Build specification.\n+\n+ Args:\n+ image_templates: Image types and their properties.\n+ benchmark: Name of benchmark (required for coverage builds only).\n+ build_base_images: True if building only base images.\n+\n+ Returns:\n+ GCB build steps.\n+ \"\"\"\n+ cloud_build_spec = {'steps': [], 'images': []}\n+\n+ for image_name, image_specs in image_templates.items():\n+ step = {\n+ 'id': image_name,\n+ 'env': 'DOCKER_BUILDKIT=1',\n+ 'name': 'gcr.io/cloud-builders/docker'\n+ }\n+ step['args'] = [\n+ 'build', '--tag',\n+ posixpath.join(DOCKER_REGISTRY, image_specs['tag']), '--tag',\n+ get_experiment_tag_for_image(image_specs), '--cache-from',\n+ get_experiment_tag_for_image(image_specs, tag_by_experiment=False),\n+ '--build-arg', 'BUILDKIT_INLINE_CACHE=1'\n+ ]\n+ for build_arg in image_specs.get('build_arg', []):\nstep['args'] += ['--build-arg', build_arg]\n- if 'dockerfile' in image:\n- step['args'] += ['--file', image['dockerfile']]\n- step['args'] += [image['context']]\n- if 'depends_on' in image:\n+\n+ step['args'] += [\n+ '--file', image_specs['dockerfile'], image_specs['context']\n+ ]\nstep['wait_for'] = []\n- for dep in image['depends_on']:\n- step['wait_for'] += [dep]\n- 
cloud_build_spec['images'].append(name)\n+ for dependency in image_specs.get('depends_on', []):\n+ # Base images are built before creating fuzzer benchmark builds,\n+ # so it's not required to wait for them to build.\n+ if 'base' in dependency and not build_base_images:\n+ continue\n+ step['wait_for'] += [dependency]\n+\ncloud_build_spec['steps'].append(step)\n+ cloud_build_spec['images'].append(\n+ get_experiment_tag_for_image(image_specs))\n+ cloud_build_spec['images'].append(\n+ get_experiment_tag_for_image(image_specs, tag_by_experiment=False))\n+\n+ if any(image_specs['type'] in 'coverage'\n+ for _, image_specs in image_templates.items()):\n+ cloud_build_spec['steps'] += coverage_steps(benchmark)\nreturn cloud_build_spec\ndef main():\n- \"\"\"Generates Cloud Build specification.\"\"\"\n- parser = argparse.ArgumentParser(description='GCB spec generator.')\n- parser.add_argument('-r',\n- '--docker-registry',\n- default='gcr.io/fuzzbench/',\n- help='Docker registry to use.')\n- args = parser.parse_args()\n-\n- # TODO(Tanq16): Create fuzzer/benchmark list dynamically.\n- fuzzers = ['afl', 'libfuzzer']\n- benchmarks = ['libxml', 'libpng']\n- buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n- cloud_build_spec = create_cloud_build_spec(buildable_images,\n- args.docker_registry)\n- # Build spec can be yaml or json, use whichever:\n- # https://cloud.google.com/cloud-build/docs/configuring-builds/create-basic-configuration\n- print(yaml.dump(cloud_build_spec))\n- print(json.dumps(cloud_build_spec))\n+ \"\"\"Write base-images build spec when run from command line.\"\"\"\n+ image_templates = yaml_utils.read(\n+ os.path.join(ROOT_DIR, 'docker', 'image_types.yaml'))\n+ base_images_spec = create_cloud_build_spec(\n+ {'base-image': image_templates['base-image']}, build_base_images=True)\n+ base_images_spec_file = os.path.join(ROOT_DIR, 'docker', 'gcb',\n+ 'base-images.yaml')\n+ yaml_utils.write(base_images_spec_file, base_images_spec)\nif __name__ == '__main__':\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/build/test_generate_cloudbuild.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Tests for generate_cloudbuild.py.\"\"\"\n+\n+import os\n+\n+from unittest.mock import patch\n+\n+from experiment.build import generate_cloudbuild\n+\n+\[email protected](os.environ, {\n+ 'CLOUD_PROJECT': 'fuzzbench',\n+ 'EXPERIMENT': 'test-experiment'\n+})\n+def test_generate_cloud_build_spec():\n+ \"\"\"Tests result of a makefile generation for an image.\"\"\"\n+\n+ image = {\n+ 'afl-zlib-builder-intermediate': {\n+ 'build_arg': [\n+ 'parent_image=gcr.io/fuzzbench/builders/benchmark/zlib'\n+ ],\n+ 'depends_on': ['zlib-project-builder'],\n+ 'dockerfile': 'fuzzers/afl/builder.Dockerfile',\n+ 'context': 'fuzzers/afl',\n+ 'tag': 'builders/afl/zlib-intermediate',\n+ 'type': 'builder'\n+ }\n+ }\n+\n+ generated_spec = generate_cloudbuild.create_cloud_build_spec(image)\n+\n+ expected_spec = {\n+ 'steps': [{\n+ 'id': 'afl-zlib-builder-intermediate',\n+ 'env': 'DOCKER_BUILDKIT=1',\n+ 'name': 'gcr.io/cloud-builders/docker',\n+ 'args': [\n+ 'build', '--tag',\n+ 'gcr.io/fuzzbench/builders/afl/zlib-intermediate', '--tag',\n+ 'gcr.io/fuzzbench/builders/afl/zlib-intermediate'\n+ ':test-experiment', '--cache-from',\n+ 'gcr.io/fuzzbench/builders/afl/zlib-intermediate',\n+ '--build-arg', 'BUILDKIT_INLINE_CACHE=1', '--build-arg',\n+ 'parent_image=gcr.io/fuzzbench/builders/benchmark/zlib',\n+ '--file', 'fuzzers/afl/builder.Dockerfile', 'fuzzers/afl'\n+ ],\n+ 'wait_for': ['zlib-project-builder']\n+ }],\n+ 'images': [\n+ 'gcr.io/fuzzbench/builders/afl/zlib-intermediate:test-experiment',\n+ 'gcr.io/fuzzbench/builders/afl/zlib-intermediate'\n+ ]\n+ }\n+\n+ assert generated_spec == expected_spec\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Use single source of truth for building images for Local and GCB build systems (#615) |
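Editor's sketch of the pattern in the record above: builds are driven by image templates (tag, dockerfile, context, depends_on) instead of hand-written GCB config files. This is a minimal, hypothetical version of that generation step, assuming templates shaped like the `docker/image_types.yaml` entries that appear later in this log; the registry name and the sample template are illustrative, not the project's real values.

```python
import posixpath

REGISTRY = 'gcr.io/example-project'  # hypothetical registry


def make_cloudbuild_spec(image_templates, experiment):
    """Turn image templates into a Cloud Build spec (steps + images)."""
    spec = {'steps': [], 'images': []}
    for name, info in image_templates.items():
        tag = posixpath.join(REGISTRY, info['tag'])
        spec['steps'].append({
            'id': name,
            'name': 'gcr.io/cloud-builders/docker',
            'args': [
                'build', '--tag', tag,
                '--tag', tag + ':' + experiment,
                '--cache-from', tag,  # reuse layers from the last build
                '--file', info['dockerfile'], info['context'],
            ],
            # Run this step only after the images it depends on are built.
            'wait_for': list(info.get('depends_on', [])),
        })
        spec['images'].append(tag)
    return spec


sample = {
    'afl-zlib-builder': {
        'tag': 'builders/afl/zlib',
        'dockerfile': 'fuzzers/afl/builder.Dockerfile',
        'context': 'fuzzers/afl',
        'depends_on': ['zlib-project-builder'],
    },
}
print(make_cloudbuild_spec(sample, 'test-experiment'))
```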
258,370 | 07.08.2020 16:24:01 | 14,400 | b431926ac716b378b416725956c3b801cb5f6e6d | Test fix - BuildKit Fails on CI
Fix | [
{
"change_type": "MODIFY",
"old_path": "docker/benchmark-runner/Dockerfile",
"new_path": "docker/benchmark-runner/Dockerfile",
"diff": "@@ -54,13 +54,15 @@ ENV WORKDIR /out\nRUN mkdir -p $WORKDIR\nWORKDIR $WORKDIR\n+# The argument needs to be re-declared otherwise it returns an empty string.\n+ARG fuzzer\n# Copy over all the build artifacts (without * to preserve directory structure).\n# This also copies seed and dictionary files if they are available.\nCOPY --from=builder /out/ ./\n# Copy the fuzzer.py file.\n-COPY --from=builder /src/fuzzer.py .\n+COPY fuzzers/$fuzzer/fuzzer.py .\n# Copy the fuzzers directory.\n-COPY --from=builder /src/fuzzers fuzzers\n+COPY fuzzers/ ./fuzzers\n# Create empty __init__.py to allow python deps to work.\nRUN touch __init__.py\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Test fix - BuildKit Fails on CI (#632)
Fix |
258,396 | 07.08.2020 19:28:29 | 25,200 | 8d4cb49cee270687f4222c2e97815a5e2786e7ca | [libFuzzer] Add (libfuzzer|entropic)_fixcrossover variants to test whether fixing the CrossOver mutator gives an average input size decrease (and perhaps a throughput increase). | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -46,8 +46,10 @@ jobs:\n- aflplusplus_seek\n- libfuzzer_interceptors\n- libfuzzer_keepseed\n+ - libfuzzer_fixcrossover\n- entropic_interceptors\n- entropic_keepseed\n+ - entropic_fixcrossover\nbenchmark_type:\n- oss-fuzz\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_fixcrossover/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project && \\\n+ git checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch.diff && \\\n+ cd /llvm-project/compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /libEntropic.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_fixcrossover/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.entropic_interceptors import fuzzer as entropic_fuzzer\n+from fuzzers.libfuzzer_interceptors import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ entropic_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus,\n+ output_corpus,\n+ target_binary,\n+ extra_flags=['-entropic=1'])\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_fixcrossover/patch.diff",
"diff": "+commit df6a841a7c8b1c540f0e3c31c3697a11b2a00f51\n+Author: Dokyung Song <[email protected]>\n+Date: Wed Aug 5 23:12:19 2020 +0000\n+\n+ [libFuzzer] Fix arguments of InsertPartOf/CopyPartOf calls in CrossOver mutator.\n+\n+ The CrossOver mutator is meant to cross over two given buffers\n+ (referred to as the first/second buffer below). Previously\n+ InsertPartOf/CopyPartOf calls used in the CrossOver mutator\n+ incorrectly inserted/copied part of the second buffer into a \"scratch\n+ buffer\" (MutateInPlaceHere of the size CurrentMaxMutationLen), rather\n+ than the first buffer. This is not intended behavior, because the\n+ scratch buffer does not always (i) contain the content of the first\n+ buffer, and (ii) have the same size as the first buffer;\n+ CurrentMaxMutationLen is typically a lot larger than the size of the\n+ first buffer. This patch fixes the issue by using the first buffer\n+ instead of the scratch buffer in InsertPartOf/CopyPartOf calls.\n+\n+ This patch also adds two new tests, namely \"cross_over_insert\" and\n+ \"cross_over_copy\", which specifically target InsertPartOf and\n+ CopyPartOf, respectively.\n+\n+ - cross_over_insert.test checks if the fuzzer can use InsertPartOf to\n+ trigger the crash.\n+\n+ - cross_over_copy.test checks if the fuzzer can use CopyPartOf to\n+ trigger the crash.\n+\n+ These newly added tests were designed to pass with the current patch,\n+ but not without the it (with b216c80cc2496b87bf827260ce7e24dc62247d71\n+ these tests do no pass). To achieve this, -max_len was intentionally\n+ given a high value. Without this patch, InsertPartOf/CopyPartOf will\n+ generate larger inputs, possibly with unpredictable data in it,\n+ thereby failing to trigger the crash.\n+\n+ The test pass condition for these new tests is narrowed down by (i)\n+ limiting mutation depth to 1 (i.e., a single CrossOver mutation should\n+ be able to trigger the crash) and (ii) checking whether the mutation\n+ sequence of \"CrossOver-\" leads to the crash.\n+\n+ Also note that these newly added tests and an existing test\n+ (cross_over.test) all use \"-reduce_inputs=0\" flags to prevent reducing\n+ inputs; it's easier to force the fuzzer to keep original input string\n+ this way than tweaking cov-instrumented basic blocks in the source\n+ code of the fuzzer executable.\n+\n+ Differential Revision: https://reviews.llvm.org/D85554\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..df9ada45bb0 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -425,26 +425,26 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,\n+ if (!CrossOverWith) return 0;\n+ const Unit &O = *CrossOverWith;\n+ if (O.empty()) return 0;\n+- MutateInPlaceHere.resize(MaxSize);\n+- auto &U = MutateInPlaceHere;\n+ size_t NewSize = 0;\n+ switch(Rand(3)) {\n+ case 0:\n+- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());\n++ MutateInPlaceHere.resize(MaxSize);\n++ NewSize = CrossOver(Data, Size, O.data(), O.size(),\n++ MutateInPlaceHere.data(), MaxSize);\n++ memcpy(Data, MutateInPlaceHere.data(), NewSize);\n+ break;\n+ case 1:\n+- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);\n++ NewSize = InsertPartOf(O.data(), O.size(), Data, Size, MaxSize);\n+ if (!NewSize)\n+- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ case 2:\n+- NewSize = 
CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ default: assert(0);\n+ }\n+ assert(NewSize > 0 && \"CrossOver returned empty unit\");\n+ assert(NewSize <= MaxSize && \"CrossOver returned overisized unit\");\n+- memcpy(Data, U.data(), NewSize);\n+ return NewSize;\n+ }\n+\n+diff --git a/compiler-rt/test/fuzzer/CrossOverTest.cpp b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+index a7643570a92..3ca53a8a851 100644\n+--- a/compiler-rt/test/fuzzer/CrossOverTest.cpp\n++++ b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+@@ -4,10 +4,10 @@\n+\n+ // Test for a fuzzer. The fuzzer must find the string\n+ // ABCDEFGHIJ\n+-// We use it as a test for CrossOver functionality\n+-// by passing two inputs to it:\n+-// ABCDE00000\n+-// ZZZZZFGHIJ\n++// We use it as a test for each of CrossOver functionalities\n++// by passing the following sets of two inputs to it:\n++// {ABCDEHIJ, ZFG} to test InsertPartOf\n++// {ABCDE00HIJ, ZFG} to test CopyPartOf\n+ //\n+ #include <assert.h>\n+ #include <cstddef>\n+@@ -16,6 +16,17 @@\n+ #include <iostream>\n+ #include <ostream>\n+\n++#ifndef INPUT_A\n++#define INPUT_A \"ABCDE00000\"\n++#endif\n++\n++#ifndef INPUT_B\n++#define INPUT_B \"ZZZZZFGHIJ\"\n++#endif\n++\n++const char *InputA = INPUT_A;\n++const char *InputB = INPUT_B;\n++\n+ static volatile int Sink;\n+ static volatile int *NullPtr;\n+\n+@@ -42,13 +53,11 @@ static const uint32_t ExpectedHash = 0xe1677acb;\n+\n+ extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n+ // fprintf(stderr, \"ExpectedHash: %x\\n\", ExpectedHash);\n+- if (Size != 10) return 0;\n+- if (*Data == 'A')\n++ if (Size == 10 && ExpectedHash == simple_hash(Data, Size))\n++ *NullPtr = 0;\n++ if (*Data == InputA[0])\n+ Sink++;\n+- if (*Data == 'Z')\n++ if (*Data == InputB[0])\n+ Sink--;\n+- if (ExpectedHash == simple_hash(Data, Size))\n+- *NullPtr = 0;\n+ return 0;\n+ }\n+-\n+diff --git a/compiler-rt/test/fuzzer/cross_over.test b/compiler-rt/test/fuzzer/cross_over.test\n+index 058b5eb2c85..64e06e8cd36 100644\n+--- a/compiler-rt/test/fuzzer/cross_over.test\n++++ b/compiler-rt/test/fuzzer/cross_over.test\n+@@ -12,7 +12,7 @@ RUN: echo -n ABCDE00000 > %t-corpus/A\n+ RUN: echo -n ZZZZZFGHIJ > %t-corpus/B\n+\n+\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 %t-corpus\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus\n+\n+ # Test the same thing but using -seed_inputs instead of passing the corpus dir.\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n+diff --git a/compiler-rt/test/fuzzer/cross_over_copy.test b/compiler-rt/test/fuzzer/cross_over_copy.test\n+new file mode 100644\n+index 00000000000..f8f45c974e2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_copy.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver CopyPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE00HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDE00HIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDE00HIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 
-runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n+diff --git a/compiler-rt/test/fuzzer/cross_over_insert.test b/compiler-rt/test/fuzzer/cross_over_insert.test\n+new file mode 100644\n+index 00000000000..5ad2ff0a633\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_insert.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver InsertPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDEHIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDEHIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_fixcrossover/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_fixcrossover/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project/ && \\\n+ git checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch.diff && \\\n+ cd compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /usr/lib/libFuzzer.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_fixcrossover/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.libfuzzer_interceptors import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ libfuzzer_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus, output_corpus, target_binary)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_fixcrossover/patch.diff",
"diff": "+commit df6a841a7c8b1c540f0e3c31c3697a11b2a00f51\n+Author: Dokyung Song <[email protected]>\n+Date: Wed Aug 5 23:12:19 2020 +0000\n+\n+ [libFuzzer] Fix arguments of InsertPartOf/CopyPartOf calls in CrossOver mutator.\n+\n+ The CrossOver mutator is meant to cross over two given buffers\n+ (referred to as the first/second buffer below). Previously\n+ InsertPartOf/CopyPartOf calls used in the CrossOver mutator\n+ incorrectly inserted/copied part of the second buffer into a \"scratch\n+ buffer\" (MutateInPlaceHere of the size CurrentMaxMutationLen), rather\n+ than the first buffer. This is not intended behavior, because the\n+ scratch buffer does not always (i) contain the content of the first\n+ buffer, and (ii) have the same size as the first buffer;\n+ CurrentMaxMutationLen is typically a lot larger than the size of the\n+ first buffer. This patch fixes the issue by using the first buffer\n+ instead of the scratch buffer in InsertPartOf/CopyPartOf calls.\n+\n+ This patch also adds two new tests, namely \"cross_over_insert\" and\n+ \"cross_over_copy\", which specifically target InsertPartOf and\n+ CopyPartOf, respectively.\n+\n+ - cross_over_insert.test checks if the fuzzer can use InsertPartOf to\n+ trigger the crash.\n+\n+ - cross_over_copy.test checks if the fuzzer can use CopyPartOf to\n+ trigger the crash.\n+\n+ These newly added tests were designed to pass with the current patch,\n+ but not without the it (with b216c80cc2496b87bf827260ce7e24dc62247d71\n+ these tests do no pass). To achieve this, -max_len was intentionally\n+ given a high value. Without this patch, InsertPartOf/CopyPartOf will\n+ generate larger inputs, possibly with unpredictable data in it,\n+ thereby failing to trigger the crash.\n+\n+ The test pass condition for these new tests is narrowed down by (i)\n+ limiting mutation depth to 1 (i.e., a single CrossOver mutation should\n+ be able to trigger the crash) and (ii) checking whether the mutation\n+ sequence of \"CrossOver-\" leads to the crash.\n+\n+ Also note that these newly added tests and an existing test\n+ (cross_over.test) all use \"-reduce_inputs=0\" flags to prevent reducing\n+ inputs; it's easier to force the fuzzer to keep original input string\n+ this way than tweaking cov-instrumented basic blocks in the source\n+ code of the fuzzer executable.\n+\n+ Differential Revision: https://reviews.llvm.org/D85554\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..df9ada45bb0 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -425,26 +425,26 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,\n+ if (!CrossOverWith) return 0;\n+ const Unit &O = *CrossOverWith;\n+ if (O.empty()) return 0;\n+- MutateInPlaceHere.resize(MaxSize);\n+- auto &U = MutateInPlaceHere;\n+ size_t NewSize = 0;\n+ switch(Rand(3)) {\n+ case 0:\n+- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());\n++ MutateInPlaceHere.resize(MaxSize);\n++ NewSize = CrossOver(Data, Size, O.data(), O.size(),\n++ MutateInPlaceHere.data(), MaxSize);\n++ memcpy(Data, MutateInPlaceHere.data(), NewSize);\n+ break;\n+ case 1:\n+- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);\n++ NewSize = InsertPartOf(O.data(), O.size(), Data, Size, MaxSize);\n+ if (!NewSize)\n+- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ case 2:\n+- NewSize = 
CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ default: assert(0);\n+ }\n+ assert(NewSize > 0 && \"CrossOver returned empty unit\");\n+ assert(NewSize <= MaxSize && \"CrossOver returned overisized unit\");\n+- memcpy(Data, U.data(), NewSize);\n+ return NewSize;\n+ }\n+\n+diff --git a/compiler-rt/test/fuzzer/CrossOverTest.cpp b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+index a7643570a92..3ca53a8a851 100644\n+--- a/compiler-rt/test/fuzzer/CrossOverTest.cpp\n++++ b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+@@ -4,10 +4,10 @@\n+\n+ // Test for a fuzzer. The fuzzer must find the string\n+ // ABCDEFGHIJ\n+-// We use it as a test for CrossOver functionality\n+-// by passing two inputs to it:\n+-// ABCDE00000\n+-// ZZZZZFGHIJ\n++// We use it as a test for each of CrossOver functionalities\n++// by passing the following sets of two inputs to it:\n++// {ABCDEHIJ, ZFG} to test InsertPartOf\n++// {ABCDE00HIJ, ZFG} to test CopyPartOf\n+ //\n+ #include <assert.h>\n+ #include <cstddef>\n+@@ -16,6 +16,17 @@\n+ #include <iostream>\n+ #include <ostream>\n+\n++#ifndef INPUT_A\n++#define INPUT_A \"ABCDE00000\"\n++#endif\n++\n++#ifndef INPUT_B\n++#define INPUT_B \"ZZZZZFGHIJ\"\n++#endif\n++\n++const char *InputA = INPUT_A;\n++const char *InputB = INPUT_B;\n++\n+ static volatile int Sink;\n+ static volatile int *NullPtr;\n+\n+@@ -42,13 +53,11 @@ static const uint32_t ExpectedHash = 0xe1677acb;\n+\n+ extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n+ // fprintf(stderr, \"ExpectedHash: %x\\n\", ExpectedHash);\n+- if (Size != 10) return 0;\n+- if (*Data == 'A')\n++ if (Size == 10 && ExpectedHash == simple_hash(Data, Size))\n++ *NullPtr = 0;\n++ if (*Data == InputA[0])\n+ Sink++;\n+- if (*Data == 'Z')\n++ if (*Data == InputB[0])\n+ Sink--;\n+- if (ExpectedHash == simple_hash(Data, Size))\n+- *NullPtr = 0;\n+ return 0;\n+ }\n+-\n+diff --git a/compiler-rt/test/fuzzer/cross_over.test b/compiler-rt/test/fuzzer/cross_over.test\n+index 058b5eb2c85..64e06e8cd36 100644\n+--- a/compiler-rt/test/fuzzer/cross_over.test\n++++ b/compiler-rt/test/fuzzer/cross_over.test\n+@@ -12,7 +12,7 @@ RUN: echo -n ABCDE00000 > %t-corpus/A\n+ RUN: echo -n ZZZZZFGHIJ > %t-corpus/B\n+\n+\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 %t-corpus\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus\n+\n+ # Test the same thing but using -seed_inputs instead of passing the corpus dir.\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n+diff --git a/compiler-rt/test/fuzzer/cross_over_copy.test b/compiler-rt/test/fuzzer/cross_over_copy.test\n+new file mode 100644\n+index 00000000000..f8f45c974e2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_copy.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver CopyPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE00HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDE00HIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDE00HIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 
-runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n+diff --git a/compiler-rt/test/fuzzer/cross_over_insert.test b/compiler-rt/test/fuzzer/cross_over_insert.test\n+new file mode 100644\n+index 00000000000..5ad2ff0a633\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_insert.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver InsertPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDEHIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDEHIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_fixcrossover/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-08\n+ fuzzers:\n+ - libfuzzer_fixcrossover\n+ - entropic_fixcrossover\n+\n- experiment: 2020-08-07\nfuzzers:\n- aflplusplus_datalenrand\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [libFuzzer] Add (libfuzzer|entropic)_fixcrossover variants to test whether fixing the CrossOver mutator gives an average input size decrease (and perhaps a throughput increase). (#644) |
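To make the patched CrossOver behavior above concrete: after the fix, `InsertPartOf`/`CopyPartOf` splice a slice of the second corpus unit directly into the first input (capped at the maximum size) instead of into an oversized scratch buffer. Below is a simplified Python model of those two helpers, not the C++ implementation; the slice-size and RNG choices are approximations.

```python
import random


def copy_part_of(src, dst, rng):
    """Overwrite a random slice of dst with a random slice of src."""
    n = rng.randint(1, min(len(src), len(dst)))
    s = rng.randint(0, len(src) - n)
    d = rng.randint(0, len(dst) - n)
    return dst[:d] + src[s:s + n] + dst[d + n:]  # size of dst is unchanged


def insert_part_of(src, dst, max_size, rng):
    """Insert a random slice of src into dst, or None if dst is already full."""
    if len(dst) >= max_size:
        return None  # mirrors the patch's fallback to CopyPartOf
    n = rng.randint(1, min(len(src), max_size - len(dst)))
    s = rng.randint(0, len(src) - n)
    d = rng.randint(0, len(dst))
    return dst[:d] + src[s:s + n] + dst[d:]


rng = random.Random(1)
first, second = b'ABCDEHIJ', b'ZFG'
# Fixed behavior: the slice of `second` lands in `first` itself, so a
# single crossover can already produce inputs such as b'ABCDEFGHIJ'.
print(insert_part_of(second, first, 10, rng))
print(copy_part_of(second, first, rng))
```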
258,399 | 09.08.2020 21:33:44 | 18,000 | 60a994829c1abc5a38ebaf0a8f4d111b817d9a30 | Add default end to end test config file for testing; prepare for config validation refactoring
The initial version for enqueuing and running image-building jobs in the new architecture. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/presubmit.yml",
"new_path": ".github/workflows/presubmit.yml",
"diff": "@@ -37,4 +37,7 @@ jobs:\n- name: Run presubmit checks\nrun: |\nFUZZBENCH_TEST_INTEGRATION=1 make presubmit\n- # TODO(zhichengcai): Add back end to end test.\n+\n+ - name: Run end to end CI test\n+ run: |\n+ make run-end-to-end-test\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "common/config_utils.py",
"diff": "+#!/usr/bin/env python3\n+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Provides helper functions to obtain configurations.\"\"\"\n+\n+\n+def validate_and_expand(config):\n+ \"\"\"Validates |config| and returns the expanded configuration.\"\"\"\n+ # TODO: move the logic from experiment/run_experiment.py to here.\n+ return config\n"
},
{
"change_type": "MODIFY",
"old_path": "compose/e2e-test.yaml",
"new_path": "compose/e2e-test.yaml",
"diff": "@@ -8,4 +8,8 @@ services:\n- queue-server\nenvironment:\nE2E_INTEGRATION_TEST: 1\n- command: python3 -m pytest -vv fuzzbench/test_e2e_run.py\n+ command: python3 -m pytest -vv fuzzbench/test_e2e/test_e2e_run.py\n+\n+ run-experiment:\n+ environment:\n+ EXPERIMENT_CONFIG: fuzzbench/test_e2e/end-to-end-test-config.yaml\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/fuzzbench/Dockerfile",
"new_path": "docker/fuzzbench/Dockerfile",
"diff": "@@ -31,6 +31,8 @@ COPY benchmarks benchmarks\nCOPY common common\nCOPY database database\nCOPY docker docker\n+COPY experiment/build experiment/build\n+COPY experiment/*.py experiment/\nCOPY fuzzbench fuzzbench\nCOPY fuzzers fuzzers\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "tag: 'dispatcher-image'\ntype: 'dispatcher'\n+# TODO: It would be better to call this benchmark builder. But that would be\n+# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n+# that and then rename this.\n+'{benchmark}-project-builder':\n+ dockerfile: 'benchmarks/{benchmark}/Dockerfile'\n+ context: 'benchmarks/{benchmark}'\n+ tag: 'builders/benchmark/{benchmark}'\n+ type: 'builder'\n+\n'coverage-{benchmark}-builder-intermediate':\nbuild_arg:\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\ntag: 'builders/coverage/{benchmark}'\ntype: 'coverage'\n-# TODO: It would be better to call this benchmark builder. But that would be\n-# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n-# that and then rename this.\n-'{benchmark}-project-builder':\n- dockerfile: 'benchmarks/{benchmark}/Dockerfile'\n- context: 'benchmarks/{benchmark}'\n- tag: 'builders/benchmark/{benchmark}'\n- type: 'builder'\n-\n'{fuzzer}-{benchmark}-builder-intermediate':\nbuild_arg:\n- 'parent_image=gcr.io/fuzzbench/builders/benchmark/{benchmark}'\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/jobs.py",
"new_path": "fuzzbench/jobs.py",
"diff": "@@ -19,14 +19,19 @@ import subprocess\nBASE_TAG = 'gcr.io/fuzzbench'\n-def build_image(name: str):\n+def build_image(image):\n\"\"\"Builds a Docker image and returns whether it succeeds.\"\"\"\n- image_tag = os.path.join(BASE_TAG, name)\n+ image_tag = os.path.join(BASE_TAG, image['tag'])\nsubprocess.run(['docker', 'pull', image_tag], check=True)\n- subprocess.run(\n- ['docker', 'build', '--tag', image_tag,\n- os.path.join('docker', name)],\n- check=True)\n+ command = ['docker', 'build', '--tag', image_tag, image['context']]\n+ cpu_options = ['--cpu-period', '100000', '--cpu-quota', '100000']\n+ command.extend(cpu_options)\n+ if 'dockerfile' in image:\n+ command.extend(['--file', image['dockerfile']])\n+ if 'build_arg' in image:\n+ for arg in image['build_arg']:\n+ command.extend(['--build-arg', arg])\n+ subprocess.run(command, check=True)\nreturn True\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzbench/local-experiment-config.yaml",
"diff": "+benchmarks:\n+ - freetype2-2017\n+ - bloaty_fuzz_target\n+fuzzers:\n+ - afl\n+ - libfuzzer\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzbench/run_experiment.py",
"new_path": "fuzzbench/run_experiment.py",
"diff": "@@ -18,20 +18,34 @@ import time\nimport redis\nimport rq\n+from common import config_utils, environment, yaml_utils\n+from experiment.build import docker_images\nfrom fuzzbench import jobs\n-def run_experiment():\n+def run_experiment(config):\n\"\"\"Main experiment logic.\"\"\"\nprint('Initializing the job queue.')\n# Create the queue for scheduling build jobs and run jobs.\nqueue = rq.Queue('build_n_run_queue')\n+\n+ images_to_build = docker_images.get_images_to_build(config['fuzzers'],\n+ config['benchmarks'])\njobs_list = []\n+ # TODO(#643): topological sort before enqueuing jobs.\n+ for name, image in images_to_build.items():\n+ depends = image.get('depends_on', None)\n+ if depends is not None:\n+ assert len(depends) == 1, 'image %s has %d dependencies. Multiple '\\\n+ 'dependencies are currently not supported.' % (name, len(depends))\njobs_list.append(\n- queue.enqueue(jobs.build_image,\n- 'base-image',\n- job_timeout=600,\n- job_id='base-image'))\n+ queue.enqueue(\n+ jobs.build_image,\n+ image=image,\n+ job_timeout=30 * 60,\n+ result_ttl=-1,\n+ job_id=name,\n+ depends_on=depends[0] if 'depends_on' in image else None))\nwhile True:\nprint('Current status of jobs:')\n@@ -52,8 +66,14 @@ def run_experiment():\ndef main():\n\"\"\"Set up Redis connection and start the experiment.\"\"\"\nredis_connection = redis.Redis(host=\"queue-server\")\n+\n+ config_path = environment.get('EXPERIMENT_CONFIG',\n+ 'fuzzbench/local-experiment-config.yaml')\n+ config = yaml_utils.read(config_path)\n+ config = config_utils.validate_and_expand(config)\n+\nwith rq.Connection(redis_connection):\n- return run_experiment()\n+ return run_experiment(config)\nif __name__ == '__main__':\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzbench/test_e2e/end-to-end-test-config.yaml",
"diff": "+benchmarks:\n+ - bloaty_fuzz_target\n+fuzzers:\n+ - libfuzzer\n"
},
{
"change_type": "RENAME",
"old_path": "fuzzbench/test_e2e_run.py",
"new_path": "fuzzbench/test_e2e/test_e2e_run.py",
"diff": "@@ -19,7 +19,17 @@ import os\nimport pytest\nimport redis\n-from rq.job import Job\n+import rq\n+\n+from common import config_utils, yaml_utils\n+from experiment.build import docker_images\n+\n+\[email protected](scope='class')\n+def experiment_config():\n+ \"\"\"Returns the default configuration for end-to-end testing.\"\"\"\n+ return config_utils.validate_and_expand(\n+ yaml_utils.read('fuzzbench/test_e2e/end-to-end-test-config.yaml'))\[email protected](scope='class')\n@@ -31,17 +41,32 @@ def redis_connection():\n# pylint: disable=no-self-use\[email protected]('E2E_INTEGRATION_TEST' not in os.environ,\nreason='Not running end-to-end test.')\[email protected]('redis_connection')\[email protected]('redis_connection', 'experiment_config')\nclass TestEndToEndRunResults:\n\"\"\"Checks the result of a test experiment run.\"\"\"\n- def test_jobs_dependency(self): # pylint: disable=redefined-outer-name\n+ def test_jobs_dependency(self, experiment_config, redis_connection): # pylint: disable=redefined-outer-name\n\"\"\"Tests that jobs dependency preserves during working.\"\"\"\n- assert True\n+ all_images = docker_images.get_images_to_build(\n+ experiment_config['fuzzers'], experiment_config['benchmarks'])\n+ jobs = {\n+ name: rq.job.Job.fetch(name, connection=redis_connection)\n+ for name in all_images\n+ }\n+ for name, image in all_images.items():\n+ if 'depends_on' in image:\n+ for dep in image['depends_on']:\n+ assert jobs[dep].ended_at <= jobs[name].started_at\n- def test_all_jobs_finished_successfully(self, redis_connection): # pylint: disable=redefined-outer-name\n+ def test_all_jobs_finished_successfully(\n+ self,\n+ experiment_config, # pylint: disable=redefined-outer-name\n+ redis_connection): # pylint: disable=redefined-outer-name\n\"\"\"Tests all jobs finished successully.\"\"\"\n- jobs = Job.fetch_many(['base-image'], connection=redis_connection)\n+ all_images = docker_images.get_images_to_build(\n+ experiment_config['fuzzers'], experiment_config['benchmarks'])\n+ jobs = rq.job.Job.fetch_many(all_images.keys(),\n+ connection=redis_connection)\nfor job in jobs:\nassert job.get_status() == 'finished'\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add default end to end test config file for testing; prepare for config validation refactoring (#550)
The initial version for enqueuing and running image-building jobs in the new architecture. |
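The scheduling pattern in the `run_experiment.py` change above — one rq job per Docker image, chained via `depends_on` — reduces to a few lines. A sketch under these assumptions: a reachable Redis host named `queue-server`, a worker process that can import `build_image`, and an image dict standing in for `docker_images.get_images_to_build`.

```python
import redis
import rq


def build_image(image):
    """Placeholder for the real Docker build step."""
    print('building', image['tag'])


images = {
    'base-image': {'tag': 'base-image'},
    'afl-zlib-builder': {
        'tag': 'builders/afl/zlib',
        'depends_on': ['base-image'],
    },
}

queue = rq.Queue('build_n_run_queue',
                 connection=redis.Redis(host='queue-server'))
jobs = {}
for name, image in images.items():
    deps = image.get('depends_on', [])
    jobs[name] = queue.enqueue(
        build_image,
        image=image,
        job_id=name,
        result_ttl=-1,  # keep finished jobs so the e2e test can inspect them
        # rq starts this job only once its dependency has finished.
        depends_on=jobs[deps[0]] if deps else None,
    )
```

Using the job name as `job_id` is what lets the end-to-end test fetch jobs back by image name and compare `ended_at`/`started_at` timestamps to verify the dependency order held.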
258,388 | 09.08.2020 21:23:20 | 25,200 | c730a34cfaca8c3661103e04b88bf002b6ba9eb4 | [docs] Update gem dependency kramdown | [
{
"change_type": "MODIFY",
"old_path": "docs/Gemfile.lock",
"new_path": "docs/Gemfile.lock",
"diff": "@@ -67,7 +67,7 @@ GEM\njekyll-theme-time-machine (= 0.1.1)\njekyll-titles-from-headings (= 0.5.3)\njemoji (= 0.11.1)\n- kramdown (= 1.17.0)\n+ kramdown (= 2.3.0)\nliquid (= 4.0.3)\nmercenary (~> 0.3)\nminima (= 2.5.1)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [docs] Update gem dependency kramdown (#649) |
258,388 | 10.08.2020 08:10:43 | 25,200 | 8d9452fc5468905616d5bfc839eae08895cdd039 | [docs] Run `bundle update github-pages`
We need to run this to use the new version of kramdown. | [
{
"change_type": "MODIFY",
"old_path": "docs/Gemfile.lock",
"new_path": "docs/Gemfile.lock",
"diff": "GEM\nremote: https://rubygems.org/\nspecs:\n- activesupport (6.0.3.1)\n+ activesupport (6.0.3.2)\nconcurrent-ruby (~> 1.0, >= 1.0.2)\ni18n (>= 0.7, < 2)\nminitest (~> 5.1)\n@@ -16,9 +16,9 @@ GEM\ncolorator (1.1.0)\ncommonmarker (0.17.13)\nruby-enum (~> 0.5)\n- concurrent-ruby (1.1.6)\n- dnsruby (1.61.3)\n- addressable (~> 2.5)\n+ concurrent-ruby (1.1.7)\n+ dnsruby (1.61.4)\n+ simpleidn (~> 0.1)\nem-websocket (0.5.1)\neventmachine (>= 0.12.9)\nhttp_parser.rb (~> 0.6.0)\n@@ -26,14 +26,14 @@ GEM\nffi (>= 1.3.0)\neventmachine (1.2.7)\nexecjs (2.7.0)\n- faraday (1.0.0)\n+ faraday (1.0.1)\nmultipart-post (>= 1.2, < 3)\n- ffi (1.12.1)\n+ ffi (1.13.1)\nforwardable-extended (2.6.0)\ngemoji (3.0.1)\n- github-pages (204)\n+ github-pages (207)\ngithub-pages-health-check (= 1.16.1)\n- jekyll (= 3.8.5)\n+ jekyll (= 3.9.0)\njekyll-avatar (= 0.7.0)\njekyll-coffeescript (= 1.1.1)\njekyll-commonmark-ghpages (= 0.1.6)\n@@ -68,11 +68,12 @@ GEM\njekyll-titles-from-headings (= 0.5.3)\njemoji (= 0.11.1)\nkramdown (= 2.3.0)\n+ kramdown-parser-gfm (= 1.1.0)\nliquid (= 4.0.3)\nmercenary (~> 0.3)\nminima (= 2.5.1)\nnokogiri (>= 1.10.4, < 2.0)\n- rouge (= 3.13.0)\n+ rouge (= 3.19.0)\nterminal-table (~> 1.4)\ngithub-pages-health-check (1.16.1)\naddressable (~> 2.3)\n@@ -80,20 +81,20 @@ GEM\noctokit (~> 4.0)\npublic_suffix (~> 3.0)\ntyphoeus (~> 1.3)\n- html-pipeline (2.12.3)\n+ html-pipeline (2.13.0)\nactivesupport (>= 2)\nnokogiri (>= 1.4)\nhttp_parser.rb (0.6.0)\ni18n (0.9.5)\nconcurrent-ruby (~> 1.0)\n- jekyll (3.8.5)\n+ jekyll (3.9.0)\naddressable (~> 2.4)\ncolorator (~> 1.0)\nem-websocket (~> 0.5)\ni18n (~> 0.7)\njekyll-sass-converter (~> 1.0)\njekyll-watch (~> 2.0)\n- kramdown (~> 1.14)\n+ kramdown (>= 1.17, < 3)\nliquid (~> 4.0)\nmercenary (~> 0.3.3)\npathutil (~> 0.9)\n@@ -191,7 +192,10 @@ GEM\ngemoji (~> 3.0)\nhtml-pipeline (~> 2.2)\njekyll (>= 3.0, < 5.0)\n- kramdown (1.17.0)\n+ kramdown (2.3.0)\n+ rexml\n+ kramdown-parser-gfm (1.1.0)\n+ kramdown (~> 2.0)\nliquid (4.0.3)\nlisten (3.2.1)\nrb-fsevent (~> 0.10, >= 0.10.3)\n@@ -204,21 +208,22 @@ GEM\njekyll-seo-tag (~> 2.1)\nminitest (5.14.1)\nmultipart-post (2.1.1)\n- nokogiri (1.10.9)\n+ nokogiri (1.10.10)\nmini_portile2 (~> 2.4.0)\n- octokit (4.15.0)\n+ octokit (4.18.0)\nfaraday (>= 0.9)\nsawyer (~> 0.8.0, >= 0.5.3)\npathutil (0.16.2)\nforwardable-extended (~> 2.6)\npublic_suffix (3.1.1)\n- rb-fsevent (0.10.3)\n+ rb-fsevent (0.10.4)\nrb-inotify (0.10.1)\nffi (~> 1.0)\n- rouge (3.13.0)\n- ruby-enum (0.7.2)\n+ rexml (3.2.4)\n+ rouge (3.19.0)\n+ ruby-enum (0.8.0)\ni18n\n- rubyzip (2.1.0)\n+ rubyzip (2.3.0)\nsafe_yaml (1.0.5)\nsass (3.7.4)\nsass-listen (~> 4.0.0)\n@@ -228,15 +233,20 @@ GEM\nsawyer (0.8.2)\naddressable (>= 2.3.5)\nfaraday (> 0.8, < 2.0)\n+ simpleidn (0.1.1)\n+ unf (~> 0.1.4)\nterminal-table (1.8.0)\nunicode-display_width (~> 1.1, >= 1.1.1)\nthread_safe (0.3.6)\n- typhoeus (1.3.1)\n+ typhoeus (1.4.0)\nethon (>= 0.9.0)\ntzinfo (1.2.7)\nthread_safe (~> 0.1)\n- unicode-display_width (1.6.1)\n- zeitwerk (2.3.0)\n+ unf (0.1.4)\n+ unf_ext\n+ unf_ext (0.0.7.7)\n+ unicode-display_width (1.7.0)\n+ zeitwerk (2.4.0)\nPLATFORMS\nruby\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [docs] Run `bundle update github-pages` (#652)
We need to run this to use the new version of kramdown. |
258,371 | 10.08.2020 14:43:09 | 14,400 | 127e5816148761db3633fa8ed55127629ab4ecc0 | Generate coverage reports. | [
{
"change_type": "MODIFY",
"old_path": "analysis/benchmark_results.py",
"new_path": "analysis/benchmark_results.py",
"diff": "@@ -58,6 +58,12 @@ class BenchmarkResults: # pylint: disable=too-many-public-methods\ndef _benchmark_snapshot_df(self):\nreturn data_utils.get_benchmark_snapshot(self._benchmark_df)\n+ @property\n+ @functools.lru_cache()\n+ def fuzzers(self):\n+ \"\"\"Fuzzers with valid trials on this benchmark.\"\"\"\n+ return self._benchmark_df.fuzzer.unique()\n+\n@property\ndef fuzzers_with_not_enough_samples(self):\n\"\"\"Fuzzers with not enough samples.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/generate_report.py",
"new_path": "analysis/generate_report.py",
"diff": "@@ -27,6 +27,8 @@ from analysis import rendering\nfrom common import filesystem\nfrom common import logs\n+logger = logs.Logger('generate_report')\n+\ndef get_arg_parser():\n\"\"\"Returns argument parser.\"\"\"\n@@ -74,6 +76,11 @@ def get_arg_parser():\n'--fuzzers',\nnargs='*',\nhelp='Names of the fuzzers to include in the report.')\n+ parser.add_argument('-cov',\n+ '--coverage-report',\n+ action='store_true',\n+ default=False,\n+ help='If set, clang coverage reports are linked.')\n# It doesn't make sense to clobber and label by experiment, since nothing\n# can get clobbered like this.\n@@ -130,7 +137,8 @@ def generate_report(experiment_names,\nin_progress=False,\nend_time=None,\nmerge_with_clobber=False,\n- merge_with_clobber_nonprivate=False):\n+ merge_with_clobber_nonprivate=False,\n+ coverage_report=False):\n\"\"\"Generate report helper.\"\"\"\nif merge_with_clobber_nonprivate:\nexperiment_names = (\n@@ -174,7 +182,7 @@ def generate_report(experiment_names,\ntemplate = report_type + '.html'\ndetailed_report = rendering.render_report(experiment_ctx, template,\n- in_progress)\n+ in_progress, coverage_report)\nfilesystem.write(os.path.join(report_directory, 'index.html'),\ndetailed_report)\n@@ -198,7 +206,8 @@ def main():\nlog_scale=args.log_scale,\nfrom_cached_data=args.from_cached_data,\nend_time=args.end_time,\n- merge_with_clobber=args.merge_with_clobber)\n+ merge_with_clobber=args.merge_with_clobber,\n+ coverage_report=args.coverage_report)\nif __name__ == '__main__':\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/rendering.py",
"new_path": "analysis/rendering.py",
"diff": "@@ -20,7 +20,7 @@ import jinja2\nfrom common import utils\n-def render_report(experiment_results, template, in_progress):\n+def render_report(experiment_results, template, in_progress, coverage_report):\n\"\"\"Renders report with |template| using data provided by the\n|experiment_results| context.\n@@ -37,5 +37,17 @@ def render_report(experiment_results, template, in_progress):\nloader=jinja2.FileSystemLoader(templates_dir),\n)\ntemplate = environment.get_template(template)\n+\n+ # FIXME: Use |experiment_filestore_name| from experiment db.\n+ # See #642: https://github.com/google/fuzzbench/issues/642\n+ if 'EXPERIMENT_FILESTORE' in os.environ:\n+ experiment_filestore = os.environ['EXPERIMENT_FILESTORE']\n+ prefix = \"gs://\"\n+ experiment_filestore_name = experiment_filestore[len(prefix):]\n+ else:\n+ experiment_filestore_name = 'fuzzbench-data'\n+\nreturn template.render(experiment=experiment_results,\n- in_progress=in_progress)\n+ in_progress=in_progress,\n+ coverage_report=coverage_report,\n+ experiment_filestore_name=experiment_filestore_name)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/report_templates/default.html",
"new_path": "analysis/report_templates/default.html",
"diff": "</li>\n</ul>\n+ {% if coverage_report %}\n+ <ul class=\"collapsible\">\n+ <li>\n+ <div class=\"collapsible-header\">\n+ Coverage reports for each fuzzer on this benchmark\n+ </div>\n+ <div class=\"collapsible-body\">\n+ <div class=\"row\">\n+ {% for fuzzer in benchmark.fuzzers %}\n+ <div class=\"col\">\n+ <a class=\"waves-effect waves-light btn\" href=\"https://storage.googleapis.com/{{ experiment_filestore_name }}/{{ experiment.name }}/reports/coverage/{{ benchmark.name }}/{{ fuzzer }}/index.html\">{{ fuzzer }}</a>\n+ </div>\n+ {% endfor %}\n+ </div>\n+ </div>\n+ </li>\n+ </ul>\n+ {% endif %}\n</div> <!-- id=\"{{ benchmark.name }}\" -->\n{% endfor %}\n"
},
{
"change_type": "MODIFY",
"old_path": "common/experiment_utils.py",
"new_path": "common/experiment_utils.py",
"diff": "@@ -17,6 +17,7 @@ import os\nimport posixpath\nfrom common import environment\n+from common import experiment_path as exp_path\nDEFAULT_SNAPSHOT_SECONDS = 15 * 60 # Seconds.\nCONFIG_DIR = 'config'\n@@ -38,6 +39,11 @@ def get_experiment_name():\nreturn os.environ['EXPERIMENT']\n+def get_experiment_folders_dir():\n+ \"\"\"Returns experiment folders directory.\"\"\"\n+ return exp_path.path('experiment-folders')\n+\n+\ndef get_cloud_project():\n\"\"\"Returns the cloud project.\"\"\"\nreturn os.environ['CLOUD_PROJECT']\n@@ -88,11 +94,16 @@ def is_local_experiment():\ndef get_trial_dir(fuzzer, benchmark, trial_id):\n\"\"\"Returns the unique directory for |fuzzer|, |benchmark|, and\n|trial_id|.\"\"\"\n- benchmark_fuzzer_directory = '%s-%s' % (benchmark, fuzzer)\n+ benchmark_fuzzer_directory = get_benchmark_fuzzer_dir(benchmark, fuzzer)\ntrial_subdir = 'trial-%d' % trial_id\nreturn posixpath.join(benchmark_fuzzer_directory, trial_subdir)\n+def get_benchmark_fuzzer_dir(benchmark, fuzzer):\n+ \"\"\"Returns the directory for |benchmark| and |fuzzer|.\"\"\"\n+ return '%s-%s' % (benchmark, fuzzer)\n+\n+\ndef get_trial_bucket_dir(fuzzer, benchmark, trial_id):\n\"\"\"Returns the unique directory in experiment-folders int the bucket for\n|fuzzer|, |benchmark|, and |trial_id|.\"\"\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/coverage_utils.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Utility functions for coverage report generation.\"\"\"\n+\n+import os\n+import multiprocessing\n+import json\n+import queue\n+\n+from common import experiment_utils as exp_utils\n+from common import new_process\n+from common import benchmark_utils\n+from common import fuzzer_utils\n+from common import logs\n+from common import filestore_utils\n+from common import experiment_path as exp_path\n+from common import filesystem\n+from database import utils as db_utils\n+from database import models\n+from experiment.build import build_utils\n+from experiment import reporter\n+\n+logger = logs.Logger('coverage_utils') # pylint: disable=invalid-name\n+\n+COV_DIFF_QUEUE_GET_TIMEOUT = 1\n+\n+\n+def upload_coverage_reports_to_bucket():\n+ \"\"\"Copies the coverage reports to gcs bucket.\"\"\"\n+ report_dir = reporter.get_reports_dir()\n+ src_dir = get_coverage_report_dir()\n+ dst_dir = exp_path.filestore(report_dir)\n+ filestore_utils.cp(src_dir, dst_dir, recursive=True, parallel=True)\n+\n+\n+def generate_coverage_reports(experiment_config: dict):\n+ \"\"\"Generates coverage reports for each benchmark and fuzzer.\"\"\"\n+ logger.info('Start generating coverage report for benchmarks.')\n+ benchmarks = experiment_config['benchmarks'].split(',')\n+ fuzzers = experiment_config['fuzzers'].split(',')\n+ experiment = experiment_config['experiment']\n+ with multiprocessing.Pool() as pool:\n+ generate_coverage_report_args = [(experiment, benchmark, fuzzer)\n+ for benchmark in benchmarks\n+ for fuzzer in fuzzers]\n+ pool.starmap(generate_coverage_report, generate_coverage_report_args)\n+ pool.close()\n+ pool.join()\n+ logger.info('Finished generating coverage report.')\n+\n+\n+def generate_coverage_report(experiment, benchmark, fuzzer):\n+ \"\"\"Generates the coverage report for one pair of benchmark and fuzzer.\"\"\"\n+ logs.initialize()\n+ logger.info('Generating coverage report for benchmark: {benchmark} \\\n+ fuzzer: {fuzzer}.'.format(benchmark=benchmark, fuzzer=fuzzer))\n+ generator = CoverageReporter(fuzzer, benchmark, experiment)\n+ # Merges all the profdata files.\n+ generator.merge_profdata_files()\n+ # Generates the reports using llvm-cov.\n+ generator.generate_cov_report()\n+\n+ logger.info('Finished generating coverage report for '\n+ 'benchmark:{benchmark} fuzzer:{fuzzer}.'.format(\n+ benchmark=benchmark, fuzzer=fuzzer))\n+\n+\n+class CoverageReporter: # pylint: disable=too-many-instance-attributes\n+ \"\"\"Class used to generate coverage report for a pair of\n+ fuzzer and benchmark.\"\"\"\n+\n+ # pylint: disable=too-many-arguments\n+ def __init__(self, fuzzer, benchmark, experiment):\n+ self.fuzzer = fuzzer\n+ self.benchmark = benchmark\n+ self.experiment = experiment\n+ self.trial_ids = get_trial_ids(experiment, fuzzer, benchmark)\n+ cov_report_directory = get_coverage_report_dir()\n+ self.report_dir = os.path.join(cov_report_directory, benchmark, fuzzer)\n+ benchmark_fuzzer_dir = 
exp_utils.get_benchmark_fuzzer_dir(\n+ benchmark, fuzzer)\n+ work_dir = exp_utils.get_work_dir()\n+ self.merged_profdata_file = os.path.join(work_dir,\n+ 'measurement-folders',\n+ benchmark_fuzzer_dir,\n+ 'merged.profdata')\n+ coverage_binaries_dir = build_utils.get_coverage_binaries_dir()\n+ self.source_files = os.path.join(coverage_binaries_dir, benchmark,\n+ 'src')\n+ self.binary_file = get_coverage_binary(benchmark)\n+\n+ def merge_profdata_files(self):\n+ \"\"\"Merge profdata files from |src_files| to |dst_files|.\"\"\"\n+ logger.info('Merging profdata for fuzzer: '\n+ '{fuzzer},benchmark: {benchmark}.'.format(\n+ fuzzer=self.fuzzer, benchmark=self.benchmark))\n+ files_to_merge = [\n+ TrialCoverage(self.fuzzer, self.benchmark, trial_id,\n+ logger).profdata_file for trial_id in self.trial_ids\n+ ]\n+ result = merge_profdata_files(files_to_merge, self.merged_profdata_file)\n+ if result.retcode != 0:\n+ logger.error('Profdata files merging failed.')\n+\n+ def generate_cov_report(self):\n+ \"\"\"Generates the coverage report.\"\"\"\n+ command = [\n+ 'llvm-cov', 'show', '-format=html',\n+ '-path-equivalence=/,{prefix}'.format(prefix=self.source_files),\n+ '-output-dir={dst_dir}'.format(dst_dir=self.report_dir),\n+ '-Xdemangler', 'c++filt', '-Xdemangler', '-n', self.binary_file,\n+ '-instr-profile={profdata}'.format(\n+ profdata=self.merged_profdata_file)\n+ ]\n+ result = new_process.execute(command)\n+ if result.retcode != 0:\n+ logger.error('Coverage report generation failed for '\n+ 'fuzzer: {fuzzer},benchmark: {benchmark}.'.format(\n+ fuzzer=self.fuzzer, benchmark=self.benchmark))\n+\n+\n+def get_coverage_archive_name(benchmark):\n+ \"\"\"Gets the archive name for |benchmark|.\"\"\"\n+ return 'coverage-build-%s.tar.gz' % benchmark\n+\n+\n+def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n+ \"\"\"Returns the key in coverage dict for a pair of fuzzer-benchmark.\"\"\"\n+ return fuzzer + ' ' + benchmark\n+\n+\n+def get_profdata_file_name(trial_id):\n+ \"\"\"Returns the profdata file name for |trial_id|.\"\"\"\n+ return 'data-{id}.profdata'.format(id=trial_id)\n+\n+\n+def get_coverage_report_dir():\n+ \"\"\"Returns the directory to store all the coverage reports.\"\"\"\n+ report_dir = reporter.get_reports_dir()\n+ return os.path.join(report_dir, 'coverage')\n+\n+\n+def get_coverage_binary(benchmark: str) -> str:\n+ \"\"\"Gets the coverage binary for benchmark.\"\"\"\n+ coverage_binaries_dir = build_utils.get_coverage_binaries_dir()\n+ fuzz_target = benchmark_utils.get_fuzz_target(benchmark)\n+ return fuzzer_utils.get_fuzz_target_binary(coverage_binaries_dir /\n+ benchmark,\n+ fuzz_target_name=fuzz_target)\n+\n+\n+def get_trial_ids(experiment: str, fuzzer: str, benchmark: str):\n+ \"\"\"Gets ids of all finished trials for a pair of fuzzer and benchmark.\"\"\"\n+ trial_ids = [\n+ trial_id_tuple[0]\n+ for trial_id_tuple in db_utils.query(models.Trial.id).filter(\n+ models.Trial.experiment == experiment, models.Trial.fuzzer ==\n+ fuzzer, models.Trial.benchmark == benchmark,\n+ ~models.Trial.preempted)\n+ ]\n+ return trial_ids\n+\n+\n+def merge_profdata_files(src_files, dst_file):\n+ \"\"\"Uses llvm-profdata to merge |src_files| to |dst_files|.\"\"\"\n+ command = ['llvm-profdata', 'merge', '-sparse']\n+ command.extend(src_files)\n+ command.extend(['-o', dst_file])\n+ result = new_process.execute(command, expect_zero=False)\n+ return result\n+\n+\n+def get_coverage_infomation(coverage_summary_file):\n+ \"\"\"Reads the coverage information from |coverage_summary_file|\n+ and skip possible 
warnings in the file.\"\"\"\n+ with open(coverage_summary_file) as summary:\n+ return json.loads(summary.readlines()[-1])\n+\n+\n+def store_coverage_data(experiment_config: dict):\n+ \"\"\"Generates the specific coverage data and store in cloud bucket.\"\"\"\n+ logger.info('Start storing coverage data')\n+ with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:\n+ q = manager.Queue() # pytype: disable=attribute-error\n+ covered_regions = get_all_covered_regions(experiment_config, pool, q)\n+ json_src_dir = reporter.get_reports_dir()\n+ filesystem.recreate_directory(json_src_dir)\n+ json_src = os.path.join(json_src_dir, 'covered_regions.json')\n+ with open(json_src, 'w') as src_file:\n+ json.dump(covered_regions, src_file)\n+ json_dst = exp_path.filestore(json_src)\n+ filestore_utils.cp(json_src, json_dst)\n+\n+ logger.info('Finished storing coverage data')\n+\n+\n+def get_all_covered_regions(experiment_config: dict, pool, q) -> dict:\n+ \"\"\"Gets regions covered for each pair for fuzzer and benchmark.\"\"\"\n+ logger.info('Measuring all fuzzer-benchmark pairs for final coverage data.')\n+\n+ benchmarks = experiment_config['benchmarks'].split(',')\n+ fuzzers = experiment_config['fuzzers'].split(',')\n+ experiment = experiment_config['experiment']\n+\n+ get_covered_region_args = [(experiment, fuzzer, benchmark, q)\n+ for fuzzer in fuzzers\n+ for benchmark in benchmarks]\n+\n+ result = pool.starmap_async(get_covered_region, get_covered_region_args)\n+\n+ # Poll the queue for covered region data and save them in a dict until the\n+ # pool is done processing each combination of fuzzers and benchmarks.\n+ all_covered_regions = {}\n+\n+ while True:\n+ try:\n+ covered_regions = q.get(timeout=COV_DIFF_QUEUE_GET_TIMEOUT)\n+ all_covered_regions.update(covered_regions)\n+ except queue.Empty:\n+ if result.ready():\n+ # If \"ready\" that means pool has finished. 
Since it is\n+ # finished and the queue is empty, we can stop checking\n+ # the queue for more covered regions.\n+ logger.debug(\n+ 'Finished call to map with get_all_covered_regions.')\n+ break\n+\n+ for key in all_covered_regions:\n+ all_covered_regions[key] = list(all_covered_regions[key])\n+ logger.info('Done measuring all coverage data.')\n+ return all_covered_regions\n+\n+\n+def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\n+ q: multiprocessing.Queue):\n+ \"\"\"Gets the final covered region for a specific pair of fuzzer-benchmark.\"\"\"\n+ logs.initialize()\n+ logger.debug('Measuring covered region: fuzzer: %s, benchmark: %s.', fuzzer,\n+ benchmark)\n+ key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n+ covered_regions = {key: set()}\n+ trial_ids = get_trial_ids(experiment, fuzzer, benchmark)\n+ for trial_id in trial_ids:\n+ logger.info('Measuring covered region: trial_id = %d.', trial_id)\n+ snapshot_logger = logs.Logger('measurer',\n+ default_extras={\n+ 'fuzzer': fuzzer,\n+ 'benchmark': benchmark,\n+ 'trial_id': str(trial_id),\n+ })\n+ trial_coverage = TrialCoverage(fuzzer, benchmark, trial_id,\n+ snapshot_logger)\n+ trial_coverage.generate_summary(0, summary_only=False)\n+ new_covered_regions = trial_coverage.get_current_covered_regions()\n+ covered_regions[key] = covered_regions[key].union(new_covered_regions)\n+ q.put(covered_regions)\n+ logger.debug('Done measuring covered region: fuzzer: %s, benchmark: %s.',\n+ fuzzer, benchmark)\n+\n+\n+class TrialCoverage: # pylint: disable=too-many-instance-attributes\n+ \"\"\"Base class for storing and getting coverage data for a trial.\"\"\"\n+\n+ def __init__(self, fuzzer: str, benchmark: str, trial_num: int,\n+ trial_logger: logs.Logger):\n+ self.fuzzer = fuzzer\n+ self.benchmark = benchmark\n+ self.trial_num = trial_num\n+ self.logger = trial_logger\n+ self.benchmark_fuzzer_trial_dir = exp_utils.get_trial_dir(\n+ fuzzer, benchmark, trial_num)\n+ self.work_dir = exp_utils.get_work_dir()\n+ self.measurement_dir = os.path.join(self.work_dir,\n+ 'measurement-folders',\n+ self.benchmark_fuzzer_trial_dir)\n+ self.report_dir = os.path.join(self.measurement_dir, 'reports')\n+\n+ # Store the profdata file for the current trial.\n+ self.profdata_file = os.path.join(self.report_dir, 'data.profdata')\n+\n+ # Store the coverage information in json form.\n+ self.cov_summary_file = os.path.join(self.report_dir,\n+ 'cov_summary.json')\n+\n+ def get_current_covered_regions(self):\n+ \"\"\"Get the covered regions for the current trial.\"\"\"\n+ covered_regions = set()\n+ try:\n+ coverage_info = get_coverage_infomation(self.cov_summary_file)\n+ functions_data = coverage_info['data'][0]['functions']\n+ # The fourth number in the region-list indicates if the region\n+ # is hit.\n+ hit_index = 4\n+ # The last number in the region-list indicates what type of the\n+ # region it is; 'code_region' is used to obtain various code\n+ # coverage statistic and is represented by number 0.\n+ type_index = -1\n+ for function_data in functions_data:\n+ for region in function_data['regions']:\n+ if region[hit_index] != 0 and region[type_index] == 0:\n+ covered_regions.add(tuple(region[:hit_index]))\n+ except Exception: # pylint: disable=broad-except\n+ self.logger.error(\n+ 'Coverage summary json file defective or missing.')\n+ return covered_regions\n+\n+ def generate_summary(self, cycle: int, summary_only=True):\n+ \"\"\"Transforms the .profdata file into json form.\"\"\"\n+ coverage_binary = get_coverage_binary(self.benchmark)\n+ command = [\n+ 
'llvm-cov', 'export', '-format=text', coverage_binary,\n+ '-instr-profile=%s' % self.profdata_file\n+ ]\n+\n+ if summary_only:\n+ command.append('-summary-only')\n+\n+ with open(self.cov_summary_file, 'w') as output_file:\n+ result = new_process.execute(command,\n+ output_file=output_file,\n+ expect_zero=False)\n+ if result.retcode != 0:\n+ if cycle != 0:\n+ self.logger.error(\n+ 'Coverage summary json file generation failed for \\\n+ cycle: %d.', cycle)\n+ else:\n+ self.logger.error(\n+ 'Coverage summary json file generation failed in the end.')\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -155,7 +155,9 @@ def dispatcher_main():\nis_complete = not measurer_main_process.is_alive()\n# Generate periodic output reports.\n- reporter.output_report(experiment.config, in_progress=not is_complete)\n+ reporter.output_report(experiment.config,\n+ in_progress=not is_complete,\n+ coverage_report=is_complete)\nif is_complete:\n# Experiment is complete, bail out.\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -24,25 +24,22 @@ import tarfile\nimport time\nfrom typing import List, Set\nimport queue\n-import json\nfrom sqlalchemy import func\nfrom sqlalchemy import orm\n-from common import benchmark_utils\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\n-from common import fuzzer_utils\nfrom common import filestore_utils\nfrom common import logs\nfrom common import utils\n-from common import new_process\nfrom database import utils as db_utils\nfrom database import models\nfrom experiment.build import build_utils\nfrom experiment import run_coverage\nfrom experiment import scheduler\n+from experiment import coverage_utils\nlogger = logs.Logger('measurer') # pylint: disable=invalid-name\n@@ -53,124 +50,15 @@ NUM_RETRIES = 3\nRETRY_DELAY = 3\nFAIL_WAIT_SECONDS = 30\nSNAPSHOT_QUEUE_GET_TIMEOUT = 1\n-COV_DIFF_QUEUE_GET_TIMEOUT = 1\nSNAPSHOTS_BATCH_SAVE_SIZE = 100\n-def get_experiment_folders_dir():\n- \"\"\"Return experiment folders directory.\"\"\"\n- return exp_path.path('experiment-folders')\n-\n-\ndef exists_in_experiment_filestore(path: pathlib.Path) -> bool:\n\"\"\"Returns True if |path| exists in the experiment_filestore.\"\"\"\nreturn filestore_utils.ls(exp_path.filestore(path),\nmust_exist=False).retcode == 0\n-def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n- \"\"\"Return the key in coverage dict for a pair of fuzzer-benchmark.\"\"\"\n- return fuzzer + ' ' + benchmark\n-\n-\n-def get_trial_ids(experiment: str, fuzzer: str, benchmark: str):\n- \"\"\"Get ids of all finished trials for a pair of fuzzer and benchmark.\"\"\"\n- trial_ids = [\n- trial_id_tuple[0]\n- for trial_id_tuple in db_utils.query(models.Trial.id).filter(\n- models.Trial.experiment == experiment, models.Trial.fuzzer ==\n- fuzzer, models.Trial.benchmark == benchmark,\n- ~models.Trial.preempted)\n- ]\n- return trial_ids\n-\n-\n-def get_coverage_infomation(coverage_summary_file):\n- \"\"\"Read the coverage information from |coverage_summary_file|\n- and skip possible warnings in the file.\"\"\"\n- with open(coverage_summary_file) as summary:\n- return json.loads(summary.readlines()[-1])\n-\n-\n-def store_coverage_data(experiment_config: dict):\n- \"\"\"Generate the specific coverage data and store in cloud bucket.\"\"\"\n- logger.info('Start storing coverage data')\n- with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:\n- q = manager.Queue() # pytype: disable=attribute-error\n- covered_regions = get_all_covered_regions(experiment_config, pool, q)\n- json_src_dir = get_experiment_folders_dir()\n- json_src = os.path.join(json_src_dir, 'covered_regions.json')\n- with open(json_src, 'w') as src_file:\n- json.dump(covered_regions, src_file)\n- json_dst = exp_path.filestore(json_src)\n- filestore_utils.cp(json_src, json_dst)\n- logger.info('Finished storing coverage data')\n-\n-\n-def get_all_covered_regions(experiment_config: dict, pool, q) -> dict:\n- \"\"\"Get regions covered for each pair for fuzzer and benchmark.\"\"\"\n- logger.info('Measuring all fuzzer-benchmark pairs for final coverage data.')\n-\n- benchmarks = experiment_config['benchmarks'].split(',')\n- fuzzers = experiment_config['fuzzers'].split(',')\n- experiment = experiment_config['experiment']\n-\n- get_covered_region_args = [(experiment, fuzzer, benchmark, q)\n- for fuzzer in fuzzers\n- for benchmark in benchmarks]\n-\n- result = pool.starmap_async(get_covered_region, get_covered_region_args)\n-\n- # Poll the queue for covered region data and save them 
in a dict until the\n- # pool is done processing each combination of fuzzers and benchmarks.\n- all_covered_regions = {}\n-\n- while True:\n- try:\n- covered_regions = q.get(timeout=COV_DIFF_QUEUE_GET_TIMEOUT)\n- all_covered_regions.update(covered_regions)\n- except queue.Empty:\n- if result.ready():\n- # If \"ready\" that means pool has finished. Since it is\n- # finished and the queue is empty, we can stop checking\n- # the queue for more covered regions.\n- logger.debug(\n- 'Finished call to map with get_all_covered_regions.')\n- break\n-\n- for key in all_covered_regions:\n- all_covered_regions[key] = list(all_covered_regions[key])\n- logger.info('Done measuring all coverage data.')\n- return all_covered_regions\n-\n-\n-def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\n- q: multiprocessing.Queue):\n- \"\"\"Get the final covered region for a specific pair of fuzzer-benchmark.\"\"\"\n- initialize_logs()\n- logger.debug('Measuring covered region: fuzzer: %s, benchmark: %s.', fuzzer,\n- benchmark)\n- key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n- covered_regions = {key: set()}\n- trial_ids = get_trial_ids(experiment, fuzzer, benchmark)\n- for trial_id in trial_ids:\n- logger.info('Measuring covered region: trial_id = %d.', trial_id)\n- snapshot_logger = logs.Logger('measurer',\n- default_extras={\n- 'fuzzer': fuzzer,\n- 'benchmark': benchmark,\n- 'trial_id': str(trial_id),\n- })\n- snapshot_measurer = SnapshotMeasurer(fuzzer, benchmark, trial_id,\n- snapshot_logger)\n- snapshot_measurer.generate_summary(0, summary_only=False)\n- new_covered_regions = snapshot_measurer.get_current_covered_regions()\n- covered_regions[key] = covered_regions[key].union(new_covered_regions)\n- q.put(covered_regions)\n- logger.debug('Done measuring covered region: fuzzer: %s, benchmark: %s.',\n- fuzzer, benchmark)\n-\n-\ndef measure_main(experiment_config):\n\"\"\"Do the continuously measuring and the final measuring.\"\"\"\ninitialize_logs()\n@@ -182,7 +70,9 @@ def measure_main(experiment_config):\nmeasure_loop(experiment, max_total_time)\n# Do the final measuring and store the coverage data.\n- store_coverage_data(experiment_config)\n+ coverage_utils.store_coverage_data(experiment_config)\n+ coverage_utils.generate_coverage_reports(experiment_config)\n+ coverage_utils.upload_coverage_reports_to_bucket()\nlogger.info('Finished measuring.')\n@@ -222,7 +112,7 @@ def measure_all_trials(experiment: str, max_total_time: int, pool, q) -> bool:\nwas called first. 
Otherwise it will use fork which breaks logging.\"\"\"\nlogger.info('Measuring all trials.')\n- experiment_folders_dir = get_experiment_folders_dir()\n+ experiment_folders_dir = experiment_utils.get_experiment_folders_dir()\nif not exists_in_experiment_filestore(experiment_folders_dir):\nreturn True\n@@ -420,7 +310,7 @@ def extract_corpus(corpus_archive: str, sha_blacklist: Set[str],\nfilesystem.write(file_path, member_contents, 'wb')\n-class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\n+class SnapshotMeasurer(coverage_utils.TrialCoverage): # pylint: disable=too-many-instance-attributes\n\"\"\"Class used for storing details needed to measure coverage of a particular\ntrial.\"\"\"\n@@ -428,22 +318,13 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\ndef __init__(self, fuzzer: str, benchmark: str, trial_num: int,\ntrial_logger: logs.Logger):\n- self.fuzzer = fuzzer\n- self.benchmark = benchmark\n- self.trial_num = trial_num\n- self.logger = trial_logger\n- benchmark_fuzzer_trial_dir = experiment_utils.get_trial_dir(\n- fuzzer, benchmark, trial_num)\n- work_dir = experiment_utils.get_work_dir()\n- measurement_dir = os.path.join(work_dir, 'measurement-folders',\n- benchmark_fuzzer_trial_dir)\n- self.corpus_dir = os.path.join(measurement_dir, 'corpus')\n-\n- self.crashes_dir = os.path.join(measurement_dir, 'crashes')\n- self.coverage_dir = os.path.join(measurement_dir, 'coverage')\n- self.report_dir = os.path.join(measurement_dir, 'reports')\n- self.trial_dir = os.path.join(work_dir, 'experiment-folders',\n- benchmark_fuzzer_trial_dir)\n+ super().__init__(fuzzer, benchmark, trial_num, trial_logger)\n+ self.corpus_dir = os.path.join(self.measurement_dir, 'corpus')\n+\n+ self.crashes_dir = os.path.join(self.measurement_dir, 'crashes')\n+ self.coverage_dir = os.path.join(self.measurement_dir, 'coverage')\n+ self.trial_dir = os.path.join(self.work_dir, 'experiment-folders',\n+ self.benchmark_fuzzer_trial_dir)\n# Stores the files that have already been measured for a trial.\nself.measured_files_path = os.path.join(self.report_dir,\n@@ -473,7 +354,7 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\ndef run_cov_new_units(self):\n\"\"\"Run the coverage binary on new units.\"\"\"\n- coverage_binary = get_coverage_binary(self.benchmark)\n+ coverage_binary = coverage_utils.get_coverage_binary(self.benchmark)\ncrashing_units = run_coverage.do_coverage_run(coverage_binary,\nself.corpus_dir,\nself.profraw_file,\n@@ -482,35 +363,14 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\nself.UNIT_BLACKLIST[self.benchmark] = (\nself.UNIT_BLACKLIST[self.benchmark].union(set(crashing_units)))\n- def get_current_covered_regions(self):\n- \"\"\"Get the covered regions for the current trial.\"\"\"\n- covered_regions = set()\n- try:\n- coverage_info = get_coverage_infomation(self.cov_summary_file)\n- functions_data = coverage_info['data'][0]['functions']\n- # The fourth number in the region-list indicates if the region\n- # is hit.\n- hit_index = 4\n- # The last number in the region-list indicates what type of the\n- # region it is; 'code_region' is used to obtain various code\n- # coverage statistic and is represented by number 0.\n- type_index = -1\n- for function_data in functions_data:\n- for region in function_data['regions']:\n- if region[hit_index] != 0 and region[type_index] == 0:\n- covered_regions.add(tuple(region[:hit_index]))\n- except Exception: # pylint: disable=broad-except\n- self.logger.error(\n- 'Coverage 
summary json file defective or missing.')\n- return covered_regions\n-\ndef get_current_coverage(self) -> int:\n\"\"\"Get the current number of lines covered.\"\"\"\nif not os.path.exists(self.cov_summary_file):\nself.logger.warning('No coverage summary json file found.')\nreturn 0\ntry:\n- coverage_info = get_coverage_infomation(self.cov_summary_file)\n+ coverage_info = coverage_utils.get_coverage_infomation(\n+ self.cov_summary_file)\ncoverage_data = coverage_info[\"data\"][0]\nsummary_data = coverage_data[\"totals\"]\nregions_coverage_data = summary_data[\"regions\"]\n@@ -529,38 +389,13 @@ class SnapshotMeasurer: # pylint: disable=too-many-instance-attributes\nfiles_to_merge = [self.profraw_file, self.profdata_file]\nelse:\nfiles_to_merge = [self.profraw_file]\n- command = ['llvm-profdata', 'merge', '-sparse'\n- ] + files_to_merge + ['-o', self.profdata_file]\n- result = new_process.execute(command, expect_zero=False)\n+ result = coverage_utils.merge_profdata_files(files_to_merge,\n+ self.profdata_file)\nif result.retcode != 0:\nself.logger.error(\n'Coverage profdata generation failed for cycle: %d.', cycle)\n- def generate_summary(self, cycle: int, summary_only=True):\n- \"\"\"Transform the .profdata file into json form.\"\"\"\n- coverage_binary = get_coverage_binary(self.benchmark)\n- command = [\n- 'llvm-cov', 'export', '-format=text', coverage_binary,\n- '-instr-profile=%s' % self.profdata_file\n- ]\n-\n- if summary_only:\n- command.append('-summary-only')\n-\n- with open(self.cov_summary_file, 'w') as output_file:\n- result = new_process.execute(command,\n- output_file=output_file,\n- expect_zero=False)\n- if result.retcode != 0:\n- if cycle != 0:\n- self.logger.error(\n- 'Coverage summary json file generation failed for \\\n- cycle: %d.', cycle)\n- else:\n- self.logger.error(\n- 'Coverage summary json file generation failed in the end.')\n-\ndef generate_coverage_information(self, cycle: int):\n\"\"\"Generate the .profdata file and then transform it into\njson summary.\"\"\"\n@@ -790,15 +625,6 @@ def set_up_coverage_binary(benchmark):\nos.remove(archive_path)\n-def get_coverage_binary(benchmark: str) -> str:\n- \"\"\"Get the coverage binary for benchmark.\"\"\"\n- coverage_binaries_dir = build_utils.get_coverage_binaries_dir()\n- fuzz_target = benchmark_utils.get_fuzz_target(benchmark)\n- return fuzzer_utils.get_fuzz_target_binary(coverage_binaries_dir /\n- benchmark,\n- fuzz_target_name=fuzz_target)\n-\n-\ndef initialize_logs():\n\"\"\"Initialize logs. This must be called on process start.\"\"\"\nlogs.initialize(default_extras={\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/reporter.py",
"new_path": "experiment/reporter.py",
"diff": "@@ -32,7 +32,9 @@ def get_reports_dir():\nreturn exp_path.path('reports')\n-def output_report(experiment_config: dict, in_progress=False):\n+def output_report(experiment_config: dict,\n+ in_progress=False,\n+ coverage_report=False):\n\"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\nexperiment_name = experiment_utils.get_experiment_name()\nweb_filestore_path = posixpath.join(experiment_config['report_filestore'],\n@@ -54,7 +56,8 @@ def output_report(experiment_config: dict, in_progress=False):\nstr(reports_dir),\nreport_name=experiment_name,\nin_progress=in_progress,\n- merge_with_clobber_nonprivate=merge_with_nonprivate)\n+ merge_with_clobber_nonprivate=merge_with_nonprivate,\n+ coverage_report=coverage_report)\nfilestore_utils.rsync(str(reports_dir),\nweb_filestore_path,\ngsutil_options=[\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_measurer.py",
"new_path": "experiment/test_measurer.py",
"diff": "@@ -144,7 +144,7 @@ def test_generate_profdata_merge(mocked_execute, experiment, fs):\[email protected]('common.new_process.execute')\[email protected]('experiment.measurer.get_coverage_binary')\[email protected]('experiment.coverage_utils.get_coverage_binary')\ndef test_generate_summary(mocked_get_coverage_binary, mocked_execute,\nexperiment, fs):\n\"\"\"Tests that generate_summary can run the correct command.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_reporter.py",
"new_path": "experiment/test_reporter.py",
"diff": "@@ -50,5 +50,6 @@ def test_output_report_filestore(fs, experiment):\n[experiment_name],\nreports_dir,\nreport_name=experiment_name,\n+ coverage_report=False,\nin_progress=False,\nmerge_with_clobber_nonprivate=False)\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/coverage/builder.Dockerfile",
"new_path": "fuzzers/coverage/builder.Dockerfile",
"diff": "@@ -26,3 +26,7 @@ RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\ncd /llvm-project/compiler-rt/lib/fuzzer && \\\nbash build.sh && \\\ncp libFuzzer.a /usr/lib\n+\n+# Copy source code files of benchmark to $OUT for the report generation.\n+RUN mkdir $OUT/src && \\\n+ cp -rL --parent $SRC $OUT/src\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Generate coverage reports. (#618) |
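The record above wraps two LLVM tools behind `merge_profdata_files` and `generate_cov_report`. As a rough sketch (the helper name and its arguments are invented; the tool invocations mirror the commands in the diff), the pipeline reduces to:

```python
# Sketch of the llvm-profdata/llvm-cov pipeline that coverage_utils.py
# wraps. merge_and_report() and its parameters are hypothetical; the
# command lines are taken from the diff above.
import subprocess


def merge_and_report(profdata_files, merged_file, binary, src_prefix,
                     report_dir):
    # Merge the per-trial .profdata files into a single profile.
    subprocess.run(
        ['llvm-profdata', 'merge', '-sparse', *profdata_files,
         '-o', merged_file],
        check=True)
    # Render an HTML coverage report from the merged profile.
    subprocess.run(
        ['llvm-cov', 'show', '-format=html',
         '-path-equivalence=/,{}'.format(src_prefix),
         '-output-dir={}'.format(report_dir),
         '-Xdemangler', 'c++filt', '-Xdemangler', '-n',
         binary, '-instr-profile={}'.format(merged_file)],
        check=True)
```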
258,388 | 11.08.2020 13:22:20 | 25,200 | c014024a2c5a3c154595a01530939abee6e0f342 | Fix issue with multiple mounts and don't pull images in CI anymore | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"new_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"diff": "@@ -52,11 +52,9 @@ STANDARD_BENCHMARKS = {\ndef get_make_targets(benchmarks, fuzzer):\n- \"\"\"Return pull and test targets for |fuzzer| and each benchmark\n+ \"\"\"Returns and test targets for |fuzzer| and each benchmark\nin |benchmarks| to pass to make.\"\"\"\n- return [('pull-%s-%s' % (fuzzer, benchmark),\n- 'test-run-%s-%s' % (fuzzer, benchmark))\n- for benchmark in benchmarks]\n+ return ['test-run-%s-%s' % (fuzzer, benchmark) for benchmark in benchmarks]\ndef delete_docker_images():\n@@ -81,18 +79,14 @@ def delete_docker_images():\ndef make_builds(benchmarks, fuzzer):\n- \"\"\"Use make to build each target in |build_targets|.\"\"\"\n+ \"\"\"Use make to test the fuzzer on each benchmark in |benchmarks|.\"\"\"\nprint('Building benchmarks: {} for fuzzer: {}'.format(\n', '.join(benchmarks), fuzzer))\nmake_targets = get_make_targets(benchmarks, fuzzer)\n- for pull_target, build_target in make_targets:\n- # Pull target first.\n- subprocess.run(['make', '-j', pull_target], check=False)\n-\n- # Then build.\n- build_command = ['make', 'RUNNING_ON_CI=yes', '-j', build_target]\n- print('Running command:', ' '.join(build_command))\n- result = subprocess.run(build_command, check=False)\n+ for make_target in make_targets:\n+ make_command = ['make', 'RUNNING_ON_CI=yes', '-j', make_target]\n+ print('Running command:', ' '.join(make_command))\n+ result = subprocess.run(make_command, check=False)\nif not result.returncode == 0:\nreturn False\n# Delete docker images so disk doesn't fill up.\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/benchmark-runner/Dockerfile",
"new_path": "docker/benchmark-runner/Dockerfile",
"diff": "@@ -54,13 +54,16 @@ ENV WORKDIR /out\nRUN mkdir -p $WORKDIR\nWORKDIR $WORKDIR\n+# The argument needs to be re-declared otherwise it becomes an empty string.\n+ARG fuzzer\n+\n# Copy over all the build artifacts (without * to preserve directory structure).\n# This also copies seed and dictionary files if they are available.\nCOPY --from=builder /out/ ./\n# Copy the fuzzer.py file.\n-COPY --from=builder /src/fuzzer.py .\n+COPY fuzzers/$fuzzer/fuzzer.py .\n# Copy the fuzzers directory.\n-COPY --from=builder /src/fuzzers fuzzers\n+COPY fuzzers ./fuzzers\n# Create empty __init__.py to allow python deps to work.\nRUN touch __init__.py\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Fix issue with multiple mounts and don't pull images in CI anymore (#660) |
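For context, the simplified CI flow in this record boils down to one `test-run-<fuzzer>-<benchmark>` make target per benchmark, with no separate pull step. A minimal sketch (target names follow the convention in the diff; the surrounding harness is assumed):

```python
# Minimal sketch of the CI loop after this change. Only the make
# target naming and RUNNING_ON_CI flag come from the diff; the
# wrapper function itself is illustrative.
import subprocess


def test_run_all(fuzzer, benchmarks):
    for benchmark in benchmarks:
        command = ['make', 'RUNNING_ON_CI=yes', '-j',
                   'test-run-%s-%s' % (fuzzer, benchmark)]
        print('Running command:', ' '.join(command))
        if subprocess.run(command, check=False).returncode != 0:
            return False
    return True
```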
258,396 | 12.08.2020 10:23:18 | 25,200 | 7f83f9821d68a43428efadd9652321ecb6c15114 | An experiment request for the libFuzzer CrossOver fix with dictionaries | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-14\n+ fuzzers:\n+ - libfuzzer_fixcrossover\n+ - entropic_fixcrossover\n+\n- experiment: 2020-08-13\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | An experiment request of libFuzzer CrossOver fix with dictionaries (#668) |
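Requests like the one above are plain YAML entries keyed by an experiment date with a list of fuzzers. A hedged sketch of how such a file could be read and sanity-checked (only the schema comes from the diff; the validation rules here are assumptions):

```python
# Reads service/experiment-requests.yaml entries of the form
# {experiment: <date>, fuzzers: [...]}. The checks are illustrative,
# not the service's actual validation logic.
import yaml


def load_experiment_requests(path='service/experiment-requests.yaml'):
    with open(path) as handle:
        requests = yaml.safe_load(handle)
    for request in requests:
        if 'experiment' not in request or not request.get('fuzzers'):
            raise ValueError(
                'Each request needs an experiment date and a fuzzer '
                'list: %s' % request)
    return requests
```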
258,396 | 12.08.2020 16:53:25 | 25,200 | ce391a80df984be3abfd03bcaa3124f6ec395177 | Rebase libfuzzer/entropic's keepseed variants on fixcrossover variants | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/entropic_keepseed/builder.Dockerfile",
"new_path": "fuzzers/entropic_keepseed/builder.Dockerfile",
"diff": "ARG parent_image\nFROM $parent_image\n+COPY patch-crossover.diff /\nCOPY patch.diff /\nRUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\ncd /llvm-project && \\\ngit checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch-crossover.diff && \\\npatch -p1 < /patch.diff && \\\ncd /llvm-project/compiler-rt/lib/fuzzer && \\\n(for f in *.cpp; do \\\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_keepseed/patch-crossover.diff",
"diff": "+commit df6a841a7c8b1c540f0e3c31c3697a11b2a00f51\n+Author: Dokyung Song <[email protected]>\n+Date: Wed Aug 5 23:12:19 2020 +0000\n+\n+ [libFuzzer] Fix arguments of InsertPartOf/CopyPartOf calls in CrossOver mutator.\n+\n+ The CrossOver mutator is meant to cross over two given buffers\n+ (referred to as the first/second buffer below). Previously\n+ InsertPartOf/CopyPartOf calls used in the CrossOver mutator\n+ incorrectly inserted/copied part of the second buffer into a \"scratch\n+ buffer\" (MutateInPlaceHere of the size CurrentMaxMutationLen), rather\n+ than the first buffer. This is not intended behavior, because the\n+ scratch buffer does not always (i) contain the content of the first\n+ buffer, and (ii) have the same size as the first buffer;\n+ CurrentMaxMutationLen is typically a lot larger than the size of the\n+ first buffer. This patch fixes the issue by using the first buffer\n+ instead of the scratch buffer in InsertPartOf/CopyPartOf calls.\n+\n+ This patch also adds two new tests, namely \"cross_over_insert\" and\n+ \"cross_over_copy\", which specifically target InsertPartOf and\n+ CopyPartOf, respectively.\n+\n+ - cross_over_insert.test checks if the fuzzer can use InsertPartOf to\n+ trigger the crash.\n+\n+ - cross_over_copy.test checks if the fuzzer can use CopyPartOf to\n+ trigger the crash.\n+\n+ These newly added tests were designed to pass with the current patch,\n+ but not without the it (with b216c80cc2496b87bf827260ce7e24dc62247d71\n+ these tests do no pass). To achieve this, -max_len was intentionally\n+ given a high value. Without this patch, InsertPartOf/CopyPartOf will\n+ generate larger inputs, possibly with unpredictable data in it,\n+ thereby failing to trigger the crash.\n+\n+ The test pass condition for these new tests is narrowed down by (i)\n+ limiting mutation depth to 1 (i.e., a single CrossOver mutation should\n+ be able to trigger the crash) and (ii) checking whether the mutation\n+ sequence of \"CrossOver-\" leads to the crash.\n+\n+ Also note that these newly added tests and an existing test\n+ (cross_over.test) all use \"-reduce_inputs=0\" flags to prevent reducing\n+ inputs; it's easier to force the fuzzer to keep original input string\n+ this way than tweaking cov-instrumented basic blocks in the source\n+ code of the fuzzer executable.\n+\n+ Differential Revision: https://reviews.llvm.org/D85554\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..df9ada45bb0 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -425,26 +425,26 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,\n+ if (!CrossOverWith) return 0;\n+ const Unit &O = *CrossOverWith;\n+ if (O.empty()) return 0;\n+- MutateInPlaceHere.resize(MaxSize);\n+- auto &U = MutateInPlaceHere;\n+ size_t NewSize = 0;\n+ switch(Rand(3)) {\n+ case 0:\n+- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());\n++ MutateInPlaceHere.resize(MaxSize);\n++ NewSize = CrossOver(Data, Size, O.data(), O.size(),\n++ MutateInPlaceHere.data(), MaxSize);\n++ memcpy(Data, MutateInPlaceHere.data(), NewSize);\n+ break;\n+ case 1:\n+- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);\n++ NewSize = InsertPartOf(O.data(), O.size(), Data, Size, MaxSize);\n+ if (!NewSize)\n+- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ case 2:\n+- NewSize = 
CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ default: assert(0);\n+ }\n+ assert(NewSize > 0 && \"CrossOver returned empty unit\");\n+ assert(NewSize <= MaxSize && \"CrossOver returned overisized unit\");\n+- memcpy(Data, U.data(), NewSize);\n+ return NewSize;\n+ }\n+\n+diff --git a/compiler-rt/test/fuzzer/CrossOverTest.cpp b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+index a7643570a92..3ca53a8a851 100644\n+--- a/compiler-rt/test/fuzzer/CrossOverTest.cpp\n++++ b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+@@ -4,10 +4,10 @@\n+\n+ // Test for a fuzzer. The fuzzer must find the string\n+ // ABCDEFGHIJ\n+-// We use it as a test for CrossOver functionality\n+-// by passing two inputs to it:\n+-// ABCDE00000\n+-// ZZZZZFGHIJ\n++// We use it as a test for each of CrossOver functionalities\n++// by passing the following sets of two inputs to it:\n++// {ABCDEHIJ, ZFG} to test InsertPartOf\n++// {ABCDE00HIJ, ZFG} to test CopyPartOf\n+ //\n+ #include <assert.h>\n+ #include <cstddef>\n+@@ -16,6 +16,17 @@\n+ #include <iostream>\n+ #include <ostream>\n+\n++#ifndef INPUT_A\n++#define INPUT_A \"ABCDE00000\"\n++#endif\n++\n++#ifndef INPUT_B\n++#define INPUT_B \"ZZZZZFGHIJ\"\n++#endif\n++\n++const char *InputA = INPUT_A;\n++const char *InputB = INPUT_B;\n++\n+ static volatile int Sink;\n+ static volatile int *NullPtr;\n+\n+@@ -42,13 +53,11 @@ static const uint32_t ExpectedHash = 0xe1677acb;\n+\n+ extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n+ // fprintf(stderr, \"ExpectedHash: %x\\n\", ExpectedHash);\n+- if (Size != 10) return 0;\n+- if (*Data == 'A')\n++ if (Size == 10 && ExpectedHash == simple_hash(Data, Size))\n++ *NullPtr = 0;\n++ if (*Data == InputA[0])\n+ Sink++;\n+- if (*Data == 'Z')\n++ if (*Data == InputB[0])\n+ Sink--;\n+- if (ExpectedHash == simple_hash(Data, Size))\n+- *NullPtr = 0;\n+ return 0;\n+ }\n+-\n+diff --git a/compiler-rt/test/fuzzer/cross_over.test b/compiler-rt/test/fuzzer/cross_over.test\n+index 058b5eb2c85..64e06e8cd36 100644\n+--- a/compiler-rt/test/fuzzer/cross_over.test\n++++ b/compiler-rt/test/fuzzer/cross_over.test\n+@@ -12,7 +12,7 @@ RUN: echo -n ABCDE00000 > %t-corpus/A\n+ RUN: echo -n ZZZZZFGHIJ > %t-corpus/B\n+\n+\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 %t-corpus\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus\n+\n+ # Test the same thing but using -seed_inputs instead of passing the corpus dir.\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n+diff --git a/compiler-rt/test/fuzzer/cross_over_copy.test b/compiler-rt/test/fuzzer/cross_over_copy.test\n+new file mode 100644\n+index 00000000000..f8f45c974e2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_copy.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver CopyPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE00HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDE00HIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDE00HIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 
-runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n+diff --git a/compiler-rt/test/fuzzer/cross_over_insert.test b/compiler-rt/test/fuzzer/cross_over_insert.test\n+new file mode 100644\n+index 00000000000..5ad2ff0a633\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_insert.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver InsertPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDEHIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDEHIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer_keepseed/builder.Dockerfile",
"new_path": "fuzzers/libfuzzer_keepseed/builder.Dockerfile",
"diff": "ARG parent_image\nFROM $parent_image\n+COPY patch-crossover.diff /\nCOPY patch.diff /\nRUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\ncd /llvm-project/ && \\\ngit checkout b52b2e1c188072e3cbc91500cfd503fb26d50ffc && \\\n+ patch -p1 < /patch-crossover.diff && \\\npatch -p1 < /patch.diff && \\\ncd compiler-rt/lib/fuzzer && \\\n(for f in *.cpp; do \\\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_keepseed/patch-crossover.diff",
"diff": "+commit df6a841a7c8b1c540f0e3c31c3697a11b2a00f51\n+Author: Dokyung Song <[email protected]>\n+Date: Wed Aug 5 23:12:19 2020 +0000\n+\n+ [libFuzzer] Fix arguments of InsertPartOf/CopyPartOf calls in CrossOver mutator.\n+\n+ The CrossOver mutator is meant to cross over two given buffers\n+ (referred to as the first/second buffer below). Previously\n+ InsertPartOf/CopyPartOf calls used in the CrossOver mutator\n+ incorrectly inserted/copied part of the second buffer into a \"scratch\n+ buffer\" (MutateInPlaceHere of the size CurrentMaxMutationLen), rather\n+ than the first buffer. This is not intended behavior, because the\n+ scratch buffer does not always (i) contain the content of the first\n+ buffer, and (ii) have the same size as the first buffer;\n+ CurrentMaxMutationLen is typically a lot larger than the size of the\n+ first buffer. This patch fixes the issue by using the first buffer\n+ instead of the scratch buffer in InsertPartOf/CopyPartOf calls.\n+\n+ This patch also adds two new tests, namely \"cross_over_insert\" and\n+ \"cross_over_copy\", which specifically target InsertPartOf and\n+ CopyPartOf, respectively.\n+\n+ - cross_over_insert.test checks if the fuzzer can use InsertPartOf to\n+ trigger the crash.\n+\n+ - cross_over_copy.test checks if the fuzzer can use CopyPartOf to\n+ trigger the crash.\n+\n+ These newly added tests were designed to pass with the current patch,\n+ but not without the it (with b216c80cc2496b87bf827260ce7e24dc62247d71\n+ these tests do no pass). To achieve this, -max_len was intentionally\n+ given a high value. Without this patch, InsertPartOf/CopyPartOf will\n+ generate larger inputs, possibly with unpredictable data in it,\n+ thereby failing to trigger the crash.\n+\n+ The test pass condition for these new tests is narrowed down by (i)\n+ limiting mutation depth to 1 (i.e., a single CrossOver mutation should\n+ be able to trigger the crash) and (ii) checking whether the mutation\n+ sequence of \"CrossOver-\" leads to the crash.\n+\n+ Also note that these newly added tests and an existing test\n+ (cross_over.test) all use \"-reduce_inputs=0\" flags to prevent reducing\n+ inputs; it's easier to force the fuzzer to keep original input string\n+ this way than tweaking cov-instrumented basic blocks in the source\n+ code of the fuzzer executable.\n+\n+ Differential Revision: https://reviews.llvm.org/D85554\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..df9ada45bb0 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -425,26 +425,26 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,\n+ if (!CrossOverWith) return 0;\n+ const Unit &O = *CrossOverWith;\n+ if (O.empty()) return 0;\n+- MutateInPlaceHere.resize(MaxSize);\n+- auto &U = MutateInPlaceHere;\n+ size_t NewSize = 0;\n+ switch(Rand(3)) {\n+ case 0:\n+- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());\n++ MutateInPlaceHere.resize(MaxSize);\n++ NewSize = CrossOver(Data, Size, O.data(), O.size(),\n++ MutateInPlaceHere.data(), MaxSize);\n++ memcpy(Data, MutateInPlaceHere.data(), NewSize);\n+ break;\n+ case 1:\n+- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);\n++ NewSize = InsertPartOf(O.data(), O.size(), Data, Size, MaxSize);\n+ if (!NewSize)\n+- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ case 2:\n+- NewSize = 
CopyPartOf(O.data(), O.size(), U.data(), U.size());\n++ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);\n+ break;\n+ default: assert(0);\n+ }\n+ assert(NewSize > 0 && \"CrossOver returned empty unit\");\n+ assert(NewSize <= MaxSize && \"CrossOver returned overisized unit\");\n+- memcpy(Data, U.data(), NewSize);\n+ return NewSize;\n+ }\n+\n+diff --git a/compiler-rt/test/fuzzer/CrossOverTest.cpp b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+index a7643570a92..3ca53a8a851 100644\n+--- a/compiler-rt/test/fuzzer/CrossOverTest.cpp\n++++ b/compiler-rt/test/fuzzer/CrossOverTest.cpp\n+@@ -4,10 +4,10 @@\n+\n+ // Test for a fuzzer. The fuzzer must find the string\n+ // ABCDEFGHIJ\n+-// We use it as a test for CrossOver functionality\n+-// by passing two inputs to it:\n+-// ABCDE00000\n+-// ZZZZZFGHIJ\n++// We use it as a test for each of CrossOver functionalities\n++// by passing the following sets of two inputs to it:\n++// {ABCDEHIJ, ZFG} to test InsertPartOf\n++// {ABCDE00HIJ, ZFG} to test CopyPartOf\n+ //\n+ #include <assert.h>\n+ #include <cstddef>\n+@@ -16,6 +16,17 @@\n+ #include <iostream>\n+ #include <ostream>\n+\n++#ifndef INPUT_A\n++#define INPUT_A \"ABCDE00000\"\n++#endif\n++\n++#ifndef INPUT_B\n++#define INPUT_B \"ZZZZZFGHIJ\"\n++#endif\n++\n++const char *InputA = INPUT_A;\n++const char *InputB = INPUT_B;\n++\n+ static volatile int Sink;\n+ static volatile int *NullPtr;\n+\n+@@ -42,13 +53,11 @@ static const uint32_t ExpectedHash = 0xe1677acb;\n+\n+ extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n+ // fprintf(stderr, \"ExpectedHash: %x\\n\", ExpectedHash);\n+- if (Size != 10) return 0;\n+- if (*Data == 'A')\n++ if (Size == 10 && ExpectedHash == simple_hash(Data, Size))\n++ *NullPtr = 0;\n++ if (*Data == InputA[0])\n+ Sink++;\n+- if (*Data == 'Z')\n++ if (*Data == InputB[0])\n+ Sink--;\n+- if (ExpectedHash == simple_hash(Data, Size))\n+- *NullPtr = 0;\n+ return 0;\n+ }\n+-\n+diff --git a/compiler-rt/test/fuzzer/cross_over.test b/compiler-rt/test/fuzzer/cross_over.test\n+index 058b5eb2c85..64e06e8cd36 100644\n+--- a/compiler-rt/test/fuzzer/cross_over.test\n++++ b/compiler-rt/test/fuzzer/cross_over.test\n+@@ -12,7 +12,7 @@ RUN: echo -n ABCDE00000 > %t-corpus/A\n+ RUN: echo -n ZZZZZFGHIJ > %t-corpus/B\n+\n+\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 %t-corpus\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus\n+\n+ # Test the same thing but using -seed_inputs instead of passing the corpus dir.\n+-RUN: not %run %t-CrossOverTest -max_len=10 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n++RUN: not %run %t-CrossOverTest -max_len=10 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B\n+diff --git a/compiler-rt/test/fuzzer/cross_over_copy.test b/compiler-rt/test/fuzzer/cross_over_copy.test\n+new file mode 100644\n+index 00000000000..f8f45c974e2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_copy.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver CopyPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE00HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDE00HIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDE00HIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 
-runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n+diff --git a/compiler-rt/test/fuzzer/cross_over_insert.test b/compiler-rt/test/fuzzer/cross_over_insert.test\n+new file mode 100644\n+index 00000000000..5ad2ff0a633\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/cross_over_insert.test\n+@@ -0,0 +1,20 @@\n++# Tests CrossOver InsertPartOf.\n++# We want to make sure that the test can find the input\n++# ABCDEFGHIJ when given two other inputs in the seed corpus:\n++# ABCDE HIJ and\n++# (Z) FG\n++#\n++RUN: %cpp_compiler -DINPUT_A='\"ABCDEHIJ\"' -DINPUT_B='\"ZFG\"' %S/CrossOverTest.cpp -o %t-CrossOverTest\n++\n++RUN: rm -rf %t-corpus\n++RUN: mkdir %t-corpus\n++RUN: echo -n ABCDEHIJ > %t-corpus/A\n++RUN: echo -n ZFG > %t-corpus/B\n++\n++\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 %t-corpus 2>&1 | FileCheck %s\n++\n++# Test the same thing but using -seed_inputs instead of passing the corpus dir.\n++RUN: not %run %t-CrossOverTest -mutate_depth=1 -max_len=1024 -reduce_inputs=0 -seed=1 -runs=10000000 -seed_inputs=%t-corpus/A,%t-corpus/B 2>&1 | FileCheck %s\n++\n++CHECK: MS: 1 CrossOver-\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "fuzzers:\n- libfuzzer_fixcrossover\n- entropic_fixcrossover\n+ - libfuzzer_keepseed\n+ - entropic_keepseed\n- experiment: 2020-08-13\nfuzzers:\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Rebase libfuzzer/entropic's keepseed variants on fixcrossover variants (#669) |
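The patch in this record changes which buffer InsertPartOf/CopyPartOf write into: the first input buffer instead of the scratch buffer. A small Python model of the intended semantics (not the C++ implementation; names only echo the patch, and the randomness is simplified), where both helpers mutate the first buffer in place:

```python
# Python model of the patched CrossOver cases: parts of the second
# buffer (src) are inserted into / copied over the *first* buffer
# (dst), which is mutated in place, matching the fixed behavior.
import random


def copy_part_of(src: bytes, dst: bytearray) -> int:
    # Overwrite a random slice of dst with a random slice of src.
    if not src or not dst:
        return 0
    n = random.randint(1, min(len(src), len(dst)))
    from_pos = random.randint(0, len(src) - n)
    to_pos = random.randint(0, len(dst) - n)
    dst[to_pos:to_pos + n] = src[from_pos:from_pos + n]
    return len(dst)


def insert_part_of(src: bytes, dst: bytearray, max_size: int) -> int:
    # Insert a random slice of src into dst without exceeding max_size.
    if not src or len(dst) >= max_size:
        return 0
    n = random.randint(1, min(len(src), max_size - len(dst)))
    from_pos = random.randint(0, len(src) - n)
    to_pos = random.randint(0, len(dst))
    dst[to_pos:to_pos] = src[from_pos:from_pos + n]
    return len(dst)
```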
258,396 | 15.08.2020 10:31:09 | 25,200 | e4004fa181446ae030212030418d0866d4256628 | Modify the patch for the libfuzzer/entropic keepseed variants such that seed inputs are not replaced even if -reduce_inputs=1 | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/entropic_keepseed/fuzzer.py",
"new_path": "fuzzers/entropic_keepseed/fuzzer.py",
"diff": "@@ -27,4 +27,7 @@ def fuzz(input_corpus, output_corpus, target_binary):\nlibfuzzer_fuzzer.run_fuzzer(input_corpus,\noutput_corpus,\ntarget_binary,\n- extra_flags=['-entropic=1', '-keep_seed=1'])\n+ extra_flags=[\n+ '-entropic=1', '-keep_seed=1',\n+ '-cross_over_uniformdist=1'\n+ ])\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/entropic_keepseed/patch.diff",
"new_path": "fuzzers/entropic_keepseed/patch.diff",
"diff": "-commit aac9771fa16e3fc00725d4bbd662d71186a09532\n+commit 72e16fc7160185305eee536c257a478ae84f7082\nAuthor: Dokyung Song <[email protected]>\nDate: Fri Jul 31 00:07:20 2020 +0000\n[libFuzzer] Optionally keep initial seed inputs regardless of whether they discover new features or not.\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n-index 54d1e09ec6d..80398a9d7ce 100644\n+index 54d1e09ec6d..5c687013c59 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n@@ -33,6 +33,7 @@ struct InputInfo {\n@@ -16,7 +16,7 @@ index 54d1e09ec6d..80398a9d7ce 100644\nbool MayDeleteFile = false;\nbool Reduced = false;\nbool HasFocusFunction = false;\n-@@ -131,9 +132,11 @@ class InputCorpus {\n+@@ -131,9 +132,12 @@ class InputCorpus {\nEntropicOptions Entropic;\n@@ -25,12 +25,13 @@ index 54d1e09ec6d..80398a9d7ce 100644\npublic:\n- InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic)\n- : Entropic(Entropic), OutputCorpus(OutputCorpus) {\n-+ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic, bool KeepSeed)\n++ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic,\n++ bool KeepSeed)\n+ : Entropic(Entropic), OutputCorpus(OutputCorpus), KeepSeed(KeepSeed) {\nmemset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));\nmemset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));\n}\n-@@ -177,7 +180,7 @@ public:\n+@@ -177,7 +181,7 @@ public:\nbool empty() const { return Inputs.empty(); }\nconst Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\nInputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n@@ -39,7 +40,7 @@ index 54d1e09ec6d..80398a9d7ce 100644\nconst Vector<uint32_t> &FeatureSet,\nconst DataFlowTrace &DFT, const InputInfo *BaseII) {\nassert(!U.empty());\n-@@ -187,6 +190,7 @@ public:\n+@@ -187,6 +191,7 @@ public:\nInputInfo &II = *Inputs.back();\nII.U = U;\nII.NumFeatures = NumFeatures;\n@@ -47,26 +48,20 @@ index 54d1e09ec6d..80398a9d7ce 100644\nII.MayDeleteFile = MayDeleteFile;\nII.UniqFeatureSet = FeatureSet;\nII.HasFocusFunction = HasFocusFunction;\n-@@ -471,7 +475,7 @@ private:\n-\n- for (size_t i = 0; i < N; i++) {\n-\n-- if (Inputs[i]->NumFeatures == 0) {\n-+ if (Inputs[i]->NumFeatures == 0 && !(Inputs[i]->SeedInput && KeepSeed)) {\n- // If the seed doesn't represent any features, assign zero energy.\n- Weights[i] = 0.;\n- } else if (Inputs[i]->NumExecutedMutations / kMaxMutationFactor >\n-@@ -491,7 +495,7 @@ private:\n-\n- if (VanillaSchedule) {\n- for (size_t i = 0; i < N; i++)\n-- Weights[i] = Inputs[i]->NumFeatures\n-+ Weights[i] = (Inputs[i]->NumFeatures || (KeepSeed && Inputs[i]->SeedInput))\n- ? (i + 1) * (Inputs[i]->HasFocusFunction ? 
1000 : 1)\n- : 0.;\n+@@ -276,6 +281,11 @@ public:\n+ return Idx;\n}\n+\n++ InputInfo &ChooseUnitToCrossOverWith(Random &Rand) {\n++ InputInfo &II = *Inputs[Rand(Inputs.size())];\n++ return II;\n++ }\n++\n+ void PrintStats() {\n+ for (size_t i = 0; i < Inputs.size(); i++) {\n+ const auto &II = *Inputs[i];\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n-index 00a33a413d2..ef7991c1e27 100644\n+index 8339697396c..0933af56804 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n@@ -649,6 +649,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n@@ -77,33 +72,49 @@ index 00a33a413d2..ef7991c1e27 100644\nOptions.UnitTimeoutSec = Flags.timeout;\nOptions.ErrorExitCode = Flags.error_exitcode;\nOptions.TimeoutExitCode = Flags.timeout_exitcode;\n-@@ -753,7 +754,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+@@ -657,6 +658,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Options.IgnoreCrashes = Flags.ignore_crashes;\n+ Options.MaxTotalTimeSec = Flags.max_total_time;\n+ Options.DoCrossOver = Flags.cross_over;\n++ Options.CrossOverUniformDist = Flags.cross_over_uniformdist;\n++ Options.TraceSeedInput = Flags.trace_seed_input;\n+ Options.MutateDepth = Flags.mutate_depth;\n+ Options.ReduceDepth = Flags.reduce_depth;\n+ Options.UseCounters = Flags.use_counters;\n+@@ -753,7 +756,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\nRandom Rand(Seed);\nauto *MD = new MutationDispatcher(Rand, Options);\n- auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic);\n-+ auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\n++ auto *Corpus =\n++ new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\nauto *F = new Fuzzer(Callback, *Corpus, *MD, Options);\nfor (auto &U: Dictionary)\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n-index 832224a705d..0dac7e705a3 100644\n+index 832224a705d..10b1f5f539a 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n-@@ -23,6 +23,8 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\n+@@ -23,7 +23,14 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\nFUZZER_FLAG_STRING(seed_inputs, \"A comma-separated list of input files \"\n\"to use as an additional seed corpus. Alternatively, an \\\"@\\\" followed by \"\n\"the name of a file containing the comma-separated list.\")\n-+FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep seed inputs for mutation even if \"\n-+ \"they do not produce new coverage.\")\n++FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep all seed inputs in the corpus even if \"\n++ \"they do not produce new coverage. This also invalidates -reduce_inputs for \"\n++ \"seed input mutations.\")\nFUZZER_FLAG_INT(cross_over, 1, \"If 1, cross over inputs.\")\n++FUZZER_FLAG_INT(cross_over_uniformdist, 0, \"Experimental. If 1, use a uniform \"\n++ \"probability distribution when choosing inputs to cross over with.\")\n++FUZZER_FLAG_INT(trace_seed_input, 0, \"Internal. Print all seed inputs picked \"\n++ \"up by the fuzzer.\")\nFUZZER_FLAG_INT(mutate_depth, 5,\n\"Apply this number of consecutive mutations to each input.\")\n+ FUZZER_FLAG_INT(reduce_depth, 0, \"Experimental/internal. 
\"\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n-index d9e6b79443e..38fb82fc12d 100644\n+index d9e6b79443e..97e91cbe869 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n-@@ -309,11 +309,17 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\n+@@ -309,11 +309,20 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\nelse\nEnv.MainCorpusDir = CorpusDirs[0];\n@@ -115,13 +126,16 @@ index d9e6b79443e..38fb82fc12d 100644\n+ if (Options.KeepSeed) {\n+ for (auto &File : SeedFiles)\n+ Env.Files.push_back(File.File);\n-+ }\n-+ else {\n++ } else {\n+ auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n+ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n-+ {}, &Env.Cov,\n-+ CFPath, false);\n++ {}, &Env.Cov, CFPath, false);\n+ RemoveFile(CFPath);\n++ }\n++ if (Options.TraceSeedInput) {\n++ for (auto &File : Env.Files) {\n++ Printf(\"INFO: seed - %s\\n\", File.c_str());\n++ }\n+ }\nPrintf(\"INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\\n\", NumJobs,\nEnv.Files.size(), Env.TempDir.c_str());\n@@ -140,23 +154,53 @@ index 31096ce804b..e75807209f5 100644\nsize_t NumberOfLeakDetectionAttempts = 0;\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-index 02db6d27b0a..a9af25a3070 100644\n+index 02db6d27b0a..28d5f32c0d6 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-@@ -487,10 +487,11 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+@@ -478,7 +478,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ UniqFeatureSetTmp.push_back(Feature);\n+ if (Options.Entropic)\n+ Corpus.UpdateFeatureFrequency(II, Feature);\n+- if (Options.ReduceInputs && II)\n++ if (Options.ReduceInputs && II && !(Options.KeepSeed && II->SeedInput))\n+ if (std::binary_search(II->UniqFeatureSet.begin(),\n+ II->UniqFeatureSet.end(), Feature))\n+ FoundUniqFeaturesOfII++;\n+@@ -487,11 +487,12 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n*FoundUniqFeatures = FoundUniqFeaturesOfII;\nPrintPulseAndReportSlowInput(Data, Size);\nsize_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;\n- if (NumNewFeatures) {\n+ if (NumNewFeatures || (Options.KeepSeed && IsExecutingSeedCorpora)) {\nTPC.UpdateObservedPCs();\n- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n- MayDeleteFile, TPC.ObservedFocusFunction(),\n-+ IsExecutingSeedCorpora,\n- UniqFeatureSetTmp, DFT, II);\n+- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+- MayDeleteFile, TPC.ObservedFocusFunction(),\n+- UniqFeatureSetTmp, DFT, II);\n++ auto NewII =\n++ Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,\n++ TPC.ObservedFocusFunction(),\n++ IsExecutingSeedCorpora, UniqFeatureSetTmp, DFT, II);\nWriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\nNewII->UniqFeatureSet);\n-@@ -764,6 +765,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ return true;\n+@@ -664,8 +665,14 @@ void Fuzzer::MutateAndTestOne() {\n+ MD.StartMutationSequence();\n+\n+ auto &II = Corpus.ChooseUnitToMutate(MD.GetRand());\n+- if (Options.DoCrossOver)\n+- MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);\n++ if (Options.DoCrossOver) {\n++ if (Options.CrossOverUniformDist) {\n++ 
MD.SetCrossOverWith(&Corpus.ChooseUnitToCrossOverWith(MD.GetRand()).U);\n++ }\n++ else {\n++ MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);\n++ }\n++ }\n+ const auto &U = II.U;\n+ memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));\n+ assert(CurrentUnitData);\n+@@ -764,6 +771,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nassert(CorporaFiles.front().Size <= CorporaFiles.back().Size);\n}\n@@ -165,7 +209,7 @@ index 02db6d27b0a..a9af25a3070 100644\n// Load and execute inputs one by one.\nfor (auto &SF : CorporaFiles) {\nauto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);\n-@@ -773,6 +776,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -773,6 +782,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nTryDetectingAMemoryLeak(U.data(), U.size(),\n/*DuringInitialCorpusExecution*/ true);\n}\n@@ -174,7 +218,7 @@ index 02db6d27b0a..a9af25a3070 100644\n}\nPrintStats(\"INITED\");\n-@@ -785,6 +790,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -785,6 +796,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nCorpus.NumInputsThatTouchFocusFunction());\n}\n@@ -184,7 +228,7 @@ index 02db6d27b0a..a9af25a3070 100644\nPrintf(\"ERROR: no interesting inputs were found. \"\n\"Is the code instrumented for coverage? Exiting.\\n\");\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n-index 9d975bd61fe..ccd0b3dcb56 100644\n+index 9d975bd61fe..bb2c14be47b 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n@@ -18,6 +18,7 @@ struct FuzzingOptions {\n@@ -195,6 +239,15 @@ index 9d975bd61fe..ccd0b3dcb56 100644\nint UnitTimeoutSec = 300;\nint TimeoutExitCode = 70;\nint OOMExitCode = 71;\n+@@ -30,6 +31,8 @@ struct FuzzingOptions {\n+ int RssLimitMb = 0;\n+ int MallocLimitMb = 0;\n+ bool DoCrossOver = true;\n++ bool CrossOverUniformDist = false;\n++ bool TraceSeedInput = false;\n+ int MutateDepth = 5;\n+ bool ReduceDepth = false;\n+ bool UseCounters = false;\ndiff --git a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\nindex 0e9435ab8fc..dfc642ab6d0 100644\n--- a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer_keepseed/fuzzer.py",
"new_path": "fuzzers/libfuzzer_keepseed/fuzzer.py",
"diff": "@@ -23,7 +23,8 @@ def build():\ndef fuzz(input_corpus, output_corpus, target_binary):\n\"\"\"Run fuzzer.\"\"\"\n- libfuzzer_fuzzer.run_fuzzer(input_corpus,\n+ libfuzzer_fuzzer.run_fuzzer(\n+ input_corpus,\noutput_corpus,\ntarget_binary,\n- extra_flags=['-keep_seed=1'])\n+ extra_flags=['-keep_seed=1', '-cross_over_uniformdist=1'])\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/libfuzzer_keepseed/patch.diff",
"new_path": "fuzzers/libfuzzer_keepseed/patch.diff",
"diff": "-commit aac9771fa16e3fc00725d4bbd662d71186a09532\n+commit 72e16fc7160185305eee536c257a478ae84f7082\nAuthor: Dokyung Song <[email protected]>\nDate: Fri Jul 31 00:07:20 2020 +0000\n[libFuzzer] Optionally keep initial seed inputs regardless of whether they discover new features or not.\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n-index 54d1e09ec6d..80398a9d7ce 100644\n+index 54d1e09ec6d..5c687013c59 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n@@ -33,6 +33,7 @@ struct InputInfo {\n@@ -16,7 +16,7 @@ index 54d1e09ec6d..80398a9d7ce 100644\nbool MayDeleteFile = false;\nbool Reduced = false;\nbool HasFocusFunction = false;\n-@@ -131,9 +132,11 @@ class InputCorpus {\n+@@ -131,9 +132,12 @@ class InputCorpus {\nEntropicOptions Entropic;\n@@ -25,12 +25,13 @@ index 54d1e09ec6d..80398a9d7ce 100644\npublic:\n- InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic)\n- : Entropic(Entropic), OutputCorpus(OutputCorpus) {\n-+ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic, bool KeepSeed)\n++ InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic,\n++ bool KeepSeed)\n+ : Entropic(Entropic), OutputCorpus(OutputCorpus), KeepSeed(KeepSeed) {\nmemset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));\nmemset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));\n}\n-@@ -177,7 +180,7 @@ public:\n+@@ -177,7 +181,7 @@ public:\nbool empty() const { return Inputs.empty(); }\nconst Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\nInputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n@@ -39,7 +40,7 @@ index 54d1e09ec6d..80398a9d7ce 100644\nconst Vector<uint32_t> &FeatureSet,\nconst DataFlowTrace &DFT, const InputInfo *BaseII) {\nassert(!U.empty());\n-@@ -187,6 +190,7 @@ public:\n+@@ -187,6 +191,7 @@ public:\nInputInfo &II = *Inputs.back();\nII.U = U;\nII.NumFeatures = NumFeatures;\n@@ -47,26 +48,20 @@ index 54d1e09ec6d..80398a9d7ce 100644\nII.MayDeleteFile = MayDeleteFile;\nII.UniqFeatureSet = FeatureSet;\nII.HasFocusFunction = HasFocusFunction;\n-@@ -471,7 +475,7 @@ private:\n-\n- for (size_t i = 0; i < N; i++) {\n-\n-- if (Inputs[i]->NumFeatures == 0) {\n-+ if (Inputs[i]->NumFeatures == 0 && !(Inputs[i]->SeedInput && KeepSeed)) {\n- // If the seed doesn't represent any features, assign zero energy.\n- Weights[i] = 0.;\n- } else if (Inputs[i]->NumExecutedMutations / kMaxMutationFactor >\n-@@ -491,7 +495,7 @@ private:\n-\n- if (VanillaSchedule) {\n- for (size_t i = 0; i < N; i++)\n-- Weights[i] = Inputs[i]->NumFeatures\n-+ Weights[i] = (Inputs[i]->NumFeatures || (KeepSeed && Inputs[i]->SeedInput))\n- ? (i + 1) * (Inputs[i]->HasFocusFunction ? 
1000 : 1)\n- : 0.;\n+@@ -276,6 +281,11 @@ public:\n+ return Idx;\n}\n+\n++ InputInfo &ChooseUnitToCrossOverWith(Random &Rand) {\n++ InputInfo &II = *Inputs[Rand(Inputs.size())];\n++ return II;\n++ }\n++\n+ void PrintStats() {\n+ for (size_t i = 0; i < Inputs.size(); i++) {\n+ const auto &II = *Inputs[i];\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n-index 00a33a413d2..ef7991c1e27 100644\n+index 8339697396c..0933af56804 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n@@ -649,6 +649,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n@@ -77,33 +72,49 @@ index 00a33a413d2..ef7991c1e27 100644\nOptions.UnitTimeoutSec = Flags.timeout;\nOptions.ErrorExitCode = Flags.error_exitcode;\nOptions.TimeoutExitCode = Flags.timeout_exitcode;\n-@@ -753,7 +754,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+@@ -657,6 +658,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Options.IgnoreCrashes = Flags.ignore_crashes;\n+ Options.MaxTotalTimeSec = Flags.max_total_time;\n+ Options.DoCrossOver = Flags.cross_over;\n++ Options.CrossOverUniformDist = Flags.cross_over_uniformdist;\n++ Options.TraceSeedInput = Flags.trace_seed_input;\n+ Options.MutateDepth = Flags.mutate_depth;\n+ Options.ReduceDepth = Flags.reduce_depth;\n+ Options.UseCounters = Flags.use_counters;\n+@@ -753,7 +756,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\nRandom Rand(Seed);\nauto *MD = new MutationDispatcher(Rand, Options);\n- auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic);\n-+ auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\n++ auto *Corpus =\n++ new InputCorpus(Options.OutputCorpus, Entropic, Options.KeepSeed);\nauto *F = new Fuzzer(Callback, *Corpus, *MD, Options);\nfor (auto &U: Dictionary)\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n-index 832224a705d..0dac7e705a3 100644\n+index 832224a705d..10b1f5f539a 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n-@@ -23,6 +23,8 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\n+@@ -23,7 +23,14 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\nFUZZER_FLAG_STRING(seed_inputs, \"A comma-separated list of input files \"\n\"to use as an additional seed corpus. Alternatively, an \\\"@\\\" followed by \"\n\"the name of a file containing the comma-separated list.\")\n-+FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep seed inputs for mutation even if \"\n-+ \"they do not produce new coverage.\")\n++FUZZER_FLAG_INT(keep_seed, 0, \"If 1, keep all seed inputs in the corpus even if \"\n++ \"they do not produce new coverage. This also invalidates -reduce_inputs for \"\n++ \"seed input mutations.\")\nFUZZER_FLAG_INT(cross_over, 1, \"If 1, cross over inputs.\")\n++FUZZER_FLAG_INT(cross_over_uniformdist, 0, \"Experimental. If 1, use a uniform \"\n++ \"probability distribution when choosing inputs to cross over with.\")\n++FUZZER_FLAG_INT(trace_seed_input, 0, \"Internal. Print all seed inputs picked \"\n++ \"up by the fuzzer.\")\nFUZZER_FLAG_INT(mutate_depth, 5,\n\"Apply this number of consecutive mutations to each input.\")\n+ FUZZER_FLAG_INT(reduce_depth, 0, \"Experimental/internal. 
\"\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n-index d9e6b79443e..38fb82fc12d 100644\n+index d9e6b79443e..97e91cbe869 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n-@@ -309,11 +309,17 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\n+@@ -309,11 +309,20 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,\nelse\nEnv.MainCorpusDir = CorpusDirs[0];\n@@ -115,13 +126,16 @@ index d9e6b79443e..38fb82fc12d 100644\n+ if (Options.KeepSeed) {\n+ for (auto &File : SeedFiles)\n+ Env.Files.push_back(File.File);\n-+ }\n-+ else {\n++ } else {\n+ auto CFPath = DirPlusFile(Env.TempDir, \"merge.txt\");\n+ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,\n-+ {}, &Env.Cov,\n-+ CFPath, false);\n++ {}, &Env.Cov, CFPath, false);\n+ RemoveFile(CFPath);\n++ }\n++ if (Options.TraceSeedInput) {\n++ for (auto &File : Env.Files) {\n++ Printf(\"INFO: seed - %s\\n\", File.c_str());\n++ }\n+ }\nPrintf(\"INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\\n\", NumJobs,\nEnv.Files.size(), Env.TempDir.c_str());\n@@ -140,23 +154,53 @@ index 31096ce804b..e75807209f5 100644\nsize_t NumberOfLeakDetectionAttempts = 0;\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-index 02db6d27b0a..a9af25a3070 100644\n+index 02db6d27b0a..28d5f32c0d6 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-@@ -487,10 +487,11 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+@@ -478,7 +478,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ UniqFeatureSetTmp.push_back(Feature);\n+ if (Options.Entropic)\n+ Corpus.UpdateFeatureFrequency(II, Feature);\n+- if (Options.ReduceInputs && II)\n++ if (Options.ReduceInputs && II && !(Options.KeepSeed && II->SeedInput))\n+ if (std::binary_search(II->UniqFeatureSet.begin(),\n+ II->UniqFeatureSet.end(), Feature))\n+ FoundUniqFeaturesOfII++;\n+@@ -487,11 +487,12 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n*FoundUniqFeatures = FoundUniqFeaturesOfII;\nPrintPulseAndReportSlowInput(Data, Size);\nsize_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;\n- if (NumNewFeatures) {\n+ if (NumNewFeatures || (Options.KeepSeed && IsExecutingSeedCorpora)) {\nTPC.UpdateObservedPCs();\n- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n- MayDeleteFile, TPC.ObservedFocusFunction(),\n-+ IsExecutingSeedCorpora,\n- UniqFeatureSetTmp, DFT, II);\n+- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+- MayDeleteFile, TPC.ObservedFocusFunction(),\n+- UniqFeatureSetTmp, DFT, II);\n++ auto NewII =\n++ Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,\n++ TPC.ObservedFocusFunction(),\n++ IsExecutingSeedCorpora, UniqFeatureSetTmp, DFT, II);\nWriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\nNewII->UniqFeatureSet);\n-@@ -764,6 +765,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+ return true;\n+@@ -664,8 +665,14 @@ void Fuzzer::MutateAndTestOne() {\n+ MD.StartMutationSequence();\n+\n+ auto &II = Corpus.ChooseUnitToMutate(MD.GetRand());\n+- if (Options.DoCrossOver)\n+- MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);\n++ if (Options.DoCrossOver) {\n++ if (Options.CrossOverUniformDist) {\n++ 
MD.SetCrossOverWith(&Corpus.ChooseUnitToCrossOverWith(MD.GetRand()).U);\n++ }\n++ else {\n++ MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);\n++ }\n++ }\n+ const auto &U = II.U;\n+ memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));\n+ assert(CurrentUnitData);\n+@@ -764,6 +771,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nassert(CorporaFiles.front().Size <= CorporaFiles.back().Size);\n}\n@@ -165,7 +209,7 @@ index 02db6d27b0a..a9af25a3070 100644\n// Load and execute inputs one by one.\nfor (auto &SF : CorporaFiles) {\nauto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);\n-@@ -773,6 +776,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -773,6 +782,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nTryDetectingAMemoryLeak(U.data(), U.size(),\n/*DuringInitialCorpusExecution*/ true);\n}\n@@ -174,7 +218,7 @@ index 02db6d27b0a..a9af25a3070 100644\n}\nPrintStats(\"INITED\");\n-@@ -785,6 +790,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -785,6 +796,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nCorpus.NumInputsThatTouchFocusFunction());\n}\n@@ -184,7 +228,7 @@ index 02db6d27b0a..a9af25a3070 100644\nPrintf(\"ERROR: no interesting inputs were found. \"\n\"Is the code instrumented for coverage? Exiting.\\n\");\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n-index 9d975bd61fe..ccd0b3dcb56 100644\n+index 9d975bd61fe..bb2c14be47b 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n@@ -18,6 +18,7 @@ struct FuzzingOptions {\n@@ -195,6 +239,15 @@ index 9d975bd61fe..ccd0b3dcb56 100644\nint UnitTimeoutSec = 300;\nint TimeoutExitCode = 70;\nint OOMExitCode = 71;\n+@@ -30,6 +31,8 @@ struct FuzzingOptions {\n+ int RssLimitMb = 0;\n+ int MallocLimitMb = 0;\n+ bool DoCrossOver = true;\n++ bool CrossOverUniformDist = false;\n++ bool TraceSeedInput = false;\n+ int MutateDepth = 5;\n+ bool ReduceDepth = false;\n+ bool UseCounters = false;\ndiff --git a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\nindex 0e9435ab8fc..dfc642ab6d0 100644\n--- a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-15\n+ fuzzers:\n+ - libfuzzer_fixcrossover\n+ - entropic_fixcrossover\n+ - libfuzzer_keepseed\n+ - entropic_keepseed\n+\n- experiment: 2020-08-14\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Modify the patch for the libfuzzer/entropic keepseed variants so that seed inputs are not replaced even if -reduce_inputs=1 (#678)
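The patch above adds two libFuzzer flags: `-keep_seed` keeps every seed input in the corpus even when it yields no new features, and `-cross_over_uniformdist` picks the cross-over partner uniformly at random instead of through the energy-weighted schedule. Below is a minimal sketch of that behavioral difference in plain Python; the corpus, weights, and function name are invented for illustration and are not part of libFuzzer or this repository.

```python
import random

def choose_crossover_input(corpus, weights, uniform_dist):
    """Pick the unit to cross over with.

    uniform_dist=True mirrors the new ChooseUnitToCrossOverWith (every
    input is equally likely); False mirrors the default path through
    ChooseUnitToMutate, which favors inputs with higher scheduling weight.
    """
    if uniform_dist:
        return random.choice(corpus)  # uniform over the whole corpus
    return random.choices(corpus, weights=weights, k=1)[0]  # weighted pick

# With -keep_seed=1, seeds stay in the corpus (and stay eligible for
# cross-over) even if they contributed no new features.
corpus = ['seed_a', 'seed_b', 'mutant_1']
weights = [0.0, 0.0, 1.0]  # seeds without new features get zero energy
print(choose_crossover_input(corpus, weights, uniform_dist=True))
```

Under the weighted scheme the zero-energy seeds would never be chosen as cross-over partners; the uniform scheme gives each of the three inputs an equal chance, which is the behavior the `libfuzzer_keepseed` variant enables with `extra_flags=['-keep_seed=1', '-cross_over_uniformdist=1']`.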
258,371 | 15.08.2020 13:38:32 | 14,400 | d27bae7382e3863d9798b8342ad87bcfd4a119d7 | Differential graphs on report | [
{
"change_type": "MODIFY",
"old_path": "analysis/benchmark_results.py",
"new_path": "analysis/benchmark_results.py",
"diff": "@@ -17,10 +17,12 @@ import os\nimport functools\nfrom analysis import data_utils\n+from analysis import coverage_data_utils\nfrom analysis import stat_tests\n-class BenchmarkResults: # pylint: disable=too-many-public-methods\n+# pylint: disable=too-many-public-methods, too-many-arguments\n+class BenchmarkResults:\n\"\"\"Represents results of various analysis done on benchmark data.\nNOTE: Do not create this class manually! Instead, use the |benchmarks|\n@@ -31,11 +33,12 @@ class BenchmarkResults: # pylint: disable=too-many-public-methods\ntemplate, properties are computed on demand and only once.\n\"\"\"\n- def __init__(self, benchmark_name, experiment_df, output_directory,\n- plotter):\n+ def __init__(self, benchmark_name, experiment_df, coverage_dict,\n+ output_directory, plotter):\nself.name = benchmark_name\nself._experiment_df = experiment_df\n+ self._coverage_dict = coverage_dict\nself._output_directory = output_directory\nself._plotter = plotter\n@@ -45,6 +48,19 @@ class BenchmarkResults: # pylint: disable=too-many-public-methods\ndef _get_full_path(self, filename):\nreturn os.path.join(self._output_directory, filename)\n+ def _get_experiment_filestore_path(self, fuzzer_name):\n+ return coverage_data_utils.get_fuzzer_filestore_path(\n+ self._benchmark_df, fuzzer_name)\n+\n+ def get_filestore_name(self, fuzzer_name):\n+ \"\"\"Returns the filestore name of the |fuzzer_name|.\"\"\"\n+ filestore_path = self._get_experiment_filestore_path(fuzzer_name)\n+ gcs_prefix = 'gs://'\n+ gcs_http_prefix = 'https://storage.googleapis.com/'\n+ if filestore_path.startswith(gcs_prefix):\n+ filestore_path = filestore_path.replace(gcs_prefix, gcs_http_prefix)\n+ return filestore_path\n+\n@property\[email protected]_cache()\n# TODO(lszekeres): With python3.8+, replace above two decorators with:\n@@ -53,6 +69,12 @@ class BenchmarkResults: # pylint: disable=too-many-public-methods\nexp_df = self._experiment_df\nreturn exp_df[exp_df.benchmark == self.name]\n+ @property\n+ @functools.lru_cache()\n+ def fuzzer_names(self):\n+ \"\"\"Names of all fuzzers.\"\"\"\n+ return self._benchmark_df.fuzzer.unique()\n+\n@property\[email protected]_cache()\ndef _benchmark_snapshot_df(self):\n@@ -60,9 +82,31 @@ class BenchmarkResults: # pylint: disable=too-many-public-methods\n@property\[email protected]_cache()\n- def fuzzers(self):\n- \"\"\"Fuzzers with valid trials on this benchmark.\"\"\"\n- return self._benchmark_df.fuzzer.unique()\n+ def _benchmark_coverage_dict(self):\n+ \"\"\"Covered regions of each fuzzer on this benchmark.\"\"\"\n+ return coverage_data_utils.get_benchmark_cov_dict(\n+ self._coverage_dict, self.name)\n+\n+ @property\n+ @functools.lru_cache()\n+ def _benchmark_aggregated_coverage_df(self):\n+ \"\"\"Aggregated covered regions of each fuzzer on this benchmark.\"\"\"\n+ return coverage_data_utils.get_benchmark_aggregated_cov_df(\n+ self._benchmark_coverage_dict)\n+\n+ @property\n+ @functools.lru_cache()\n+ def _unique_region_dict(self):\n+ \"\"\"Unique regions with the fuzzers that cover it.\"\"\"\n+ return coverage_data_utils.get_unique_region_dict(\n+ self._benchmark_coverage_dict)\n+\n+ @property\n+ @functools.lru_cache()\n+ def _unique_region_cov_df(self):\n+ \"\"\"Fuzzers with the number of covered unique regions.\"\"\"\n+ return coverage_data_utils.get_unique_region_cov_df(\n+ self._unique_region_dict, self.fuzzer_names)\n@property\ndef fuzzers_with_not_enough_samples(self):\n@@ -236,3 +280,30 @@ class BenchmarkResults: # pylint: 
disable=too-many-public-methods\nself._plotter.write_better_than_plot(better_than_table,\nself._get_full_path(plot_filename))\nreturn plot_filename\n+\n+ @property\n+ def unique_coverage_ranking_plot(self):\n+ \"\"\"Ranking plot for unique coverage.\"\"\"\n+ plot_filename = self._prefix_with_benchmark('ranking_unique_region.svg')\n+ unique_region_cov_df_combined = self._unique_region_cov_df.merge(\n+ self._benchmark_aggregated_coverage_df, on='fuzzer')\n+ self._plotter.write_unique_coverage_ranking_plot(\n+ unique_region_cov_df_combined, self._get_full_path(plot_filename))\n+ return plot_filename\n+\n+ @property\n+ @functools.lru_cache()\n+ def pairwise_unique_coverage_table(self):\n+ \"\"\"Pairwise unique coverage table for each pair of fuzzers.\"\"\"\n+ return coverage_data_utils.get_pairwise_unique_coverage_table(\n+ self._benchmark_coverage_dict)\n+\n+ @property\n+ def pairwise_unique_coverage_plot(self):\n+ \"\"\"Pairwise unique coverage plot for each pair of fuzzers.\"\"\"\n+ plot_filename = self._prefix_with_benchmark(\n+ 'pairwise_unique_coverage_plot.svg')\n+ self._plotter.write_pairwise_unique_coverage_heatmap_plot(\n+ self.pairwise_unique_coverage_table,\n+ self._get_full_path(plot_filename))\n+ return plot_filename\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "analysis/coverage_data_utils.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Utility functions for coverage data calculation.\"\"\"\n+\n+import collections\n+import json\n+import os\n+import posixpath\n+import tempfile\n+import pandas as pd\n+\n+from common import filestore_utils\n+\n+\n+def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n+ \"\"\"Returns the key in coverage dict for a pair of fuzzer-benchmark.\"\"\"\n+ return fuzzer + ' ' + benchmark\n+\n+\n+def get_fuzzer_filestore_path(benchmark_df, fuzzer):\n+ \"\"\"Gets the filestore_path for |fuzzer| in |benchmark_df|.\"\"\"\n+ fuzzer_df = benchmark_df[benchmark_df.fuzzer == fuzzer]\n+ filestore_path = fuzzer_df.experiment_filestore.unique()[0]\n+ exp_name = fuzzer_df.experiment.unique()[0]\n+ return posixpath.join(filestore_path, exp_name)\n+\n+\n+def get_covered_regions_dict(experiment_df):\n+ \"\"\"Combines json files for different fuzzer-benchmark pair\n+ in |experiment_df| and returns a dictionary of the covered regions.\"\"\"\n+ covered_regions_dict = {}\n+ benchmarks = experiment_df.benchmark.unique()\n+ for benchmark in benchmarks:\n+ benchmark_df = experiment_df[experiment_df.benchmark == benchmark]\n+ fuzzers = benchmark_df.fuzzer.unique()\n+ for fuzzer in fuzzers:\n+ fuzzer_covered_regions = get_fuzzer_covered_regions(\n+ benchmark_df, benchmark, fuzzer)\n+ key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n+ covered_regions_dict[key] = fuzzer_covered_regions\n+ return covered_regions_dict\n+\n+\n+def get_fuzzer_covered_regions(benchmark_df, benchmark, fuzzer):\n+ \"\"\"Gets the covered regions for |fuzzer| in |benchmark_df| from the json\n+ file in the bucket.\"\"\"\n+ with tempfile.TemporaryDirectory() as temp_dir:\n+ dst_file = os.path.join(temp_dir, 'tmp.json')\n+ src_filestore_path = get_fuzzer_filestore_path(benchmark_df, fuzzer)\n+ src_file = posixpath.join(src_filestore_path, 'coverage', 'data',\n+ benchmark, fuzzer, 'covered_regions.json')\n+ filestore_utils.cp(src_file, dst_file)\n+ with open(dst_file) as json_file:\n+ return json.load(json_file)\n+\n+\n+def get_unique_region_dict(benchmark_coverage_dict):\n+ \"\"\"Returns a dictionary containing the covering fuzzers for each\n+ unique region, where the |threshold| defines which regions are unique.\"\"\"\n+ region_dict = collections.defaultdict(list)\n+ unique_region_dict = {}\n+ threshold_count = 1\n+ for fuzzer in benchmark_coverage_dict:\n+ for region in benchmark_coverage_dict[fuzzer]:\n+ region_dict[region].append(fuzzer)\n+ for region, fuzzers in region_dict.items():\n+ if len(fuzzers) <= threshold_count:\n+ unique_region_dict[region] = fuzzers\n+ return unique_region_dict\n+\n+\n+def get_unique_region_cov_df(unique_region_dict, fuzzer_names):\n+ \"\"\"Returns a DataFrame where the two columns are fuzzers and the number\n+ of unique regions covered.\"\"\"\n+ fuzzers = collections.defaultdict(int)\n+ for region in unique_region_dict:\n+ for fuzzer in unique_region_dict[region]:\n+ fuzzers[fuzzer] += 1\n+ dict_to_transform 
= {'fuzzer': [], 'unique_regions_covered': []}\n+ for fuzzer in fuzzer_names:\n+ covered_num = fuzzers[fuzzer]\n+ dict_to_transform['fuzzer'].append(fuzzer)\n+ dict_to_transform['unique_regions_covered'].append(covered_num)\n+ return pd.DataFrame(dict_to_transform)\n+\n+\n+def get_benchmark_cov_dict(coverage_dict, benchmark):\n+ \"\"\"Returns a dictionary to store the covered regions of each fuzzer.\n+ Uses a set of tuples to store the covered regions.\"\"\"\n+ benchmark_cov_dict = {}\n+ for key_pair, covered_regions in coverage_dict.items():\n+ current_fuzzer, current_benchmark = key_pair.split()\n+ if current_benchmark == benchmark:\n+ covered_regions_in_set = set()\n+ for region in covered_regions:\n+ covered_regions_in_set.add(tuple(region))\n+ benchmark_cov_dict[current_fuzzer] = covered_regions_in_set\n+ return benchmark_cov_dict\n+\n+\n+def get_benchmark_aggregated_cov_df(benchmark_coverage_dict):\n+ \"\"\"Returns a dataframe where each row represents a fuzzer and its\n+ aggregated coverage number.\"\"\"\n+ dict_to_transform = {'fuzzer': [], 'aggregated_edges_covered': []}\n+ for fuzzer in benchmark_coverage_dict:\n+ aggregated_edges_covered = len(benchmark_coverage_dict[fuzzer])\n+ dict_to_transform['fuzzer'].append(fuzzer)\n+ dict_to_transform['aggregated_edges_covered'].append(\n+ aggregated_edges_covered)\n+ return pd.DataFrame(dict_to_transform)\n+\n+\n+def get_pairwise_unique_coverage_table(benchmark_coverage_dict):\n+ \"\"\"Returns a table that shows the unique coverage between\n+ each pair of fuzzers.\n+\n+ The pairwise unique coverage table is a square matrix where each\n+ row and column represents a fuzzer, and each cell contains a number\n+ showing the regions covered by the fuzzer of the column but not by\n+ the fuzzer of the row.\"\"\"\n+\n+ fuzzers = benchmark_coverage_dict.keys()\n+\n+ pairwise_unique_coverage_values = []\n+ for fuzzer_in_row in fuzzers:\n+ row = []\n+ for fuzzer_in_col in fuzzers:\n+ pairwise_unique_coverage_value = get_unique_covered_percentage(\n+ benchmark_coverage_dict[fuzzer_in_row],\n+ benchmark_coverage_dict[fuzzer_in_col])\n+ row.append(pairwise_unique_coverage_value)\n+ pairwise_unique_coverage_values.append(row)\n+\n+ return pd.DataFrame(pairwise_unique_coverage_values,\n+ index=fuzzers,\n+ columns=fuzzers)\n+\n+\n+def get_unique_covered_percentage(fuzzer_row_covered_regions,\n+ fuzzer_col_covered_regions):\n+ \"\"\"Returns the number of regions covered by the fuzzer of the column\n+ but not by the fuzzer of the row.\"\"\"\n+\n+ unique_region_count = 0\n+ for region in fuzzer_col_covered_regions:\n+ if region not in fuzzer_row_covered_regions:\n+ unique_region_count += 1\n+ return unique_region_count\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/data_utils.py",
"new_path": "analysis/data_utils.py",
"diff": "@@ -38,7 +38,8 @@ def validate_data(experiment_df):\ndef drop_uninteresting_columns(experiment_df):\n\"\"\"Returns table with only interesting columns.\"\"\"\nreturn experiment_df[[\n- 'benchmark', 'fuzzer', 'trial_id', 'time', 'edges_covered'\n+ 'benchmark', 'fuzzer', 'trial_id', 'time', 'edges_covered',\n+ 'experiment', 'experiment_filestore'\n]]\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/experiment_results.py",
"new_path": "analysis/experiment_results.py",
"diff": "@@ -21,7 +21,7 @@ from analysis import data_utils\nfrom analysis import stat_tests\n-class ExperimentResults:\n+class ExperimentResults: # pylint: disable=too-many-instance-attributes\n\"\"\"Provides the main interface for getting various analysis results and\nplots about an experiment, represented by |experiment_df|.\n@@ -31,8 +31,10 @@ class ExperimentResults:\ntemplate, only the properties needed for the given report will be computed.\n\"\"\"\n- def __init__(self,\n+ def __init__( # pylint: disable=too-many-arguments\n+ self,\nexperiment_df,\n+ coverage_dict,\noutput_directory,\nplotter,\nexperiment_name=None):\n@@ -63,6 +65,9 @@ class ExperimentResults:\nself._plotter = plotter\n+ # Dictionary to store the full coverage data.\n+ self._coverage_dict = coverage_dict\n+\ndef _get_full_path(self, filename):\nreturn os.path.join(self._output_directory, filename)\n@@ -87,6 +92,7 @@ class ExperimentResults:\nbenchmark_names = self._experiment_df.benchmark.unique()\nreturn [\nbenchmark_results.BenchmarkResults(name, self._experiment_df,\n+ self._coverage_dict,\nself._output_directory,\nself._plotter)\nfor name in sorted(benchmark_names)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/generate_report.py",
"new_path": "analysis/generate_report.py",
"diff": "@@ -20,6 +20,7 @@ import sys\nimport pandas as pd\nfrom analysis import data_utils\n+from analysis import coverage_data_utils\nfrom analysis import experiment_results\nfrom analysis import plotting\nfrom analysis import queries\n@@ -76,11 +77,12 @@ def get_arg_parser():\n'--fuzzers',\nnargs='*',\nhelp='Names of the fuzzers to include in the report.')\n- parser.add_argument('-cov',\n+ parser.add_argument(\n+ '-cov',\n'--coverage-report',\naction='store_true',\n- default=False,\n- help='If set, clang coverage reports are linked.')\n+ default=True,\n+ help='If set, clang coverage reports and differential plots are shown.')\n# It doesn't make sense to clobber and label by experiment, since nothing\n# can get clobbered like this.\n@@ -159,6 +161,12 @@ def generate_report(experiment_names,\ndata_utils.validate_data(experiment_df)\n+ # Load the json summary file.\n+ coverage_dict = {}\n+ if coverage_report:\n+ coverage_dict = coverage_data_utils.get_covered_regions_dict(\n+ experiment_df)\n+\nif benchmarks is not None:\nexperiment_df = data_utils.filter_benchmarks(experiment_df, benchmarks)\n@@ -178,7 +186,11 @@ def generate_report(experiment_names,\nfuzzer_names = experiment_df.fuzzer.unique()\nplotter = plotting.Plotter(fuzzer_names, quick, log_scale)\nexperiment_ctx = experiment_results.ExperimentResults(\n- experiment_df, report_directory, plotter, experiment_name=report_name)\n+ experiment_df,\n+ coverage_dict,\n+ report_directory,\n+ plotter,\n+ experiment_name=report_name)\ntemplate = report_type + '.html'\ndetailed_report = rendering.render_report(experiment_ctx, template,\n@@ -195,8 +207,7 @@ def main():\nparser = get_arg_parser()\nargs = parser.parse_args()\n- generate_report(\n- experiment_names=args.experiments,\n+ generate_report(experiment_names=args.experiments,\nreport_directory=args.report_dir,\nreport_name=args.report_name,\nlabel_by_experiment=args.label_by_experiment,\n@@ -208,7 +219,6 @@ def main():\nfrom_cached_data=args.from_cached_data,\nend_time=args.end_time,\nmerge_with_clobber=args.merge_with_clobber,\n- merge_with_clobber_nonprivate=args.merge_with_clobber_nonprivate,\ncoverage_report=args.coverage_report)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/plotting.py",
"new_path": "analysis/plotting.py",
"diff": "@@ -336,3 +336,73 @@ class Plotter:\nfig.savefig(image_path, bbox_inches=\"tight\")\nfinally:\nplt.close(fig)\n+\n+ def unique_coverage_ranking_plot(self,\n+ unique_region_cov_df_combined,\n+ axes=None):\n+ \"\"\"Draws unique_coverage_ranking plot. The fuzzer labels will be in\n+ the order of their coverage.\"\"\"\n+\n+ fuzzer_order = unique_region_cov_df_combined.sort_values(\n+ by='unique_regions_covered', ascending=False).fuzzer\n+\n+ axes = sns.barplot(y='unique_regions_covered',\n+ x='fuzzer',\n+ data=unique_region_cov_df_combined,\n+ order=fuzzer_order,\n+ palette=self._fuzzer_colors,\n+ ax=axes)\n+\n+ for patch in axes.patches:\n+ axes.annotate(\n+ format(patch.get_height(), '.2f'),\n+ (patch.get_x() + patch.get_width() / 2., patch.get_height()),\n+ ha='center',\n+ va='center',\n+ xytext=(0, 10),\n+ textcoords='offset points')\n+\n+ sns.barplot(y='aggregated_edges_covered',\n+ x='fuzzer',\n+ data=unique_region_cov_df_combined,\n+ order=fuzzer_order,\n+ facecolor=(1, 1, 1, 0),\n+ edgecolor='0.2',\n+ ax=axes)\n+\n+ axes.set(ylabel='Reached unique edge coverage')\n+ axes.set(xlabel='Fuzzer (highest coverage on the left)')\n+ axes.set_xticklabels(axes.get_xticklabels(),\n+ rotation=_DEFAULT_LABEL_ROTATION,\n+ horizontalalignment='right')\n+\n+ sns.despine(ax=axes, trim=True)\n+\n+ def write_unique_coverage_ranking_plot(self, unique_region_cov_df_combined,\n+ image_path):\n+ \"\"\"Writes ranking plot for unique coverage.\"\"\"\n+ self._write_plot_to_image(self.unique_coverage_ranking_plot,\n+ unique_region_cov_df_combined, image_path)\n+\n+ def pairwise_unique_coverage_heatmap_plot(self,\n+ pairwise_unique_coverage_table,\n+ axes=None):\n+ \"\"\"Draws the heatmap to visualize the unique coverage between\n+ each pair of fuzzers.\"\"\"\n+ heatmap_args = {\n+ 'annot': True,\n+ 'fmt': 'd',\n+ 'cmap': 'Blues',\n+ 'linewidths': 0.5\n+ }\n+ axes = sns.heatmap(pairwise_unique_coverage_table,\n+ ax=axes,\n+ **heatmap_args)\n+ axes.set(ylabel='Not covered by')\n+ axes.set(xlabel='Covered by')\n+\n+ def write_pairwise_unique_coverage_heatmap_plot(\n+ self, pairwise_unique_coverage_table, image_path):\n+ \"\"\"Writes pairwise unique coverage heatmap plot.\"\"\"\n+ self._write_plot_to_image(self.pairwise_unique_coverage_heatmap_plot,\n+ pairwise_unique_coverage_table, image_path)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/queries.py",
"new_path": "analysis/queries.py",
"diff": "@@ -23,7 +23,7 @@ def get_experiment_data(experiment_names):\n\"\"\"Get measurements (such as coverage) on experiments from the database.\"\"\"\nsnapshots_query = db_utils.query(\n- Experiment.git_hash,\\\n+ Experiment.git_hash, Experiment.experiment_filestore,\\\nTrial.experiment, Trial.fuzzer, Trial.benchmark,\\\nTrial.time_started, Trial.time_ended,\\\nSnapshot.trial_id, Snapshot.time, Snapshot.edges_covered)\\\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/rendering.py",
"new_path": "analysis/rendering.py",
"diff": "@@ -38,16 +38,6 @@ def render_report(experiment_results, template, in_progress, coverage_report):\n)\ntemplate = environment.get_template(template)\n- # FIXME: Use |experiment_filestore_name| from experiment db.\n- # See #642: https://github.com/google/fuzzbench/issues/642\n- if 'EXPERIMENT_FILESTORE' in os.environ:\n- experiment_filestore = os.environ['EXPERIMENT_FILESTORE']\n- prefix = \"gs://\"\n- experiment_filestore_name = experiment_filestore[len(prefix):]\n- else:\n- experiment_filestore_name = 'fuzzbench-data'\n-\nreturn template.render(experiment=experiment_results,\nin_progress=in_progress,\n- coverage_report=coverage_report,\n- experiment_filestore_name=experiment_filestore_name)\n+ coverage_report=coverage_report)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/report_templates/default.html",
"new_path": "analysis/report_templates/default.html",
"diff": "</ul>\n{% if coverage_report %}\n+ <ul class=\"collapsible\">\n+ <li>\n+ <div class=\"collapsible-header\">\n+ Unique coverage plots\n+ </div>\n+ <div class=\"collapsible-body\">\n+\n+ <div class=\"row\">\n+ <div class=\"col s6 offset-s3\">\n+ <h5 class=\"center-align\">Ranking by unique edge covered</h4>\n+ <img class=\"responsive-img materialboxed\"\n+ src=\"{{ benchmark.unique_coverage_ranking_plot }}\">\n+ Each bar shows the total number of edges found by a given fuzzer.\n+ The colored area shows the number of unique edges\n+ (i.e., edges that were not covered by any other fuzzers).\n+ </div>\n+ </div> <!-- row -->\n+\n+ <div class=\"row\">\n+ <div class=\"col s6 offset-s3\">\n+ <h5 class=\"center-align\">Pairwise unique coverage</h4>\n+ <img class=\"responsive-img materialboxed\"\n+ src=\"{{ benchmark.pairwise_unique_coverage_plot }}\">\n+ Each cell represents the number of edges covered by the fuzzer\n+ of the column but not by the fuzzer of the row\n+ </div>\n+ </div> <!-- row -->\n+\n+ </div>\n+ </li>\n+ </ul>\n+\n<ul class=\"collapsible\">\n<li>\n<div class=\"collapsible-header\">\n</div>\n<div class=\"collapsible-body\">\n<div class=\"row\">\n- {% for fuzzer in benchmark.fuzzers %}\n+ {% for fuzzer in benchmark.fuzzer_names %}\n<div class=\"col\">\n- <a class=\"waves-effect waves-light btn\" href=\"https://storage.googleapis.com/{{ experiment_filestore_name }}/{{ experiment.name }}/reports/coverage/{{ benchmark.name }}/{{ fuzzer }}/index.html\">{{ fuzzer }}</a>\n+ <a class=\"waves-effect waves-light btn\" href=\"{{ benchmark.get_filestore_name(fuzzer) }}/coverage/reports/{{ benchmark.name }}/{{ fuzzer }}/index.html\">{{ fuzzer }}</a>\n</div>\n{% endfor %}\n</div>\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "analysis/test_coverage_data_utils.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions andsss\n+# limitations under the License.\n+\"\"\"Tests for coverage_data_utils.py\"\"\"\n+\n+import pandas as pd\n+import pandas.testing as pd_test\n+\n+from analysis import coverage_data_utils\n+\n+\n+def create_coverage_data():\n+ \"\"\"Utility function to create test data.\"\"\"\n+ return {\n+ \"afl libpng-1.2.56\": [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n+ \"libfuzzer libpng-1.2.56\": [[0, 0, 1, 1], [0, 0, 2, 3], [0, 0, 3, 3],\n+ [0, 0, 4, 4]]\n+ }\n+\n+\n+def test_get_unique_region_dict():\n+ \"\"\"Tests get_unique_region_dict() function.\"\"\"\n+ coverage_dict = create_coverage_data()\n+ benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(\n+ coverage_dict, 'libpng-1.2.56')\n+ unique_region_dict = coverage_data_utils.get_unique_region_dict(\n+ benchmark_coverage_dict)\n+ expected_dict = {\n+ (0, 0, 2, 2): ['afl'],\n+ (0, 0, 2, 3): ['libfuzzer'],\n+ (0, 0, 4, 4): ['libfuzzer']\n+ }\n+ assert expected_dict == unique_region_dict\n+\n+\n+def test_get_unique_region_cov_df():\n+ \"\"\"Tests get_unique_region_cov_df() function.\"\"\"\n+ coverage_dict = create_coverage_data()\n+ benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(\n+ coverage_dict, 'libpng-1.2.56')\n+ unique_region_dict = coverage_data_utils.get_unique_region_dict(\n+ benchmark_coverage_dict)\n+ fuzzer_names = ['afl', 'libfuzzer']\n+ unique_region_df = coverage_data_utils.get_unique_region_cov_df(\n+ unique_region_dict, fuzzer_names)\n+ unique_region_df = unique_region_df.sort_values(by=['fuzzer']).reset_index(\n+ drop=True)\n+ expected_df = pd.DataFrame([{\n+ 'fuzzer': 'afl',\n+ 'unique_regions_covered': 1\n+ }, {\n+ 'fuzzer': 'libfuzzer',\n+ 'unique_regions_covered': 2\n+ }])\n+ assert unique_region_df.equals(expected_df)\n+\n+\n+def test_get_benchmark_cov_dict():\n+ \"\"\"Tests that get_benchmark_cov_dict() returns correct dictionary.\"\"\"\n+ coverage_dict = create_coverage_data()\n+ benchmark = 'libpng-1.2.56'\n+ benchmark_cov_dict = coverage_data_utils.get_benchmark_cov_dict(\n+ coverage_dict, benchmark)\n+ expected_cov_dict = {\n+ \"afl\": {(0, 0, 3, 3), (0, 0, 2, 2), (0, 0, 1, 1)},\n+ \"libfuzzer\": {(0, 0, 4, 4), (0, 0, 3, 3), (0, 0, 2, 3), (0, 0, 1, 1)}\n+ }\n+ assert expected_cov_dict == benchmark_cov_dict\n+\n+\n+def test_get_pairwise_unique_coverage_table():\n+ \"\"\"Tests that get_pairwise_unique_coverage_table() gives the\n+ correct dataframe.\"\"\"\n+ coverage_dict = create_coverage_data()\n+ benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(\n+ coverage_dict, 'libpng-1.2.56')\n+ table = coverage_data_utils.get_pairwise_unique_coverage_table(\n+ benchmark_coverage_dict)\n+ fuzzers = ['afl', 'libfuzzer']\n+ expected_table = pd.DataFrame([[0, 2], [1, 0]],\n+ index=fuzzers,\n+ columns=fuzzers)\n+ pd_test.assert_frame_equal(table, expected_table)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/test_data_utils.py",
"new_path": "analysis/test_data_utils.py",
"diff": "@@ -23,7 +23,8 @@ from analysis import data_utils\ndef create_trial_data( # pylint: disable=too-many-arguments\n- trial_id, benchmark, fuzzer, cycles, reached_coverage, experiment):\n+ trial_id, benchmark, fuzzer, cycles, reached_coverage, experiment,\n+ experiment_filestore):\n\"\"\"Utility function to create test trial data.\"\"\"\nreturn pd.DataFrame([{\n'experiment': experiment,\n@@ -34,22 +35,31 @@ def create_trial_data( # pylint: disable=too-many-arguments\n'time_ended': None,\n'time': t,\n'edges_covered': reached_coverage,\n+ 'experiment_filestore': experiment_filestore\n} for t in range(cycles)])\n-def create_experiment_data(experiment='test_experiment', incomplete=False):\n+def create_experiment_data(experiment='test_experiment',\n+ incomplete=False,\n+ experiment_filestore='gs://fuzzbench-data'):\n\"\"\"Utility function to create test experiment data.\"\"\"\nreturn pd.concat([\n- create_trial_data(0, 'libpng', 'afl', 10, 100, experiment),\n- create_trial_data(1, 'libpng', 'afl', 10, 200, experiment),\n- create_trial_data(2, 'libpng', 'libfuzzer', 10, 200, experiment),\n- create_trial_data(3, 'libpng', 'libfuzzer', 10, 300, experiment),\n+ create_trial_data(0, 'libpng', 'afl', 10, 100, experiment,\n+ experiment_filestore),\n+ create_trial_data(1, 'libpng', 'afl', 10, 200, experiment,\n+ experiment_filestore),\n+ create_trial_data(2, 'libpng', 'libfuzzer', 10, 200, experiment,\n+ experiment_filestore),\n+ create_trial_data(3, 'libpng', 'libfuzzer', 10, 300, experiment,\n+ experiment_filestore),\ncreate_trial_data(4, 'libxml', 'afl', 6 if incomplete else 10, 1000,\n- experiment),\n- create_trial_data(5, 'libxml', 'afl', 10, 1200, experiment),\n+ experiment, experiment_filestore),\n+ create_trial_data(5, 'libxml', 'afl', 10, 1200, experiment,\n+ experiment_filestore),\ncreate_trial_data(6, 'libxml', 'libfuzzer', 8 if incomplete else 10,\n- 600, experiment),\n- create_trial_data(7, 'libxml', 'libfuzzer', 10, 800, experiment),\n+ 600, experiment, experiment_filestore),\n+ create_trial_data(7, 'libxml', 'libfuzzer', 10, 800, experiment,\n+ experiment_filestore),\n])\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/coverage_utils.py",
"new_path": "experiment/coverage_utils.py",
"diff": "import os\nimport multiprocessing\nimport json\n-import queue\nfrom common import experiment_utils as exp_utils\nfrom common import new_process\n@@ -24,23 +23,27 @@ from common import benchmark_utils\nfrom common import fuzzer_utils\nfrom common import logs\nfrom common import filestore_utils\n-from common import experiment_path as exp_path\nfrom common import filesystem\nfrom database import utils as db_utils\nfrom database import models\nfrom experiment.build import build_utils\n-from experiment import reporter\nlogger = logs.Logger('coverage_utils') # pylint: disable=invalid-name\nCOV_DIFF_QUEUE_GET_TIMEOUT = 1\n-def upload_coverage_reports_to_bucket():\n- \"\"\"Copies the coverage reports to gcs bucket.\"\"\"\n- report_dir = reporter.get_reports_dir()\n- src_dir = get_coverage_report_dir()\n- dst_dir = exp_path.filestore(report_dir)\n+def get_coverage_info_dir():\n+ \"\"\"Returns the directory to store coverage information including\n+ coverage report and json summary file.\"\"\"\n+ work_dir = exp_utils.get_work_dir()\n+ return os.path.join(work_dir, 'coverage')\n+\n+\n+def upload_coverage_info_to_bucket():\n+ \"\"\"Copies the coverage reports and json summary files to gcs bucket.\"\"\"\n+ src_dir = get_coverage_info_dir()\n+ dst_dir = exp_utils.get_experiment_filestore_path()\nfilestore_utils.cp(src_dir, dst_dir, recursive=True, parallel=True)\n@@ -55,8 +58,6 @@ def generate_coverage_reports(experiment_config: dict):\nfor benchmark in benchmarks\nfor fuzzer in fuzzers]\npool.starmap(generate_coverage_report, generate_coverage_report_args)\n- pool.close()\n- pool.join()\nlogger.info('Finished generating coverage report.')\n@@ -88,6 +89,9 @@ class CoverageReporter: # pylint: disable=too-many-instance-attributes\nself.trial_ids = get_trial_ids(experiment, fuzzer, benchmark)\ncov_report_directory = get_coverage_report_dir()\nself.report_dir = os.path.join(cov_report_directory, benchmark, fuzzer)\n+ coverage_info_dir = get_coverage_info_dir()\n+ self.json_file_dir = os.path.join(coverage_info_dir, 'data', benchmark,\n+ fuzzer)\nbenchmark_fuzzer_dir = exp_utils.get_benchmark_fuzzer_dir(\nbenchmark, fuzzer)\nwork_dir = exp_utils.get_work_dir()\n@@ -129,17 +133,20 @@ class CoverageReporter: # pylint: disable=too-many-instance-attributes\n'fuzzer: {fuzzer},benchmark: {benchmark}.'.format(\nfuzzer=self.fuzzer, benchmark=self.benchmark))\n+ def generate_json_summary_file(self, covered_regions):\n+ \"\"\"Stores the coverage data in a json file.\"\"\"\n+ json_src_dir = self.json_file_dir\n+ filesystem.create_directory(json_src_dir)\n+ json_src = os.path.join(json_src_dir, 'covered_regions.json')\n+ with open(json_src, 'w') as src_file:\n+ json.dump(covered_regions, src_file)\n+\ndef get_coverage_archive_name(benchmark):\n\"\"\"Gets the archive name for |benchmark|.\"\"\"\nreturn 'coverage-build-%s.tar.gz' % benchmark\n-def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n- \"\"\"Returns the key in coverage dict for a pair of fuzzer-benchmark.\"\"\"\n- return fuzzer + ' ' + benchmark\n-\n-\ndef get_profdata_file_name(trial_id):\n\"\"\"Returns the profdata file name for |trial_id|.\"\"\"\nreturn 'data-{id}.profdata'.format(id=trial_id)\n@@ -147,8 +154,8 @@ def get_profdata_file_name(trial_id):\ndef get_coverage_report_dir():\n\"\"\"Returns the directory to store all the coverage reports.\"\"\"\n- report_dir = reporter.get_reports_dir()\n- return os.path.join(report_dir, 'coverage')\n+ coverage_info_dir = get_coverage_info_dir()\n+ return os.path.join(coverage_info_dir, 
'reports')\ndef get_coverage_binary(benchmark: str) -> str:\n@@ -191,65 +198,46 @@ def get_coverage_infomation(coverage_summary_file):\ndef store_coverage_data(experiment_config: dict):\n\"\"\"Generates the specific coverage data and store in cloud bucket.\"\"\"\nlogger.info('Start storing coverage data')\n- with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:\n- q = manager.Queue() # pytype: disable=attribute-error\n- covered_regions = get_all_covered_regions(experiment_config, pool, q)\n- json_src_dir = reporter.get_reports_dir()\n- filesystem.recreate_directory(json_src_dir)\n- json_src = os.path.join(json_src_dir, 'covered_regions.json')\n- with open(json_src, 'w') as src_file:\n- json.dump(covered_regions, src_file)\n- json_dst = exp_path.filestore(json_src)\n- filestore_utils.cp(json_src, json_dst)\n+ with multiprocessing.Pool() as pool:\n+ store_all_covered_regions(experiment_config, pool)\nlogger.info('Finished storing coverage data')\n-def get_all_covered_regions(experiment_config: dict, pool, q) -> dict:\n- \"\"\"Gets regions covered for each pair for fuzzer and benchmark.\"\"\"\n- logger.info('Measuring all fuzzer-benchmark pairs for final coverage data.')\n+def store_all_covered_regions(experiment_config: dict, pool):\n+ \"\"\"Stores regions covered for each pair for fuzzer and benchmark.\"\"\"\n+ logger.info('Storing all fuzzer-benchmark pairs for final coverage data.')\nbenchmarks = experiment_config['benchmarks'].split(',')\nfuzzers = experiment_config['fuzzers'].split(',')\nexperiment = experiment_config['experiment']\n- get_covered_region_args = [(experiment, fuzzer, benchmark, q)\n+ store_covered_region_args = [(experiment, fuzzer, benchmark)\nfor fuzzer in fuzzers\nfor benchmark in benchmarks]\n- result = pool.starmap_async(get_covered_region, get_covered_region_args)\n+ pool.starmap(store_covered_region, store_covered_region_args)\n+ logger.info('Done storing all coverage data.')\n- # Poll the queue for covered region data and save them in a dict until the\n- # pool is done processing each combination of fuzzers and benchmarks.\n- all_covered_regions = {}\n- while True:\n- try:\n- covered_regions = q.get(timeout=COV_DIFF_QUEUE_GET_TIMEOUT)\n- all_covered_regions.update(covered_regions)\n- except queue.Empty:\n- if result.ready():\n- # If \"ready\" that means pool has finished. 
Since it is\n- # finished and the queue is empty, we can stop checking\n- # the queue for more covered regions.\n- logger.debug(\n- 'Finished call to map with get_all_covered_regions.')\n- break\n-\n- for key in all_covered_regions:\n- all_covered_regions[key] = list(all_covered_regions[key])\n- logger.info('Done measuring all coverage data.')\n- return all_covered_regions\n-\n-\n-def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\n- q: multiprocessing.Queue):\n+def store_covered_region(experiment: str, fuzzer: str, benchmark: str):\n\"\"\"Gets the final covered region for a specific pair of fuzzer-benchmark.\"\"\"\nlogs.initialize()\n+ logger.debug('Storing covered region: fuzzer: %s, benchmark: %s.', fuzzer,\n+ benchmark)\n+ covered_regions = get_covered_region(experiment, fuzzer, benchmark)\n+ generator = CoverageReporter(fuzzer, benchmark, experiment)\n+ generator.generate_json_summary_file(covered_regions)\n+\n+ logger.debug('Finished storing covered region: fuzzer: %s, benchmark: %s.',\n+ fuzzer, benchmark)\n+\n+\n+def get_covered_region(experiment: str, fuzzer: str, benchmark: str):\n+ \"\"\"Gets covered regions for |fuzzer| on |benchmark|.\"\"\"\nlogger.debug('Measuring covered region: fuzzer: %s, benchmark: %s.', fuzzer,\nbenchmark)\n- key = get_fuzzer_benchmark_key(fuzzer, benchmark)\n- covered_regions = {key: set()}\n+ covered_regions = set()\ntrial_ids = get_trial_ids(experiment, fuzzer, benchmark)\nfor trial_id in trial_ids:\nlogger.info('Measuring covered region: trial_id = %d.', trial_id)\n@@ -263,10 +251,10 @@ def get_covered_region(experiment: str, fuzzer: str, benchmark: str,\nsnapshot_logger)\ntrial_coverage.generate_summary(0, summary_only=False)\nnew_covered_regions = trial_coverage.get_current_covered_regions()\n- covered_regions[key] = covered_regions[key].union(new_covered_regions)\n- q.put(covered_regions)\n+ covered_regions = covered_regions.union(new_covered_regions)\nlogger.debug('Done measuring covered region: fuzzer: %s, benchmark: %s.',\nfuzzer, benchmark)\n+ return list(covered_regions)\nclass TrialCoverage: # pylint: disable=too-many-instance-attributes\n@@ -306,10 +294,13 @@ class TrialCoverage: # pylint: disable=too-many-instance-attributes\n# region it is; 'code_region' is used to obtain various code\n# coverage statistic and is represented by number 0.\ntype_index = -1\n+ # The number of index 5 represents the file number.\n+ file_index = 5\nfor function_data in functions_data:\nfor region in function_data['regions']:\nif region[hit_index] != 0 and region[type_index] == 0:\n- covered_regions.add(tuple(region[:hit_index]))\n+ covered_regions.add(\n+ tuple(region[:hit_index] + region[file_index:]))\nexcept Exception: # pylint: disable=broad-except\nself.logger.error(\n'Coverage summary json file defective or missing.')\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/measurer.py",
"new_path": "experiment/measurer.py",
"diff": "@@ -72,7 +72,8 @@ def measure_main(experiment_config):\n# Do the final measuring and store the coverage data.\ncoverage_utils.store_coverage_data(experiment_config)\ncoverage_utils.generate_coverage_reports(experiment_config)\n- coverage_utils.upload_coverage_reports_to_bucket()\n+ coverage_utils.upload_coverage_info_to_bucket()\n+\nlogger.info('Finished measuring.')\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Differential graphs on report (#657) |
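The core of the new differential report is `get_pairwise_unique_coverage_table()` in `analysis/coverage_data_utils.py` above: cell (row, col) counts the regions covered by the column fuzzer but missed by the row fuzzer. The sketch below reproduces that computation with plain set arithmetic; the region tuples are invented for illustration, and the expected output matches `test_get_pairwise_unique_coverage_table` above.

```python
import pandas as pd

# Covered regions per fuzzer, as sets of region tuples.
coverage = {
    'afl': {(0, 0, 1, 1), (0, 0, 2, 2), (0, 0, 3, 3)},
    'libfuzzer': {(0, 0, 1, 1), (0, 0, 2, 3), (0, 0, 3, 3), (0, 0, 4, 4)},
}

def pairwise_unique(cov):
    """Cell (row, col) = number of regions covered by col but not by row."""
    fuzzers = list(cov)
    data = [[len(cov[col] - cov[row]) for col in fuzzers] for row in fuzzers]
    return pd.DataFrame(data, index=fuzzers, columns=fuzzers)

print(pairwise_unique(coverage))
#            afl  libfuzzer
# afl          0          2
# libfuzzer    1          0
```

The set difference yields the same counts as the repository's element-by-element loop in `get_unique_covered_percentage()`, just more compactly.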
258,388 | 17.08.2020 14:57:51 | 25,200 | 1f0ff440db47ff7216a7b96acceb9cd7d9dd2814 | [scheduler] Use the instances API instead of operations to find preempted instances accurately
* [scheduler] Try using a query for terminated instances to find
the preempted instances.
The previous query for preemption operations seems to return incomplete
results in many-trial experiments.
* Complete fix with test fixes. | [
{
"change_type": "MODIFY",
"old_path": "common/gce.py",
"new_path": "common/gce.py",
"diff": "\"\"\"Module for using the Google Compute Engine (GCE) API.\"\"\"\nimport threading\n-import dateutil.parser\n-\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\n@@ -31,60 +29,27 @@ def initialize():\ncredentials=credentials)\n-def get_operations(project, zone):\n- \"\"\"Generator that yields GCE operations for compute engine |project| and\n- |zone| in descendending order by time.\"\"\"\n- zone_operations = thread_local.service.zoneOperations() # pylint: disable=no-member\n- request = zone_operations.list(project=project,\n- zone=zone,\n- orderBy='creationTimestamp desc')\n+def _get_instance_items(project, zone):\n+ \"\"\"Return an iterator of all instance response items for a project.\"\"\"\n+ instances = thread_local.service.instances()\n+ request = instances.list(project=project, zone=zone)\nwhile request is not None:\nresponse = request.execute()\n- for operation in response['items']:\n- yield operation\n-\n- request = zone_operations.list_next(previous_request=request,\n+ for instance in response['items']:\n+ yield instance\n+ request = instances.list_next(previous_request=request,\nprevious_response=response)\n-def get_preempted_operations(operations):\n- \"\"\"Generator that yields GCE preempted operations in |operations|.\"\"\"\n- # This endpoint doesn't support filtering by time (despite implications\n- # otherwise). Instead it supports ordering by time. It also supports\n- # filtering by operation but doesn't support it when combined with ordering.\n- # So we must filter manually.\n- # See the link below for an example of this query in action.\n- # https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations/list?apix_params=%7B%22project%22%3A%22fuzzbench%22%2C%22zone%22%3A%22us-central1-a%22%2C%22filter%22%3A%22(operationType%20%3D%20%5C%22compute.instances.preempted%5C%22)%22%2C%22orderBy%22%3A%22creationTimestamp%20desc%22%7D\n- for operation in operations:\n- if operation['operationType'] == 'compute.instances.preempted':\n- yield operation\n-\n-\n-def filter_by_end_time(min_end_time, operations):\n- \"\"\"Generator that yields GCE operations in |operations| that finished before\n- |min_end_time|. 
|operations| must be an iterable that is ordered by time.\"\"\"\n- for operation in operations:\n- end_time = operation.get('endTime')\n- if not end_time:\n- # Try to handle cases where the operation hasn't finished.\n- yield operation\n- continue\n-\n- operation_end_time = dateutil.parser.isoparse(end_time)\n- if operation_end_time < min_end_time:\n- break\n- yield operation\n-\n-\n-def get_base_target_link(experiment_config):\n- \"\"\"Returns the base of the target link for this experiment so that\n- get_instance_from_preempted_operation can return the instance.\"\"\"\n- return ('https://www.googleapis.com/compute/v1/projects/{project}/zones/'\n- '{zone}/instances/').format(\n- project=experiment_config['cloud_project'],\n- zone=experiment_config['cloud_compute_zone'])\n+def get_instances(project, zone):\n+ \"\"\"Return a list of all instance names in |project| and |zone|.\"\"\"\n+ for instance in _get_instance_items(project, zone):\n+ yield instance['name']\n-def get_instance_from_preempted_operation(operation, base_target_link) -> str:\n- \"\"\"Returns the instance name from a preempted |operation|.\"\"\"\n- return operation['targetLink'][len(base_target_link):]\n+def get_preempted_instances(project, zone):\n+ \"\"\"Return a list of preempted instance names in |project| and |zone|.\"\"\"\n+ for instance in _get_instance_items(project, zone):\n+ if (instance['scheduling']['preemptible'] and\n+ instance['status'] == 'TERMINATED'):\n+ yield instance['name']\n"
},
{
"change_type": "MODIFY",
"old_path": "common/gcloud.py",
"new_path": "common/gcloud.py",
"diff": "@@ -100,12 +100,6 @@ def delete_instances(instance_names: List[str], zone: str, **kwargs) -> bool:\nreturn not error_occurred\n-def list_instances() -> List[str]:\n- \"\"\"Return list of current running instances.\"\"\"\n- result = new_process.execute(['gcloud', 'compute', 'instances', 'list'])\n- return [instance.split(' ')[0] for instance in result.output.splitlines()]\n-\n-\ndef set_default_project(cloud_project: str):\n\"\"\"Set default project for future gcloud and gsutil commands.\"\"\"\nreturn new_process.execute(\n"
},
{
"change_type": "DELETE",
"old_path": "common/test_gce.py",
"new_path": null,
"diff": "-# Copyright 2020 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\"\"\"Tests for gce.py.\"\"\"\n-import posixpath\n-\n-from common import gce\n-\n-\n-def test_get_instance_from_preempted_operation():\n- \"\"\"Tests that _get_instance_from_preemption_operation returns the correct\n- value.\"\"\"\n- expected_instance = 'r-my-experiment-100'\n- base_target_link = 'www.myresourceurl/'\n- target_link = posixpath.join(base_target_link, expected_instance)\n- operation = {'targetLink': target_link}\n- instance = gce.get_instance_from_preempted_operation(\n- operation, base_target_link)\n-\n- assert instance == expected_instance\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/scheduler.py",
"new_path": "experiment/scheduler.py",
"diff": "@@ -106,8 +106,12 @@ def all_trials_ended(experiment: str) -> bool:\ndef delete_instances(instances, experiment_config):\n\"\"\"Deletes |instances|.\"\"\"\n- running_instances = gcloud.list_instances()\n- instances_to_delete = [i for i in instances if i in running_instances]\n+ cloud_project = experiment_config['cloud_project']\n+ cloud_compute_zone = experiment_config['cloud_compute_zone']\n+ instances_to_delete = [\n+ i for i in gce.get_instances(cloud_project, cloud_compute_zone)\n+ if i in instances\n+ ]\nreturn gcloud.delete_instances(instances_to_delete,\nexperiment_config['cloud_compute_zone'])\n@@ -231,8 +235,6 @@ class TrialInstanceManager: # pylint: disable=too-many-instance-attributes\nself.preempted_trials = {}\nself.preemptible_starts_futile = False\n- self.base_target_link = gce.get_base_target_link(experiment_config)\n-\n# Filter operations happening before the experiment started.\nself.last_preemptible_query = (db_utils.query(models.Experiment).filter(\nmodels.Experiment.name == experiment_config['experiment']).one(\n@@ -410,7 +412,7 @@ class TrialInstanceManager: # pylint: disable=too-many-instance-attributes\nstarted_instances = self._get_started_unfinished_instances()\nquery_time = datetime_now()\n- preempted_instances = list(self._query_preempted_instances())\n+ preempted_instances = self._get_preempted_instances_with_retries()\ntrials = []\nfor instance in preempted_instances:\ntrial = started_instances.get(instance)\n@@ -428,30 +430,20 @@ class TrialInstanceManager: # pylint: disable=too-many-instance-attributes\n# Update this now when we know that we have succeded processing the\n# query. It's far worse if we update the query too early than if we\n# don't update the query at this point (which will only result in\n- # redundant work.\n+ # redundant work).\nself.last_preemptible_query = query_time\n# Return all preempted instances, those we knew from beforehand and\n# those we discovered in the query.\nreturn trials\n- @retry.wrap(\n- NUM_RETRIES, RETRY_WAIT_SECONDS,\n- 'experiment.scheduler.TrialInstanceManager._query_preempted_instances')\n- def _query_preempted_instances(self):\n+ @retry.wrap(NUM_RETRIES, RETRY_WAIT_SECONDS,\n+ 'experiment.scheduler.TrialInstanceManager.'\n+ '_get_preempted_instances_with_retries')\n+ def _get_preempted_instances_with_retries(self):\nproject = self.experiment_config['cloud_project']\nzone = self.experiment_config['cloud_compute_zone']\n- operations = gce.filter_by_end_time(self.last_preemptible_query,\n- gce.get_operations(project, zone))\n- instances = []\n- for operation in gce.get_preempted_operations(operations):\n- if operation is None:\n- logs.error('Operation is None.')\n- continue\n- instance = gce.get_instance_from_preempted_operation(\n- operation, self.base_target_link)\n- instances.append(instance)\n- return instances\n+ return list(gce.get_preempted_instances(project, zone))\ndef handle_preempted_trials(self):\n\"\"\"Handle preempted trials by marking them as preempted and creating\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/stop_experiment.py",
"new_path": "experiment/stop_experiment.py",
"diff": "@@ -18,6 +18,7 @@ import sys\nfrom common import experiment_utils\nfrom common import logs\n+from common import gce\nfrom common import gcloud\nfrom common import yaml_utils\n@@ -27,14 +28,16 @@ logger = logs.Logger('stop_experiment') # pylint: disable=invalid-name\ndef stop_experiment(experiment_name, experiment_config_filename):\n\"\"\"Stop the experiment specified by |experiment_config_filename|.\"\"\"\nexperiment_config = yaml_utils.read(experiment_config_filename)\n-\nif experiment_config.get('local_experiment', False):\nraise NotImplementedError(\n'Local experiment stop logic is not implemented.')\n- instances = gcloud.list_instances()\n-\n+ cloud_project = experiment_config['cloud_project']\ncloud_compute_zone = experiment_config['cloud_compute_zone']\n+\n+ gce.initialize()\n+ instances = list(gce.get_instances(cloud_project, cloud_compute_zone))\n+\ntrial_prefix = 'r-' + experiment_name\nexperiment_instances = [\ninstance for instance in instances if instance.startswith(trial_prefix)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "import datetime\nfrom multiprocessing.pool import ThreadPool\nimport os\n-import posixpath\nimport time\nfrom unittest import mock\n@@ -215,7 +214,8 @@ def test_start_trials_not_started(mocked_create_instance, pending_trials,\[email protected]('experiment.scheduler.datetime_now')\[email protected]('common.benchmark_utils.get_fuzz_target',\nreturn_value='fuzz-target')\n-def test_schedule(_, mocked_datetime_now, mocked_execute, pending_trials,\[email protected]('common.gce._get_instance_items', return_value=[])\n+def test_schedule(_, __, mocked_datetime_now, mocked_execute, pending_trials,\nexperiment_config):\n\"\"\"Tests that schedule() ends expired trials and starts new ones as\nneeded.\"\"\"\n@@ -376,7 +376,7 @@ def test_get_preempted_trials_nonpreemptible(experiment_config, db):\nassert trial_instance_manager.get_preempted_trials() == []\[email protected]('common.gce.get_operations', return_value=[])\[email protected]('common.gce._get_instance_items', return_value=[])\ndef test_get_preempted_trials_stale_preempted(_, preempt_exp_conf):\n\"\"\"Tests that TrialInstanceManager.get_preempted_trials doesn't return\ntrials that we already know were preempted.\"\"\"\n@@ -395,37 +395,21 @@ def test_get_preempted_trials_stale_preempted(_, preempt_exp_conf):\nassert trial_instance_manager.get_preempted_trials() == []\n-def _get_preemption_operation(trial_id, exp_conf):\n- zone_url = (\n- 'https://www.googleapis.com/compute/v1/projects/{project}/zones/'\n- '{zone}').format(zone=exp_conf['cloud_compute_zone'],\n- project=exp_conf['cloud_project'])\n+def _get_preempted_instance_item(trial_id, exp_conf):\ninstance_name = experiment_utils.get_trial_instance_name(\nexp_conf['experiment'], trial_id)\n- target_link = posixpath.join('instances', zone_url, instance_name)\n- name = 'systemevent-blah'\n- self_link = posixpath.join(zone_url, name)\nreturn {\n'id': '1',\n- 'name': name,\n- 'zone': zone_url,\n- 'operationType': 'compute.instances.preempted',\n- 'targetLink': target_link,\n- 'targetId': '1',\n- 'status': 'DONE',\n- 'statusMessage': 'Instance was preempted.',\n- 'user': 'system',\n- 'progress': 100,\n- 'insertTime': '2020-01-24T29:16:46.842-02:00',\n- 'startTime': '2020-01-24T29:16:46.842-02:00',\n- 'endTime': '2020-01-24T29:16:46.842-02:00',\n- 'selfLink': self_link,\n- 'kind': 'compute#operation'\n+ 'name': instance_name,\n+ 'status': 'TERMINATED',\n+ 'scheduling': {\n+ 'preemptible': True,\n+ }\n}\[email protected]('common.gce.get_preempted_operations')\n-def test_get_preempted_trials_new_preempted(mocked_get_preempted_operations,\[email protected]('common.gce._get_instance_items')\n+def test_get_preempted_trials_new_preempted(mocked_get_instance_items,\npreempt_exp_conf):\n\"\"\"Tests that TrialInstanceManager.get_preempted_trials returns trials that\nnew preempted trials we don't know about until we query for them and not\n@@ -445,8 +429,8 @@ def test_get_preempted_trials_new_preempted(mocked_get_preempted_operations,\ntime_started=time_started)\ntrials = [known_preempted, unknown_preempted]\ndb_utils.add_all(trials)\n- mocked_get_preempted_operations.return_value = [\n- _get_preemption_operation(trial.id, preempt_exp_conf)\n+ mocked_get_instance_items.return_value = [\n+ _get_preempted_instance_item(trial.id, preempt_exp_conf)\nfor trial in trials\n]\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [scheduler] Use instance api instead of operations to find preempted instances accurately (#674)
* [scheduler] Try using a query for terminated instances to find
the preempted instances.
The previous query for preemption operations seems to return incomplete
results in many-trial experiments.
* Complete fix with test fixes.
Co-authored-by: Abhishek Arya <[email protected]> |
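A minimal sketch of the instance-state query this commit switches to, for readers who want the idea outside the diff. It assumes the Google API Python client and application-default credentials; the instance-item shape (`status: TERMINATED` plus `scheduling.preemptible`) matches the mocked items in `test_scheduler.py` above, but the function name and structure here are illustrative, not FuzzBench's actual `common/gce.py`.

```python
# Sketch: find preempted VMs by listing instances and checking their state,
# instead of scanning zone operations for compute.instances.preempted events.
import googleapiclient.discovery


def get_preempted_instances(project, zone):
    """Yields names of preemptible instances that have been terminated."""
    compute = googleapiclient.discovery.build('compute', 'v1')
    request = compute.instances().list(project=project, zone=zone)
    while request is not None:
        response = request.execute()
        for item in response.get('items', []):
            # A preempted VM shows up as a TERMINATED preemptible instance.
            if (item['status'] == 'TERMINATED' and
                    item.get('scheduling', {}).get('preemptible')):
                yield item['name']
        request = compute.instances().list_next(request, response)
```

Listing instances directly trades a larger response for completeness, which is the point of the commit: the zone-operations query was missing preemptions in many-trial experiments.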
258,396 | 18.08.2020 17:57:06 | 25,200 | 12f3485bb0104f5cb2d6145db799f262363a11c5 | Add a temporary entropic variant for testing scheduling policy that uses exectime. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -37,6 +37,7 @@ jobs:\n# Temporary variants.\n- libfuzzer_keepseed\n- entropic_keepseed\n+ - entropic_exectime\n- aflplusplus_classic\n- aflplusplus_lto_pcguard\n- aflplusplus_lto\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_exectime/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project && \\\n+ git checkout bb54bcf84970c04c9748004f3a4cf59b0c1832a7 && \\\n+ patch -p1 < /patch.diff && \\\n+ cd /llvm-project/compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /libEntropic.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_exectime/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.entropic import fuzzer as entropic_fuzzer\n+from fuzzers.libfuzzer import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ entropic_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(\n+ input_corpus,\n+ output_corpus,\n+ target_binary,\n+ extra_flags=['-entropic=1', '-entropic_scale_per_exec_time=1'])\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_exectime/patch.diff",
"diff": "+commit cf575f9bb0d180fa2069c30b6b78f8ada3502462\n+Author: Dokyung Song <[email protected]>\n+Date: Mon Aug 17 16:59:59 2020 +0000\n+\n+ [libFuzzer] Scale energy assigned to each input based on input execution time.\n+\n+ This is an experimental patch uploaded to get early feedback.\n+\n+ This patch scales the energy computed by the Entropic schedule based on the\n+ execution time of each input. The input execution time is compared with the\n+ average execution time of inputs in the corpus, and, based on the amount by\n+ which they differ, the energy is scaled from 0.1x (for inputs executing slow) to\n+ 30x (for inputs executing fast). Note that the exact formula is borrowed from\n+ AFL.\n+\n+ In my short, local experiments, this gives a sizeable throughput increase (which\n+ in turn leads to more coverage) on the SQLITE3 benchmark, which I will test\n+ further with longer and more experiments with more benchmarks. I am not sure for\n+ now that how this execution-time-based energy scaling works with other factors\n+ (e.g., the number of globally rare features) that are already considered by the\n+ Entropic schedule to compute the energy; I am going to do some more profiling\n+ and testing to examine this.\n+\n+ Differential Revision: https://reviews.llvm.org/D86092\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+index 54d1e09ec6d..66f137e9c7c 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+@@ -18,6 +18,7 @@\n+ #include \"FuzzerSHA1.h\"\n+ #include \"FuzzerTracePC.h\"\n+ #include <algorithm>\n++#include <chrono>\n+ #include <numeric>\n+ #include <random>\n+ #include <unordered_set>\n+@@ -26,6 +27,7 @@ namespace fuzzer {\n+\n+ struct InputInfo {\n+ Unit U; // The actual input data.\n++ std::chrono::microseconds TimeOfUnit;\n+ uint8_t Sha1[kSHA1NumBytes]; // Checksum.\n+ // Number of features that this input has and no smaller input has.\n+ size_t NumFeatures = 0;\n+@@ -65,7 +67,8 @@ struct InputInfo {\n+ // of the seed. 
Since we do not know the entropy of a seed that has\n+ // never been executed we assign fresh seeds maximum entropy and\n+ // let II->Energy approach the true entropy from above.\n+- void UpdateEnergy(size_t GlobalNumberOfFeatures) {\n++ void UpdateEnergy(size_t GlobalNumberOfFeatures, bool ScalePerExecTime,\n++ uint64_t AverageTimeOfUnit) {\n+ Energy = 0.0;\n+ SumIncidence = 0;\n+\n+@@ -88,6 +91,27 @@ struct InputInfo {\n+ // Normalize.\n+ if (SumIncidence != 0)\n+ Energy = (Energy / SumIncidence) + logl(SumIncidence);\n++\n++ if (ScalePerExecTime) {\n++ // Scaling to favor inputs with lower execution time.\n++ uint32_t PerfScore = 100;\n++ if (TimeOfUnit.count() * 0.1 > AverageTimeOfUnit)\n++ PerfScore = 10;\n++ else if (TimeOfUnit.count() * 0.25 > AverageTimeOfUnit)\n++ PerfScore = 25;\n++ else if (TimeOfUnit.count() * 0.5 > AverageTimeOfUnit)\n++ PerfScore = 50;\n++ else if (TimeOfUnit.count() * 0.75 > AverageTimeOfUnit)\n++ PerfScore = 75;\n++ else if (TimeOfUnit.count() * 4 < AverageTimeOfUnit)\n++ PerfScore = 300;\n++ else if (TimeOfUnit.count() * 3 < AverageTimeOfUnit)\n++ PerfScore = 200;\n++ else if (TimeOfUnit.count() * 2 < AverageTimeOfUnit)\n++ PerfScore = 150;\n++\n++ Energy *= PerfScore;\n++ }\n+ }\n+\n+ // Increment the frequency of the feature Idx.\n+@@ -120,6 +144,7 @@ struct EntropicOptions {\n+ bool Enabled;\n+ size_t NumberOfRarestFeatures;\n+ size_t FeatureFrequencyThreshold;\n++ bool ScalePerExecTime;\n+ };\n+\n+ class InputCorpus {\n+@@ -178,6 +203,7 @@ public:\n+ const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\n+ InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n+ bool HasFocusFunction,\n++ std::chrono::microseconds TimeOfUnit,\n+ const Vector<uint32_t> &FeatureSet,\n+ const DataFlowTrace &DFT, const InputInfo *BaseII) {\n+ assert(!U.empty());\n+@@ -187,6 +213,7 @@ public:\n+ InputInfo &II = *Inputs.back();\n+ II.U = U;\n+ II.NumFeatures = NumFeatures;\n++ II.TimeOfUnit = TimeOfUnit;\n+ II.MayDeleteFile = MayDeleteFile;\n+ II.UniqFeatureSet = FeatureSet;\n+ II.HasFocusFunction = HasFocusFunction;\n+@@ -460,12 +487,18 @@ private:\n+ Weights.resize(N);\n+ std::iota(Intervals.begin(), Intervals.end(), 0);\n+\n++ std::chrono::microseconds TotalTimeOfUnit(0);\n++ for (auto II : Inputs) {\n++ TotalTimeOfUnit += II->TimeOfUnit;\n++ }\n++\n+ bool VanillaSchedule = true;\n+ if (Entropic.Enabled) {\n+ for (auto II : Inputs) {\n+ if (II->NeedsEnergyUpdate && II->Energy != 0.0) {\n+ II->NeedsEnergyUpdate = false;\n+- II->UpdateEnergy(RareFeatures.size());\n++ II->UpdateEnergy(RareFeatures.size(), Entropic.ScalePerExecTime,\n++ TotalTimeOfUnit.count() / N);\n+ }\n+ }\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+index bed9e84de67..d31783debff 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+@@ -718,6 +718,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ (size_t)Flags.entropic_feature_frequency_threshold;\n+ Options.EntropicNumberOfRarestFeatures =\n+ (size_t)Flags.entropic_number_of_rarest_features;\n++ Options.EntropicScalePerExecTime = Flags.entropic_scale_per_exec_time;\n+ if (Options.Entropic) {\n+ if (!Options.FocusFunction.empty()) {\n+ Printf(\"ERROR: The parameters `--entropic` and `--focus_function` cannot \"\n+@@ -733,6 +734,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Entropic.FeatureFrequencyThreshold =\n+ Options.EntropicFeatureFrequencyThreshold;\n+ 
Entropic.NumberOfRarestFeatures = Options.EntropicNumberOfRarestFeatures;\n++ Entropic.ScalePerExecTime = Options.EntropicScalePerExecTime;\n+\n+ unsigned Seed = Flags.seed;\n+ // Initialize Seed.\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+index 832224a705d..dda66e012a8 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n++++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+@@ -161,6 +161,7 @@ FUZZER_FLAG_INT(entropic_number_of_rarest_features, 100, \"Experimental. If \"\n+ \"entropic is enabled, we keep track of the frequencies only for the \"\n+ \"Top-X least abundant features (union features that are considered as \"\n+ \"rare).\")\n++FUZZER_FLAG_INT(entropic_scale_per_exec_time, 0, \"Experimental.\")\n+\n+ FUZZER_FLAG_INT(analyze_dict, 0, \"Experimental\")\n+ FUZZER_DEPRECATED_FLAG(use_clang_coverage)\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+index 02db6d27b0a..9bb788c4efe 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+@@ -469,6 +469,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ return false;\n+\n+ ExecuteCallback(Data, Size);\n++ auto TimeOfUnit = duration_cast<microseconds>(UnitStopTime - UnitStartTime);\n+\n+ UniqFeatureSetTmp.clear();\n+ size_t FoundUniqFeaturesOfII = 0;\n+@@ -491,7 +492,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ TPC.UpdateObservedPCs();\n+ auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+ MayDeleteFile, TPC.ObservedFocusFunction(),\n+- UniqFeatureSetTmp, DFT, II);\n++ TimeOfUnit, UniqFeatureSetTmp, DFT, II);\n+ WriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\n+ NewII->UniqFeatureSet);\n+ return true;\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+index b75e7c7af70..ed309eabf5c 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n++++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+@@ -47,6 +47,7 @@ struct FuzzingOptions {\n+ bool Entropic = false;\n+ size_t EntropicFeatureFrequencyThreshold = 0xFF;\n+ size_t EntropicNumberOfRarestFeatures = 100;\n++ bool EntropicScalePerExecTime = false;\n+ std::string OutputCorpus;\n+ std::string ArtifactPrefix = \"./\";\n+ std::string ExactArtifactPath;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_exectime/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-21\n+ fuzzers:\n+ - entropic_exectime\n+\n- experiment: 2020-08-20\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add a temporary entropic variant for testing scheduling policy that uses exectime. (#688) |
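The scaling logic this variant enables is compact enough to restate. Below is a rough Python transcription of the `UpdateEnergy` change in `patch.diff` (times in microseconds, `average_time` being the corpus-wide mean; function and variable names are mine, the C++ above is authoritative):

```python
def perf_score(time_of_unit, average_time):
    """AFL-style bucketing: slow inputs lose energy, fast inputs gain it."""
    if time_of_unit * 0.1 > average_time:    # roughly 10x slower than average
        return 10
    if time_of_unit * 0.25 > average_time:
        return 25
    if time_of_unit * 0.5 > average_time:
        return 50
    if time_of_unit * 0.75 > average_time:
        return 75
    if time_of_unit * 4 < average_time:      # much faster than average
        return 300
    if time_of_unit * 3 < average_time:
        return 200
    if time_of_unit * 2 < average_time:
        return 150
    return 100                               # default: energy unchanged


# The patch multiplies the Entropic energy by this score, so relative to the
# default of 100 an input keeps between 0.1x and 3x of its computed energy.
assert perf_score(20, 100) == 300   # fast input, boosted
assert perf_score(250, 100) == 50   # slow input, damped
```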
258,396 | 19.08.2020 23:11:21 | 25,200 | c585161d3253904c40b15616265520630a550f8a | Base entropic_keepseed variant on entropic_exectime variant | [
{
"change_type": "MODIFY",
"old_path": "fuzzers/entropic_keepseed/fuzzer.py",
"new_path": "fuzzers/entropic_keepseed/fuzzer.py",
"diff": "@@ -29,5 +29,6 @@ def fuzz(input_corpus, output_corpus, target_binary):\ntarget_binary,\nextra_flags=[\n'-entropic=1', '-keep_seed=1',\n- '-cross_over_uniformdist=1'\n+ '-cross_over_uniformdist=1',\n+ '-entropic_scale_per_exec_time=1'\n])\n"
},
{
"change_type": "MODIFY",
"old_path": "fuzzers/entropic_keepseed/patch.diff",
"new_path": "fuzzers/entropic_keepseed/patch.diff",
"diff": "-commit 72e16fc7160185305eee536c257a478ae84f7082\n-Author: Dokyung Song <[email protected]>\n-Date: Fri Jul 31 00:07:20 2020 +0000\n-\n- [libFuzzer] Optionally keep initial seed inputs regardless of whether they discover new features or not.\n-\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n-index 54d1e09ec6d..5c687013c59 100644\n+index 54d1e09ec6d..ed7d0837659 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h\n-@@ -33,6 +33,7 @@ struct InputInfo {\n+@@ -18,6 +18,7 @@\n+ #include \"FuzzerSHA1.h\"\n+ #include \"FuzzerTracePC.h\"\n+ #include <algorithm>\n++#include <chrono>\n+ #include <numeric>\n+ #include <random>\n+ #include <unordered_set>\n+@@ -26,6 +27,7 @@ namespace fuzzer {\n+\n+ struct InputInfo {\n+ Unit U; // The actual input data.\n++ std::chrono::microseconds TimeOfUnit;\n+ uint8_t Sha1[kSHA1NumBytes]; // Checksum.\n+ // Number of features that this input has and no smaller input has.\n+ size_t NumFeatures = 0;\n+@@ -33,6 +35,7 @@ struct InputInfo {\n// Stats.\nsize_t NumExecutedMutations = 0;\nsize_t NumSuccessfullMutations = 0;\n@@ -16,7 +26,53 @@ index 54d1e09ec6d..5c687013c59 100644\nbool MayDeleteFile = false;\nbool Reduced = false;\nbool HasFocusFunction = false;\n-@@ -131,9 +132,12 @@ class InputCorpus {\n+@@ -65,7 +68,8 @@ struct InputInfo {\n+ // of the seed. Since we do not know the entropy of a seed that has\n+ // never been executed we assign fresh seeds maximum entropy and\n+ // let II->Energy approach the true entropy from above.\n+- void UpdateEnergy(size_t GlobalNumberOfFeatures) {\n++ void UpdateEnergy(size_t GlobalNumberOfFeatures, bool ScalePerExecTime,\n++ uint64_t AverageTimeOfUnit) {\n+ Energy = 0.0;\n+ SumIncidence = 0;\n+\n+@@ -88,6 +92,27 @@ struct InputInfo {\n+ // Normalize.\n+ if (SumIncidence != 0)\n+ Energy = (Energy / SumIncidence) + logl(SumIncidence);\n++\n++ if (ScalePerExecTime) {\n++ // Scaling to favor inputs with lower execution time.\n++ uint32_t PerfScore = 100;\n++ if (TimeOfUnit.count() * 0.1 > AverageTimeOfUnit)\n++ PerfScore = 10;\n++ else if (TimeOfUnit.count() * 0.25 > AverageTimeOfUnit)\n++ PerfScore = 25;\n++ else if (TimeOfUnit.count() * 0.5 > AverageTimeOfUnit)\n++ PerfScore = 50;\n++ else if (TimeOfUnit.count() * 0.75 > AverageTimeOfUnit)\n++ PerfScore = 75;\n++ else if (TimeOfUnit.count() * 4 < AverageTimeOfUnit)\n++ PerfScore = 300;\n++ else if (TimeOfUnit.count() * 3 < AverageTimeOfUnit)\n++ PerfScore = 200;\n++ else if (TimeOfUnit.count() * 2 < AverageTimeOfUnit)\n++ PerfScore = 150;\n++\n++ Energy *= PerfScore;\n++ }\n+ }\n+\n+ // Increment the frequency of the feature Idx.\n+@@ -120,6 +145,7 @@ struct EntropicOptions {\n+ bool Enabled;\n+ size_t NumberOfRarestFeatures;\n+ size_t FeatureFrequencyThreshold;\n++ bool ScalePerExecTime;\n+ };\n+\n+ class InputCorpus {\n+@@ -131,9 +157,12 @@ class InputCorpus {\nEntropicOptions Entropic;\n@@ -31,24 +87,26 @@ index 54d1e09ec6d..5c687013c59 100644\nmemset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));\nmemset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));\n}\n-@@ -177,7 +181,7 @@ public:\n+@@ -177,7 +206,8 @@ public:\nbool empty() const { return Inputs.empty(); }\nconst Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }\nInputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,\n- bool HasFocusFunction,\n+ bool HasFocusFunction, bool SeedInput,\n++ std::chrono::microseconds TimeOfUnit,\nconst Vector<uint32_t> 
&FeatureSet,\nconst DataFlowTrace &DFT, const InputInfo *BaseII) {\nassert(!U.empty());\n-@@ -187,6 +191,7 @@ public:\n+@@ -187,6 +217,8 @@ public:\nInputInfo &II = *Inputs.back();\nII.U = U;\nII.NumFeatures = NumFeatures;\n+ II.SeedInput = SeedInput;\n++ II.TimeOfUnit = TimeOfUnit;\nII.MayDeleteFile = MayDeleteFile;\nII.UniqFeatureSet = FeatureSet;\nII.HasFocusFunction = HasFocusFunction;\n-@@ -276,6 +281,11 @@ public:\n+@@ -276,6 +308,11 @@ public:\nreturn Idx;\n}\n@@ -60,8 +118,28 @@ index 54d1e09ec6d..5c687013c59 100644\nvoid PrintStats() {\nfor (size_t i = 0; i < Inputs.size(); i++) {\nconst auto &II = *Inputs[i];\n+@@ -460,12 +497,18 @@ private:\n+ Weights.resize(N);\n+ std::iota(Intervals.begin(), Intervals.end(), 0);\n+\n++ std::chrono::microseconds TotalTimeOfUnit(0);\n++ for (auto II : Inputs) {\n++ TotalTimeOfUnit += II->TimeOfUnit;\n++ }\n++\n+ bool VanillaSchedule = true;\n+ if (Entropic.Enabled) {\n+ for (auto II : Inputs) {\n+ if (II->NeedsEnergyUpdate && II->Energy != 0.0) {\n+ II->NeedsEnergyUpdate = false;\n+- II->UpdateEnergy(RareFeatures.size());\n++ II->UpdateEnergy(RareFeatures.size(), Entropic.ScalePerExecTime,\n++ TotalTimeOfUnit.count() / N);\n+ }\n+ }\n+\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n-index 8339697396c..0933af56804 100644\n+index bed9e84de67..c11e6a0084a 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp\n@@ -649,6 +649,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n@@ -81,7 +159,23 @@ index 8339697396c..0933af56804 100644\nOptions.MutateDepth = Flags.mutate_depth;\nOptions.ReduceDepth = Flags.reduce_depth;\nOptions.UseCounters = Flags.use_counters;\n-@@ -753,7 +756,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+@@ -718,6 +721,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ (size_t)Flags.entropic_feature_frequency_threshold;\n+ Options.EntropicNumberOfRarestFeatures =\n+ (size_t)Flags.entropic_number_of_rarest_features;\n++ Options.EntropicScalePerExecTime = Flags.entropic_scale_per_exec_time;\n+ if (Options.Entropic) {\n+ if (!Options.FocusFunction.empty()) {\n+ Printf(\"ERROR: The parameters `--entropic` and `--focus_function` cannot \"\n+@@ -733,6 +737,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\n+ Entropic.FeatureFrequencyThreshold =\n+ Options.EntropicFeatureFrequencyThreshold;\n+ Entropic.NumberOfRarestFeatures = Options.EntropicNumberOfRarestFeatures;\n++ Entropic.ScalePerExecTime = Options.EntropicScalePerExecTime;\n+\n+ unsigned Seed = Flags.seed;\n+ // Initialize Seed.\n+@@ -753,7 +758,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {\nRandom Rand(Seed);\nauto *MD = new MutationDispatcher(Rand, Options);\n@@ -92,7 +186,7 @@ index 8339697396c..0933af56804 100644\nfor (auto &U: Dictionary)\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n-index 832224a705d..10b1f5f539a 100644\n+index 832224a705d..3f66242985d 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def\n+++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def\n@@ -23,7 +23,14 @@ FUZZER_FLAG_INT(len_control, 100, \"Try generating small inputs first, \"\n@@ -110,6 +204,14 @@ index 832224a705d..10b1f5f539a 100644\nFUZZER_FLAG_INT(mutate_depth, 5,\n\"Apply this number of consecutive mutations to each input.\")\nFUZZER_FLAG_INT(reduce_depth, 0, \"Experimental/internal. 
\"\n+@@ -161,6 +168,7 @@ FUZZER_FLAG_INT(entropic_number_of_rarest_features, 100, \"Experimental. If \"\n+ \"entropic is enabled, we keep track of the frequencies only for the \"\n+ \"Top-X least abundant features (union features that are considered as \"\n+ \"rare).\")\n++FUZZER_FLAG_INT(entropic_scale_per_exec_time, 0, \"Experimental.\")\n+\n+ FUZZER_FLAG_INT(analyze_dict, 0, \"Experimental\")\n+ FUZZER_DEPRECATED_FLAG(use_clang_coverage)\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp\nindex d9e6b79443e..97e91cbe869 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp\n@@ -154,10 +256,18 @@ index 31096ce804b..e75807209f5 100644\nsize_t NumberOfLeakDetectionAttempts = 0;\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-index 02db6d27b0a..28d5f32c0d6 100644\n+index 02db6d27b0a..65a02c35758 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n+++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp\n-@@ -478,7 +478,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+@@ -469,6 +469,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+ return false;\n+\n+ ExecuteCallback(Data, Size);\n++ auto TimeOfUnit = duration_cast<microseconds>(UnitStopTime - UnitStartTime);\n+\n+ UniqFeatureSetTmp.clear();\n+ size_t FoundUniqFeaturesOfII = 0;\n+@@ -478,7 +479,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\nUniqFeatureSetTmp.push_back(Feature);\nif (Options.Entropic)\nCorpus.UpdateFeatureFrequency(II, Feature);\n@@ -166,24 +276,22 @@ index 02db6d27b0a..28d5f32c0d6 100644\nif (std::binary_search(II->UniqFeatureSet.begin(),\nII->UniqFeatureSet.end(), Feature))\nFoundUniqFeaturesOfII++;\n-@@ -487,11 +487,12 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n+@@ -487,11 +488,12 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,\n*FoundUniqFeatures = FoundUniqFeaturesOfII;\nPrintPulseAndReportSlowInput(Data, Size);\nsize_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;\n- if (NumNewFeatures) {\n+ if (NumNewFeatures || (Options.KeepSeed && IsExecutingSeedCorpora)) {\nTPC.UpdateObservedPCs();\n-- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n-- MayDeleteFile, TPC.ObservedFocusFunction(),\n+ auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,\n+ MayDeleteFile, TPC.ObservedFocusFunction(),\n- UniqFeatureSetTmp, DFT, II);\n-+ auto NewII =\n-+ Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,\n-+ TPC.ObservedFocusFunction(),\n-+ IsExecutingSeedCorpora, UniqFeatureSetTmp, DFT, II);\n++ IsExecutingSeedCorpora,\n++ TimeOfUnit, UniqFeatureSetTmp, DFT, II);\nWriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),\nNewII->UniqFeatureSet);\nreturn true;\n-@@ -664,8 +665,14 @@ void Fuzzer::MutateAndTestOne() {\n+@@ -664,8 +666,14 @@ void Fuzzer::MutateAndTestOne() {\nMD.StartMutationSequence();\nauto &II = Corpus.ChooseUnitToMutate(MD.GetRand());\n@@ -200,7 +308,7 @@ index 02db6d27b0a..28d5f32c0d6 100644\nconst auto &U = II.U;\nmemcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));\nassert(CurrentUnitData);\n-@@ -764,6 +771,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -764,6 +772,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nassert(CorporaFiles.front().Size <= CorporaFiles.back().Size);\n}\n@@ -209,7 +317,7 @@ index 02db6d27b0a..28d5f32c0d6 100644\n// Load 
and execute inputs one by one.\nfor (auto &SF : CorporaFiles) {\nauto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);\n-@@ -773,6 +782,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -773,6 +783,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nTryDetectingAMemoryLeak(U.data(), U.size(),\n/*DuringInitialCorpusExecution*/ true);\n}\n@@ -218,7 +326,7 @@ index 02db6d27b0a..28d5f32c0d6 100644\n}\nPrintStats(\"INITED\");\n-@@ -785,6 +796,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\n+@@ -785,6 +797,8 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {\nCorpus.NumInputsThatTouchFocusFunction());\n}\n@@ -228,7 +336,7 @@ index 02db6d27b0a..28d5f32c0d6 100644\nPrintf(\"ERROR: no interesting inputs were found. \"\n\"Is the code instrumented for coverage? Exiting.\\n\");\ndiff --git a/compiler-rt/lib/fuzzer/FuzzerOptions.h b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n-index 9d975bd61fe..bb2c14be47b 100644\n+index b75e7c7af70..9b98383b4aa 100644\n--- a/compiler-rt/lib/fuzzer/FuzzerOptions.h\n+++ b/compiler-rt/lib/fuzzer/FuzzerOptions.h\n@@ -18,6 +18,7 @@ struct FuzzingOptions {\n@@ -248,6 +356,14 @@ index 9d975bd61fe..bb2c14be47b 100644\nint MutateDepth = 5;\nbool ReduceDepth = false;\nbool UseCounters = false;\n+@@ -47,6 +50,7 @@ struct FuzzingOptions {\n+ bool Entropic = false;\n+ size_t EntropicFeatureFrequencyThreshold = 0xFF;\n+ size_t EntropicNumberOfRarestFeatures = 100;\n++ bool EntropicScalePerExecTime = false;\n+ std::string OutputCorpus;\n+ std::string ArtifactPrefix = \"./\";\n+ std::string ExactArtifactPath;\ndiff --git a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\nindex 0e9435ab8fc..dfc642ab6d0 100644\n--- a/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# are still testing this feature. You should request an experiment by contacting\n# us as you normally do.\n+- experiment: 2020-08-21\n+ fuzzers:\n+ - entropic_keepseed\n+\n- experiment: 2020-08-20\nfuzzers:\n- aflplusplus\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Base entropic_keepseed variant on entropic_exectime variant (#690) |
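For the keep-seed half of this variant, the behavioural change to libFuzzer's `RunOne` is a one-condition tweak; a hedged Python paraphrase of the C++ hunk in `patch.diff` (again, the C++ is authoritative):

```python
def should_add_to_corpus(num_new_features, keep_seed, executing_seed_corpora):
    """With -keep_seed=1, seed inputs enter the corpus even when they
    discover no new features; otherwise only feature-adding inputs do."""
    return bool(num_new_features) or (keep_seed and executing_seed_corpora)
```

FuzzBench then turns the behaviour on per run through the flags in `fuzzer.py`: `-keep_seed=1`, `-cross_over_uniformdist=1` and, after this commit, `-entropic_scale_per_exec_time=1`.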
258,388 | 25.08.2020 09:35:56 | 25,200 | 4c659ef98f942517773ddfc94c04568bd4fc95ea | Remove service users disclaimer for requesting experiments
The disclaimer is no longer true; we want people to request experiments now. | [
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "#\n# You can run \"make presubmit\" to do basic validation on this file.\n# Please add new experiment requests towards the top of this file.\n-# NOTE: Users of the FuzzBench service should not be editing this file yet, we\n-# are still testing this feature. You should request an experiment by contacting\n-# us as you normally do.\n- experiment: 2020-08-21\nfuzzers:\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Remove service users disclaimer for requesting experiments
The disclaimer is no longer true; we want people to request experiments now. |
258,388 | 25.08.2020 11:07:19 | 25,200 | 87e711666a552fec76d1743004fec83ec3647193 | [docs] Improve wording in benchmark docs | [
{
"change_type": "MODIFY",
"old_path": "docs/developing-fuzzbench/adding_a_new_benchmark.md",
"new_path": "docs/developing-fuzzbench/adding_a_new_benchmark.md",
"diff": "@@ -27,8 +27,8 @@ repo, you must create these yourself.\nYou can use most existing OSS-Fuzz projects a benchmark. First decide which project\nand fuzz target you want to use as a benchmark. Next, find out the commit at which\nyou want to use the project for the benchmark. Finally, find out the date and time\n-(UTC) of that commit in ISO format. This can be done in the (benchmark) project\n-repo as follows:\n+(UTC) of that commit in ISO format. You can get the date and time from the project\n+(benchmark) repo with this command:\n```shell\ngit --no-pager log -1 $COMMIT_HASH --format=%cd --date=iso-strict\n```\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [docs] Improve wording in benchmark docs |
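For context, the command the updated docs recommend prints a strict ISO timestamp: `git --no-pager log -1 $COMMIT_HASH --format=%cd --date=iso-strict` outputs something like `2020-08-25T11:07:19-07:00` (example output, not taken from the docs themselves).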
258,396 | 25.08.2020 17:29:00 | 25,200 | 38e66c2cc0cec7b43bd2db8c39cedd3900b7e022 | Add (libfuzzer|entropic)_magicbytes variants for testing the effectiveness of magic byte mutation | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -36,7 +36,9 @@ jobs:\n- weizz\n# Temporary variants.\n- libfuzzer_keepseed\n+ - libfuzzer_magicbytes\n- entropic_keepseed\n+ - entropic_magicbytes\n- entropic_exectime\nbenchmark_type:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_magicbytes/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project && \\\n+ git checkout bb54bcf84970c04c9748004f3a4cf59b0c1832a7 && \\\n+ patch -p1 < /patch.diff && \\\n+ cd /llvm-project/compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /libEntropic.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_magicbytes/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.entropic import fuzzer as entropic_fuzzer\n+from fuzzers.libfuzzer import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ entropic_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus,\n+ output_corpus,\n+ target_binary,\n+ extra_flags=[\n+ '-entropic=1',\n+ ])\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_magicbytes/patch.diff",
"diff": "+commit c4ecc3388200ea614476cece3a7dde6f0c1a7ca8\n+Author: Dokyung Song <[email protected]>\n+Date: Fri Aug 21 00:54:14 2020 +0000\n+\n+ [libFuzzer] Extend ChangeBinaryInteger mutator to support overwriting selected input with predefined integers.\n+\n+ (Experimental - Uploading this to get early feedback before a large-scale experiment.)\n+\n+ This patch extends the ChangeBinaryInteger mutator to support overwriting the\n+ selected input with predefined integers. The rationale for this heuristic is\n+ that certain byte (word, qword, or qword) overwrite at a specific location (with\n+ \"magic\" integers) in a large input may make an invalid input valid, potentially\n+ triggering new neighbor code paths.\n+\n+ Currently, triggering such an overwrite is costly in libFuzzer.\n+ ChangeBinaryInteger mutator may do the same, but only with a low probability,\n+ because the chosen byte (word, dword, or qword) must already be an integer\n+ ranging from -10 to 10.\n+\n+ CopyPart/CrossOver mutator may also effectively do the same, but only if these\n+ predefined integers are found in any of the corpus inputs; even if the corpus\n+ inputs do contain the predefined integers, the chances are much narrower because\n+ a specific location and a specific width have to be selected.\n+\n+ InsertRepeatedBytes combined with EraseBytes mutators (or other combinations of\n+ existing mutators) may eventually trigger the desired change, but still the\n+ probability is low, as the probabilities of different mutators multiply.\n+\n+ This patch allows to find the desired input in a single mutation (as tested by\n+ the accompanying test - overwrite-bytes.test), effectively increasing the\n+ probability of finding the desired input given a corpus input.\n+\n+ Differential Revision: https://reviews.llvm.org/D86358\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..75527c95aca 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -379,6 +379,67 @@ size_t MutationDispatcher::Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size,\n+ return Size;\n+ }\n+\n++#define INTERESTING_8 \\\n++ -128, /* Overflow signed 8-bit when decremented */ \\\n++ -1, /* */ \\\n++ 0, /* */ \\\n++ 1, /* */ \\\n++ 16, /* One-off with common buffer size */ \\\n++ 32, /* One-off with common buffer size */ \\\n++ 64, /* One-off with common buffer size */ \\\n++ 100, /* One-off with common buffer size */ \\\n++ 127 /* Overflow signed 8-bit when incremented */\n++\n++#define INTERESTING_16 \\\n++ -32768, /* Overflow signed 16-bit when decremented */ \\\n++ -129, /* Overflow signed 8-bit */ \\\n++ 128, /* Overflow signed 8-bit */ \\\n++ 255, /* Overflow unsig 8-bit when incremented */ \\\n++ 256, /* Overflow unsig 8-bit */ \\\n++ 512, /* One-off with common buffer size */ \\\n++ 1000, /* One-off with common buffer size */ \\\n++ 1024, /* One-off with common buffer size */ \\\n++ 4096, /* One-off with common buffer size */ \\\n++ 32767 /* Overflow signed 16-bit when incremented */\n++\n++#define INTERESTING_32 \\\n++ -2147483648LL, /* Overflow signed 32-bit when decremented */ \\\n++ -100663046, /* Large negative number (endian-agnostic) */ \\\n++ -32769, /* Overflow signed 16-bit */ \\\n++ 32768, /* Overflow signed 16-bit */ \\\n++ 65535, /* Overflow unsig 16-bit when incremented */ \\\n++ 65536, /* Overflow unsig 16 bit */ \\\n++ 100663045, /* Large positive number (endian-agnostic) */ \\\n++ 2147483647 /* Overflow signed 
32-bit when incremented */\n++\n++template <class T> class MagicInt8 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8};\n++};\n++\n++template <class T> class MagicInt16 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8, INTERESTING_16};\n++};\n++\n++template <class T> class MagicInt32 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};\n++};\n++\n++// Definitions\n++template <class T> constexpr T MagicInt8<T>::Values[];\n++template <class T> constexpr T MagicInt16<T>::Values[];\n++template <class T> constexpr T MagicInt32<T>::Values[];\n++\n++template <class T>\n++using MagicInt = typename std::conditional<\n++ sizeof(T) == 1, MagicInt8<int8_t>,\n++ typename std::conditional<\n++ sizeof(T) == 2, MagicInt16<int16_t>,\n++ typename std::conditional<sizeof(T) == 4, MagicInt32<int32_t>,\n++ MagicInt32<int64_t>>::type>::type>::type;\n++\n+ template<class T>\n+ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {\n+ if (Size < sizeof(T)) return 0;\n+@@ -389,6 +450,13 @@ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {\n+ Val = Size;\n+ if (Rand.RandBool())\n+ Val = Bswap(Val);\n++ } else if (Rand.RandBool()) {\n++ auto SignedVal =\n++ MagicInt<T>::Values[Rand(sizeof(MagicInt<T>::Values) / sizeof(T))];\n++ memcpy(&Val, &SignedVal, sizeof(SignedVal));\n++ if (Rand.RandBool()) {\n++ Val = Bswap(Val);\n++ }\n+ } else {\n+ memcpy(&Val, Data + Off, sizeof(Val));\n+ T Add = Rand(21);\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp b/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp\n+new file mode 100644\n+index 00000000000..0c5f392cde2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp\n+@@ -0,0 +1,9 @@\n++#include <cstdint>\n++#include <cstdio>\n++\n++#include \"OverwriteBytesTest.h\"\n++\n++int main(int argc, char **argv) {\n++ fwrite(SeedInput, sizeof(SeedInput[0]), sizeof(SeedInput), stdout);\n++ return 0;\n++}\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp b/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp\n+new file mode 100644\n+index 00000000000..377a8797b70\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp\n+@@ -0,0 +1,33 @@\n++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n++// See https://llvm.org/LICENSE.txt for license information.\n++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n++\n++#include <cassert>\n++#include <cstdint>\n++#include <cstdio>\n++#include <cstdlib>\n++#include <cstring>\n++\n++#include <algorithm>\n++#include <vector>\n++\n++#include \"OverwriteBytesTest.h\"\n++\n++#define MAGIC_BYTE_VALUE 0x1\n++#define MAGIC_BYTE_OFFSET 0xf\n++\n++static volatile int *Nil = nullptr;\n++\n++extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n++ if (Size != sizeof(SeedInput)) {\n++ return 0;\n++ }\n++\n++ *(uint64_t *)(SeedInput + MAGIC_BYTE_OFFSET) = MAGIC_BYTE_VALUE;\n++\n++ if (memmem(Data, Size, SeedInput, Size) == Data) {\n++ *Nil = 42; // crash.\n++ }\n++\n++ return 0;\n++}\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesTest.h b/compiler-rt/test/fuzzer/OverwriteBytesTest.h\n+new file mode 100644\n+index 00000000000..def5705a435\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesTest.h\n+@@ -0,0 +1,38 @@\n++uint8_t SeedInput[] = {\n++ 0xba,\n++ 0xe3,\n++ 0x92,\n++ 0x7c,\n++ 0x80,\n++ 0x86,\n++ 0x73,\n++ 0x0f,\n++ 0xf2,\n++ 0x83,\n++ 0x23,\n++ 0x0f,\n++ 0xf5,\n++ 0x17,\n++ 0x4c,\n++ 0x08,\n++ 0xf2,\n++ 0x83,\n++ 0x23,\n++ 
0x0f,\n++ 0xd8,\n++ 0x71,\n++ 0x58,\n++ 0x1c,\n++ 0xb9,\n++ 0x8d,\n++ 0xf1,\n++ 0x0e,\n++ 0x80,\n++ 0x86,\n++ 0x73,\n++ 0x0f,\n++ 0xf0,\n++ 0x83,\n++ 0x23,\n++ 0x0f,\n++};\n+diff --git a/compiler-rt/test/fuzzer/overwrite-bytes.test b/compiler-rt/test/fuzzer/overwrite-bytes.test\n+new file mode 100644\n+index 00000000000..1383ff54492\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/overwrite-bytes.test\n+@@ -0,0 +1,9 @@\n++REQUIRES: linux, x86_64\n++RUN: %cpp_compiler %S/OverwriteBytesTest.cpp -o %t-OverwriteBytesTest\n++RUN: %cpp_compiler -fno-sanitize=fuzzer %S/OverwriteBytesMain.cpp -o %t-OverwriteBytesPrintSeed\n++RUN: %t-OverwriteBytesPrintSeed > %t-OverwriteBytesTest.seed\n++\n++RUN: not %run %t-OverwriteBytesTest -seed=1 -use_memmem=0 -mutate_depth=1 -reduce_inputs=0 -runs=10000000 -seed_inputs=%t-OverwriteBytesTest.seed 2>&1 | FileCheck %s\n++\n++CHECK: ABORTING\n++CHECK-NEXT: MS: 1 ChangeBinInt-;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/entropic_magicbytes/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_magicbytes/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+COPY patch.diff /\n+\n+RUN git clone https://github.com/llvm/llvm-project.git /llvm-project && \\\n+ cd /llvm-project/ && \\\n+ git checkout bb54bcf84970c04c9748004f3a4cf59b0c1832a7 && \\\n+ patch -p1 < /patch.diff && \\\n+ cd compiler-rt/lib/fuzzer && \\\n+ (for f in *.cpp; do \\\n+ clang++ -stdlib=libc++ -fPIC -O2 -std=c++11 $f -c & \\\n+ done && wait) && \\\n+ ar r /usr/lib/libFuzzer.a *.o\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_magicbytes/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for libFuzzer fuzzer.\"\"\"\n+\n+from fuzzers.libfuzzer import fuzzer as libfuzzer_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ libfuzzer_fuzzer.build()\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ libfuzzer_fuzzer.run_fuzzer(input_corpus, output_corpus, target_binary)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_magicbytes/patch.diff",
"diff": "+commit c4ecc3388200ea614476cece3a7dde6f0c1a7ca8\n+Author: Dokyung Song <[email protected]>\n+Date: Fri Aug 21 00:54:14 2020 +0000\n+\n+ [libFuzzer] Extend ChangeBinaryInteger mutator to support overwriting selected input with predefined integers.\n+\n+ (Experimental - Uploading this to get early feedback before a large-scale experiment.)\n+\n+ This patch extends the ChangeBinaryInteger mutator to support overwriting the\n+ selected input with predefined integers. The rationale for this heuristic is\n+ that certain byte (word, qword, or qword) overwrite at a specific location (with\n+ \"magic\" integers) in a large input may make an invalid input valid, potentially\n+ triggering new neighbor code paths.\n+\n+ Currently, triggering such an overwrite is costly in libFuzzer.\n+ ChangeBinaryInteger mutator may do the same, but only with a low probability,\n+ because the chosen byte (word, dword, or qword) must already be an integer\n+ ranging from -10 to 10.\n+\n+ CopyPart/CrossOver mutator may also effectively do the same, but only if these\n+ predefined integers are found in any of the corpus inputs; even if the corpus\n+ inputs do contain the predefined integers, the chances are much narrower because\n+ a specific location and a specific width have to be selected.\n+\n+ InsertRepeatedBytes combined with EraseBytes mutators (or other combinations of\n+ existing mutators) may eventually trigger the desired change, but still the\n+ probability is low, as the probabilities of different mutators multiply.\n+\n+ This patch allows to find the desired input in a single mutation (as tested by\n+ the accompanying test - overwrite-bytes.test), effectively increasing the\n+ probability of finding the desired input given a corpus input.\n+\n+ Differential Revision: https://reviews.llvm.org/D86358\n+\n+diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+index 29541eac5dc..75527c95aca 100644\n+--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n++++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp\n+@@ -379,6 +379,67 @@ size_t MutationDispatcher::Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size,\n+ return Size;\n+ }\n+\n++#define INTERESTING_8 \\\n++ -128, /* Overflow signed 8-bit when decremented */ \\\n++ -1, /* */ \\\n++ 0, /* */ \\\n++ 1, /* */ \\\n++ 16, /* One-off with common buffer size */ \\\n++ 32, /* One-off with common buffer size */ \\\n++ 64, /* One-off with common buffer size */ \\\n++ 100, /* One-off with common buffer size */ \\\n++ 127 /* Overflow signed 8-bit when incremented */\n++\n++#define INTERESTING_16 \\\n++ -32768, /* Overflow signed 16-bit when decremented */ \\\n++ -129, /* Overflow signed 8-bit */ \\\n++ 128, /* Overflow signed 8-bit */ \\\n++ 255, /* Overflow unsig 8-bit when incremented */ \\\n++ 256, /* Overflow unsig 8-bit */ \\\n++ 512, /* One-off with common buffer size */ \\\n++ 1000, /* One-off with common buffer size */ \\\n++ 1024, /* One-off with common buffer size */ \\\n++ 4096, /* One-off with common buffer size */ \\\n++ 32767 /* Overflow signed 16-bit when incremented */\n++\n++#define INTERESTING_32 \\\n++ -2147483648LL, /* Overflow signed 32-bit when decremented */ \\\n++ -100663046, /* Large negative number (endian-agnostic) */ \\\n++ -32769, /* Overflow signed 16-bit */ \\\n++ 32768, /* Overflow signed 16-bit */ \\\n++ 65535, /* Overflow unsig 16-bit when incremented */ \\\n++ 65536, /* Overflow unsig 16 bit */ \\\n++ 100663045, /* Large positive number (endian-agnostic) */ \\\n++ 2147483647 /* Overflow signed 
32-bit when incremented */\n++\n++template <class T> class MagicInt8 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8};\n++};\n++\n++template <class T> class MagicInt16 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8, INTERESTING_16};\n++};\n++\n++template <class T> class MagicInt32 {\n++public:\n++ static constexpr T Values[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};\n++};\n++\n++// Definitions\n++template <class T> constexpr T MagicInt8<T>::Values[];\n++template <class T> constexpr T MagicInt16<T>::Values[];\n++template <class T> constexpr T MagicInt32<T>::Values[];\n++\n++template <class T>\n++using MagicInt = typename std::conditional<\n++ sizeof(T) == 1, MagicInt8<int8_t>,\n++ typename std::conditional<\n++ sizeof(T) == 2, MagicInt16<int16_t>,\n++ typename std::conditional<sizeof(T) == 4, MagicInt32<int32_t>,\n++ MagicInt32<int64_t>>::type>::type>::type;\n++\n+ template<class T>\n+ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {\n+ if (Size < sizeof(T)) return 0;\n+@@ -389,6 +450,13 @@ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {\n+ Val = Size;\n+ if (Rand.RandBool())\n+ Val = Bswap(Val);\n++ } else if (Rand.RandBool()) {\n++ auto SignedVal =\n++ MagicInt<T>::Values[Rand(sizeof(MagicInt<T>::Values) / sizeof(T))];\n++ memcpy(&Val, &SignedVal, sizeof(SignedVal));\n++ if (Rand.RandBool()) {\n++ Val = Bswap(Val);\n++ }\n+ } else {\n+ memcpy(&Val, Data + Off, sizeof(Val));\n+ T Add = Rand(21);\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp b/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp\n+new file mode 100644\n+index 00000000000..0c5f392cde2\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesMain.cpp\n+@@ -0,0 +1,9 @@\n++#include <cstdint>\n++#include <cstdio>\n++\n++#include \"OverwriteBytesTest.h\"\n++\n++int main(int argc, char **argv) {\n++ fwrite(SeedInput, sizeof(SeedInput[0]), sizeof(SeedInput), stdout);\n++ return 0;\n++}\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp b/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp\n+new file mode 100644\n+index 00000000000..377a8797b70\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesTest.cpp\n+@@ -0,0 +1,33 @@\n++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n++// See https://llvm.org/LICENSE.txt for license information.\n++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n++\n++#include <cassert>\n++#include <cstdint>\n++#include <cstdio>\n++#include <cstdlib>\n++#include <cstring>\n++\n++#include <algorithm>\n++#include <vector>\n++\n++#include \"OverwriteBytesTest.h\"\n++\n++#define MAGIC_BYTE_VALUE 0x1\n++#define MAGIC_BYTE_OFFSET 0xf\n++\n++static volatile int *Nil = nullptr;\n++\n++extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {\n++ if (Size != sizeof(SeedInput)) {\n++ return 0;\n++ }\n++\n++ *(uint64_t *)(SeedInput + MAGIC_BYTE_OFFSET) = MAGIC_BYTE_VALUE;\n++\n++ if (memmem(Data, Size, SeedInput, Size) == Data) {\n++ *Nil = 42; // crash.\n++ }\n++\n++ return 0;\n++}\n+diff --git a/compiler-rt/test/fuzzer/OverwriteBytesTest.h b/compiler-rt/test/fuzzer/OverwriteBytesTest.h\n+new file mode 100644\n+index 00000000000..def5705a435\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/OverwriteBytesTest.h\n+@@ -0,0 +1,38 @@\n++uint8_t SeedInput[] = {\n++ 0xba,\n++ 0xe3,\n++ 0x92,\n++ 0x7c,\n++ 0x80,\n++ 0x86,\n++ 0x73,\n++ 0x0f,\n++ 0xf2,\n++ 0x83,\n++ 0x23,\n++ 0x0f,\n++ 0xf5,\n++ 0x17,\n++ 0x4c,\n++ 0x08,\n++ 0xf2,\n++ 0x83,\n++ 0x23,\n++ 
0x0f,\n++ 0xd8,\n++ 0x71,\n++ 0x58,\n++ 0x1c,\n++ 0xb9,\n++ 0x8d,\n++ 0xf1,\n++ 0x0e,\n++ 0x80,\n++ 0x86,\n++ 0x73,\n++ 0x0f,\n++ 0xf0,\n++ 0x83,\n++ 0x23,\n++ 0x0f,\n++};\n+diff --git a/compiler-rt/test/fuzzer/overwrite-bytes.test b/compiler-rt/test/fuzzer/overwrite-bytes.test\n+new file mode 100644\n+index 00000000000..1383ff54492\n+--- /dev/null\n++++ b/compiler-rt/test/fuzzer/overwrite-bytes.test\n+@@ -0,0 +1,9 @@\n++REQUIRES: linux, x86_64\n++RUN: %cpp_compiler %S/OverwriteBytesTest.cpp -o %t-OverwriteBytesTest\n++RUN: %cpp_compiler -fno-sanitize=fuzzer %S/OverwriteBytesMain.cpp -o %t-OverwriteBytesPrintSeed\n++RUN: %t-OverwriteBytesPrintSeed > %t-OverwriteBytesTest.seed\n++\n++RUN: not %run %t-OverwriteBytesTest -seed=1 -use_memmem=0 -mutate_depth=1 -reduce_inputs=0 -runs=10000000 -seed_inputs=%t-OverwriteBytesTest.seed 2>&1 | FileCheck %s\n++\n++CHECK: ABORTING\n++CHECK-NEXT: MS: 1 ChangeBinInt-;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/libfuzzer_magicbytes/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-runner\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "# You can run \"make presubmit\" to do basic validation on this file.\n# Please add new experiment requests towards the top of this file.\n+- experiment: 2020-08-25\n+ fuzzers:\n+ - libfuzzer_magicbytes\n+ - entropic_magicbytes\n+\n- experiment: 2020-08-21\nfuzzers:\n- entropic_keepseed\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add (libfuzzer|entropic)_magicbytes variants for testing the effectiveness of magic byte mutation (#693) |
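To make the mutator extension concrete, here is a small Python model of the new branch in `ChangeBinaryInteger`: pick a width and an offset, then overwrite with one of the predefined "interesting" integers, optionally byte-swapped. The constant lists are copied from the patch (8- and 16-bit sets only; the real code also has 32-bit constants and 4/8-byte widths, and keeps the older behaviours), while the helper itself is illustrative:

```python
import random
import struct

INTERESTING_8 = [-128, -1, 0, 1, 16, 32, 64, 100, 127]
INTERESTING_16 = INTERESTING_8 + [-32768, -129, 128, 255, 256, 512, 1000,
                                  1024, 4096, 32767]


def mutate_magic_int(data: bytearray, rng=random):
    """Overwrite a random 1- or 2-byte window with a predefined integer."""
    width, fmt, pool = rng.choice([(1, 'b', INTERESTING_8),
                                   (2, 'h', INTERESTING_16)])
    if len(data) < width:
        return data
    offset = rng.randrange(len(data) - width + 1)
    endian = rng.choice(['<', '>'])  # the patch also byte-swaps randomly
    data[offset:offset + width] = struct.pack(endian + fmt, rng.choice(pool))
    return data
```

As the commit message argues, this lets a single mutation plant a boundary value at an arbitrary offset, something that previously needed an unlikely combination of ChangeBinaryInteger, CopyPart and EraseBytes steps.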
258,388 | 27.08.2020 12:14:30 | 25,200 | 729585596d74405758e87d7c41b72c36067f784e | Improve fuzzer and benchmark validation
* [presubmit] Fix validation for benchmarks and fuzzer names.
GCB apparently does not like capital letters.
* Separate name validation into its own function.
* improve validation
* nits and remove dead code | [
{
"change_type": "MODIFY",
"old_path": "common/benchmark_utils.py",
"new_path": "common/benchmark_utils.py",
"diff": "import os\nimport re\n+import yaml\n+\nfrom common import environment\nfrom common import logs\nfrom common import benchmark_config\nfrom common import utils\n-VALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\\._\\-]+$')\n+# Must be valid in a docker tag.\n+VALID_BENCHMARK_REGEX = re.compile(r'^[a-z0-9\\._\\-]+$')\nBENCHMARKS_DIR = os.path.join(utils.ROOT_DIR, 'benchmarks')\n-def get_project(benchmark):\n- \"\"\"Returns the OSS-Fuzz project of |benchmark| if it is based on an\n- OSS-Fuzz project, otherwise raises ValueError.\"\"\"\n- return benchmark_config.get_config(benchmark)['project']\n-\n-\ndef get_fuzz_target(benchmark):\n\"\"\"Returns the fuzz target of |benchmark|\"\"\"\nreturn benchmark_config.get_config(benchmark)['fuzz_target']\n@@ -53,17 +50,37 @@ def get_builder_image_url(benchmark, fuzzer, docker_registry):\ndocker_registry=docker_registry, fuzzer=fuzzer, benchmark=benchmark)\n-def validate(benchmark):\n- \"\"\"Return True if |benchmark| is a valid fuzzbench fuzzer.\"\"\"\n+def validate_name(benchmark):\n+ \"\"\"Returns True if |benchmark| is a valid fuzzbench benchmark name.\"\"\"\nif VALID_BENCHMARK_REGEX.match(benchmark) is None:\nlogs.error('%s does not conform to %s pattern.', benchmark,\nVALID_BENCHMARK_REGEX.pattern)\nreturn False\n- if benchmark in get_all_benchmarks():\nreturn True\n+\n+\n+def validate(benchmark):\n+ \"\"\"Returns True if |benchmark| is a valid fuzzbench benchmark.\"\"\"\n+ if not validate_name(benchmark):\n+ return False\n+\n+ if benchmark not in get_all_benchmarks():\nlogs.error('%s must have a benchmark.yaml.', benchmark)\nreturn False\n+ try:\n+ get_fuzz_target(benchmark)\n+ except yaml.parser.ParserError:\n+ logs.error('%s must have a valid benchmark.yaml file. Failed to parse.',\n+ benchmark)\n+ return False\n+ except KeyError:\n+ logs.error('%s\\'s benchmark.yaml does not define \"fuzz_target\".',\n+ benchmark)\n+ return False\n+\n+ return True\n+\ndef get_all_benchmarks():\n\"\"\"Returns the list of all benchmarks.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "common/fuzzer_utils.py",
"new_path": "common/fuzzer_utils.py",
"diff": "@@ -23,7 +23,10 @@ from common import utils\nDEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'\nFUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'\n-VALID_FUZZER_REGEX = re.compile(r'^[A-Za-z0-9_]+$')\n+\n+# Must be a valid python module and docker tag.\n+VALID_FUZZER_REGEX = re.compile(r'^[a-z0-9_]+$')\n+\nFUZZERS_DIR = os.path.join(utils.ROOT_DIR, 'fuzzers')\nCOVERAGE_TOOLS = {'coverage', 'coverage_source_based'}\n@@ -95,16 +98,24 @@ def get_fuzz_target_binary(search_directory: str,\nreturn None\n-def validate(fuzzer):\n- \"\"\"Return True if |fuzzer| is a valid fuzzbench fuzzer.\"\"\"\n+def validate_name(fuzzer):\n+ \"\"\"Return True if |fuzzer| is a valid fuzzbench fuzzer name.\"\"\"\n# Although importing probably allows a subset of what the regex allows, use\n# the regex anyway to be safe. The regex is enforcing that the fuzzer is a\n# valid path for GCS or a linux system.\nif VALID_FUZZER_REGEX.match(fuzzer) is None:\n- logs.error('%s does not conform to %s pattern.', fuzzer,\n+ logs.error('Fuzzer: %s does not conform to pattern: %s.', fuzzer,\nVALID_FUZZER_REGEX.pattern)\nreturn False\n+ return True\n+\n+\n+def validate(fuzzer):\n+ \"\"\"Return True if |fuzzer| is a valid fuzzbench fuzzer.\"\"\"\n+ if not validate_name(fuzzer):\n+ return False\n+\n# Try importing the fuzzer module.\nmodule_name = 'fuzzers.{}.fuzzer'.format(fuzzer)\ntry:\n"
},
{
"change_type": "MODIFY",
"old_path": "common/test_benchmark_utils.py",
"new_path": "common/test_benchmark_utils.py",
"diff": "@@ -40,3 +40,19 @@ def test_get_runner_image_url(benchmark, expected_url, oss_fuzz_benchmark):\nassert benchmark_utils.get_runner_image_url('experiment', benchmark,\n'fuzzer',\nDOCKER_REGISTRY) == expected_url\n+\n+\[email protected](('benchmark_name',), [\n+ ('libPNG',),\n+ ('libpng!',),\n+])\n+def test_validate_name_invalid(benchmark_name):\n+ \"\"\"Tests that validate_name returns False for am invalid benchmark name.\"\"\"\n+ assert not benchmark_utils.validate_name(benchmark_name)\n+\n+\[email protected](('benchmark_name',), [('libpng',), ('libpng_1',),\n+ ('libpng-1',), ('libpng.1',)])\n+def test_validate_name_valid(benchmark_name):\n+ \"\"\"Tests that validate_name returns True for a valid benchmark name.\"\"\"\n+ assert benchmark_utils.validate_name(benchmark_name)\n"
},
{
"change_type": "MODIFY",
"old_path": "common/test_fuzzer_utils.py",
"new_path": "common/test_fuzzer_utils.py",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for fuzzer_utils.py.\"\"\"\n+import pytest\n+\nfrom common import fuzzer_utils\n# pylint: disable=invalid-name,unused-argument\n@@ -53,3 +55,16 @@ def test_found_fuzzer_containing_string_without_fuzzer_name_arg(fs, environ):\nfs.create_file('/out/custom-target', contents='\\n\\nLLVMFuzzerTestOneInput')\nassert fuzzer_utils.get_fuzz_target_binary('/out',\nNone) == ('/out/custom-target')\n+\n+\[email protected](('fuzzer_name',), [('afl++',), ('mopt-afl',),\n+ ('mOptAFL',), ('AFL',)])\n+def test_validate_name_invalid(fuzzer_name):\n+ \"\"\"Tests that validate_name returns False for an invalid fuzzer name.\"\"\"\n+ assert not fuzzer_utils.validate_name(fuzzer_name)\n+\n+\[email protected](('fuzzer_name',), [('afl',), ('mopt_afl',), ('afl2',)])\n+def test_validate_name_valid(fuzzer_name):\n+ \"\"\"Tests that validate_name returns True for a valid fuzzer name.\"\"\"\n+ assert fuzzer_utils.validate_name(fuzzer_name)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Improve fuzzer and benchmark validation (#698)
* [presubmit] Fix validation for benchmarks and fuzzer names.
GCB apparently does not like capital letters.
* Separate name validation into its own function.
* improve validation
* nits and remove dead code |
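
A note on the validation change above: the tightened patterns exist because fuzzer and benchmark names end up in Docker tags (and, for fuzzers, in Python module paths), which reject capital letters. A minimal sketch of how the new regexes behave; the regexes are copied from the diff, while the sample names are only illustrative:

```python
# Sketch only: the two patterns come from the commit above; the names
# being checked are made-up examples, not a list from the repository.
import re

VALID_FUZZER_REGEX = re.compile(r'^[a-z0-9_]+$')  # valid module and docker tag
VALID_BENCHMARK_REGEX = re.compile(r'^[a-z0-9\._\-]+$')  # valid docker tag

for name in ['afl', 'mopt_afl', 'AFL', 'afl++']:
    print(name, bool(VALID_FUZZER_REGEX.match(name)))  # True, True, False, False

for name in ['libpng-1.2.56', 'libPNG']:
    print(name, bool(VALID_BENCHMARK_REGEX.match(name)))  # True, False
```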
258,399 | 17.09.2020 15:54:18 | 18,000 | 1b20111481e7f5c779f33a9183b996b2c05b4119 | [doc] local running | [
{
"change_type": "MODIFY",
"old_path": "docs/getting-started/prerequisites.md",
"new_path": "docs/getting-started/prerequisites.md",
"diff": "@@ -90,3 +90,11 @@ You can format your changes using the following command:\n```bash\nmake format\n```\n+\n+### Local Support\n+\n+If you want to run FuzzBench [locally]({{ site.baseurl }}/running-a-local-experiment/#Running a local experiment)\n+on your own machine or servers, it needs `rsync` installed:\n+```bash\n+sudo apt-get install rsync\n+```\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/running-a-local-experiment/running_a_local_experiment.md",
"diff": "+---\n+layout: default\n+title: Running a local experiment\n+nav_order: 5\n+permalink: /running-a-local-experiment\n+---\n+\n+# Running a local experiment\n+\n+This page explains how to run a local [experiment]({{ site.baseurl }}/reference/glossary/#Experiment) on\n+your own.\n+\n+- TOC\n+{:toc}\n+\n+This page will walk you through on how to use `run_experiment.py` to start\n+a local experiment. The `run_experiment.py` script will\n+create and run a dispatcher docker container which runs the experiment,\n+including:\n+1. Building desired fuzzer-benchmark combinations.\n+1. Starting instances to run fuzzing trials with the fuzzer-benchmark\n+ builds and stopping them when they are done.\n+1. Measuring the coverage from these trials.\n+1. Generating reports based on these measurements.\n+\n+The rest of this page will assume all commands are run from the root of\n+FuzzBench checkout.\n+\n+**NOTE**: Currently, there is no resource control in experiment trials (e.g. allocated cpus, memory),\n+but we do plan to add it in the near future.\n+\n+# run_experiment.py\n+\n+## Experiment configuration file\n+\n+You need to create an experiment configuration yaml file.\n+This file contains the configuration parameters for experiments that do not\n+change very often.\n+Below is an example configuration file with explanations of each required\n+parameter.\n+\n+```yaml\n+# The number of trials of a fuzzer-benchmark pair.\n+trials: 5\n+\n+# The amount of time in seconds that each trial is run for.\n+# 1 day = 24 * 60 * 60 = 86400\n+max_total_time: 86400\n+\n+# The location of your docker registry.\n+docker_registry: lab-server:5000\n+\n+# The local experiment folder that will store most of the experiment data.\n+# Please use an absolute path.\n+experiment_filestore: /tmp/experiment-data\n+\n+# The local report folder where HTML reports and summary data will be stored.\n+# Please use an absolute path.\n+report_filestore: /tmp/report-data\n+```\n+\n+## Benchmarks\n+\n+Pick the benchmarks you want to use from the `benchmarks/` directory.\n+\n+For example: `freetype2-2017` and `bloaty_fuzz_target`.\n+\n+## Fuzzers\n+\n+Pick the fuzzers you want to use from the `fuzzers/` directory.\n+For example: `libfuzzer` and `afl`.\n+\n+## Executing run_experiment.py\n+\n+Now that everything is ready, execute `run_experiment.py`:\n+\n+```bash\n+PYTHONPATH=. python3 experiment/run_experiment.py \\\n+--experiment-config experiment-config.yaml \\\n+--benchmarks freetype2-2017 bloaty_fuzz_target \\\n+--experiment-name $EXPERIMENT_NAME \\\n+--fuzzers afl libfuzzer\n+```\n+\n+where `$EXPERIMENT_NAME` is the name you want to give the experiment.\n+\n+## Viewing reports\n+\n+You should eventually be able to see reports from your experiment, that are\n+update at some interval throughout the experiment. However, you may have to wait\n+a while until they first appear since a lot must happen before there is data to\n+generate report. Once they are available, you should be able to view them at:\n+`/tmp/report-data/$EXPERIMENT_NAME/index.html`.\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [doc] local running (#459) |
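
The local-experiment walkthrough added above relies on a handful of required keys in the experiment configuration file. A small, hedged pre-flight check of that precondition; it assumes the `experiment-config.yaml` from the example invocation exists in the working directory, and the key list is taken from the sample config in the new doc:

```python
# Illustrative check, not part of the docs or the repository.
import yaml

REQUIRED_KEYS = ['trials', 'max_total_time', 'docker_registry',
                 'experiment_filestore', 'report_filestore']

with open('experiment-config.yaml') as file_handle:
    config = yaml.safe_load(file_handle)

missing = [key for key in REQUIRED_KEYS if key not in config]
assert not missing, 'Missing config keys: %s' % ', '.join(missing)
```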
258,388 | 25.09.2020 15:28:07 | 25,200 | 08b0bbd86404770fb90a8ddd8f58227367d0383e | Add details on what we want to test privately. | [
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -137,6 +137,12 @@ def main():\n])\nprint('build-{fuzzer}-all: {all_targets}'.format(\nfuzzer=fuzzer, all_targets=all_build_targets))\n+ all_test_run_targets = ' '.join([\n+ 'test-run-{0}-{1}'.format(fuzzer, benchmark)\n+ for benchmark in benchmarks\n+ ])\n+ print('test-run-{fuzzer}-all: {all_targets}'.format(\n+ fuzzer=fuzzer, all_targets=all_test_run_targets))\n# Print all targets build target.\nall_build_targets = ' '.join(\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/faq.md",
"new_path": "docs/faq.md",
"diff": "@@ -75,13 +75,28 @@ to misunderstand configuration details that can have an impact on the results.\nIf you can, please reach out to the authors to confirm your configuration looks\ngood to them.\n-## I'd like to get my fuzzer evaluated on the free FuzzBench service, but I don't want the results to be public yet.\n-\n-Please reach out to us at [email protected].\n-\n-## I'm working on a new tool, but I'm not ready to make my code public yet. Can I use the FuzzBench service?\n+## I'd like to get my fuzzer evaluated, but I don't want the results and/or code to be public yet. Can I use the FuzzBench service?\n+\n+Probably yes. We run private experiments for this purpose.\n+Please reach out to us at [email protected]. If we agree to benchmark your\n+fuzzer, please follow the guide on\n+[adding a new fuzzer]({{ site.baseurl }}/getting-started/adding-a-new-fuzzer/)\n+on how to integrate your fuzzer with FuzzBench.\n+\n+You can ignore the sections on [Requesting an experiment]({{ site.baseurl }}/getting-started/adding-a-new-fuzzer/#requesting-an-experiment) and\n+[Submitting your integration]({{ site.baseurl }}/getting-started/adding-a-new-fuzzer/#submitting-your-integration).\n+Please test your fuzzer works with our benchmarks, we don't have CI to verify\n+this for private experiments.\n+Ideally, you should test all benchmarks using `make -j test-run-$FUZZER-all`.\n+This takes too long on most machines so you should at least test a few of them:\n+```\n+make test-run-$FUZZER-zlib_zlib_uncompress_fuzzer test-run-$FUZZER-libpng-1.2.56\n+```\n-Yes, please reach out to us at [email protected].\n+You should also run `make presubmit` to validate the fuzzer's name and\n+integration code.\n+When your fuzzer is ready, send us a patch file that applies cleanly to\n+FuzzBench with `git apply <patch_file>`.\n## How can you prevent researchers from optimizing their tools only for these benchmarks?\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add details on what we want to test privately. (#321) |
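
The `test-run-{fuzzer}-all` aggregate target generated above is just a space-joined list of the per-benchmark targets. A standalone sketch of the emitted Makefile line, using made-up benchmark names:

```python
# Reproduces the join from generate_makefile.py above; names are examples.
fuzzer = 'afl'
benchmarks = ['libpng-1.2.56', 'zlib_zlib_uncompress_fuzzer']
all_test_run_targets = ' '.join(
    'test-run-{0}-{1}'.format(fuzzer, benchmark) for benchmark in benchmarks)
print('test-run-{fuzzer}-all: {all_targets}'.format(
    fuzzer=fuzzer, all_targets=all_test_run_targets))
# -> test-run-afl-all: test-run-afl-libpng-1.2.56 test-run-afl-zlib_zlib_uncompress_fuzzer
```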
258,388 | 13.10.2020 10:04:56 | 25,200 | 1447ee4ddeae15c670d42041674883f743b738f7 | [CI] Upgrade python environment action to silence warnings
This upgrades the versions of github actions we use to setup the python environment.
Using the old action results in these warnings:
More details here: | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -60,10 +60,6 @@ jobs:\n- oss-fuzz\n- standard\n- env:\n- FUZZER: ${{ matrix.fuzzer }}\n- BENCHMARK_TYPE: ${{ matrix.benchmark_type }}\n-\nsteps:\n- uses: actions/checkout@v2\n- run: | # Needed for git diff to work.\n@@ -79,7 +75,7 @@ jobs:\ndf -h\n- name: Setup Python environment\n- uses: actions/[email protected]\n+ uses: actions/setup-python@v2\nwith:\npython-version: 3.7\n@@ -103,4 +99,4 @@ jobs:\n- name: Build Benchmarks\nrun: |\n- PATH=.venv/bin/:$PATH PYTHONPATH=. python3 .github/workflows/build_and_test_run_fuzzer_benchmarks.py $BENCHMARK_TYPE $FUZZER\n+ PATH=.venv/bin/:$PATH PYTHONPATH=. python3 .github/workflows/build_and_test_run_fuzzer_benchmarks.py ${{ matrix.benchmark_type }} ${{ matrix.fuzzer }}\n"
},
{
"change_type": "MODIFY",
"old_path": ".github/workflows/presubmit.yml",
"new_path": ".github/workflows/presubmit.yml",
"diff": "@@ -12,7 +12,7 @@ jobs:\ngit symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master\n- name: Setup Python environment\n- uses: actions/[email protected]\n+ uses: actions/setup-python@v2\nwith:\npython-version: 3.7\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [CI] Upgrade python environment action to silence warnings (#823)
This upgrades the versions of github actions we use to setup the python environment.
Using the old action results in these warnings: https://github.com/google/fuzzbench/actions/runs/304779767
More details here: https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/ |
258,388 | 13.10.2020 10:40:24 | 25,200 | 74d9f3a216a04354d393942bd82414355dddd6cf | Add optional experiment description
This can be specified using the service or manually.
When specified, FuzzBench will include the description in the experiment report.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "analysis/generate_report.py",
"new_path": "analysis/generate_report.py",
"diff": "@@ -147,7 +147,8 @@ def generate_report(experiment_names,\nqueries.add_nonprivate_experiments_for_merge_with_clobber(\nexperiment_names))\n- report_name = report_name or experiment_names[0]\n+ main_experiment_name = experiment_names[0]\n+ report_name = report_name or main_experiment_name\nfilesystem.create_directory(report_directory)\n@@ -157,6 +158,8 @@ def generate_report(experiment_names,\nelse:\nexperiment_df = queries.get_experiment_data(experiment_names)\n+ description = queries.get_experiment_description(main_experiment_name)\n+\ndata_utils.validate_data(experiment_df)\nif benchmarks is not None:\n@@ -197,7 +200,8 @@ def generate_report(experiment_names,\ntemplate = report_type + '.html'\ndetailed_report = rendering.render_report(experiment_ctx, template,\n- in_progress, coverage_report)\n+ in_progress, coverage_report,\n+ description)\nfilesystem.write(os.path.join(report_directory, 'index.html'),\ndetailed_report)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/queries.py",
"new_path": "analysis/queries.py",
"diff": "@@ -36,6 +36,15 @@ def get_experiment_data(experiment_names):\nreturn pd.read_sql_query(snapshots_query.statement, db_utils.engine)\n+def get_experiment_description(experiment_name):\n+ \"\"\"Get the description of the experiment named by |experiment_name|.\"\"\"\n+ # Do another query for the description so we don't explode the size of the\n+ # results from get_experiment_data.\n+ return db_utils.query(Experiment.description)\\\n+ .select_from(Experiment)\\\n+ .filter(Experiment.name == experiment_name).one()\n+\n+\ndef add_nonprivate_experiments_for_merge_with_clobber(experiment_names):\n\"\"\"Returns a new list containing experiment names preeceeded by a list of\nnonprivate experiments in the order in which they were run, such that\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/rendering.py",
"new_path": "analysis/rendering.py",
"diff": "@@ -20,13 +20,17 @@ import jinja2\nfrom common import utils\n-def render_report(experiment_results, template, in_progress, coverage_report):\n+def render_report(experiment_results, template, in_progress, coverage_report,\n+ description):\n\"\"\"Renders report with |template| using data provided by the\n|experiment_results| context.\nArguments:\ntemplate: filename of the report template. E.g., 'default.html'.\nexperiment_results: an ExperimentResults object.\n+ in_progress: Whether the experiment is still in progress.\n+ coverage_report: Whether to report detailed info about coverage.\n+ description: A description of the experiment.\nReturns the rendered template.\n\"\"\"\n@@ -40,4 +44,5 @@ def render_report(experiment_results, template, in_progress, coverage_report):\nreturn template.render(experiment=experiment_results,\nin_progress=in_progress,\n- coverage_report=coverage_report)\n+ coverage_report=coverage_report,\n+ description=description)\n"
},
{
"change_type": "MODIFY",
"old_path": "analysis/report_templates/default.html",
"new_path": "analysis/report_templates/default.html",
"diff": "The experiment was conducted using this FuzzBench commit:\n<a href=\"https://github.com/google/fuzzbench/commits/{{ experiment.git_hash }}\">{{ experiment.git_hash }}</a>\n{% endif %}\n+\n+ {% if description %}\n+ <br><br>\n+ Experiment Description:<br><br>\n+ {{ description }}\n+ {% endif %}\n</div> <!-- id=\"data\" -->\n</div> <!-- class=\"col\" -->\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "database/alembic/versions/26dcc0e12872_add_experiment_description.py",
"diff": "+\"\"\"Add experiment description\n+\n+Revision ID: 26dcc0e12872\n+Revises: c83ac04855b4\n+Create Date: 2020-10-13 09:04:25.881798\n+\n+\"\"\"\n+from alembic import op\n+import sqlalchemy as sa\n+\n+\n+# revision identifiers, used by Alembic.\n+revision = '26dcc0e12872'\n+down_revision = 'c83ac04855b4'\n+branch_labels = None\n+depends_on = None\n+\n+\n+def upgrade():\n+ op.add_column('experiment', sa.Column(\n+ 'description', sa.UnicodeText(), nullable=True))\n+\n+\n+def downgrade():\n+ op.drop_column('experiment', 'description')\n"
},
{
"change_type": "MODIFY",
"old_path": "database/models.py",
"new_path": "database/models.py",
"diff": "\"\"\"SQLAlchemy Database Models.\"\"\"\nimport sqlalchemy\nfrom sqlalchemy.ext import declarative\n-from sqlalchemy import Boolean, Column, Integer, String, ForeignKey, DateTime\n+from sqlalchemy import Boolean\n+from sqlalchemy import Column\n+from sqlalchemy import DateTime\n+from sqlalchemy import ForeignKey\n+from sqlalchemy import Integer\n+from sqlalchemy import String\n+from sqlalchemy import UnicodeText\nBase = declarative.declarative_base() # pylint: disable=invalid-name\n@@ -29,6 +35,7 @@ class Experiment(Base):\ngit_hash = Column(String, nullable=True)\nprivate = Column(Boolean, nullable=False, default=False)\nexperiment_filestore = Column(String, nullable=True)\n+ description = Column(UnicodeText, nullable=True)\nclass Trial(Base):\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -66,7 +66,8 @@ def _initialize_experiment_in_db(experiment_config: dict):\nname=experiment_config['experiment'],\ngit_hash=experiment_config['git_hash'],\nprivate=experiment_config.get('private', True),\n- experiment_filestore=experiment_config['experiment_filestore'])\n+ experiment_filestore=experiment_config['experiment_filestore'],\n+ description=experiment_config['description']),\n])\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/run_experiment.py",
"new_path": "experiment/run_experiment.py",
"diff": "@@ -205,6 +205,7 @@ def start_experiment( # pylint: disable=too-many-arguments\nconfig_filename: str,\nbenchmarks: List[str],\nfuzzers: List[str],\n+ description: str = None,\nno_seeds=False,\nno_dictionaries=False,\noss_fuzz_corpus=False):\n@@ -222,6 +223,7 @@ def start_experiment( # pylint: disable=too-many-arguments\nconfig['no_seeds'] = no_seeds\nconfig['no_dictionaries'] = no_dictionaries\nconfig['oss_fuzz_corpus'] = oss_fuzz_corpus\n+ config['description'] = description\nset_up_experiment_config_file(config)\n@@ -476,6 +478,10 @@ def main():\n'--experiment-name',\nhelp='Experiment name.',\nrequired=True)\n+ parser.add_argument('-d',\n+ '--description',\n+ help='Description of the experiment.',\n+ required=False)\nparser.add_argument('-f',\n'--fuzzers',\nhelp='Fuzzers to use.',\n@@ -509,6 +515,7 @@ def main():\nargs.experiment_config,\nargs.benchmarks,\nfuzzers,\n+ description=args.description,\nno_seeds=args.no_seeds,\nno_dictionaries=args.no_dictionaries,\noss_fuzz_corpus=args.oss_fuzz_corpus)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_data/experiment-config.yaml",
"new_path": "experiment/test_data/experiment-config.yaml",
"diff": "@@ -31,3 +31,4 @@ git_hash: \"git-hash\"\nno_seeds: false\nno_dictionaries: false\noss_fuzz_corpus: false\n+description: \"Test experiment\"\n"
},
{
"change_type": "MODIFY",
"old_path": "service/automatic_run_experiment.py",
"new_path": "service/automatic_run_experiment.py",
"diff": "@@ -21,6 +21,7 @@ import collections\nimport os\nimport re\nimport sys\n+from typing import Optional\nfrom common import logs\nfrom common import fuzzer_utils\n@@ -77,13 +78,19 @@ BENCHMARKS = [\ndef _get_experiment_name(experiment_config: dict) -> str:\n- \"\"\"Returns the name of the experiment described by experiment_config as a\n+ \"\"\"Returns the name of the experiment described by |experiment_config| as a\nstring.\"\"\"\n# Use str because the yaml parser will parse things like `2020-05-06` as\n# a datetime if not included in quotes.\nreturn str(experiment_config['experiment'])\n+def _get_description(experiment_config: dict) -> Optional[str]:\n+ \"\"\"Returns the description of the experiment described by\n+ |experiment_config| as a string.\"\"\"\n+ return experiment_config.get('description')\n+\n+\ndef _get_requested_experiments():\n\"\"\"Return requested experiments.\"\"\"\nreturn yaml_utils.read(REQUESTED_EXPERIMENTS_PATH)\n@@ -203,10 +210,11 @@ def run_requested_experiment(dry_run):\nlogs.info('Running experiment: %s with fuzzers: %s.', experiment_name,\n' '.join(fuzzers))\n- return _run_experiment(experiment_name, fuzzers, dry_run)\n+ description = _get_description(requested_experiment)\n+ return _run_experiment(experiment_name, fuzzers, description, dry_run)\n-def _run_experiment(experiment_name, fuzzers, dry_run=False):\n+def _run_experiment(experiment_name, fuzzers, description, dry_run=False):\n\"\"\"Run an experiment named |experiment_name| on |fuzzer_configs| and shut it\ndown once it terminates.\"\"\"\nlogs.info('Starting experiment: %s.', experiment_name)\n@@ -214,7 +222,7 @@ def _run_experiment(experiment_name, fuzzers, dry_run=False):\nlogs.info('Dry run. Not actually running experiment.')\nreturn\nrun_experiment.start_experiment(experiment_name, EXPERIMENT_CONFIG_FILE,\n- BENCHMARKS, fuzzers)\n+ BENCHMARKS, fuzzers, description)\ndef main():\n"
},
{
"change_type": "MODIFY",
"old_path": "service/test_automatic_run_experiment.py",
"new_path": "service/test_automatic_run_experiment.py",
"diff": "@@ -28,10 +28,11 @@ EXPERIMENT = '2020-01-01'\nEXPERIMENT_REQUESTS = [{\n'experiment': datetime.date(2020, 6, 8),\n- 'fuzzers': ['aflplusplus', 'libfuzzer']\n+ 'fuzzers': ['aflplusplus', 'libfuzzer'],\n}, {\n'experiment': datetime.date(2020, 6, 5),\n- 'fuzzers': ['honggfuzz', 'afl']\n+ 'fuzzers': ['honggfuzz', 'afl'],\n+ 'description': 'Test experiment',\n}]\n@@ -94,7 +95,7 @@ def test_run_requested_experiment(mocked_get_requested_experiments,\n]\nexpected_calls = [\nmock.call(expected_experiment_name, expected_config_file,\n- expected_benchmarks, expected_fuzzers)\n+ expected_benchmarks, expected_fuzzers, 'Test experiment')\n]\nstart_experiment_call_args = mocked_start_experiment.call_args_list\nassert len(start_experiment_call_args) == 1\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Add optional experiment description (#822)
This can be specified using the service or manually.
When specified, Fuzzbench will include the description in the experiment report.
Fixes #805. |
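
`get_experiment_description` in the diff above deliberately issues a second, single-column query so the per-snapshot results stay small. A self-contained sketch of that pattern, written in the SQLAlchemy 1.x style used by the repository's `models.py`; the in-memory SQLite engine and the sample row are assumptions for illustration only:

```python
import sqlalchemy
from sqlalchemy import Column, String, UnicodeText
from sqlalchemy.ext import declarative
from sqlalchemy.orm import sessionmaker

Base = declarative.declarative_base()

class Experiment(Base):
    __tablename__ = 'experiment'
    name = Column(String, primary_key=True)
    description = Column(UnicodeText, nullable=True)

engine = sqlalchemy.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Experiment(name='2020-10-13-test', description='Test experiment'))
session.commit()

# Select only the description column instead of whole Experiment rows.
description, = session.query(Experiment.description).filter(
    Experiment.name == '2020-10-13-test').one()
print(description)  # -> Test experiment
```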
258,388 | 13.10.2020 11:39:47 | 25,200 | 521479c1cd9d4817861a7572b5d4889331956ee3 | [CI][GCB] Fix commit build trigger
This fixes the commit build trigger we use to build base-images
on every commit to master. While this trigger is not mandatory,
using it speeds up builds in CI by ensuring they never have to build
base-images (including python). | [
{
"change_type": "MODIFY",
"old_path": "docker/gcb/base-images.yaml",
"new_path": "docker/gcb/base-images.yaml",
"diff": "@@ -18,6 +18,13 @@ images:\n- gcr.io/fuzzbench/base-image:test-experiment\n- gcr.io/fuzzbench/base-image\nsteps:\n+- args:\n+ - pull\n+ - ubuntu:xenial\n+ env:\n+ - DOCKER_BUILDKIT=1\n+ id: pull-ubuntu-xenial\n+ name: docker:19.03.12\n- args:\n- build\n- --tag\n@@ -31,6 +38,8 @@ steps:\n- --file\n- docker/base-image/Dockerfile\n- docker/base-image\n- env: DOCKER_BUILDKIT=1\n+ env:\n+ - DOCKER_BUILDKIT=1\nid: base-image\nname: docker:19.03.12\n+ wait_for: []\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/generate_cloudbuild.py",
"new_path": "experiment/build/generate_cloudbuild.py",
"diff": "@@ -80,7 +80,7 @@ def create_cloud_build_spec(image_templates,\nif build_base_images:\ncloud_build_spec['steps'].append({\n'id': 'pull-ubuntu-xenial',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': DOCKER_IMAGE,\n'args': ['pull', 'ubuntu:xenial'],\n})\n@@ -88,7 +88,7 @@ def create_cloud_build_spec(image_templates,\nfor image_name, image_specs in image_templates.items():\nstep = {\n'id': image_name,\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': DOCKER_IMAGE,\n}\nstep['args'] = [\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_generate_cloudbuild.py",
"new_path": "experiment/build/test_generate_cloudbuild.py",
"diff": "@@ -40,12 +40,12 @@ def test_generate_cloud_build_spec_build_base_image():\nexpected_spec = {\n'steps': [{\n'id': 'pull-ubuntu-xenial',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': ['pull', 'ubuntu:xenial']\n}, {\n'id': 'base-image',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': [\n'build', '--tag', 'gcr.io/fuzzbench/base-image', '--tag',\n@@ -90,7 +90,7 @@ def test_generate_cloud_build_spec_build_fuzzer_benchmark():\nexpected_spec = {\n'steps': [{\n'id': 'afl-zlib-builder-intermediate',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': [\n'build', '--tag',\n@@ -159,7 +159,7 @@ def test_generate_cloud_build_spec_build_benchmark_coverage():\nexpected_spec = {\n'steps': [{\n'id': 'zlib-project-builder',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': [\n'build', '--tag', 'gcr.io/fuzzbench/builders/benchmark/zlib',\n@@ -172,7 +172,7 @@ def test_generate_cloud_build_spec_build_benchmark_coverage():\n'wait_for': []\n}, {\n'id': 'coverage-zlib-builder-intermediate',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': [\n'build', '--tag',\n@@ -188,7 +188,7 @@ def test_generate_cloud_build_spec_build_benchmark_coverage():\n'wait_for': ['zlib-project-builder']\n}, {\n'id': 'coverage-zlib-builder',\n- 'env': 'DOCKER_BUILDKIT=1',\n+ 'env': ['DOCKER_BUILDKIT=1'],\n'name': 'docker:19.03.12',\n'args': [\n'build', '--tag', 'gcr.io/fuzzbench/builders/coverage/zlib',\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [CI][GCB] Fix commit build trigger (#824)
This fixes the commit build trigger we use to build base-images
on every commit to master. While this trigger is not mandatory,
using it speeds up builds in CI by ensuring they never have to build
base-images (including python). |
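
The `env` change above matters because a Cloud Build step's `env` field is a list of `KEY=VALUE` strings, not a bare string. A hedged sketch of the corrected step shape serialized to YAML; the step values mirror the trigger config in the diff:

```python
import yaml  # PyYAML

step = {
    'id': 'pull-ubuntu-xenial',
    'env': ['DOCKER_BUILDKIT=1'],  # list form, as the fix enforces
    'name': 'docker:19.03.12',
    'args': ['pull', 'ubuntu:xenial'],
}
print(yaml.dump({'steps': [step]}, sort_keys=False))
```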
258,388 | 16.10.2020 11:23:16 | 25,200 | 78068745ecc4b7156e33522c5b5a039d703c53a7 | Fix building in other cloud projects.
Allow specifying a different docker registry.
Also:
1. Change references from cloud_build to cloudbuild for consistency.
2. Use fixtures for consistency
Fixes | [
{
"change_type": "MODIFY",
"old_path": "conftest.py",
"new_path": "conftest.py",
"diff": "@@ -80,6 +80,7 @@ def experiment(environ): # pylint: disable=redefined-outer-name,unused-argument\nos.environ['EXPERIMENT_FILESTORE'] = 'gs://experiment-data'\nos.environ['REPORT_FILESTORE'] = 'gs://web-bucket'\nos.environ['CLOUD_PROJECT'] = 'fuzzbench'\n+ os.environ['DOCKER_REGISTRY'] = 'gcr.io/fuzzbench'\[email protected]\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/gcb_build.py",
"new_path": "experiment/build/gcb_build.py",
"diff": "@@ -47,7 +47,7 @@ def _get_buildable_images(fuzzer=None, benchmark=None):\ndef build_base_images():\n\"\"\"Build base images on GCB.\"\"\"\nimage_templates = {'base-image': _get_buildable_images()['base-image']}\n- config = generate_cloudbuild.create_cloud_build_spec(image_templates,\n+ config = generate_cloudbuild.create_cloudbuild_spec(image_templates,\nbuild_base_images=True)\n_build(config, 'base-images')\n@@ -61,7 +61,7 @@ def build_coverage(benchmark):\nif (image_name == (benchmark + '-project-builder') or\nimage_specs['type'] == 'coverage')\n}\n- config = generate_cloudbuild.create_cloud_build_spec(image_templates,\n+ config = generate_cloudbuild.create_cloudbuild_spec(image_templates,\nbenchmark=benchmark)\nconfig_name = 'benchmark-{benchmark}-coverage'.format(benchmark=benchmark)\n_build(config, config_name)\n@@ -110,7 +110,7 @@ def build_fuzzer_benchmark(fuzzer: str, benchmark: str):\nif image_specs['type'] in ('base', 'coverage', 'dispatcher'):\ncontinue\nimage_templates[image_name] = image_specs\n- config = generate_cloudbuild.create_cloud_build_spec(image_templates)\n+ config = generate_cloudbuild.create_cloudbuild_spec(image_templates)\nconfig_name = 'benchmark-{benchmark}-fuzzer-{fuzzer}'.format(\nbenchmark=benchmark, fuzzer=fuzzer)\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/generate_cloudbuild.py",
"new_path": "experiment/build/generate_cloudbuild.py",
"diff": "import os\nimport posixpath\n-from common import yaml_utils\n-from common import experiment_utils\nfrom common import experiment_path as exp_path\n+from common import experiment_utils\n+from common import yaml_utils\nfrom common.utils import ROOT_DIR\nfrom experiment.build import build_utils\nDOCKER_IMAGE = 'docker:19.03.12'\n-PROJECT_DOCKER_REGISTRY = 'gcr.io/fuzzbench'\ndef get_experiment_tag_for_image(image_specs, tag_by_experiment=True):\n@@ -60,7 +59,12 @@ def coverage_steps(benchmark):\nreturn steps\n-def create_cloud_build_spec(image_templates,\n+def get_docker_registry():\n+ \"\"\"Returns the docker registry for this experiment.\"\"\"\n+ return os.environ['DOCKER_REGISTRY']\n+\n+\n+def create_cloudbuild_spec(image_templates,\nbenchmark='',\nbuild_base_images=False):\n\"\"\"Generates Cloud Build specification.\n@@ -73,18 +77,20 @@ def create_cloud_build_spec(image_templates,\nReturns:\nGCB build steps.\n\"\"\"\n- cloud_build_spec = {'steps': [], 'images': []}\n+ cloudbuild_spec = {'steps': [], 'images': []}\n# Workaround for bug https://github.com/moby/moby/issues/40262.\n# This is only needed for base-image as it inherits from ubuntu:xenial.\nif build_base_images:\n- cloud_build_spec['steps'].append({\n+ cloudbuild_spec['steps'].append({\n'id': 'pull-ubuntu-xenial',\n'env': ['DOCKER_BUILDKIT=1'],\n'name': DOCKER_IMAGE,\n'args': ['pull', 'ubuntu:xenial'],\n})\n+ docker_registry = get_docker_registry()\n+\nfor image_name, image_specs in image_templates.items():\nstep = {\n'id': image_name,\n@@ -93,8 +99,7 @@ def create_cloud_build_spec(image_templates,\n}\nstep['args'] = [\n'build', '--tag',\n- posixpath.join(PROJECT_DOCKER_REGISTRY,\n- image_specs['tag']), '--tag',\n+ posixpath.join(docker_registry, image_specs['tag']), '--tag',\nget_experiment_tag_for_image(image_specs), '--cache-from',\nget_experiment_tag_for_image(image_specs, tag_by_experiment=False),\n'--build-arg', 'BUILDKIT_INLINE_CACHE=1'\n@@ -113,24 +118,24 @@ def create_cloud_build_spec(image_templates,\ncontinue\nstep['wait_for'] += [dependency]\n- cloud_build_spec['steps'].append(step)\n- cloud_build_spec['images'].append(\n+ cloudbuild_spec['steps'].append(step)\n+ cloudbuild_spec['images'].append(\nget_experiment_tag_for_image(image_specs))\n- cloud_build_spec['images'].append(\n+ cloudbuild_spec['images'].append(\nget_experiment_tag_for_image(image_specs, tag_by_experiment=False))\nif any(image_specs['type'] in 'coverage'\nfor _, image_specs in image_templates.items()):\n- cloud_build_spec['steps'] += coverage_steps(benchmark)\n+ cloudbuild_spec['steps'] += coverage_steps(benchmark)\n- return cloud_build_spec\n+ return cloudbuild_spec\ndef main():\n\"\"\"Write base-images build spec when run from command line.\"\"\"\nimage_templates = yaml_utils.read(\nos.path.join(ROOT_DIR, 'docker', 'image_types.yaml'))\n- base_images_spec = create_cloud_build_spec(\n+ base_images_spec = create_cloudbuild_spec(\n{'base-image': image_templates['base-image']}, build_base_images=True)\nbase_images_spec_file = os.path.join(ROOT_DIR, 'docker', 'gcb',\n'base-images.yaml')\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_generate_cloudbuild.py",
"new_path": "experiment/build/test_generate_cloudbuild.py",
"diff": "# limitations under the License.\n\"\"\"Tests for generate_cloudbuild.py.\"\"\"\n-import os\n-\n-from unittest.mock import patch\n-\nfrom experiment.build import generate_cloudbuild\n+# pylint: disable=unused-argument\n+\[email protected](os.environ, {\n- 'CLOUD_PROJECT': 'fuzzbench',\n- 'EXPERIMENT': 'test-experiment'\n-})\n-def test_generate_cloud_build_spec_build_base_image():\n+def test_generate_cloudbuild_spec_build_base_image(experiment):\n\"\"\"Tests cloud build configuration yaml for the base image.\"\"\"\nimage_templates = {\n'base-image': {\n@@ -34,7 +28,7 @@ def test_generate_cloud_build_spec_build_base_image():\n'type': 'base'\n}\n}\n- generated_spec = generate_cloudbuild.create_cloud_build_spec(\n+ generated_spec = generate_cloudbuild.create_cloudbuild_spec(\nimage_templates, build_base_images=True)\nexpected_spec = {\n@@ -65,11 +59,7 @@ def test_generate_cloud_build_spec_build_base_image():\nassert generated_spec == expected_spec\[email protected](os.environ, {\n- 'CLOUD_PROJECT': 'fuzzbench',\n- 'EXPERIMENT': 'test-experiment'\n-})\n-def test_generate_cloud_build_spec_build_fuzzer_benchmark():\n+def test_generate_cloudbuild_spec_build_fuzzer_benchmark(experiment):\n\"\"\"Tests cloud build configuration yaml for a fuzzer-benchmark build.\"\"\"\nimage_templates = {\n'afl-zlib-builder-intermediate': {\n@@ -84,8 +74,7 @@ def test_generate_cloud_build_spec_build_fuzzer_benchmark():\n}\n}\n- generated_spec = generate_cloudbuild.create_cloud_build_spec(\n- image_templates)\n+ generated_spec = generate_cloudbuild.create_cloudbuild_spec(image_templates)\nexpected_spec = {\n'steps': [{\n@@ -113,14 +102,7 @@ def test_generate_cloud_build_spec_build_fuzzer_benchmark():\nassert generated_spec == expected_spec\[email protected](\n- os.environ, {\n- 'CLOUD_PROJECT': 'fuzzbench',\n- 'EXPERIMENT': 'test-experiment',\n- 'EXPERIMENT_FILESTORE': 'gs://fuzzbench-data',\n- 'WORK': '/work',\n- })\n-def test_generate_cloud_build_spec_build_benchmark_coverage():\n+def test_generate_cloudbuild_spec_build_benchmark_coverage(experiment):\n\"\"\"Tests cloud build configuration yaml for a benchmark coverage build.\"\"\"\nimage_templates = {\n'zlib-project-builder': {\n@@ -153,7 +135,7 @@ def test_generate_cloud_build_spec_build_benchmark_coverage():\n}\n}\n- generated_spec = generate_cloudbuild.create_cloud_build_spec(\n+ generated_spec = generate_cloudbuild.create_cloudbuild_spec(\nimage_templates, benchmark='zlib')\nexpected_spec = {\n@@ -218,7 +200,7 @@ def test_generate_cloud_build_spec_build_benchmark_coverage():\n'gcr.io/cloud-builders/gsutil',\n'args': [\n'-m', 'cp', '/workspace/out/coverage-build-zlib.tar.gz',\n- 'gs://fuzzbench-data/test-experiment/coverage-binaries/'\n+ 'gs://experiment-data/test-experiment/coverage-binaries/'\n]\n}],\n'images': [\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Fix building in other cloud projects. (#843)
Allow specifying a different docker registry.
Also:
1. Change references from cloud_build to cloudbuild for consistency.
2. Use fixtures for consistency
Fixes https://github.com/google/fuzzbench/issues/842 |
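
With this change, image URLs are derived from the `DOCKER_REGISTRY` environment variable rather than rebuilt from `CLOUD_PROJECT`. A minimal sketch of the resulting tag derivation; the registry value matches the test fixture above, and the image path and experiment suffix are examples:

```python
import os
import posixpath

os.environ['DOCKER_REGISTRY'] = 'gcr.io/fuzzbench'  # as set in conftest.py

def get_docker_registry():
    """Returns the docker registry for this experiment."""
    return os.environ['DOCKER_REGISTRY']

tag = posixpath.join(get_docker_registry(), 'builders/afl/zlib-intermediate')
print(tag + ':test-experiment')
# -> gcr.io/fuzzbench/builders/afl/zlib-intermediate:test-experiment
```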
258,388 | 16.10.2020 12:39:34 | 25,200 | e2fb96b96b7a679ef5774a41d960a919b90d174e | [docker] Authenticate to Google Cloud Artifact registries.
This allows use of private docker registries in the same project
as a public one. | [
{
"change_type": "MODIFY",
"old_path": "experiment/resources/dispatcher-startup-script-template.sh",
"new_path": "experiment/resources/dispatcher-startup-script-template.sh",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n+docker-credential-gcr configure-docker -include-artifact-registry\necho 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope\ndocker run --rm \\\n-e INSTANCE_NAME={{instance_name}} -e EXPERIMENT={{experiment}} \\\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/resources/runner-startup-script-template.sh",
"new_path": "experiment/resources/runner-startup-script-template.sh",
"diff": "@@ -23,6 +23,7 @@ echo core >/proc/sys/kernel/core_pattern\n# Start docker.\n{% if not local_experiment %}\n+docker-credential-gcr configure-docker -include-artifact-registry\nwhile ! docker pull {{docker_image_url}}\ndo\necho 'Error pulling image, retrying...'\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/test_scheduler.py",
"new_path": "experiment/test_scheduler.py",
"diff": "@@ -94,6 +94,7 @@ def test_create_trial_instance(benchmark, expected_image, expected_target,\nand creates a startup script for the instance, as we expect it to.\"\"\"\nexpected_startup_script = '''# Start docker.\n+docker-credential-gcr configure-docker -include-artifact-registry\nwhile ! docker pull {docker_image_url}\ndo\necho 'Error pulling image, retrying...'\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [docker] Authenticate to Google Cloud Artifact registries. (#845)
This allows use of private docker registries in the same project
as a public one. |
258,388 | 16.10.2020 12:40:18 | 25,200 | 231373e461820b6c06990657aa86ee92a07efc4c | [build] Fix use of docker registries.
Use docker registry from config to set the docker URLs instead
of using CLOUD_PROJECT. | [
{
"change_type": "MODIFY",
"old_path": "common/experiment_utils.py",
"new_path": "common/experiment_utils.py",
"diff": "@@ -81,16 +81,6 @@ def get_crashes_archive_name(cycle: int) -> str:\nreturn 'crashes-%04d.tar.gz' % cycle\n-def get_base_docker_tag(cloud_project=None):\n- \"\"\"Returns the base docker tag (i.e. Docker repo URL) given cloud_project.\n- If cloud is not provided, then the value of the environment variable\n- CLOUD_PROJECT is used.\"\"\"\n- # Google Cloud Docker repos use the form \"gcr.io/$CLOUD_PROJECT\"\n- if cloud_project is None:\n- cloud_project = get_cloud_project()\n- return posixpath.join('gcr.io', cloud_project)\n-\n-\ndef is_local_experiment():\n\"\"\"Returns True if running a local experiment.\"\"\"\nreturn bool(environment.get('LOCAL_EXPERIMENT'))\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/generate_cloudbuild.py",
"new_path": "experiment/build/generate_cloudbuild.py",
"diff": "@@ -27,8 +27,7 @@ DOCKER_IMAGE = 'docker:19.03.12'\ndef get_experiment_tag_for_image(image_specs, tag_by_experiment=True):\n\"\"\"Returns the registry with the experiment tag for given image.\"\"\"\n- tag = posixpath.join(experiment_utils.get_base_docker_tag(),\n- image_specs['tag'])\n+ tag = posixpath.join(get_docker_registry(), image_specs['tag'])\nif tag_by_experiment:\ntag += ':' + experiment_utils.get_experiment_name()\nreturn tag\n@@ -43,8 +42,8 @@ def coverage_steps(benchmark):\nDOCKER_IMAGE,\n'args': [\n'run', '-v', '/workspace/out:/host-out',\n- posixpath.join(experiment_utils.get_base_docker_tag(), 'builders',\n- 'coverage', benchmark) + ':' +\n+ posixpath.join(get_docker_registry(), 'builders', 'coverage',\n+ benchmark) + ':' +\nexperiment_utils.get_experiment_name(), '/bin/bash', '-c',\n'cd /out; tar -czvf /host-out/coverage-build-' + benchmark +\n'.tar.gz * /src /work'\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [build] Fix use of docker registries. (#844)
Use docker registry from config to set the docker URLs instead
of using CLOUD_PROJECT. |
258,388 | 23.10.2020 12:40:04 | 25,200 | 123da94c7a0ff55e251b9aa9f5e7407759882340 | [build][make] Add dependency on base-image
base-image was not listed as a dependency of images that depended
on it in image_types.yaml. This problem was hidden by buildkit.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -53,7 +53,7 @@ ${VENV_ACTIVATE}: requirements.txt\ninstall-dependencies: ${VENV_ACTIVATE}\n-docker/generated.mk: docker/generate_makefile.py fuzzers benchmarks ${VENV_ACTIVATE}\n+docker/generated.mk: docker/generate_makefile.py docker/image_types.yaml fuzzers benchmarks ${VENV_ACTIVATE}\nsource ${VENV_ACTIVATE} && PYTHONPATH=. python3 $< > $@\npresubmit: install-dependencies\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "- 'parent_image=gcr.io/fuzzbench/builders/coverage/{benchmark}-intermediate'\ndepends_on:\n- 'coverage-{benchmark}-builder-intermediate'\n+ - 'base-image'\ndockerfile: 'docker/benchmark-builder/Dockerfile'\ncontext: '.'\ntag: 'builders/coverage/{benchmark}'\n- 'parent_image=gcr.io/fuzzbench/builders/{fuzzer}/{benchmark}-intermediate'\ndepends_on:\n- '{fuzzer}-{benchmark}-builder-intermediate'\n+ - 'base-image'\ndockerfile: 'docker/benchmark-builder/Dockerfile'\ncontext: '.'\ntag: 'builders/{fuzzer}/{benchmark}'\n'{fuzzer}-{benchmark}-intermediate-runner':\ndepends_on:\n- '{fuzzer}-{benchmark}-builder'\n+ - 'base-image'\ndockerfile: 'fuzzers/{fuzzer}/runner.Dockerfile'\ncontext: 'fuzzers/{fuzzer}'\ntag: 'runners/{fuzzer}/{benchmark}-intermediate'\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [build][make] Add dependency on base-image (#864)
base-image was not listed as a dependency of images that depended
on it in image_types.yaml. This problem was hidden by buildkit.
Fixes #863 |
258,388 | 26.10.2020 08:47:53 | 25,200 | 99ec8af8c0a682839ec392c7f9812516b672c74a | Write the makefile instead of printing it
This makes testing nicer and makes the makefile generation more readable. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -54,7 +54,7 @@ ${VENV_ACTIVATE}: requirements.txt\ninstall-dependencies: ${VENV_ACTIVATE}\ndocker/generated.mk: docker/generate_makefile.py docker/image_types.yaml fuzzers benchmarks ${VENV_ACTIVATE}\n- source ${VENV_ACTIVATE} && PYTHONPATH=. python3 $< > $@\n+ source ${VENV_ACTIVATE} && PYTHONPATH=. python3 $< $@\npresubmit: install-dependencies\nsource ${VENV_ACTIVATE} && python3 presubmit.py\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "\"\"\"Simple generator for local Makefile rules.\"\"\"\nimport os\n+import sys\nfrom common import yaml_utils\nfrom common import benchmark_utils\n@@ -24,30 +25,33 @@ BASE_TAG = \"gcr.io/fuzzbench\"\nBENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR\n-def _print_benchmark_fuzz_target(benchmarks):\n- \"\"\"Prints benchmark variables from benchmark.yaml files.\"\"\"\n+def _get_benchmark_fuzz_target(benchmarks):\n+ \"\"\"Returns benchmark variables from benchmark.yaml files.\"\"\"\n+ variables = ''\nfor benchmark in benchmarks:\nbenchmark_vars = yaml_utils.read(\nos.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))\n- print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])\n- print()\n+ variables += (benchmark + '-fuzz-target=' +\n+ benchmark_vars['fuzz_target'] + '\\n')\n+ variables += '\\n'\n+ return variables\n-def _print_makefile_run_template(image):\n+def _get_makefile_run_template(image):\nfuzzer = image['fuzzer']\nbenchmark = image['benchmark']\n+ section = ''\n- for run_type in ('run', 'debug', 'test-run', 'repro-bugs'):\n- if run_type == 'repro-bugs':\n- bugs_testcases_dir = os.path.join(BENCHMARK_DIR, benchmark,\n- 'testcases')\n- if not os.path.isdir(bugs_testcases_dir):\n- continue\n- print(('{run_type}-{fuzzer}-{benchmark}: ' +\n- '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,\n- benchmark=benchmark,\n- fuzzer=fuzzer))\n- print('\\\n+ run_types = ['run', 'debug', 'test-run']\n+ testcases_dir = os.path.join(BENCHMARK_DIR, benchmark, 'testcases')\n+ if os.path.exists(testcases_dir):\n+ run_types.append('repro-bugs')\n+\n+ for run_type in run_types:\n+ section += (\n+ f'{run_type}-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner\\n')\n+\n+ section += f'\\\n\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n@@ -58,116 +62,120 @@ def _print_makefile_run_template(image):\n\\t-e FUZZER={fuzzer} \\\\\\n\\\n\\t-e BENCHMARK={benchmark} \\\\\\n\\\n\\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\\\n-'.format(fuzzer=fuzzer, benchmark=benchmark))\n+\\n'\n- params = \"\"\nif run_type == 'test-run':\n- print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n+ section += '\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\\\n'\nif run_type == 'debug':\n- print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ section += '\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it '\nelif run_type == 'repro-bugs':\n- print('\\t-v {path}:/testcases \\\\\\n\\\n-\\t--entrypoint /bin/bash '.format(path=bugs_testcases_dir),\n- end='')\n- params = \" -c \\\"for f in /testcases/*; do\\\n- echo _________________________________________;\\\n- echo \\\\$$f:;\\\n- \\\\$$OUT/\\\\$$FUZZ_TARGET -timeout=25\\\n- -rss_limit_mb=2560 \\\\$$f; done;\\\"\"\n-\n+ section += f'\\t-v {testcases_dir}:/testcases \\\\\\n\\t'\n+ section += '--entrypoint /bin/bash '\n+ section += os.path.join(BASE_TAG, image['tag'])\n+ section += ' -c \"for f in /testcases/*; do '\n+ section += 'echo _________________________________________; '\n+ section += 'echo \\\\$$f:; '\n+ section += '\\\\$$OUT/\\\\$$FUZZ_TARGET -timeout=25 -rss_limit_mb=2560 '\n+ section += '\\\\$$f; done;\" '\n+ section += '\\n\\n'\n+ continue\nelif run_type == 'run':\n- print('\\t-it ', end='')\n+ section += '\\t-it '\nelse:\n- print('\\t', end='')\n+ section += '\\t'\n- if params:\n- print(os.path.join(BASE_TAG, image['tag']), end='')\n- print(params)\n- else:\n- print(os.path.join(BASE_TAG, image['tag']))\n- print()\n+ section += os.path.join(BASE_TAG, image['tag'])\n+ section += 
'\\n\\n'\n+ return section\n-# TODO(Tanq16): Function must return a string as opposed to printing it.\n-def print_rules_for_image(name, image):\n- \"\"\"Print makefile section for given image to stdout.\"\"\"\n+def get_rules_for_image(name, image):\n+ \"\"\"Returns makefile section for |image|.\"\"\"\nif not ('base-' in name or 'dispatcher-' in name):\n- print('.', end='')\n- print(name + ':', end='')\n+ section = '.'\n+ else:\n+ section = ''\n+ section += name + ':'\nif 'depends_on' in image:\nfor dep in image['depends_on']:\nif 'base' in dep:\n- print(' ' + dep, end='')\n+ section += ' ' + dep\nelse:\n- print(' .' + dep, end='')\n- print()\n+ section += ' .' + dep\n+ section += '\\n'\nif 'base-' in name:\n- print('\\tdocker pull ubuntu:xenial')\n- print('\\tdocker build \\\\')\n- print('\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n- print('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\')\n- print('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n+ section += '\\tdocker pull ubuntu:xenial\\n'\n+ section += '\\tdocker build \\\\\\n'\n+ section += '\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\\\n'\n+ section += '\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\\\n'\n+ section += ('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) +\n+ ' \\\\\\n')\n+\nif 'build_arg' in image:\nfor arg in image['build_arg']:\n- print('\\t--build-arg ' + arg + ' \\\\')\n+ section += '\\t--build-arg ' + arg + ' \\\\\\n'\nif 'dockerfile' in image:\n- print('\\t--file ' + image['dockerfile'] + ' \\\\')\n- print('\\t' + image['context'])\n- print()\n+ section += '\\t--file ' + image['dockerfile'] + ' \\\\\\n'\n+ section += '\\t' + image['context'] + '\\n'\n+ section += '\\n'\n# Print run, debug, test-run rules if image is a runner.\nif 'runner' in name and not ('intermediate' in name or 'base' in name):\n- _print_makefile_run_template(image)\n+ section += _get_makefile_run_template(image)\n+ return section\ndef main():\n- \"\"\"Generates Makefile with docker image build rules.\"\"\"\n+ \"\"\"Writes Makefile with docker image build rules to sys.argv[1].\"\"\"\n+ if len(sys.argv) != 2:\n+ print(f'Usage: {sys.argv[0]} <makefile>')\n+ return 1\n+ makefile_path = sys.argv[1]\n+ makefile_contents = generate_makefile()\n+ with open(makefile_path, 'w') as file_handle:\n+ file_handle.write(makefile_contents)\n+ return 0\n+\n+\n+def generate_makefile():\n+ \"\"\"Generates the contents of the makefile and returns it.\"\"\"\nfuzzers = fuzzer_utils.get_fuzzer_names()\nbenchmarks = benchmark_utils.get_all_benchmarks()\nbuildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n- print('export DOCKER_BUILDKIT := 1')\n+ makefile = 'export DOCKER_BUILDKIT := 1\\n\\n'\n# Print oss-fuzz benchmarks property variables.\n- _print_benchmark_fuzz_target(benchmarks)\n+ makefile += _get_benchmark_fuzz_target(benchmarks)\nfor name, image in buildable_images.items():\n- print_rules_for_image(name, image)\n+ makefile += get_rules_for_image(name, image)\n# Print build targets for all fuzzer-benchmark pairs (including coverage).\nfuzzers.append('coverage')\nfor fuzzer in fuzzers:\n- image_type = \"runner\"\n+ image_type = 'runner'\nif 'coverage' in fuzzer:\n- image_type = \"builder\"\n+ image_type = 'builder'\nfor benchmark in benchmarks:\n- print(('build-{fuzzer}-{benchmark}: ' +\n- '.{fuzzer}-{benchmark}-{image_type}\\n').format(\n- fuzzer=fuzzer,\n- benchmark=benchmark,\n- image_type=image_type))\n- print()\n+ makefile += (f'build-{fuzzer}-{benchmark}: ' +\n+ 
f'.{fuzzer}-{benchmark}-{image_type}\\n')\n+ makefile += '\\n'\n# Print fuzzer-all benchmarks build targets.\nfor fuzzer in fuzzers:\n- all_build_targets = ' '.join([\n- 'build-{0}-{1}'.format(fuzzer, benchmark)\n- for benchmark in benchmarks\n- ])\n- print('build-{fuzzer}-all: {all_targets}'.format(\n- fuzzer=fuzzer, all_targets=all_build_targets))\n- all_test_run_targets = ' '.join([\n- 'test-run-{0}-{1}'.format(fuzzer, benchmark)\n- for benchmark in benchmarks\n- ])\n- print('test-run-{fuzzer}-all: {all_targets}'.format(\n- fuzzer=fuzzer, all_targets=all_test_run_targets))\n+ all_build_targets = ' '.join(\n+ [f'build-{fuzzer}-{benchmark}' for benchmark in benchmarks])\n+ makefile += f'build-{fuzzer}-all: {all_build_targets}\\n'\n+ all_test_run_targets = ' '.join(\n+ [f'test-run-{fuzzer}-{benchmark}' for benchmark in benchmarks])\n+ makefile += f'test-run-{fuzzer}-all: {all_test_run_targets}\\n'\n# Print all targets build target.\n- all_build_targets = ' '.join(\n- ['build-{0}-all'.format(name) for name in fuzzers])\n- print('build-all: {all_targets}'.format(all_targets=all_build_targets))\n+ all_build_targets = ' '.join([f'build-{fuzzer}-all' for fuzzer in fuzzers])\n+ makefile += f'build-all: {all_build_targets}'\n+ return makefile\nif __name__ == '__main__':\n- main()\n+ sys.exit(main())\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/test_generate_makefile.py",
"new_path": "docker/test_generate_makefile.py",
"diff": "# limitations under the License.\n\"\"\"Tests for generate_makefile.py.\"\"\"\n-from unittest.mock import call\n-from unittest.mock import patch\n-\nfrom docker import generate_makefile\n-@patch('builtins.print')\n-def test_print_makefile_build(mocked_print):\n+def test_get_rules_for_image():\n\"\"\"Tests result of a makefile generation for an image.\"\"\"\nname = 'afl-zlib-builder-intermediate'\n@@ -32,27 +28,21 @@ def test_print_makefile_build(mocked_print):\n'build_arg': ['parent_image=gcr.io/fuzzbench/builders/benchmark/zlib']\n}\n- generate_makefile.print_rules_for_image(name, image)\n- assert mocked_print.mock_calls == [\n- call('.', end=''),\n- call('afl-zlib-builder-intermediate:', end=''),\n- call(' .zlib-project-builder', end=''),\n- call(),\n- call('\\tdocker build \\\\'),\n- call('\\t--tag gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\'),\n- call('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\'),\n- call('\\t--cache-from gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\'\n- ),\n- call('\\t--build-arg parent_image=gcr.io/' +\n- 'fuzzbench/builders/benchmark/zlib \\\\'),\n- call('\\t--file fuzzers/afl/builder.Dockerfile \\\\'),\n- call('\\tfuzzers/afl'),\n- call()\n- ]\n+ rules_for_image = generate_makefile.get_rules_for_image(name, image)\n+ assert rules_for_image == (\n+ '.afl-zlib-builder-intermediate: .zlib-project-builder\\n'\n+ '\\tdocker build \\\\\\n'\n+ '\\t--tag gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\\\n'\n+ '\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\\\n'\n+ '\\t--cache-from gcr.io/fuzzbench/builders/afl/zlib-intermediate \\\\\\n'\n+ '\\t--build-arg parent_image=gcr.io/fuzzbench/builders/benchmark/zlib \\\\'\n+ '\\n'\n+ '\\t--file fuzzers/afl/builder.Dockerfile \\\\\\n'\n+ '\\tfuzzers/afl\\n'\n+ '\\n')\n-@patch('builtins.print')\n-def test_print_makefile_runner_image(mocked_print):\n+def test_get_rules_for_runner_image():\n\"\"\"Tests result of a makefile generation for a runner image.\"\"\"\nname = 'afl-zlib-runner'\n@@ -65,26 +55,19 @@ def test_print_makefile_runner_image(mocked_print):\n'build_arg': ['fuzzer=afl', 'benchmark=zlib'],\n'depends_on': ['afl-zlib-builder', 'afl-zlib-intermediate-runner']\n}\n-\n- generate_makefile.print_rules_for_image(name, image)\n-\n- assert mocked_print.mock_calls == [\n- call('.', end=''),\n- call('afl-zlib-runner:', end=''),\n- call(' .afl-zlib-builder', end=''),\n- call(' .afl-zlib-intermediate-runner', end=''),\n- call(),\n- call('\\tdocker build \\\\'),\n- call('\\t--tag gcr.io/fuzzbench/runners/afl/zlib \\\\'),\n- call('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\'),\n- call('\\t--cache-from gcr.io/fuzzbench/runners/afl/zlib \\\\'),\n- call('\\t--build-arg fuzzer=afl \\\\'),\n- call('\\t--build-arg benchmark=zlib \\\\'),\n- call('\\t--file docker/benchmark-runner/Dockerfile \\\\'),\n- call('\\t.'),\n- call(),\n- call('run-afl-zlib: .afl-zlib-runner'),\n- call('\\tdocker run \\\\\\n\\\n+ rules_for_image = generate_makefile.get_rules_for_image(name, image)\n+ assert rules_for_image == (\n+ '.afl-zlib-runner: .afl-zlib-builder .afl-zlib-intermediate-runner\\n'\n+ '\\tdocker build \\\\\\n'\n+ '\\t--tag gcr.io/fuzzbench/runners/afl/zlib \\\\\\n'\n+ '\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\\\n'\n+ '\\t--cache-from gcr.io/fuzzbench/runners/afl/zlib \\\\\\n'\n+ '\\t--build-arg fuzzer=afl \\\\\\n'\n+ '\\t--build-arg benchmark=zlib \\\\\\n'\n+ '\\t--file docker/benchmark-runner/Dockerfile \\\\\\n'\n+ '\\t.\\n\\n'\n+ 'run-afl-zlib: .afl-zlib-runner\\n' + ('\\\n+\\tdocker run \\\\\\n\\\n\\t--cpus=1 
\\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n@@ -93,12 +76,10 @@ def test_print_makefile_runner_image(mocked_print):\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER=afl \\\\\\n\\\n\\t-e BENCHMARK=zlib \\\\\\n\\\n-\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\'),\n- call('\\t-it ', end=''),\n- call('gcr.io/fuzzbench/runners/afl/zlib'),\n- call(),\n- call('debug-afl-zlib: .afl-zlib-runner'),\n- call('\\tdocker run \\\\\\n\\\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+\\n') + '\\t-it gcr.io/fuzzbench/runners/afl/zlib\\n\\n'\n+ 'debug-afl-zlib: .afl-zlib-runner\\n' + ('\\\n+\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n@@ -107,12 +88,11 @@ def test_print_makefile_runner_image(mocked_print):\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER=afl \\\\\\n\\\n\\t-e BENCHMARK=zlib \\\\\\n\\\n-\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\'),\n- call('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end=''),\n- call('gcr.io/fuzzbench/runners/afl/zlib'),\n- call(),\n- call('test-run-afl-zlib: .afl-zlib-runner'),\n- call('\\tdocker run \\\\\\n\\\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+\\n') + '\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it gcr.io/fuzzbench/runners/afl/zlib'\n+ '\\n\\n'\n+ 'test-run-afl-zlib: .afl-zlib-runner\\n' + ('\\\n+\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n@@ -121,9 +101,6 @@ def test_print_makefile_runner_image(mocked_print):\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER=afl \\\\\\n\\\n\\t-e BENCHMARK=zlib \\\\\\n\\\n-\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\'),\n- call('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\'),\n- call('\\t', end=''),\n- call('gcr.io/fuzzbench/runners/afl/zlib'),\n- call(),\n- ]\n+\\t-e FUZZ_TARGET=$(zlib-fuzz-target) \\\\\\\n+\\n') + '\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\\\n'\n+ '\\tgcr.io/fuzzbench/runners/afl/zlib\\n\\n')\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Write the makefile instead of printing it (#865)
This makes testing nicer and makes the makefile generation more readable. |
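
Returning the Makefile text as a string is what lets the rewritten tests above compare plain strings instead of replaying mocked `print()` calls. A toy illustration of that testing win; `build_rule` is a stand-in function for this sketch, not FuzzBench code:

```python
def build_rule(name, deps):
    """Tiny stand-in for get_rules_for_image: builds one Makefile rule."""
    section = '.' + name + ':'
    for dep in deps:
        section += ' .' + dep
    return section + '\n'

def test_build_rule():
    # Direct string comparison; no patching of builtins.print needed.
    assert build_rule('afl-zlib-runner', ['afl-zlib-builder']) == (
        '.afl-zlib-runner: .afl-zlib-builder\n')

test_build_rule()
```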
258,388 | 26.10.2020 12:31:45 | 25,200 | 77bfc5c2fe5c5fcd051a4ea87dc0397d62fa8b76 | [CI] Sort benchmarks before building them.
Sort benchmarks before building them to make CI more deterministic.
This change will ensure that benchmarks are built in the same order. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"new_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"diff": "@@ -96,6 +96,9 @@ def make_builds(benchmarks, fuzzer):\n\"\"\"Use make to test the fuzzer on each benchmark in |benchmarks|.\"\"\"\nfuzzer_benchmark_pairs = builder.get_fuzzer_benchmark_pairs([fuzzer],\nbenchmarks)\n+ # Sort benchmarks so that they get built in a deterministic order.\n+ fuzzer_benchmark_pairs = sorted(fuzzer_benchmark_pairs,\n+ key=lambda pair: pair[1])\nprint('Building fuzzer-benchmark pairs: {}'.format(fuzzer_benchmark_pairs))\nfor _, benchmark in fuzzer_benchmark_pairs:\nmake_target = get_make_target(fuzzer, benchmark)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [CI] Sort benchmarks before building them. (#876)
Sort benchmarks before building them to make CI more deterministic.
This change ensures that benchmarks are built in the same order on every run. |
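The effect of the added key function, shown on made-up fuzzer-benchmark pairs:

```python
fuzzer_benchmark_pairs = [('afl', 'zlib'), ('afl', 'curl'),
                          ('libfuzzer', 'bloaty')]

# Sorting by the benchmark name (pair[1]) fixes the build order across runs.
ordered = sorted(fuzzer_benchmark_pairs, key=lambda pair: pair[1])
assert ordered == [('libfuzzer', 'bloaty'), ('afl', 'curl'), ('afl', 'zlib')]
```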
258,388 | 28.10.2020 07:05:15 | 25,200 | 0d309e68f7401fe9bc278a4638c4d8f9edb48f77 | [benchmark integration] Remove stale attribute
Remove an attribute left over from OSS-Fuzz corpus usage. | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"new_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"diff": "@@ -212,7 +212,7 @@ def main():\nhelp='Date of the commit. Example: 2019-10-19T09:07:25+01:00')\nargs = parser.parse_args()\nintegrate_benchmark(args.project, args.fuzz_target, args.benchmark_name,\n- args.commit, args.date, args.oss_fuzz_corpus)\n+ args.commit, args.date)\nreturn 0\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [benchmark integration] Remove stale attribute (#887)
Remove an attribute left over from OSS-Fuzz corpus usage. |
258,388 | 28.10.2020 07:24:36 | 25,200 | 7c8b3b2988ee21d150299d6451718f106478a914 | [new_process] Fix logging
When logging the result of an executed command, the command
was put directly into the log message before formatting.
This caused exceptions during logging if the command contained
something that looked like a printf-style format directive (e.g. %H). | [
{
"change_type": "MODIFY",
"old_path": "common/new_process.py",
"new_path": "common/new_process.py",
"diff": "@@ -109,8 +109,8 @@ def execute( # pylint: disable=too-many-locals,too-many-branches\nretcode = process.returncode\n- log_message = ('Executed command: \"{command}\" returned: {retcode}.'.format(\n- command=(' '.join(command))[:LOG_LIMIT_FIELD], retcode=retcode))\n+ command_log_str = ' '.join(command)[:LOG_LIMIT_FIELD]\n+ log_message = 'Executed command: \"%s\" returned: %d.'\nif output is not None:\noutput = output.decode('utf-8', errors='ignore')\n@@ -120,8 +120,8 @@ def execute( # pylint: disable=too-many-locals,too-many-branches\nlog_extras = None\nif expect_zero and retcode != 0 and not wrapped_process.timed_out:\n- logs.error(log_message, extras=log_extras)\n+ logs.error(log_message, command_log_str, retcode, extras=log_extras)\nraise subprocess.CalledProcessError(retcode, command)\n- logs.debug(log_message, extras=log_extras)\n+ logs.debug(log_message, command_log_str, retcode, extras=log_extras)\nreturn ProcessResult(retcode, output, wrapped_process.timed_out)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [new_process] Fix logging (#888)
When logging the result of an executed command, the command
was put directly into the log message before formatting.
This caused exceptions during logging if the command contained
something that looked like a printf-style format directive (e.g. %H). |
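The same failure mode can be reproduced with the standard logging module; a minimal sketch (the command string here is made up):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('demo')

command = ['git', 'log', '--format=%H']
retcode = 1

# Fragile: interpolating the command into the format string means '%H' is
# parsed as a printf-style directive once another argument ('%d') is
# supplied, raising ValueError while the record is formatted:
#   logger.error('Executed command: "' + ' '.join(command) + '" returned: %d.',
#                retcode)

# Robust (the approach this commit takes): keep the format string constant
# and pass every dynamic piece as an argument.
logger.error('Executed command: "%s" returned: %d.', ' '.join(command), retcode)
```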
258,388 | 28.10.2020 07:44:11 | 25,200 | 11ce9e316a824a718255708c4910b1c64af70a27 | [pylint][NFC] Remove old pylint protected-access directives
The benchmark integration script has some leftover
protected-access directives that were used when the script used
protected functions from OSS-Fuzz. These aren't needed any longer. | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"new_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"diff": "@@ -149,11 +149,11 @@ def replace_base_builder(benchmark_dir, commit_date):\n\"\"\"Replace the parent image of the Dockerfile in |benchmark_dir|,\nbase-builder (latest), with a version of base-builder that is likely to\nbuild the project as it was on |commit_date| without issue.\"\"\"\n- base_builder_repo = _load_base_builder_docker_repo() # pylint: disable=protected-access\n+ base_builder_repo = _load_base_builder_docker_repo()\nif base_builder_repo:\nbase_builder_digest = base_builder_repo.find_digest(commit_date)\nlogs.info('Using base-builder with digest %s.', base_builder_digest)\n- _replace_base_builder_digest( # pylint: disable=protected-access\n+ _replace_base_builder_digest(\nos.path.join(benchmark_dir, 'Dockerfile'), base_builder_digest)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [pylint][NFC] Remove old pylint protected-access directives (#889)
The benchmark integration script has some leftover
protected-access directives that were used when the script used
protected functions from OSS-Fuzz. These aren't needed any longer. |
258,388 | 28.10.2020 07:57:22 | 25,200 | 3cf22fad6ec831ee5221cc63f0ceeb529ca374ad | [benchmark integration] Check third_party/oss-fuzz is a git repo.
If third_party/oss-fuzz isn't a repo, then exit and explain to
users that they need to check out submodules.
Previously, if it was not a repo, `git reset --hard` would be done
on the fuzzbench repo, which could unexpectedly cause users to lose
work. | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"new_path": "benchmarks/oss_fuzz_benchmark_integration.py",
"diff": "@@ -36,7 +36,7 @@ OSS_FUZZ_REPO_PATH = os.path.join(OSS_FUZZ_DIR, 'infra')\nclass GitRepoManager:\n- \"\"\"Base repo manager.\"\"\"\n+ \"\"\"Git repo manager.\"\"\"\ndef __init__(self, repo_dir):\nself.repo_dir = repo_dir\n@@ -76,6 +76,11 @@ class BaseBuilderDockerRepo:\ndef copy_oss_fuzz_files(project, commit_date, benchmark_dir):\n\"\"\"Checkout the right files from OSS-Fuzz to build the benchmark based on\n|project| and |commit_date|. Then copy them to |benchmark_dir|.\"\"\"\n+ if not os.path.exists(os.path.join(OSS_FUZZ_DIR, '.git')):\n+ logs.error(\n+ '%s is not a git repo. Try running git submodule update --init',\n+ OSS_FUZZ_DIR)\n+ raise RuntimeError('%s is not a git repo.' % OSS_FUZZ_DIR)\noss_fuzz_repo_manager = GitRepoManager(OSS_FUZZ_DIR)\nprojects_dir = os.path.join(OSS_FUZZ_DIR, 'projects', project)\ntry:\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [benchmark integration] Check third_party/oss-fuzz is a git repo. (#892)
If third_party/oss-fuzz isn't a repo, then exit and explain to
users that they need to check out submodules.
Previously, if it was not a repo, `git reset --hard` would be done
on the fuzzbench repo, which could unexpectedly cause users to lose
work. |
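The guard in isolation, as a sketch with a hypothetical helper name:

```python
import os


def ensure_git_repo(repo_dir):
    """Fails loudly before a destructive git command can touch the wrong tree."""
    if not os.path.exists(os.path.join(repo_dir, '.git')):
        raise RuntimeError('%s is not a git repo.' % repo_dir)


# Called before commands such as `git fetch` or `git reset --hard`, so a
# missing submodule aborts instead of resetting the parent repository.
ensure_git_repo('third_party/oss-fuzz')
```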
258,388 | 28.10.2020 08:51:14 | 25,200 | dadb84735144cebdcc7310b62c238f299971ac02 | [experiment_utils] Use one source of truth for cycle filenames.
Use one source of truth for naming of cycle files (e.g.
corpus-archive-0001).
Part of the queue-based measurer | [
{
"change_type": "MODIFY",
"old_path": "common/experiment_utils.py",
"new_path": "common/experiment_utils.py",
"diff": "@@ -71,19 +71,26 @@ def get_trial_instance_name(experiment: str, trial_id: int) -> str:\nreturn 'r-%s-%d' % (experiment, trial_id)\n+def get_cycle_filename(basename: str, cycle: int) -> str:\n+ \"\"\"Returns a filename for a file that is relevant to a particular snapshot\n+ cycle.\"\"\"\n+ filename = basename + '-' + ('%04d' % cycle)\n+ return filename\n+\n+\ndef get_corpus_archive_name(cycle: int) -> str:\n\"\"\"Returns a corpus archive name given a cycle.\"\"\"\n- return 'corpus-archive-%04d.tar.gz' % cycle\n+ return get_cycle_filename('corpus-archive', cycle) + '.tar.gz'\ndef get_stats_filename(cycle: int) -> str:\n\"\"\"Returns a corpus archive name given a cycle.\"\"\"\n- return 'stats-%04d.json' % cycle\n+ return get_cycle_filename('stats', cycle) + '.json'\ndef get_crashes_archive_name(cycle: int) -> str:\n\"\"\"Return as crashes archive name given a cycle.\"\"\"\n- return 'crashes-%04d.tar.gz' % cycle\n+ return get_cycle_filename('crashes', cycle) + '.tar.gz'\ndef is_local_experiment():\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [experiment_utils] Use one source of truth for cycle filenames. (#896)
Use one source of truth for naming of cycle files (e.g.
corpus-archive-0001).
Part of the queue-based measurer (#895). |
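A quick check of the naming scheme, using a standalone copy of the helper for illustration:

```python
def get_cycle_filename(basename, cycle):
    # Zero-pads the cycle number to four digits, e.g. 7 -> '0007'.
    return basename + '-' + ('%04d' % cycle)


assert get_cycle_filename('corpus-archive', 7) + '.tar.gz' == 'corpus-archive-0007.tar.gz'
assert get_cycle_filename('stats', 12) + '.json' == 'stats-0012.json'
assert get_cycle_filename('crashes', 105) + '.tar.gz' == 'crashes-0105.tar.gz'
```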
258,388 | 28.10.2020 08:56:18 | 25,200 | 411b3bb5ebb0cab7a5d699383145ada68d0209c5 | [queue][NFC] Improve .gitignore file
Ignore Emacs backup files.
Don't bother ignoring variants of virtualenvs; we only use one.
See | [
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -19,12 +19,8 @@ __pycache__/\n.pytype/\n-# Virtual environments.\n-.env\n+# Virtualenv\n.venv\n-env/\n-venv/\n-ENV/\n# Reports generated by FuzzBench.\nreport/\n@@ -36,3 +32,7 @@ docs/vendor/\n# Auto-generated build files.\ndocker/generated.mk\n+\n+# Emacs backup files.\n+*~\n+\\#*\\#\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][NFC] Improve .gitignore file (#897)
Ignore Emacs backup files.
Don't bother ignoring variants of virtualenvs; we only use one.
See #895. |
258,388 | 28.10.2020 09:29:30 | 25,200 | aeb8eff56139933fdba672211b310c7aa3943f07 | [queue][filestore_utils] Add `cat` and fix consistency.
Add `cat` function.
Make arguments to cp consistent with the rest of the file (parallel
should be last). | [
{
"change_type": "MODIFY",
"old_path": "common/filestore_utils.py",
"new_path": "common/filestore_utils.py",
"diff": "@@ -37,15 +37,15 @@ def get_impl():\nreturn local_filestore\n-def cp(source, destination, recursive=False, parallel=False, expect_zero=True): # pylint: disable=invalid-name\n+def cp(source, destination, recursive=False, expect_zero=True, parallel=False): # pylint: disable=invalid-name\n\"\"\"Copies |source| to |destination|. If |expect_zero| is True then it can\nraise subprocess.CalledProcessError. |parallel| is only used by the gsutil\nimplementation.\"\"\"\nreturn get_impl().cp(source,\ndestination,\nrecursive=recursive,\n- parallel=parallel,\n- expect_zero=expect_zero)\n+ expect_zero=expect_zero,\n+ parallel=parallel)\ndef ls(path, must_exist=True): # pylint: disable=invalid-name\n@@ -79,3 +79,8 @@ def rsync( # pylint: disable=too-many-arguments\ngsutil_options,\noptions,\nparallel=parallel)\n+\n+\n+def cat(file_path, expect_zero=True):\n+ \"\"\"Reads the file at |file_path| and returns the result.\"\"\"\n+ return get_impl().cat(file_path, expect_zero=expect_zero)\n"
},
{
"change_type": "MODIFY",
"old_path": "common/gsutil.py",
"new_path": "common/gsutil.py",
"diff": "@@ -28,7 +28,7 @@ def gsutil_command(arguments, expect_zero=True, parallel=False):\nreturn new_process.execute(command + arguments, expect_zero=expect_zero)\n-def cp(source, destination, recursive=False, parallel=False, expect_zero=True): # pylint: disable=invalid-name\n+def cp(source, destination, recursive=False, expect_zero=True, parallel=False): # pylint: disable=invalid-name\n\"\"\"Executes gsutil's \"cp\" command to copy |source| to |destination|. Uses -r\nif |recursive|. If |expect_zero| is True and the command fails then this\nfunction will raise a subprocess.CalledError.\"\"\"\n@@ -37,7 +37,7 @@ def cp(source, destination, recursive=False, parallel=False, expect_zero=True):\ncommand.append('-r')\ncommand.extend([source, destination])\n- return gsutil_command(command, parallel=parallel, expect_zero=expect_zero)\n+ return gsutil_command(command, expect_zero=expect_zero, parallel=parallel)\ndef ls(path, must_exist=True): # pylint: disable=invalid-name\n@@ -84,3 +84,12 @@ def rsync( # pylint: disable=too-many-arguments\ncommand.extend(options)\ncommand.extend([source, destination])\nreturn gsutil_command(command, parallel=parallel)\n+\n+\n+def cat(file_path, expect_zero=True):\n+ \"\"\"Does gsutil cat on |file_path| and returns the result.\"\"\"\n+ command = ['cat', file_path]\n+ # TODO(metzman): Consider replacing this technique with cp to temp file\n+ # and a local `cat`. The problem with this technique is stderr output\n+ # from gsutil can be included.\n+ return gsutil_command(command, expect_zero=expect_zero)\n"
},
{
"change_type": "MODIFY",
"old_path": "common/local_filestore.py",
"new_path": "common/local_filestore.py",
"diff": "@@ -23,8 +23,8 @@ def cp( # pylint: disable=invalid-name\nsource,\ndestination,\nrecursive=False,\n- parallel=False, # pylint: disable=unused-argument\n- expect_zero=True):\n+ expect_zero=True,\n+ parallel=False): # pylint: disable=unused-argument\n\"\"\"Executes \"cp\" command from |source| to |destination|.\"\"\"\n# Create intermediate folders for `cp` command to behave like `gsutil.cp`.\nfilesystem.create_directory(os.path.dirname(destination))\n@@ -91,3 +91,9 @@ def rsync( # pylint: disable=too-many-arguments\nsource = source + '/'\ncommand.extend([source, destination])\nreturn new_process.execute(command, expect_zero=True)\n+\n+\n+def cat(file_path, expect_zero=True):\n+ \"\"\"Does cat on |file_path| and returns the result.\"\"\"\n+ command = ['cat', file_path]\n+ return new_process.execute(command, expect_zero=expect_zero)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][filestore_utils] Add `cat` and fix consistency. (#899)
Add `cat` function.
Make arguments to cp consistent with the rest of the file (parallel
should be last). |
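Hypothetical caller-side usage; the path is made up, and whether gsutil or the local `cat` runs is decided by the environment:

```python
from common import filestore_utils

# cat() returns a ProcessResult; .output holds the file contents on success.
result = filestore_utils.cat('gs://my-bucket/my-experiment/unchanged-cycles',
                             expect_zero=False)
if result.retcode == 0:
    unchanged_cycles = result.output.splitlines()
```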
258,388 | 28.10.2020 11:59:39 | 25,200 | 894a9d598cfc47527b6eb9053896f3f81e7baa15 | [queue][presubmit] Allow third_party directories anywhere.
Ignore files in directories named third_party when doing license checks
even if they aren't in $ROOT/third_party.
See | [
{
"change_type": "MODIFY",
"old_path": "presubmit.py",
"new_path": "presubmit.py",
"diff": "@@ -60,9 +60,9 @@ _LICENSE_CHECK_EXTENSIONS = [\n_LICENSE_CHECK_STRING = 'http://www.apache.org/licenses/LICENSE-2.0'\n_SRC_ROOT = Path(__file__).absolute().parent\n+THIRD_PARTY_DIR_NAME = 'third_party'\n_IGNORE_DIRECTORIES = [\nos.path.join(_SRC_ROOT, 'database', 'alembic'),\n- os.path.join(_SRC_ROOT, 'third_party'),\nos.path.join(_SRC_ROOT, 'benchmarks'),\n]\n@@ -306,11 +306,18 @@ def validate_experiment_requests(paths: List[Path]):\nreturn result\n-def is_path_in_ignore_directory(path: Path) -> bool:\n- \"\"\"Returns True if |path| is a subpath of an ignored directory.\"\"\"\n+def is_path_ignored(path: Path) -> bool:\n+ \"\"\"Returns True if |path| is a subpath of an ignored directory or is a\n+ third_party directory.\"\"\"\nfor ignore_directory in _IGNORE_DIRECTORIES:\nif filesystem.is_subpath(ignore_directory, path):\nreturn True\n+\n+ # Third party directories can be anywhere.\n+ path_parts = str(path).split(os.sep)\n+ if any(path_part == THIRD_PARTY_DIR_NAME for path_part in path_parts):\n+ return True\n+\nreturn False\n@@ -327,7 +334,7 @@ def license_check(paths: List[Path]) -> bool:\nextension not in _LICENSE_CHECK_EXTENSIONS):\ncontinue\n- if is_path_in_ignore_directory(path):\n+ if is_path_ignored(path):\ncontinue\nwith open(path) as file_handle:\n@@ -349,7 +356,7 @@ def get_all_files() -> List[Path]:\ndef filter_ignored_files(paths: List[Path]) -> List[Path]:\n\"\"\"Returns a list of absolute paths of files in this repo that can be\nchecked statically.\"\"\"\n- return [path for path in paths if not is_path_in_ignore_directory(path)]\n+ return [path for path in paths if not is_path_ignored(path)]\ndef do_tests() -> bool:\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][presubmit] Allow third_party directories anywhere. (#904)
Ignore files in directories named third_party when doing license checks
even if they aren't in $ROOT/third_party.
See #895. |
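The path test in isolation (standalone sketch):

```python
import os

THIRD_PARTY_DIR_NAME = 'third_party'


def has_third_party_component(path):
    # True only for a directory component named exactly 'third_party'.
    path_parts = str(path).split(os.sep)
    return any(part == THIRD_PARTY_DIR_NAME for part in path_parts)


assert has_third_party_component(
    os.path.join('fuzzers', 'afl', 'third_party', 'x.c'))
assert not has_third_party_component(
    os.path.join('docs', 'third_party_notes.md'))
```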
258,388 | 28.10.2020 12:10:17 | 25,200 | 1280814b2f8accaee9c8a01b45bf2dc34aea43ba | [queue] Add queue_utils
Add queue_utils module. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "common/queue_utils.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Code for setting up a work queue with rq.\"\"\"\n+import redis\n+import rq\n+import rq.job\n+\n+from common import experiment_utils\n+\n+\n+def initialize_queue(redis_host):\n+ \"\"\"Returns a redis-backed rq queue.\"\"\"\n+ queue_name = experiment_utils.get_experiment_name()\n+ redis_connection = redis.Redis(host=redis_host)\n+ queue = rq.Queue(queue_name, connection=redis_connection)\n+ return queue\n+\n+\n+def get_all_jobs(queue):\n+ \"\"\"Returns all the jobs in queue.\"\"\"\n+ job_ids = queue.get_job_ids()\n+ return rq.job.Job.fetch_many(job_ids, queue.connection)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue] Add queue_utils (#906)
Add queue_utils module. |
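A hypothetical producer built on these helpers; it assumes a reachable Redis host and EXPERIMENT set in the environment, since initialize_queue derives the queue name from it:

```python
import os

from common import queue_utils


def noop_job(value):  # placeholder work item for this sketch
    return value


os.environ['EXPERIMENT'] = 'my-experiment'
queue = queue_utils.initialize_queue('localhost')
job = queue.enqueue(noop_job, 42)
print(job.get_status(), len(queue_utils.get_all_jobs(queue)))
```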
258,388 | 28.10.2020 12:27:27 | 25,200 | 7b169858bb9407d4ea8e9b5beb9a2908569b6654 | [queue][GCE] Add instance group code to gce module.
Also add tests.
See | [
{
"change_type": "MODIFY",
"old_path": "common/gce.py",
"new_path": "common/gce.py",
"diff": "@@ -54,3 +54,55 @@ def get_preempted_instances(project, zone):\nif (instance['scheduling']['preemptible'] and\ninstance['status'] == 'TERMINATED'):\nyield instance['name']\n+\n+\n+def get_instance_group_managers():\n+ \"\"\"Returns the instance group managers resource.\"\"\"\n+ return thread_local.service.instanceGroupManagers()\n+\n+\n+def get_instance_group_size(instance_group: str, project: str,\n+ zone: str) -> int:\n+ \"\"\"Returns the number of instances running in |instance_group|.\"\"\"\n+ managers = get_instance_group_managers()\n+ request = managers.get(instanceGroupManager=instance_group,\n+ project=project,\n+ zone=zone)\n+ return request.execute()['targetSize']\n+\n+\n+def resize_instance_group(size, instance_group, project, zone):\n+ \"\"\"Changes the number of instances running in |instance_group| to |size|.\"\"\"\n+ assert size >= 1\n+ managers = get_instance_group_managers()\n+ request = managers.resize(instanceGroupManager=instance_group,\n+ size=size,\n+ project=project,\n+ zone=zone)\n+ return request.execute()\n+\n+\n+def delete_instance_group(instance_group, project, zone):\n+ \"\"\"Deletes |instance_group|.\"\"\"\n+ managers = get_instance_group_managers()\n+ request = managers.delete(instanceGroupManager=instance_group,\n+ zone=zone,\n+ project=project)\n+ return request.execute()\n+\n+\n+def create_instance_group(name: str, instance_template_url: str,\n+ base_instance_name: str, project: str, zone: str):\n+ \"\"\"Creates an instance group named |name| from the template specified by\n+ |instance_template_url|.\"\"\"\n+ managers = get_instance_group_managers()\n+ target_size = 1\n+\n+ body = {\n+ 'baseInstanceName': base_instance_name,\n+ 'targetSize': target_size,\n+ 'name': name,\n+ 'instanceTemplate': instance_template_url\n+ }\n+ request = managers.insert(body=body, project=project, zone=zone)\n+ return request.execute()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "common/test_gce.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Tests for gce.py.\"\"\"\n+from unittest import mock\n+\n+from common import gce\n+\n+PROJECT = 'my-cloud-project'\n+ZONE = 'my-compute-zone'\n+INSTANCE_GROUP = 'my-instance-group'\n+INSTANCE_TEMPLATE_URL = 'resource/my-instance-group'\n+EXPERIMENT = 'my-experiment'\n+\n+\[email protected]('common.gce.get_instance_group_managers')\n+def test_delete_instance_group(mocked_get_instance_group_managers):\n+ \"\"\"Tests that delete_instance_group uses the GCE API correctly.\"\"\"\n+ mock_managers = mock.Mock()\n+ mocked_get_instance_group_managers.return_value = mock_managers\n+ gce.delete_instance_group(INSTANCE_GROUP, PROJECT, ZONE)\n+ assert mock_managers.delete.call_args_list == [\n+ mock.call(instanceGroupManager=INSTANCE_GROUP,\n+ project=PROJECT,\n+ zone=ZONE)\n+ ]\n+\n+\[email protected]('common.gce.get_instance_group_managers')\n+def test_resize_instance_group(mocked_get_instance_group_managers):\n+ \"\"\"Tests that resize_instance_group uses the GCE API correctly.\"\"\"\n+ size = 10\n+ mock_managers = mock.Mock()\n+ mocked_get_instance_group_managers.return_value = mock_managers\n+ gce.resize_instance_group(size, INSTANCE_GROUP, PROJECT, ZONE)\n+ assert mock_managers.resize.call_args_list == [\n+ mock.call(instanceGroupManager=INSTANCE_GROUP,\n+ size=size,\n+ project=PROJECT,\n+ zone=ZONE)\n+ ]\n+\n+\[email protected]('common.gce.get_instance_group_managers')\n+def test_create_instance_group(mocked_get_instance_group_managers):\n+ \"\"\"Tests that create_instance_group uses the GCE API correctly.\"\"\"\n+ mock_managers = mock.Mock()\n+ mocked_get_instance_group_managers.return_value = mock_managers\n+ base_instance_name = 'm-' + EXPERIMENT\n+ gce.create_instance_group(INSTANCE_GROUP, INSTANCE_TEMPLATE_URL,\n+ base_instance_name, PROJECT, ZONE)\n+ body = {\n+ 'baseInstanceName': 'm-' + EXPERIMENT,\n+ 'targetSize': 1,\n+ 'name': INSTANCE_GROUP,\n+ 'instanceTemplate': INSTANCE_TEMPLATE_URL,\n+ }\n+ assert mock_managers.insert.call_args_list == [\n+ mock.call(body=body, project=PROJECT, zone=ZONE)\n+ ]\n+\n+\[email protected]('common.gce.get_instance_group_managers')\n+def test_get_instance_group_size(mocked_get_instance_group_managers):\n+ \"\"\"Tests that get_instance_group_size uses the GCE API correctly and returns\n+ the right value.\"\"\"\n+ mock_managers = mock.Mock()\n+ mocked_get_instance_group_managers.return_value = mock_managers\n+ mock_req = mock.Mock()\n+ mock_managers.get.return_value = mock_req\n+ size = 1\n+ mock_req.execute.return_value = {'targetSize': size}\n+ result = gce.get_instance_group_size(INSTANCE_GROUP, PROJECT, ZONE)\n+ assert mock_managers.get.call_args_list == [\n+ mock.call(instanceGroupManager=INSTANCE_GROUP,\n+ project=PROJECT,\n+ zone=ZONE)\n+ ]\n+ assert result == size\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][GCE] Add instance group code to gce module. (#907)
Also add tests.
See #895. |
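A hedged sketch of the intended lifecycle (all names are made up, and gce.initialize() must have set up the API client first):

```python
from common import gce

PROJECT, ZONE = 'my-project', 'us-central1-a'
GROUP = 'worker-my-experiment'
TEMPLATE_URL = ('https://www.googleapis.com/compute/v1/projects/my-project'
                '/global/instanceTemplates/worker-my-experiment')

gce.initialize()
gce.create_instance_group(GROUP, TEMPLATE_URL, 'w-my-experiment', PROJECT, ZONE)
size = gce.get_instance_group_size(GROUP, PROJECT, ZONE)
gce.resize_instance_group(size + 5, GROUP, PROJECT, ZONE)
gce.delete_instance_group(GROUP, PROJECT, ZONE)
```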
258,388 | 28.10.2020 13:49:55 | 25,200 | 17bb285afeed21370d5c86733c3d9b80d98024b0 | [queue][worker] Set up worker image and build
Create an image for workers and set up building of the image.
This image will be used to run measure workers in the queue-based measurer.
See | [
{
"change_type": "MODIFY",
"old_path": "docker/generate_makefile.py",
"new_path": "docker/generate_makefile.py",
"diff": "@@ -91,7 +91,7 @@ def _get_makefile_run_template(image):\ndef get_rules_for_image(name, image):\n\"\"\"Returns makefile section for |image|.\"\"\"\n- if not ('base-' in name or 'dispatcher-' in name):\n+ if not ('base-' in name or 'dispatcher-' in name or name == 'worker'):\nsection = '.'\nelse:\nsection = ''\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/image_types.yaml",
"new_path": "docker/image_types.yaml",
"diff": "tag: 'dispatcher-image'\ntype: 'dispatcher'\n+'worker':\n+ depends_on:\n+ - 'base-image'\n+ dockerfile: 'docker/worker/Dockerfile'\n+ context: '.'\n+ tag: 'worker'\n+ type: 'worker'\n+\n# TODO: It would be better to call this benchmark builder. But that would be\n# confusing because this doesn't involve benchmark-builder/Dockerfile. Rename\n# that and then rename this.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docker/worker/Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-image\n+\n+ENV WORK /work\n+ENV SRC $WORK/src\n+RUN mkdir -p $SRC\n+\n+ADD . $SRC/\n+\n+ENTRYPOINT /bin/bash $SRC/docker/worker/startup-worker.sh\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docker/worker/startup-worker.sh",
"diff": "+#! /bin/bash\n+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+rq worker $EXPERIMENT --url redis://$REDIS_HOST:6379\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/gcb_build.py",
"new_path": "experiment/build/gcb_build.py",
"diff": "@@ -46,7 +46,10 @@ def _get_buildable_images(fuzzer=None, benchmark=None):\ndef build_base_images():\n\"\"\"Build base images on GCB.\"\"\"\n- image_templates = {'base-image': _get_buildable_images()['base-image']}\n+ buildable_images = _get_buildable_images()\n+ image_templates = {\n+ image: buildable_images[image] for image in ['base-image', 'worker']\n+ }\nconfig = generate_cloudbuild.create_cloudbuild_spec(image_templates,\nbuild_base_images=True)\n_build(config, 'base-images')\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/local_build.py",
"new_path": "experiment/build/local_build.py",
"diff": "@@ -35,7 +35,7 @@ def make(targets):\ndef build_base_images() -> Tuple[int, str]:\n\"\"\"Build base images locally.\"\"\"\n- return make(['base-image'])\n+ return make(['base-image', 'worker'])\ndef get_shared_coverage_binaries_dir():\n"
},
{
"change_type": "MODIFY",
"old_path": "experiment/build/test_docker_images.py",
"new_path": "experiment/build/test_docker_images.py",
"diff": "@@ -23,7 +23,7 @@ def test_images_to_build_list():\nbenchmarks = ['libxml', 'libpng']\nall_images = docker_images.get_images_to_build(fuzzers, benchmarks)\nassert set(all_images.keys()) == set([\n- 'base-image', 'dispatcher-image', 'libxml-project-builder',\n+ 'base-image', 'worker', 'dispatcher-image', 'libxml-project-builder',\n'libpng-project-builder', 'afl-libxml-builder-intermediate',\n'afl-libxml-intermediate-runner', 'afl-libxml-builder',\n'coverage-libxml-builder', 'afl-libpng-builder',\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][worker] Set up worker image and build (#908)
Create an image for workers and set up building of the image.
This image will be used to run measure workers in the queue-based measurer.
See #895. |
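The startup one-liner has a direct Python equivalent; a sketch assuming REDIS_HOST and EXPERIMENT are set as in the container environment:

```python
import os

import redis
import rq

connection = redis.Redis(host=os.environ['REDIS_HOST'])
# Listen on the queue named after the experiment, mirroring
# `rq worker $EXPERIMENT --url redis://$REDIS_HOST:6379`.
worker = rq.Worker([os.environ['EXPERIMENT']], connection=connection)
worker.work()  # Blocks, pulling measurement jobs until terminated.
```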
258,388 | 28.10.2020 15:02:59 | 25,200 | e807a4c5eb77a476ec6becfcf982da19e864910b | [queue][gcloud] Add instance template code to gcloud
See | [
{
"change_type": "MODIFY",
"old_path": "common/gcloud.py",
"new_path": "common/gcloud.py",
"diff": "\"\"\"Google cloud related code.\"\"\"\nimport enum\n+import posixpath\nimport subprocess\nfrom typing import List\n@@ -112,3 +113,33 @@ def run_local_instance(startup_script: str = None) -> bool:\ncommand = ['/bin/bash', startup_script]\nsubprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nreturn new_process.ProcessResult(0, '', False)\n+\n+\n+def create_instance_template(template_name, docker_image, env, project, zone):\n+ \"\"\"Returns a ProcessResult from running the command to create an instance\n+ template.\"\"\"\n+ # Creating an instance template cannot be done using the GCE API because\n+ # there is no public API for handling some docker related functionality that\n+ # we need.\n+ command = [\n+ 'gcloud', 'compute', '--project', project, 'instance-templates',\n+ 'create-with-container', template_name, '--no-address',\n+ '--image-family=cos-stable', '--image-project=cos-cloud',\n+ '--region=%s' % zone, '--scopes=cloud-platform',\n+ '--machine-type=n1-standard-1', '--boot-disk-size=50GB',\n+ '--preemptible', '--container-image', docker_image\n+ ]\n+ for item in env.items():\n+ command.extend(['--container-env', '%s=%s' % item])\n+ new_process.execute(command)\n+ return posixpath.join('https://www.googleapis.com/compute/v1/projects/',\n+ project, 'global', 'instanceTemplates', template_name)\n+\n+\n+def delete_instance_template(template_name: str):\n+ \"\"\"Returns a ProcessResult from running the command to delete the\n+ measure_worker template for this |experiment|.\"\"\"\n+ command = [\n+ 'gcloud', 'compute', 'instance-templates', 'delete', template_name\n+ ]\n+ return new_process.execute(command)\n"
},
{
"change_type": "MODIFY",
"old_path": "common/test_gcloud.py",
"new_path": "common/test_gcloud.py",
"diff": "@@ -147,3 +147,42 @@ def test_delete_instances_fail(mocked_execute):\nresult = gcloud.delete_instances(instances, zone)\nassert not result\nmocked_execute.assert_called_with(expected_command, expect_zero=False)\n+\n+\[email protected]('common.new_process.execute')\n+def test_create_instance_template(mocked_execute):\n+ \"\"\"Tests that create_instance_template uses the correct gcloud command and\n+ returns the correct instance template URL.\"\"\"\n+ template_name = 'my-template'\n+ docker_image = 'docker_image'\n+ env = {'ENV_VAR': 'value'}\n+ project = 'fuzzbench'\n+ result = gcloud.create_instance_template(template_name, docker_image, env,\n+ project, ZONE)\n+ expected_command = [\n+ 'gcloud', 'compute', '--project', project, 'instance-templates',\n+ 'create-with-container', template_name, '--no-address',\n+ '--image-family=cos-stable', '--image-project=cos-cloud',\n+ '--region=zone-a', '--scopes=cloud-platform',\n+ '--machine-type=n1-standard-1', '--boot-disk-size=50GB',\n+ '--preemptible', '--container-image', docker_image, '--container-env',\n+ 'ENV_VAR=value'\n+ ]\n+ mocked_execute.assert_called_with(expected_command)\n+ expected_result = (\n+ 'https://www.googleapis.com/compute/v1/projects/{project}'\n+ '/global/instanceTemplates/{name}').format(project=project,\n+ name=template_name)\n+ assert result == expected_result\n+\n+\[email protected]('common.new_process.execute')\n+def test_delete_instance_template(mocked_execute):\n+ \"\"\"Tests that delete_instance_template uses the correct gcloud command to\n+ delete an instance template.\"\"\"\n+ template_name = 'my-template'\n+ gcloud.delete_instance_template(template_name)\n+ expected_command = [\n+ 'gcloud', 'compute', 'instance-templates', 'delete', template_name\n+ ]\n+ mocked_execute.assert_called_with(expected_command)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][gcloud] Add instance template code to gcloud (#909)
See #895 |
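The dict-to-flags expansion used by create_instance_template, isolated for clarity:

```python
def container_env_flags(env):
    """Expands {'K': 'V'} into ['--container-env', 'K=V'] flag pairs."""
    flags = []
    for item in env.items():
        flags.extend(['--container-env', '%s=%s' % item])
    return flags


assert container_env_flags({'REDIS_HOST': 'redis', 'EXPERIMENT': 'e1'}) == [
    '--container-env', 'REDIS_HOST=redis', '--container-env', 'EXPERIMENT=e1'
]
```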
258,388 | 28.10.2020 17:40:13 | 25,200 | 1ffdcf3ed246f047c5d6ef7cbc911a60599918e7 | Remove .vscode directory
Also add .vscode to .gitignore. | [
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -36,3 +36,5 @@ docker/generated.mk\n# Emacs backup files.\n*~\n\\#*\\#\n+\n+.vscode\n\\ No newline at end of file\n"
},
{
"change_type": "DELETE",
"old_path": ".vscode/settings.json",
"new_path": null,
"diff": "-{\n- \"python.linting.pylintEnabled\": true\n-}\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Remove .vscode directory (#913)
Also add .vscode to .gitignore. |
258,388 | 29.10.2020 07:40:06 | 25,200 | c7fa3d8709ee5b1e4ec9270983a2bba41b4382c2 | [queue][gcloud] Add module for scheduling measure workers
See | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/schedule_measure_workers.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Module for starting instances to run measure workers.\"\"\"\n+import collections\n+import os\n+import posixpath\n+import sys\n+import time\n+\n+from common import experiment_utils\n+from common import gce\n+from common import gcloud\n+from common import logs\n+from common import queue_utils\n+from common import yaml_utils\n+\n+logger = logs.Logger('schedule_measure_workers') # pylint: disable=invalid-name\n+\n+# This is the default quota on GCE.\n+# TODO(metzman): Use the GCE API to determine this quota.\n+MAX_INSTANCES_PER_GROUP = 1000\n+\n+\n+def get_instance_group_name(experiment: str):\n+ \"\"\"Returns the name of the instance group of measure workers for\n+ |experiment|.\"\"\"\n+ # \"worker-\" needs to come first because name cannot start with number.\n+ return 'worker-' + experiment\n+\n+\n+def get_measure_worker_instance_template_name(experiment: str):\n+ \"\"\"Returns an instance template name for measurer workers running in\n+ |experiment|.\"\"\"\n+ return 'worker-' + experiment\n+\n+\n+def initialize(experiment_config: dict):\n+ \"\"\"Initialize everything that will be needed to schedule measurers.\"\"\"\n+ logger.info('Initializing worker scheduling.')\n+ gce.initialize()\n+ experiment = experiment_config['experiment']\n+ project = experiment_config['project']\n+ instance_template_name = get_measure_worker_instance_template_name(\n+ experiment)\n+ docker_image = posixpath.join(experiment_config['docker_registry'],\n+ 'measure-worker:{}'.format(experiment))\n+\n+ redis_host = experiment_config['redis_host']\n+ experiment_filestore = experiment_config['experiment_filestore']\n+ local_experiment = experiment_utils.is_local_experiment()\n+ cloud_compute_zone = experiment_config.get('cloud_compute_zone')\n+ env = {\n+ 'REDIS_HOST': redis_host,\n+ 'EXPERIMENT_FILESTORE': experiment_filestore,\n+ 'EXPERIMENT': experiment,\n+ 'LOCAL_EXPERIMENT': local_experiment,\n+ 'CLOUD_COMPUTE_ZONE': cloud_compute_zone,\n+ }\n+\n+ zone = experiment_config['cloud_compute_zone']\n+ instance_template_url = gcloud.create_instance_template(\n+ instance_template_name, docker_image, env, project, zone)\n+\n+ instance_group_name = get_instance_group_name(experiment)\n+\n+ # GCE will create instances for this group in the format\n+ # \"m-$experiment-$UNIQUE_ID\". 
Use 'm' is short for \"measurer\".\n+ base_instance_name = 'm-' + experiment\n+\n+ gce.create_instance_group(instance_group_name, instance_template_url,\n+ base_instance_name, project, zone)\n+ queue = queue_utils.initialize_queue(redis_host)\n+ return queue\n+\n+\n+def teardown(experiment_config: dict):\n+ \"\"\"Teardown all resources used for running measurer workers.\"\"\"\n+ instance_group_name = get_instance_group_name(\n+ experiment_config['experiment'])\n+ project = experiment_config['cloud_project']\n+ zone = experiment_config['cloud_compute_zone']\n+ gce.delete_instance_group(instance_group_name, project, zone)\n+ gcloud.delete_instance_template(experiment_config['experiment'])\n+\n+\n+def schedule(experiment_config: dict, queue):\n+ \"\"\"Schedule measurer workers. This cannot be called before\n+ initialize_measurers.\"\"\"\n+ logger.info('Scheduling measurer workers.')\n+\n+ # TODO(metzman): This method doesn't seem to correctly take into account\n+ # jobs that are running (the API provided by rq doesn't work intuitively).\n+ # That is OK for now since scheduling only happens while nothing is being\n+ # measured but this should be fixed.\n+ jobs = queue_utils.get_all_jobs(queue)\n+ counts = collections.defaultdict(int)\n+ for job in jobs:\n+ counts[job.get_status(refresh=False)] += 1\n+\n+ num_instances_needed = counts['queued'] + counts['started']\n+ num_instances_needed = min(num_instances_needed, MAX_INSTANCES_PER_GROUP)\n+\n+ logger.info('Scheduling %d workers.', num_instances_needed)\n+ instance_group_name = get_instance_group_name(\n+ experiment_config['experiment'])\n+ project = experiment_config['cloud_project']\n+ zone = experiment_config['cloud_compute_zone']\n+ num_instances = gce.get_instance_group_size(instance_group_name, project,\n+ zone)\n+\n+ # TODO(metzman): Use autoscaling as it probably can deal with quotas more\n+ # easily.\n+ if not num_instances_needed:\n+ # Can't go below 1 instance per group.\n+ logs.info('num_instances_needed = 0, resizing to 1.')\n+ num_instances_needed = 1\n+\n+ if num_instances_needed != num_instances:\n+ # TODO(metzman): Add some limits so always have some measurers but not\n+ # too many.\n+ gce.resize_instance_group(num_instances_needed, instance_group_name,\n+ project, zone)\n+\n+\n+def main():\n+ \"\"\"Run schedule_measure_workers as a standalone script by calling schedule\n+ in a loop. Useful for debugging.\"\"\"\n+ logs.initialize(\n+ default_extras={\n+ 'experiment': os.environ['EXPERIMENT'],\n+ 'component': 'dispatcher',\n+ 'subcomponent': 'scheduler'\n+ })\n+ gce.initialize()\n+ config_path = sys.argv[1]\n+ config = yaml_utils.read(config_path)\n+ queue = initialize(config)\n+ while True:\n+ schedule(config, queue)\n+ time.sleep(30)\n+\n+\n+if __name__ == '__main__':\n+ main()\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][gcloud] Add module for scheduling measure workers (#910)
See #895 |
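The sizing rule inside schedule(), reduced to a standalone sketch:

```python
import collections

MAX_INSTANCES_PER_GROUP = 1000


def num_workers_needed(job_statuses):
    counts = collections.Counter(job_statuses)
    needed = counts['queued'] + counts['started']
    # Clamp to the per-group quota; a managed instance group also cannot
    # be resized below one instance.
    return max(1, min(needed, MAX_INSTANCES_PER_GROUP))


assert num_workers_needed(['queued'] * 3 + ['started', 'finished']) == 4
assert num_workers_needed(['finished', 'failed']) == 1
```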
258,388 | 29.10.2020 07:43:44 | 25,200 | 497c9d08ec780d48d2e4819a88b172fbf855f27e | [CI] Separate buggy benchmarks into their own build jobs
Do this to speed up builds and make timeouts less frequent. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"new_path": ".github/workflows/build_and_test_run_fuzzer_benchmarks.py",
"diff": "@@ -25,26 +25,19 @@ ALWAYS_BUILD_FUZZER = 'afl'\nNUM_RETRIES = 2\nRETRY_DELAY = 60\n-# TODO(tanq16): Get list of Benchmarks automatically.\n-\n# Don't build php benchmark since it fills up disk in GH actions.\nOSS_FUZZ_BENCHMARKS = {\n'bloaty_fuzz_target',\n'curl_curl_fuzzer_http',\n'harfbuzz_hb-subset-fuzzer',\n'jsoncpp_jsoncpp_fuzzer',\n- 'libhevc_hevc_dec_fuzzer',\n'libpcap_fuzz_both',\n'libxslt_xpath',\n- 'matio_matio_fuzzer',\n'mbedtls_fuzz_dtlsclient',\n- 'ndpi_fuzz_ndpi_reader',\n'openh264_decoder_fuzzer',\n'openssl_x509',\n'sqlite3_ossfuzz',\n- 'stb_stbi_read_fuzzer',\n'systemd_fuzz-link-parser',\n- 'wabt_wasm2wat_fuzzer',\n'zlib_zlib_uncompress_fuzzer',\n}\n@@ -62,6 +55,14 @@ STANDARD_BENCHMARKS = {\n'woff2-2016-05-06',\n}\n+BUG_BENCHMARKS = {\n+ 'libhevc_hevc_dec_fuzzer',\n+ 'matio_matio_fuzzer',\n+ 'ndpi_fuzz_ndpi_reader',\n+ 'stb_stbi_read_fuzzer',\n+ 'wabt_wasm2wat_fuzzer',\n+}\n+\ndef get_make_target(fuzzer, benchmark):\n\"\"\"Return test target for a fuzzer and benchmark.\"\"\"\n@@ -121,6 +122,8 @@ def do_build(build_type, fuzzer, always_build):\nbenchmarks = OSS_FUZZ_BENCHMARKS\nelif build_type == 'standard':\nbenchmarks = STANDARD_BENCHMARKS\n+ elif build_type == 'bug':\n+ benchmarks = BUG_BENCHMARKS\nelse:\nraise Exception('Invalid build_type: %s' % build_type)\n"
},
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -5,7 +5,10 @@ on:\n- 'docker/**' # Base image changes.\n- 'fuzzers/**' # Changes to fuzzers themselves.\n- 'benchmarks/**' # Changes to benchmarks.\n- - 'src_analysis/**' # Changes that affect what gets built.\n+ # Changes that affect what gets built.\n+ - 'src_analysis/**'\n+ - '.github/worfkflows/fuzzers.yml'\n+ - '.github/worfkflows/build_and_test_run_fuzzer_benchmarks.py'\njobs:\nbuild:\n@@ -56,6 +59,7 @@ jobs:\nbenchmark_type:\n- oss-fuzz\n- standard\n+ - bug\nsteps:\n- uses: actions/checkout@v2\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [CI] Separate buggy benchmarks into their own build jobs (#912)
Do this to speed up builds and make timeouts less frequent. |
258,388 | 29.10.2020 07:50:12 | 25,200 | 61b2314c30ed89be95c88339e3a86fcfad84e394 | [queue][schedule_measure_workers] Add helper method.
Add helper method requested in
See | [
{
"change_type": "MODIFY",
"old_path": "experiment/schedule_measure_workers.py",
"new_path": "experiment/schedule_measure_workers.py",
"diff": "@@ -45,6 +45,12 @@ def get_measure_worker_instance_template_name(experiment: str):\nreturn 'worker-' + experiment\n+def get_base_worker_instance_name(experiment):\n+ \"\"\"GCE will create instances for this group in the format\n+ \"w-|experiment|-$UNIQUE_ID\". 'w' is short for \"worker\".\"\"\"\n+ return 'w-' + experiment\n+\n+\ndef initialize(experiment_config: dict):\n\"\"\"Initialize everything that will be needed to schedule measurers.\"\"\"\nlogger.info('Initializing worker scheduling.')\n@@ -74,9 +80,7 @@ def initialize(experiment_config: dict):\ninstance_group_name = get_instance_group_name(experiment)\n- # GCE will create instances for this group in the format\n- # \"m-$experiment-$UNIQUE_ID\". Use 'm' is short for \"measurer\".\n- base_instance_name = 'm-' + experiment\n+ base_instance_name = get_base_worker_instance_name(experiment)\ngce.create_instance_group(instance_group_name, instance_template_url,\nbase_instance_name, project, zone)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue][schedule_measure_workers] Add helper method. (#915)
Add helper method requested in #910.
See #895. |
258,420 | 30.10.2020 15:04:39 | 0 | 07e8d72cea7431c8d7595d2d5da33fa6abaf0f72 | Pin base-builder parent image using sha256 hash for bug-based benchmarks | [
{
"change_type": "MODIFY",
"old_path": "benchmarks/harfbuzz_hb-subset-fuzzer/Dockerfile",
"new_path": "benchmarks/harfbuzz_hb-subset-fuzzer/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:7c6edc92d725ec4117ed7f380fb2fa51c2382a037c405dd3fe1ee2870deeb1a1\nRUN apt-get update && apt-get install -y python3-pip ragel pkg-config && \\\npip3 install meson==0.53.0 ninja\nRUN git clone --depth 1 https://github.com/harfbuzz/harfbuzz.git\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/libhevc_hevc_dec_fuzzer/Dockerfile",
"new_path": "benchmarks/libhevc_hevc_dec_fuzzer/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:270e52ac2c38de7d7d678ab488252c81652a161b378fc9fc9f3df6c79e475afa\nMAINTAINER [email protected]\nRUN apt-get update && apt-get install -y wget cmake\nRUN git clone https://android.googlesource.com/platform/external/libhevc\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/ndpi_fuzz_ndpi_reader/Dockerfile",
"new_path": "benchmarks/ndpi_fuzz_ndpi_reader/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:83cf1d8893892d6bc471008e50c5fd078a3b8efe80c421b56334e0a28a60fa53\nMAINTAINER [email protected]\nRUN apt-get update && apt-get install -y make autoconf automake autogen pkg-config libtool flex bison\nRUN git clone --depth 1 https://github.com/ntop/nDPI.git ndpi\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/openh264_decoder_fuzzer/Dockerfile",
"new_path": "benchmarks/openh264_decoder_fuzzer/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:d321113ffaca3cd8dcec0ceff2c594c01b6a860971d02058da7d42c0c82e3d23\nMAINTAINER [email protected]\nRUN dpkg --add-architecture i386 && \\\napt-get update && \\\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/stb_stbi_read_fuzzer/Dockerfile",
"new_path": "benchmarks/stb_stbi_read_fuzzer/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:12b7b470479f04fd1b6a124291486f555169a5496a355ee75cf333c117d4bb92\nRUN apt-get update && \\\napt-get install -y wget tar\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmarks/wabt_wasm2wat_fuzzer/Dockerfile",
"new_path": "benchmarks/wabt_wasm2wat_fuzzer/Dockerfile",
"diff": "#\n################################################################################\n-FROM gcr.io/oss-fuzz-base/base-builder\n+FROM gcr.io/oss-fuzz-base/base-builder@sha256:c0466abc5bd8515aaceb3fa06725c982522c1127d3559da8615ad22fc43c78a7\nMAINTAINER [email protected]\nRUN apt-get update && apt-get install -y cmake libtool make python\nRUN git clone --recursive https://github.com/WebAssembly/wabt\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Pin base-builder parent image using sha256 hash for bug-based benchmarks (#923) |
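A hypothetical helper showing the mechanical rewrite each Dockerfile received; the real edits were applied per benchmark, and the regex and function name here are illustrative only:

```python
import re


def pin_base_builder(dockerfile_text, digest):
    """Replaces the floating base-builder tag with a digest-pinned reference."""
    return re.sub(r'FROM gcr\.io/oss-fuzz-base/base-builder\s*$',
                  'FROM gcr.io/oss-fuzz-base/base-builder@%s' % digest,
                  dockerfile_text,
                  flags=re.MULTILINE)


before = 'FROM gcr.io/oss-fuzz-base/base-builder\nRUN apt-get update\n'
after = pin_base_builder(before, 'sha256:7c6e...a1')  # digest truncated here
assert '@sha256:' in after
```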
258,388 | 30.10.2020 08:14:39 | 25,200 | bfda841140c1e6db258cf9b2e9e387420fa3bf1e | [queue] Rename measurer to measurer_manager
Do this in anticipation of splitting the module into two parts:
manager and worker.
See | [
{
"change_type": "MODIFY",
"old_path": "experiment/dispatcher.py",
"new_path": "experiment/dispatcher.py",
"diff": "@@ -31,7 +31,7 @@ from common import yaml_utils\nfrom database import models\nfrom database import utils as db_utils\nfrom experiment.build import builder\n-from experiment.measurer import measurer\n+from experiment.measurer import measure_manager\nfrom experiment import reporter\nfrom experiment import scheduler\nfrom experiment import stop_experiment\n@@ -156,7 +156,7 @@ def dispatcher_main():\nscheduler_loop_thread.start()\nmeasurer_main_process = multiprocessing.Process(\n- target=measurer.measure_main, args=(experiment.config,))\n+ target=measure_manager.measure_main, args=(experiment.config,))\nmeasurer_main_process.start()\n"
},
{
"change_type": "RENAME",
"old_path": "experiment/measurer/measurer.py",
"new_path": "experiment/measurer/measure_manager.py",
"diff": "-#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n"
},
{
"change_type": "RENAME",
"old_path": "experiment/measurer/test_measurer.py",
"new_path": "experiment/measurer/test_measure_manager.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-\"\"\"Tests for measurer.py.\"\"\"\n+\"\"\"Tests for measure_manager.py.\"\"\"\nimport os\nimport shutil\n@@ -25,7 +25,7 @@ from common import new_process\nfrom database import models\nfrom database import utils as db_utils\nfrom experiment.build import build_utils\n-from experiment.measurer import measurer\n+from experiment.measurer import measure_manager\nfrom test_libs import utils as test_utils\nTEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'test_data')\n@@ -41,7 +41,7 @@ MAX_TOTAL_TIME = 100\nGIT_HASH = 'FAKE-GIT-HASH'\nCYCLE = 1\n-SNAPSHOT_LOGGER = measurer.logger\n+SNAPSHOT_LOGGER = measure_manager.logger\n# pylint: disable=unused-argument,invalid-name,redefined-outer-name,protected-access\n@@ -58,8 +58,8 @@ def db_experiment(experiment_config, db):\ndef test_get_current_coverage(fs, experiment):\n\"\"\"Tests that get_current_coverage reads the correct data from json file.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\njson_cov_summary_file = get_test_data_path('cov_summary.json')\nfs.add_real_file(json_cov_summary_file, read_only=False)\nsnapshot_measurer.cov_summary_file = json_cov_summary_file\n@@ -70,8 +70,8 @@ def test_get_current_coverage(fs, experiment):\ndef test_get_current_coverage_error(fs, experiment):\n\"\"\"Tests that get_current_coverage returns None from a\ndefective json file.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\njson_cov_summary_file = get_test_data_path('cov_summary_defective.json')\nfs.add_real_file(json_cov_summary_file, read_only=False)\nsnapshot_measurer.cov_summary_file = json_cov_summary_file\n@@ -81,8 +81,8 @@ def test_get_current_coverage_error(fs, experiment):\ndef test_get_current_coverage_no_file(fs, experiment):\n\"\"\"Tests that get_current_coverage returns None with no json file.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\njson_cov_summary_file = get_test_data_path('cov_summary_not_exist.json')\nsnapshot_measurer.cov_summary_file = json_cov_summary_file\ncovered_regions = snapshot_measurer.get_current_coverage()\n@@ -93,8 +93,8 @@ def test_get_current_coverage_no_file(fs, experiment):\ndef test_generate_profdata_create(mocked_execute, experiment, fs):\n\"\"\"Tests that generate_profdata can run the correct command.\"\"\"\nmocked_execute.return_value = new_process.ProcessResult(0, '', False)\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nsnapshot_measurer.profdata_file = '/work/reports/data.profdata'\nsnapshot_measurer.profraw_file_pattern = '/work/reports/data-%m.profraw'\nprofraw_file = '/work/reports/data-123.profraw'\n@@ -115,8 +115,8 @@ def test_generate_profdata_create(mocked_execute, experiment, fs):\ndef test_generate_profdata_merge(mocked_execute, experiment, fs):\n\"\"\"Tests that 
generate_profdata can run correctly with existing profraw.\"\"\"\nmocked_execute.return_value = new_process.ProcessResult(0, '', False)\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nsnapshot_measurer.profdata_file = '/work/reports/data.profdata'\nsnapshot_measurer.profraw_file_pattern = '/work/reports/data-%m.profraw'\nprofraw_file = '/work/reports/data-123.profraw'\n@@ -143,8 +143,8 @@ def test_generate_summary(mocked_get_coverage_binary, mocked_execute,\ncoverage_binary_path = '/work/coverage-binaries/benchmark-a/fuzz-target'\nmocked_get_coverage_binary.return_value = coverage_binary_path\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nsnapshot_measurer.cov_summary_file = \"/reports/cov_summary.txt\"\nsnapshot_measurer.profdata_file = \"/reports/data.profdata\"\nfs.create_dir('/reports')\n@@ -164,17 +164,18 @@ def test_generate_summary(mocked_get_coverage_binary, mocked_execute,\[email protected]('common.logs.error')\[email protected]('experiment.measurer.measurer.initialize_logs')\[email protected]('experiment.measurer.measure_manager.initialize_logs')\[email protected]('multiprocessing.Queue')\[email protected]('experiment.measurer.measurer.measure_snapshot_coverage')\[email protected]('experiment.measurer.measure_manager.measure_snapshot_coverage')\ndef test_measure_trial_coverage(mocked_measure_snapshot_coverage, mocked_queue,\n_, __):\n\"\"\"Tests that measure_trial_coverage works as expected.\"\"\"\nmin_cycle = 1\nmax_cycle = 10\n- measure_request = measurer.SnapshotMeasureRequest(FUZZER, BENCHMARK,\n- TRIAL_NUM, min_cycle)\n- measurer.measure_trial_coverage(measure_request, max_cycle, mocked_queue())\n+ measure_request = measure_manager.SnapshotMeasureRequest(\n+ FUZZER, BENCHMARK, TRIAL_NUM, min_cycle)\n+ measure_manager.measure_trial_coverage(measure_request, max_cycle,\n+ mocked_queue())\nexpected_calls = [\nmock.call(FUZZER, BENCHMARK, TRIAL_NUM, cycle)\nfor cycle in range(min_cycle, max_cycle + 1)\n@@ -187,9 +188,9 @@ def test_measure_trial_coverage(mocked_measure_snapshot_coverage, mocked_queue,\ndef test_measure_all_trials_not_ready(mocked_rsync, mocked_ls, experiment):\n\"\"\"Test running measure_all_trials before it is ready works as intended.\"\"\"\nmocked_ls.return_value = new_process.ProcessResult(1, '', False)\n- assert measurer.measure_all_trials(experiment_utils.get_experiment_name(),\n- MAX_TOTAL_TIME, test_utils.MockPool(),\n- queue.Queue())\n+ assert measure_manager.measure_all_trials(\n+ experiment_utils.get_experiment_name(), MAX_TOTAL_TIME,\n+ test_utils.MockPool(), queue.Queue())\nassert not mocked_rsync.called\n@@ -204,7 +205,7 @@ def test_measure_all_trials_no_more(mocked_directories_have_same_files,\nmocked_directories_have_same_files.return_value = True\nmocked_execute.return_value = new_process.ProcessResult(0, '', False)\nmock_pool = test_utils.MockPool()\n- assert not measurer.measure_all_trials(\n+ assert not measure_manager.measure_all_trials(\nexperiment_utils.get_experiment_name(), MAX_TOTAL_TIME, mock_pool,\nqueue.Queue())\n@@ -212,8 +213,8 @@ def test_measure_all_trials_no_more(mocked_directories_have_same_files,\ndef test_is_cycle_unchanged_doesnt_exist(experiment):\n\"\"\"Test that is_cycle_unchanged can properly determine if a 
cycle is\nunchanged or not when it needs to copy the file for the first time.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nthis_cycle = 1\nwith test_utils.mock_popen_ctx_mgr(returncode=1):\nassert not snapshot_measurer.is_cycle_unchanged(this_cycle)\n@@ -224,8 +225,8 @@ def test_is_cycle_unchanged_doesnt_exist(experiment):\ndef test_is_cycle_unchanged_first_copy(mocked_read, mocked_cp, experiment):\n\"\"\"Test that is_cycle_unchanged can properly determine if a cycle is\nunchanged or not when it needs to copy the file for the first time.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nthis_cycle = 100\nunchanged_cycles_file_contents = (\n'\\n'.join([str(num) for num in range(10)] + [str(this_cycle)]))\n@@ -239,8 +240,8 @@ def test_is_cycle_unchanged_first_copy(mocked_read, mocked_cp, experiment):\ndef test_is_cycle_unchanged_update(fs, experiment):\n\"\"\"Test that is_cycle_unchanged can properly determine that a\ncycle has changed when it has the file but needs to update it.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nthis_cycle = 100\ninitial_unchanged_cycles_file_contents = (\n@@ -263,8 +264,8 @@ def test_is_cycle_unchanged_update(fs, experiment):\ndef test_is_cycle_unchanged_skip_cp(mocked_cp, fs, experiment):\n\"\"\"Check that is_cycle_unchanged doesn't call filestore_utils.cp\nunnecessarily.\"\"\"\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nthis_cycle = 100\ninitial_unchanged_cycles_file_contents = (\n'\\n'.join([str(num) for num in range(10)] + [str(this_cycle + 1)]))\n@@ -279,8 +280,8 @@ def test_is_cycle_unchanged_no_file(mocked_cp, fs, experiment):\n\"\"\"Test that is_cycle_unchanged returns False when there is no\nunchanged-cycles file.\"\"\"\n# Make sure we log if there is no unchanged-cycles file.\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nmocked_cp.return_value = new_process.ProcessResult(1, '', False)\nassert not snapshot_measurer.is_cycle_unchanged(0)\n@@ -296,8 +297,8 @@ def test_run_cov_new_units(_, mocked_execute, fs, environ):\n'EXPERIMENT': 'experiment',\n}\nmocked_execute.return_value = new_process.ProcessResult(0, '', False)\n- snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)\nsnapshot_measurer.initialize_measurement_dirs()\nshared_units = ['shared1', 'shared2']\nfs.create_file(snapshot_measurer.measured_files_path,\n@@ -351,8 +352,8 @@ class TestIntegrationMeasurement:\n# portable binary.\[email protected](not os.getenv('FUZZBENCH_TEST_INTEGRATION'),\nreason='Not running integration tests.')\n- @mock.patch(\n- 'experiment.measurer.measurer.SnapshotMeasurer.is_cycle_unchanged')\n+ 
@mock.patch('experiment.measurer.measure_manager.SnapshotMeasurer'\n+ '.is_cycle_unchanged')\ndef test_measure_snapshot_coverage( # pylint: disable=too-many-locals\nself, mocked_is_cycle_unchanged, db, experiment, tmp_path):\n\"\"\"Integration test for measure_snapshot_coverage.\"\"\"\n@@ -384,9 +385,8 @@ class TestIntegrationMeasurement:\nexperiment=os.environ['EXPERIMENT'])\ndb_utils.add_all([trial])\n- snapshot_measurer = measurer.SnapshotMeasurer(trial.fuzzer,\n- trial.benchmark, trial.id,\n- SNAPSHOT_LOGGER)\n+ snapshot_measurer = measure_manager.SnapshotMeasurer(\n+ trial.fuzzer, trial.benchmark, trial.id, SNAPSHOT_LOGGER)\n# Set up the snapshot archive.\ncycle = 1\n@@ -400,7 +400,7 @@ class TestIntegrationMeasurement:\nmocked_cp.return_value = new_process.ProcessResult(0, '', False)\n# TODO(metzman): Create a system for using actual buckets in\n# integration tests.\n- snapshot = measurer.measure_snapshot_coverage(\n+ snapshot = measure_manager.measure_snapshot_coverage(\nsnapshot_measurer.fuzzer, snapshot_measurer.benchmark,\nsnapshot_measurer.trial_num, cycle)\nassert snapshot\n@@ -413,7 +413,7 @@ class TestIntegrationMeasurement:\ndef test_extract_corpus(archive_name, tmp_path):\n\"\"\"\"Tests that extract_corpus unpacks a corpus as we expect.\"\"\"\narchive_path = get_test_data_path(archive_name)\n- measurer.extract_corpus(archive_path, set(), tmp_path)\n+ measure_manager.extract_corpus(archive_path, set(), tmp_path)\nexpected_corpus_files = {\n'5ea57dfc9631f35beecb5016c4f1366eb6faa810',\n'2f1507c3229c5a1f8b619a542a8e03ccdbb3c29c',\n@@ -423,8 +423,8 @@ def test_extract_corpus(archive_name, tmp_path):\[email protected]('time.sleep', return_value=None)\[email protected]('experiment.measurer.measurer.set_up_coverage_binaries')\[email protected]('experiment.measurer.measurer.measure_all_trials',\[email protected]('experiment.measurer.measure_manager.set_up_coverage_binaries')\[email protected]('experiment.measurer.measure_manager.measure_all_trials',\nreturn_value=False)\[email protected]('multiprocessing.Manager')\[email protected]('multiprocessing.pool')\n@@ -433,17 +433,17 @@ def test_measure_loop_end(_, __, ___, ____, _____, ______, experiment_config,\ndb_experiment):\n\"\"\"Tests that measure_loop stops when there is nothing left to measure. 
In\nthis test, there is nothing left to measure on the first call.\"\"\"\n- measurer.measure_loop(experiment_config, 100)\n+ measure_manager.measure_loop(experiment_config, 100)\n# If everything went well, we should get to this point without any\n# exceptions.\[email protected]('time.sleep', return_value=None)\[email protected]('experiment.measurer.measurer.set_up_coverage_binaries')\[email protected]('experiment.measurer.measure_manager.set_up_coverage_binaries')\[email protected]('multiprocessing.Manager')\[email protected]('multiprocessing.pool')\[email protected]('experiment.scheduler.all_trials_ended', return_value=True)\[email protected]('experiment.measurer.measurer.measure_all_trials')\[email protected]('experiment.measurer.measure_manager.measure_all_trials')\ndef test_measure_loop_loop_until_end(mocked_measure_all_trials, _, __, ___,\n____, _____, experiment_config,\ndb_experiment):\n@@ -464,7 +464,7 @@ def test_measure_loop_loop_until_end(mocked_measure_all_trials, _, __, ___,\nreturn True\nmocked_measure_all_trials.side_effect = mock_measure_all_trials\n- measurer.measure_loop(experiment_config, 100)\n+ measure_manager.measure_loop(experiment_config, 100)\nassert call_count == loop_iterations\n@@ -475,7 +475,7 @@ def test_path_exists_in_experiment_filestore(mocked_execute, environ):\nos.environ['WORK'] = work_dir\nos.environ['EXPERIMENT_FILESTORE'] = 'gs://cloud-bucket'\nos.environ['EXPERIMENT'] = 'example-experiment'\n- measurer.exists_in_experiment_filestore(work_dir)\n+ measure_manager.exists_in_experiment_filestore(work_dir)\nmocked_execute.assert_called_with(\n['gsutil', 'ls', 'gs://cloud-bucket/example-experiment'],\nexpect_zero=False)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [queue] Rename measurer to measure_manager (#916)
Do this in anticipation of splitting the module into two parts:
manager and worker.
See #895. |
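Most of the churn in the diff above is forced by how `unittest.mock` resolves patch targets: each target is a plain string naming the module where the attribute gets looked up, so renaming `measurer` to `measure_manager` silently invalidates every `mock.patch('experiment.measurer.measurer...')` string, and no refactoring tool follows them. A minimal runnable sketch of that lookup-by-string behavior, using only the standard library rather than any FuzzBench module:

```python
from unittest import mock

import os.path


def corpus_exists(path):
    # os.path.isdir is looked up through the os.path module at call time,
    # which is why the patch target below must name 'os.path.isdir'.
    return os.path.isdir(path)


# The target string must name where the attribute lives *now*; after a
# module rename, a stale target string fails with ModuleNotFoundError at
# test time instead of being caught during the refactor.
with mock.patch('os.path.isdir', return_value=True) as mocked_isdir:
    assert corpus_exists('/nonexistent/corpus')
    mocked_isdir.assert_called_once_with('/nonexistent/corpus')
```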
258,417 | 03.12.2020 16:28:22 | 28,800 | 0ec632fb9e4f747436dd6d2f6d592f06eae94e20 | add aflplusplus_268c_qemu | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -55,6 +55,7 @@ jobs:\n- aflplusplus_fast_branches_v2\n- aflplusplus_cmplog\n- aflplusplus_dict2file\n+ - aflplusplus_268c_qemu\n- aflplusplus_fast_v2_add\n- aflplusplus_fast_v2_depth\n- aflplusplus_fast_v2_cutoff\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/aflplusplus_268c_qemu/builder.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+ARG parent_image\n+FROM $parent_image\n+\n+# Install wget to download afl_driver.cpp. Install libstdc++ to use llvm_mode.\n+RUN apt-get update && \\\n+ apt-get install -y wget libstdc++-5-dev libtool-bin automake flex bison \\\n+ libglib2.0-dev libpixman-1-dev python3-setuptools unzip\n+\n+# Why do some build images have ninja, other not? Weird.\n+RUN cd / && wget https://github.com/ninja-build/ninja/releases/download/v1.10.1/ninja-linux.zip && \\\n+ unzip ninja-linux.zip && chmod 755 ninja && mv ninja /usr/local/bin\n+\n+# Build afl++ without Python support as we don't need it.\n+# Set AFL_NO_X86 to skip flaky tests.\n+RUN git clone https://github.com/AFLplusplus/AFLplusplus.git /afl && \\\n+ cd /afl && git checkout ee206da3897fd2d9f72206c3c5ea0e3fab109001 && \\\n+ unset CFLAGS && unset CXXFLAGS && \\\n+ AFL_NO_X86=1 CC=clang PYTHON_INCLUDE=/ make && \\\n+ cd qemu_mode && ./build_qemu_support.sh && cd .. && \\\n+ make -C examples/aflpp_driver && \\\n+ cp examples/aflpp_driver/libAFLQemuDriver.a /libAFLDriver.a && \\\n+ cp examples/aflpp_driver/aflpp_qemu_driver_hook.so /\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/aflplusplus_268c_qemu/description.md",
"diff": "+# aflplusplus_qemu\n+\n+AFL++ fuzzer instance for binary-only fuzzing with qemu_mode.\n+The following config active for all benchmarks:\n+ - qemu_mode with:\n+ - laf-intel (integers and floats)\n+ - entrypoint set to LLVMFuzzerTestOneInput\n+ - persisten mode set to LLVMFuzzerTestOneInput\n+ - in-memory shared memory test cases\n+ - \"coe\" power schedule\n+\n+Repository: [https://github.com/AFLplusplus/AFLplusplus/](https://github.com/AFLplusplus/AFLplusplus/)\n+\n+[builder.Dockerfile](builder.Dockerfile)\n+[fuzzer.py](fuzzer.py)\n+[runner.Dockerfile](runner.Dockerfile)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/aflplusplus_268c_qemu/fuzzer.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Integration code for AFLplusplus fuzzer.\"\"\"\n+\n+import os\n+import subprocess\n+\n+from fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer\n+\n+\n+def build():\n+ \"\"\"Build benchmark.\"\"\"\n+ aflplusplus_fuzzer.build('qemu')\n+\n+\n+def fuzz(input_corpus, output_corpus, target_binary):\n+ \"\"\"Run fuzzer.\"\"\"\n+ # Get LLVMFuzzerTestOneInput address.\n+ nm_proc = subprocess.run([\n+ 'sh', '-c',\n+ 'nm \\'' + target_binary + '\\' | grep \\'T afl_qemu_driver_stdin_input\\''\n+ ],\n+ stdout=subprocess.PIPE,\n+ check=True)\n+ target_func = \"0x\" + nm_proc.stdout.split()[0].decode(\"utf-8\")\n+ print('[fuzz] afl_qemu_driver_stdin_input() address =', target_func)\n+\n+ # Fuzzer option for qemu_mode.\n+ flags = ['-Q']\n+\n+ os.environ['AFL_COMPCOV_LEVEL'] = '3' # Complete compcov including floats\n+ os.environ['AFL_QEMU_PERSISTENT_ADDR'] = target_func\n+ os.environ['AFL_ENTRYPOINT'] = target_func\n+ os.environ['AFL_QEMU_PERSISTENT_CNT'] = \"100000\"\n+ os.environ['AFL_QEMU_DRIVER_NO_HOOK'] = \"1\"\n+ aflplusplus_fuzzer.fuzz(input_corpus,\n+ output_corpus,\n+ target_binary,\n+ flags=flags)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "fuzzers/aflplusplus_268c_qemu/runner.Dockerfile",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+FROM gcr.io/fuzzbench/base-image\n+\n+# This makes interactive docker runs painless:\n+ENV LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:/out\"\n+ENV AFL_MAP_SIZE=1048576\n+ENV PATH=\"$PATH:/out\"\n+ENV AFL_SKIP_CPUFREQ=1\n+ENV AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES=1\n"
},
{
"change_type": "MODIFY",
"old_path": "service/experiment-requests.yaml",
"new_path": "service/experiment-requests.yaml",
"diff": "#\n# You can run \"make presubmit\" to do basic validation on this file.\n# Please add new experiment requests towards the top of this file.\n+- experiment: 2020-12-04\n+ description: \"AFL++ 2.68c qemu mode\"\n+ fuzzers:\n+ - aflplusplus_268c_qemu\n+ - aflplusplus_qemu\n- experiment: 2020-12-03\ndescription: \"AFL++ some fast variants\"\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | add aflplusplus_268c_qemu (#970)
Co-authored-by: jonathanmetzman <[email protected]> |
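The new `fuzzer.py` resolves the persistent-mode target address by shelling out to `nm ... | grep` through `sh -c`. Below is a sketch of the same lookup done by parsing `nm` output directly in Python, which avoids the quoting dance around `target_binary`; `resolve_symbol_address` is a hypothetical helper, not part of the integration above, and it assumes binutils' `nm` is installed and the binary is unstripped:

```python
import subprocess


def resolve_symbol_address(binary_path, symbol):
    """Returns the '0x'-prefixed address of |symbol| in |binary_path|."""
    nm_output = subprocess.run(['nm', binary_path],
                               stdout=subprocess.PIPE,
                               check=True,
                               text=True).stdout
    for line in nm_output.splitlines():
        fields = line.split()
        # Defined-symbol lines look like:
        # 00000000004011a0 T afl_qemu_driver_stdin_input
        if len(fields) == 3 and fields[1] == 'T' and fields[2] == symbol:
            return '0x' + fields[0]
    raise ValueError(symbol + ' not found in ' + binary_path)
```

Pointing both `AFL_ENTRYPOINT` and `AFL_QEMU_PERSISTENT_ADDR` at the resolved address, as the integration does, makes QEMU begin coverage collection and run its persistent loop at the driver function instead of `main`.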
258,388 | 18.12.2020 11:30:31 | 28,800 | ea4f768b84424d51c67d04be06d66a6db6a69725 | Disable coverage building in CI and don't complain about coverage not being tested in CI. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/fuzzers.yml",
"new_path": ".github/workflows/fuzzers.yml",
"diff": "@@ -17,7 +17,6 @@ jobs:\nfail-fast: false\nmatrix:\nfuzzer:\n- - coverage\n- afl\n- aflcc\n- aflfast\n"
},
{
"change_type": "MODIFY",
"old_path": "presubmit.py",
"new_path": "presubmit.py",
"diff": "@@ -125,7 +125,7 @@ class FuzzerAndBenchmarkValidator:\n# We know this is invalid and have already complained about it.\nreturn False\n- if not is_fuzzer_tested_in_ci(fuzzer):\n+ if fuzzer != 'coverage' and not is_fuzzer_tested_in_ci(fuzzer):\nself.invalid_fuzzers.add(fuzzer)\nreturn False\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | Disable coverage building in CI and don't complain about coverage not being tested in CI. (#997) |
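The `presubmit.py` half of this change is a one-line guard: the coverage build is exercised by the experiment pipeline itself, so requiring a per-fuzzer CI matrix entry for it only produced false complaints. A minimal sketch of the exemption pattern, with `ci_tested_fuzzers` standing in for the real workflow-file parsing (all names here are illustrative, not FuzzBench's):

```python
CI_EXEMPT_FUZZERS = {'coverage'}


def fuzzer_has_ci_coverage(fuzzer, ci_tested_fuzzers):
    """Returns True if |fuzzer| is in the CI matrix or explicitly exempt."""
    return fuzzer in CI_EXEMPT_FUZZERS or fuzzer in ci_tested_fuzzers


assert fuzzer_has_ci_coverage('coverage', set())       # Exempt by policy.
assert fuzzer_has_ci_coverage('afl', {'afl'})          # Tested in CI.
assert not fuzzer_has_ci_coverage('aflfast', {'afl'})  # Should be flagged.
```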
258,388 | 11.01.2021 12:02:00 | 28,800 | fc35cc7bd571fdf23691fb5751d1454def51888c | [gcb] Log build failures | [
{
"change_type": "MODIFY",
"old_path": "experiment/build/gcb_build.py",
"new_path": "experiment/build/gcb_build.py",
"diff": "# limitations under the License.\n\"\"\"Module for building things on Google Cloud Build for use in trials.\"\"\"\n+import subprocess\nimport tempfile\nfrom typing import Dict\n@@ -100,8 +101,12 @@ def _build(\nresult = new_process.execute(command,\nwrite_to_stdout=False,\nkill_children=True,\n- timeout=timeout_seconds)\n+ timeout=timeout_seconds,\n+ expect_zero=False)\n+ # TODO(metzman): Refactor code so that local_build stores logs as well.\nbuild_utils.store_build_logs(config_name, result)\n+ if result.retcode != 0:\n+ raise subprocess.CalledProcessError(result.retcode, command)\nreturn result\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "experiment/build/test_gcb_build.py",
"diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"Tests for builder.py.\"\"\"\n+\n+import subprocess\n+from unittest import mock\n+\n+import pytest\n+\n+from common import new_process\n+from experiment.build import gcb_build\n+\n+# pylint: disable=protected-access\n+\n+FAIL_RESULT = new_process.ProcessResult(1, '', False)\n+\n+\[email protected]('common.new_process.execute', return_value=FAIL_RESULT)\[email protected]('experiment.build.build_utils.store_build_logs')\n+def test_build_error(mocked_store_build_logs, _):\n+ \"\"\"Tests that on error, _build raises subprocess.CalledProcessError and\n+ calls store_build_logs.\"\"\"\n+ config_name = 'config'\n+ with pytest.raises(subprocess.CalledProcessError):\n+ gcb_build._build({}, config_name)\n+ mocked_store_build_logs.assert_called_with(config_name, FAIL_RESULT)\n+\n+\n+SUCCESS_RESULT = new_process.ProcessResult(0, '', False)\n+\n+\[email protected]('common.new_process.execute', return_value=SUCCESS_RESULT)\[email protected]('experiment.build.build_utils.store_build_logs')\n+def test_build_success_store_logs(mocked_store_build_logs, _):\n+ \"\"\"Tests that on success _buiild stores build logs.\"\"\"\n+ config_name = 'config'\n+ gcb_build._build({}, config_name)\n+ mocked_store_build_logs.assert_called_with(config_name, SUCCESS_RESULT)\n"
}
] | Python | Apache License 2.0 | google/fuzzbench | [gcb] Log build failures (#1025) |
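The ordering inside `_build` is the substance of this change: execute with `expect_zero=False`, persist the logs, and only then raise, so a failed Cloud Build still leaves its output behind for debugging. A stand-alone sketch of that pattern using plain `subprocess`; the project's `new_process` and `build_utils` wrappers are deliberately not assumed here:

```python
import subprocess


def run_and_store_logs(command, log_path):
    """Runs |command|, saves its combined output to |log_path|, and raises
    on a nonzero exit so callers still see the failure."""
    result = subprocess.run(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            text=True,
                            check=False)  # Don't raise before logs are saved.
    with open(log_path, 'w') as log_file:
        log_file.write(result.stdout)
    if result.returncode != 0:
        raise subprocess.CalledProcessError(result.returncode, command)
    return result


# The logs survive even though the command fails:
try:
    run_and_store_logs(['false'], '/tmp/build-log.txt')
except subprocess.CalledProcessError:
    pass
```

Running with `check=True` instead would raise before the write and drop the logs on exactly the runs where they matter most.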