/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.env;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.SuppressForbidden;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
/**
* The environment in which the node runs: the filesystem locations (config, data, logs, plugins, etc.) resolved from settings.
*/
@SuppressForbidden(reason = "configures paths for the system")
// TODO: move PathUtils to be package-private here instead of
// public+forbidden api!
public class Environment {
private static final Path[] EMPTY_PATH_ARRAY = new Path[0];
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
public static final Setting<List<String>> PATH_DATA_SETTING = Setting.stringListSetting("path.data", Property.NodeScope);
public static final Setting<String> PATH_LOGS_SETTING = new Setting<>("path.logs", "", Function.identity(), Property.NodeScope);
public static final Setting<List<String>> PATH_REPO_SETTING = Setting.stringListSetting("path.repo", Property.NodeScope);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
private final Settings settings;
private final Path[] dataFiles;
private final Path[] repoFiles;
private final Path configFile;
private final Path pluginsFile;
private final Path modulesFile;
private final Path sharedDataFile;
/** location of bin/, used by plugin manager */
private final Path binFile;
/** location of lib/ */
private final Path libFile;
private final Path logsFile;
/** Path to the temporary file directory used by the JDK */
private final Path tmpFile;
public Environment(final Settings settings, final Path configPath) {
this(settings, configPath, PathUtils.get(System.getProperty("java.io.tmpdir")));
}
// Should only be called directly by this class's unit tests
Environment(final Settings settings, final Path configPath, final Path tmpPath) {
final Path homeFile;
if (PATH_HOME_SETTING.exists(settings)) {
homeFile = PathUtils.get(PATH_HOME_SETTING.get(settings)).toAbsolutePath().normalize();
} else {
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
}
if (configPath != null) {
configFile = configPath.toAbsolutePath().normalize();
} else {
configFile = homeFile.resolve("config");
}
tmpFile = Objects.requireNonNull(tmpPath);
pluginsFile = homeFile.resolve("plugins");
List<String> dataPaths = PATH_DATA_SETTING.get(settings);
if (dataPaths.isEmpty() == false) {
dataFiles = new Path[dataPaths.size()];
for (int i = 0; i < dataPaths.size(); i++) {
dataFiles[i] = PathUtils.get(dataPaths.get(i)).toAbsolutePath().normalize();
}
} else {
dataFiles = new Path[] { homeFile.resolve("data") };
}
if (PATH_SHARED_DATA_SETTING.exists(settings)) {
sharedDataFile = PathUtils.get(PATH_SHARED_DATA_SETTING.get(settings)).toAbsolutePath().normalize();
} else {
sharedDataFile = null;
}
List<String> repoPaths = PATH_REPO_SETTING.get(settings);
if (repoPaths.isEmpty()) {
repoFiles = EMPTY_PATH_ARRAY;
} else {
repoFiles = new Path[repoPaths.size()];
for (int i = 0; i < repoPaths.size(); i++) {
repoFiles[i] = PathUtils.get(repoPaths.get(i)).toAbsolutePath().normalize();
}
}
// this is trappy: Setting#get(Settings) will return a fallback setting's value, yet Setting#exists(Settings) returns false for it
if (PATH_LOGS_SETTING.exists(settings)) {
logsFile = PathUtils.get(PATH_LOGS_SETTING.get(settings)).toAbsolutePath().normalize();
} else {
logsFile = homeFile.resolve("logs");
}
binFile = homeFile.resolve("bin");
libFile = homeFile.resolve("lib");
modulesFile = homeFile.resolve("modules");
final Settings.Builder finalSettings = Settings.builder().put(settings);
if (PATH_DATA_SETTING.exists(settings)) {
if (dataPathUsesList(settings)) {
finalSettings.putList(PATH_DATA_SETTING.getKey(), Arrays.stream(dataFiles).map(Path::toString).toList());
} else {
assert dataFiles.length == 1;
finalSettings.put(PATH_DATA_SETTING.getKey(), dataFiles[0]);
}
}
finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile);
finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile.toString());
if (PATH_REPO_SETTING.exists(settings)) {
finalSettings.putList(Environment.PATH_REPO_SETTING.getKey(), Arrays.stream(repoFiles).map(Path::toString).toList());
}
if (PATH_SHARED_DATA_SETTING.exists(settings)) {
assert sharedDataFile != null;
finalSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataFile.toString());
}
this.settings = finalSettings.build();
}
/**
* The settings used to build this environment.
*/
public Settings settings() {
return this.settings;
}
/**
* The data location.
*/
public Path[] dataFiles() {
return dataFiles;
}
/**
* The shared data location
*/
public Path sharedDataFile() {
return sharedDataFile;
}
/**
* The shared filesystem repo locations.
*/
public Path[] repoFiles() {
return repoFiles;
}
/**
* Resolves the specified location against the list of configured repository roots
*
* If the specified location doesn't match any of the roots, returns null.
*/
public Path resolveRepoFile(String location) {
return PathUtils.get(repoFiles, location);
}
/**
* Checks whether the specified URL points to the local file system and, if it does, resolves it
* against the list of configured repository roots
*
* If the specified URL doesn't match any of the roots, returns null.
*/
public URL resolveRepoURL(URL url) {
try {
if ("file".equalsIgnoreCase(url.getProtocol())) {
if (url.getHost() == null || "".equals(url.getHost())) {
// only local file urls are supported
Path path = PathUtils.get(repoFiles, url.toURI());
if (path == null) {
// Couldn't resolve against known repo locations
return null;
}
// Normalize URL
return path.toUri().toURL();
}
return null;
} else if ("jar".equals(url.getProtocol())) {
String file = url.getFile();
int pos = file.indexOf("!/");
if (pos < 0) {
return null;
}
String jarTail = file.substring(pos);
String filePath = file.substring(0, pos);
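// e.g. for "jar:file:/repos/backup.zip!/some/index" the file part is "file:/repos/backup.zip!/some/index",
// giving filePath "file:/repos/backup.zip" and jarTail "!/some/index" (illustrative values)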
URL internalUrl = new URL(filePath);
URL normalizedUrl = resolveRepoURL(internalUrl);
if (normalizedUrl == null) {
return null;
}
return new URL("jar", "", normalizedUrl.toExternalForm() + jarTail);
} else {
// It's not a file or jar URL and it didn't match the allowed list - reject
return null;
}
} catch (MalformedURLException ex) {
// cannot make sense of this file url
return null;
} catch (URISyntaxException ex) {
return null;
}
}
// TODO: rename all these "file" methods to "dir"
/**
* The config directory.
*/
public Path configFile() {
return configFile;
}
public Path pluginsFile() {
return pluginsFile;
}
public Path binFile() {
return binFile;
}
public Path libFile() {
return libFile;
}
public Path modulesFile() {
return modulesFile;
}
public Path logsFile() {
return logsFile;
}
/** Path to the default temp directory used by the JDK */
public Path tmpFile() {
return tmpFile;
}
/** Ensure the configured temp directory is a valid directory */
public void validateTmpFile() throws IOException {
validateTemporaryDirectory("Temporary directory", tmpFile);
}
/**
* Ensure the temp directories needed for JNA are set up correctly.
*/
public void validateNativesConfig() throws IOException {
validateTmpFile();
if (Constants.LINUX) {
validateTemporaryDirectory(LIBFFI_TMPDIR_ENVIRONMENT_VARIABLE + " environment variable", getLibffiTemporaryDirectory());
}
}
private static void validateTemporaryDirectory(String description, Path path) throws IOException {
if (path == null) {
throw new NullPointerException(description + " was not specified");
}
if (Files.exists(path) == false) {
throw new FileNotFoundException(description + " [" + path + "] does not exist or is not accessible");
}
if (Files.isDirectory(path) == false) {
throw new IOException(description + " [" + path + "] is not a directory");
}
}
private static final String LIBFFI_TMPDIR_ENVIRONMENT_VARIABLE = "LIBFFI_TMPDIR";
@SuppressForbidden(reason = "using PathUtils#get since libffi resolves paths without interference from the JVM")
private static Path getLibffiTemporaryDirectory() {
final String environmentVariable = System.getenv(LIBFFI_TMPDIR_ENVIRONMENT_VARIABLE);
if (environmentVariable == null) {
return null;
}
// Explicitly resolve into an absolute path since the working directory might be different from the one in which we were launched
// and it would be confusing to report that the given relative path doesn't exist simply because it's being resolved relative to a
// different location than the one the user expects.
final String workingDirectory = System.getProperty("user.dir");
if (workingDirectory == null) {
assert false;
return null;
}
return PathUtils.get(workingDirectory).resolve(environmentVariable);
}
/** Returns true if the data path is a list, false otherwise */
public static boolean dataPathUsesList(Settings settings) {
if (settings.hasValue(PATH_DATA_SETTING.getKey()) == false) {
return false;
}
String rawDataPath = settings.get(PATH_DATA_SETTING.getKey());
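// a list value surfaces either as a JSON-style array string (e.g. "[/a,/b]") or as a
// comma-separated string (e.g. "/a,/b"), hence the two checks below (illustrative examples)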
return rawDataPath.startsWith("[") || rawDataPath.contains(",");
}
public static FileStore getFileStore(final Path path) throws IOException {
return new ESFileStore(Files.getFileStore(path));
}
public static long getUsableSpace(Path path) throws IOException {
long freeSpaceInBytes = Environment.getFileStore(path).getUsableSpace();
assert freeSpaceInBytes >= 0;
return freeSpaceInBytes;
}
/**
* Asserts that the two environments are equivalent in everything the environment cares about (i.e., everything except the
* settings object, which may contain different settings)
*/
public static void assertEquivalent(Environment actual, Environment expected) {
assertEquals(actual.dataFiles(), expected.dataFiles(), "dataFiles");
assertEquals(actual.repoFiles(), expected.repoFiles(), "repoFiles");
assertEquals(actual.configFile(), expected.configFile(), "configFile");
assertEquals(actual.pluginsFile(), expected.pluginsFile(), "pluginsFile");
assertEquals(actual.binFile(), expected.binFile(), "binFile");
assertEquals(actual.libFile(), expected.libFile(), "libFile");
assertEquals(actual.modulesFile(), expected.modulesFile(), "modulesFile");
assertEquals(actual.logsFile(), expected.logsFile(), "logsFile");
assertEquals(actual.tmpFile(), expected.tmpFile(), "tmpFile");
}
private static void assertEquals(Object actual, Object expected, String name) {
assert Objects.deepEquals(actual, expected) : "actual " + name + " [" + actual + "] is different from [" + expected + "]";
}
}
// Source: elastic/elasticsearch :: server/src/main/java/org/elasticsearch/env/Environment.java
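
A minimal sketch (not from the repository) of how the Environment class above resolves its directories: path.home is mandatory, and config, data, logs, and the rest default to subdirectories of the home path unless overridden.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

public class EnvironmentSketch {
    public static void main(String[] args) {
        // path.home is required; the constructor throws IllegalStateException without it
        Settings settings = Settings.builder()
            .put("path.home", "/opt/elasticsearch")
            .putList("path.data", "/mnt/data1", "/mnt/data2")
            .build();
        // a null configPath means the config directory defaults to <home>/config
        Environment env = new Environment(settings, null);
        System.out.println(env.configFile());       // /opt/elasticsearch/config
        System.out.println(env.dataFiles().length); // 2, taken from path.data
        System.out.println(env.logsFile());         // /opt/elasticsearch/logs (default)
    }
}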
/*
* @notice
*
* Copyright 2013 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* =============================================================================
* Modifications copyright Elasticsearch B.V.
*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.http;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.RestUtils;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_EXPOSE_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
/**
* This file is forked from the https://netty.io project. In particular it combines the following three
* files: io.netty.handler.codec.http.cors.CorsHandler, io.netty.handler.codec.http.cors.CorsConfig, and
* io.netty.handler.codec.http.cors.CorsConfigBuilder.
*
* It modifies the original netty code to operate on Elasticsearch http request/response abstractions.
* Additionally, it removes CORS features that are not used by Elasticsearch.
*/
public class CorsHandler {
public static final String ANY_ORIGIN = "*";
public static final String ORIGIN = "origin";
public static final String DATE = "date";
public static final String VARY = "vary";
public static final String HOST = "host";
public static final String ACCESS_CONTROL_REQUEST_METHOD = "access-control-request-method";
public static final String ACCESS_CONTROL_ALLOW_HEADERS = "access-control-allow-headers";
public static final String ACCESS_CONTROL_ALLOW_CREDENTIALS = "access-control-allow-credentials";
public static final String ACCESS_CONTROL_ALLOW_METHODS = "access-control-allow-methods";
public static final String ACCESS_CONTROL_ALLOW_ORIGIN = "access-control-allow-origin";
public static final String ACCESS_CONTROL_MAX_AGE = "access-control-max-age";
public static final String ACCESS_CONTROL_EXPOSE_HEADERS = "access-control-expose-headers";
private static final Pattern SCHEME_PATTERN = Pattern.compile("^https?://");
private static final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss O", Locale.ENGLISH);
private final Config config;
public CorsHandler(Config config) {
this.config = config;
}
public HttpResponse handleInbound(HttpRequest request) {
if (config.isCorsSupportEnabled()) {
if (isPreflightRequest(request)) {
return handlePreflight(request);
}
if (validateOrigin(request) == false) {
return forbidden(request);
}
}
return null;
}
public void setCorsResponseHeaders(final HttpRequest httpRequest, final HttpResponse httpResponse) {
if (config.isCorsSupportEnabled() == false) {
return;
}
if (setOrigin(httpRequest, httpResponse)) {
setAllowCredentials(httpResponse);
setExposeHeaders(httpResponse);
}
}
private HttpResponse handlePreflight(final HttpRequest request) {
final HttpResponse response = request.createResponse(RestStatus.OK, BytesArray.EMPTY);
if (setOrigin(request, response)) {
setAllowMethods(response);
setAllowHeaders(response);
setAllowCredentials(response);
setMaxAge(response);
setPreflightHeaders(response);
return response;
} else {
return forbidden(request);
}
}
private static HttpResponse forbidden(final HttpRequest request) {
HttpResponse response = request.createResponse(RestStatus.FORBIDDEN, BytesArray.EMPTY);
response.addHeader("content-length", "0");
return response;
}
private static boolean isSameOrigin(final String origin, final String host) {
if (Strings.isNullOrEmpty(host) == false) {
// strip protocol from origin
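// e.g. origin "https://localhost:9200" becomes "localhost:9200", matching a host header of "localhost:9200" (illustrative)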
final String originDomain = SCHEME_PATTERN.matcher(origin).replaceFirst("");
if (host.equals(originDomain)) {
return true;
}
}
return false;
}
private static void setPreflightHeaders(final HttpResponse response) {
response.addHeader(CorsHandler.DATE, dateTimeFormatter.format(ZonedDateTime.now(ZoneOffset.UTC)));
response.addHeader("content-length", "0");
}
private boolean setOrigin(final HttpRequest request, final HttpResponse response) {
String origin = getOrigin(request);
if (Strings.isNullOrEmpty(origin) == false) {
if (config.isAnyOriginSupported()) {
if (config.isCredentialsAllowed()) {
setAllowOrigin(response, origin);
setVaryHeader(response);
} else {
setAllowOrigin(response, ANY_ORIGIN);
}
return true;
} else if (config.isOriginAllowed(origin) || isSameOrigin(origin, getHost(request))) {
setAllowOrigin(response, origin);
setVaryHeader(response);
return true;
}
}
return false;
}
private boolean validateOrigin(final HttpRequest request) {
if (config.isAnyOriginSupported()) {
return true;
}
final String origin = getOrigin(request);
if (Strings.isNullOrEmpty(origin)) {
// No origin header, so this is not a CORS request and there is nothing to validate.
return true;
}
// if the origin is the same as the host of the request, then allow
if (isSameOrigin(origin, getHost(request))) {
return true;
}
return config.isOriginAllowed(origin);
}
private static String getOrigin(HttpRequest request) {
List<String> headers = request.getHeaders().get(ORIGIN);
if (headers == null || headers.isEmpty()) {
return null;
} else {
return headers.get(0);
}
}
private static String getHost(HttpRequest request) {
List<String> headers = request.getHeaders().get(HOST);
if (headers == null || headers.isEmpty()) {
return null;
} else {
return headers.get(0);
}
}
private static boolean isPreflightRequest(final HttpRequest request) {
final Map<String, List<String>> headers = request.getHeaders();
return request.method().equals(RestRequest.Method.OPTIONS)
&& headers.containsKey(ORIGIN)
&& headers.containsKey(ACCESS_CONTROL_REQUEST_METHOD);
}
private static void setVaryHeader(final HttpResponse response) {
response.addHeader(VARY, ORIGIN);
}
private static void setAllowOrigin(final HttpResponse response, final String origin) {
response.addHeader(ACCESS_CONTROL_ALLOW_ORIGIN, origin);
}
private void setAllowMethods(final HttpResponse response) {
for (RestRequest.Method method : config.allowedRequestMethods()) {
response.addHeader(ACCESS_CONTROL_ALLOW_METHODS, method.name().trim());
}
}
private void setAllowHeaders(final HttpResponse response) {
for (String header : config.allowedRequestHeaders) {
response.addHeader(ACCESS_CONTROL_ALLOW_HEADERS, header);
}
}
private void setExposeHeaders(final HttpResponse response) {
for (String header : config.accessControlExposeHeaders) {
response.addHeader(ACCESS_CONTROL_EXPOSE_HEADERS, header);
}
}
private void setAllowCredentials(final HttpResponse response) {
if (config.isCredentialsAllowed()) {
response.addHeader(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}
private void setMaxAge(final HttpResponse response) {
response.addHeader(ACCESS_CONTROL_MAX_AGE, Long.toString(config.maxAge));
}
public static class Config {
private final boolean enabled;
private final Optional<Set<String>> origins;
private final Optional<Pattern> pattern;
private final boolean anyOrigin;
private final boolean credentialsAllowed;
private final Set<RestRequest.Method> allowedRequestMethods;
private final Set<String> allowedRequestHeaders;
private final Set<String> accessControlExposeHeaders;
private final long maxAge;
public Config(Builder builder) {
this.enabled = builder.enabled;
origins = builder.origins.map(HashSet::new);
pattern = builder.pattern;
anyOrigin = builder.anyOrigin;
this.credentialsAllowed = builder.allowCredentials;
this.allowedRequestMethods = Collections.unmodifiableSet(builder.requestMethods);
this.allowedRequestHeaders = Collections.unmodifiableSet(builder.requestHeaders);
this.accessControlExposeHeaders = Collections.unmodifiableSet(builder.accessControlExposeHeaders);
this.maxAge = builder.maxAge;
}
public boolean isCorsSupportEnabled() {
return enabled;
}
public boolean isAnyOriginSupported() {
return anyOrigin;
}
public boolean isOriginAllowed(String origin) {
if (origins.isPresent()) {
return origins.get().contains(origin);
} else if (pattern.isPresent()) {
return pattern.get().matcher(origin).matches();
}
return false;
}
public boolean isCredentialsAllowed() {
return credentialsAllowed;
}
public Set<RestRequest.Method> allowedRequestMethods() {
return allowedRequestMethods;
}
public Set<String> allowedRequestHeaders() {
return allowedRequestHeaders;
}
public long maxAge() {
return maxAge;
}
public Optional<Set<String>> origins() {
return origins;
}
@Override
public String toString() {
return "Config{"
+ "enabled="
+ enabled
+ ", origins="
+ origins
+ ", pattern="
+ pattern
+ ", anyOrigin="
+ anyOrigin
+ ", credentialsAllowed="
+ credentialsAllowed
+ ", allowedRequestMethods="
+ allowedRequestMethods
+ ", allowedRequestHeaders="
+ allowedRequestHeaders
+ ", accessControlExposeHeaders="
+ accessControlExposeHeaders
+ ", maxAge="
+ maxAge
+ '}';
}
private static class Builder {
private boolean enabled = true;
private Optional<Set<String>> origins;
private Optional<Pattern> pattern;
private final boolean anyOrigin;
private boolean allowCredentials = false;
long maxAge;
private final Set<RestRequest.Method> requestMethods = EnumSet.noneOf(RestRequest.Method.class);
private final Set<String> requestHeaders = new HashSet<>();
private final Set<String> accessControlExposeHeaders = new HashSet<>();
private Builder() {
anyOrigin = true;
origins = Optional.empty();
pattern = Optional.empty();
}
private Builder(final String... origins) {
this.origins = Optional.of(new LinkedHashSet<>(Arrays.asList(origins)));
pattern = Optional.empty();
anyOrigin = false;
}
private Builder(final Pattern pattern) {
this.pattern = Optional.of(pattern);
origins = Optional.empty();
anyOrigin = false;
}
static Builder forOrigins(final String... origins) {
return new Builder(origins);
}
static Builder forAnyOrigin() {
return new Builder();
}
static Builder forPattern(Pattern pattern) {
return new Builder(pattern);
}
Builder allowCredentials() {
this.allowCredentials = true;
return this;
}
public Builder allowedRequestMethods(RestRequest.Method[] methods) {
requestMethods.addAll(Arrays.asList(methods));
return this;
}
public Builder maxAge(int maxAge) {
this.maxAge = maxAge;
return this;
}
public Builder allowedRequestHeaders(String[] headers) {
requestHeaders.addAll(Arrays.asList(headers));
return this;
}
public Builder accessControlExposeHeaders(String[] headers) {
accessControlExposeHeaders.addAll(Arrays.asList(headers));
return this;
}
public Config build() {
return new Config(this);
}
}
}
public static CorsHandler disabled() {
Config.Builder builder = new Config.Builder();
builder.enabled = false;
return new CorsHandler(new Config(builder));
}
public static Config buildConfig(Settings settings) {
if (SETTING_CORS_ENABLED.get(settings) == false) {
Config.Builder builder = new Config.Builder();
builder.enabled = false;
return new Config(builder);
}
String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);
final CorsHandler.Config.Builder builder;
if (Strings.isNullOrEmpty(origin)) {
builder = CorsHandler.Config.Builder.forOrigins();
} else if (origin.equals(CorsHandler.ANY_ORIGIN)) {
builder = CorsHandler.Config.Builder.forAnyOrigin();
} else {
try {
Pattern p = RestUtils.checkCorsSettingForRegex(origin);
if (p == null) {
builder = CorsHandler.Config.Builder.forOrigins(RestUtils.corsSettingAsArray(origin));
} else {
builder = CorsHandler.Config.Builder.forPattern(p);
}
} catch (PatternSyntaxException e) {
throw new SettingsException("Bad regex in [" + SETTING_CORS_ALLOW_ORIGIN.getKey() + "]: [" + origin + "]", e);
}
}
if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {
builder.allowCredentials();
}
String[] strMethods = Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_METHODS.get(settings), ",");
RestRequest.Method[] methods = Arrays.stream(strMethods)
.map(s -> s.toUpperCase(Locale.ENGLISH))
.map(RestRequest.Method::valueOf)
.toArray(RestRequest.Method[]::new);
Config config = builder.allowedRequestMethods(methods)
.maxAge(SETTING_CORS_MAX_AGE.get(settings))
.allowedRequestHeaders(Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_HEADERS.get(settings), ","))
.accessControlExposeHeaders(Strings.tokenizeToStringArray(SETTING_CORS_EXPOSE_HEADERS.get(settings), ","))
.build();
return config;
}
public static CorsHandler fromSettings(Settings settings) {
return new CorsHandler(buildConfig(settings));
}
}
// Source: elastic/elasticsearch :: server/src/main/java/org/elasticsearch/http/CorsHandler.java
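
A hedged sketch of how the CorsHandler above is meant to be wired into request handling. The http.cors.* setting keys correspond to the HttpTransportSettings imports above; the dispatch step and the request/response instances are hypothetical stand-ins.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.CorsHandler;
import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpResponse;

class CorsWiringSketch {
    static final CorsHandler CORS = CorsHandler.fromSettings(
        Settings.builder()
            .put("http.cors.enabled", true)
            .put("http.cors.allow-origin", "https://kibana.example.org")
            .build()
    );

    // Gate an inbound request through CORS before normal dispatch.
    static HttpResponse handle(HttpRequest request) {
        HttpResponse early = CORS.handleInbound(request);
        if (early != null) {
            return early; // a preflight answer, or 403 for a disallowed origin
        }
        HttpResponse response = dispatch(request);
        CORS.setCorsResponseHeaders(request, response); // decorate the real response
        return response;
    }

    static HttpResponse dispatch(HttpRequest request) {
        throw new UnsupportedOperationException("stand-in for real request dispatch");
    }
}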
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch;
import org.elasticsearch.core.Assertions;
import org.elasticsearch.core.UpdateForV9;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.IntFunction;
/**
* <p>Transport version is used to coordinate compatible wire protocol communication between nodes, at a fine-grained level. This replaces
* and supersedes the old Version constants.</p>
*
* <p>Before adding a new version constant, please read the block comment at the end of the list of constants.</p>
*/
public class TransportVersions {
/*
* NOTE: IntelliJ lies!
* This set is used during class construction, referenced by the def method below.
* When all the transport version constants have been registered, the set is cleared & never touched again.
*/
static TreeSet<Integer> IDS = new TreeSet<>();
static TransportVersion def(int id) {
if (IDS == null) throw new IllegalStateException("The IDS set needs to be present to call this method");
if (IDS.add(id) == false) {
throw new IllegalArgumentException("Version id " + id + " defined twice");
}
if (id < IDS.last()) {
throw new IllegalArgumentException("Version id " + id + " is not defined in the right location. Keep constants sorted");
}
return new TransportVersion(id);
}
@UpdateForV9 // remove the transport versions with which v9 will not need to interact
public static final TransportVersion ZERO = def(0);
public static final TransportVersion V_7_0_0 = def(7_00_00_99);
public static final TransportVersion V_7_0_1 = def(7_00_01_99);
public static final TransportVersion V_7_1_0 = def(7_01_00_99);
public static final TransportVersion V_7_2_0 = def(7_02_00_99);
public static final TransportVersion V_7_2_1 = def(7_02_01_99);
public static final TransportVersion V_7_3_0 = def(7_03_00_99);
public static final TransportVersion V_7_3_2 = def(7_03_02_99);
public static final TransportVersion V_7_4_0 = def(7_04_00_99);
public static final TransportVersion V_7_5_0 = def(7_05_00_99);
public static final TransportVersion V_7_6_0 = def(7_06_00_99);
public static final TransportVersion V_7_7_0 = def(7_07_00_99);
public static final TransportVersion V_7_8_0 = def(7_08_00_99);
public static final TransportVersion V_7_8_1 = def(7_08_01_99);
public static final TransportVersion V_7_9_0 = def(7_09_00_99);
public static final TransportVersion V_7_10_0 = def(7_10_00_99);
public static final TransportVersion V_7_10_1 = def(7_10_01_99);
public static final TransportVersion V_7_11_0 = def(7_11_00_99);
public static final TransportVersion V_7_12_0 = def(7_12_00_99);
public static final TransportVersion V_7_13_0 = def(7_13_00_99);
public static final TransportVersion V_7_14_0 = def(7_14_00_99);
public static final TransportVersion V_7_15_0 = def(7_15_00_99);
public static final TransportVersion V_7_15_1 = def(7_15_01_99);
public static final TransportVersion V_7_16_0 = def(7_16_00_99);
public static final TransportVersion V_7_17_0 = def(7_17_00_99);
public static final TransportVersion V_7_17_1 = def(7_17_01_99);
public static final TransportVersion V_7_17_8 = def(7_17_08_99);
public static final TransportVersion V_8_0_0 = def(8_00_00_99);
public static final TransportVersion V_8_1_0 = def(8_01_00_99);
public static final TransportVersion V_8_2_0 = def(8_02_00_99);
public static final TransportVersion V_8_3_0 = def(8_03_00_99);
public static final TransportVersion V_8_4_0 = def(8_04_00_99);
public static final TransportVersion V_8_5_0 = def(8_05_00_99);
public static final TransportVersion V_8_6_0 = def(8_06_00_99);
public static final TransportVersion V_8_6_1 = def(8_06_01_99);
public static final TransportVersion V_8_7_0 = def(8_07_00_99);
public static final TransportVersion V_8_7_1 = def(8_07_01_99);
public static final TransportVersion V_8_8_0 = def(8_08_00_99);
public static final TransportVersion V_8_8_1 = def(8_08_01_99);
/*
* READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS
* Detached transport versions added below here.
*/
public static final TransportVersion V_8_9_X = def(8_500_020);
public static final TransportVersion V_8_10_X = def(8_500_061);
public static final TransportVersion V_8_11_X = def(8_512_00_1);
public static final TransportVersion V_8_12_0 = def(8_560_00_0);
public static final TransportVersion V_8_12_1 = def(8_560_00_1);
public static final TransportVersion V_8_13_0 = def(8_595_00_0);
public static final TransportVersion V_8_13_4 = def(8_595_00_1);
// 8.14.0+
public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0);
public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0);
public static final TransportVersion DATA_STREAM_AUTO_SHARDING_EVENT = def(8_598_00_0);
public static final TransportVersion ADD_FAILURE_STORE_INDICES_OPTIONS = def(8_599_00_0);
public static final TransportVersion ESQL_ENRICH_OPERATOR_STATUS = def(8_600_00_0);
public static final TransportVersion ESQL_SERIALIZE_ARRAY_VECTOR = def(8_601_00_0);
public static final TransportVersion ESQL_SERIALIZE_ARRAY_BLOCK = def(8_602_00_0);
public static final TransportVersion ADD_DATA_STREAM_GLOBAL_RETENTION = def(8_603_00_0);
public static final TransportVersion ALLOCATION_STATS = def(8_604_00_0);
public static final TransportVersion ESQL_EXTENDED_ENRICH_TYPES = def(8_605_00_0);
public static final TransportVersion KNN_EXPLICIT_BYTE_QUERY_VECTOR_PARSING = def(8_606_00_0);
public static final TransportVersion ESQL_EXTENDED_ENRICH_INPUT_TYPE = def(8_607_00_0);
public static final TransportVersion ESQL_SERIALIZE_BIG_VECTOR = def(8_608_00_0);
public static final TransportVersion AGGS_EXCLUDED_DELETED_DOCS = def(8_609_00_0);
public static final TransportVersion ESQL_SERIALIZE_BIG_ARRAY = def(8_610_00_0);
public static final TransportVersion AUTO_SHARDING_ROLLOVER_CONDITION = def(8_611_00_0);
public static final TransportVersion KNN_QUERY_VECTOR_BUILDER = def(8_612_00_0);
public static final TransportVersion USE_DATA_STREAM_GLOBAL_RETENTION = def(8_613_00_0);
public static final TransportVersion ML_COMPLETION_INFERENCE_SERVICE_ADDED = def(8_614_00_0);
public static final TransportVersion ML_INFERENCE_EMBEDDING_BYTE_ADDED = def(8_615_00_0);
public static final TransportVersion ML_INFERENCE_L2_NORM_SIMILARITY_ADDED = def(8_616_00_0);
public static final TransportVersion SEARCH_NODE_LOAD_AUTOSCALING = def(8_617_00_0);
public static final TransportVersion ESQL_ES_SOURCE_OPTIONS = def(8_618_00_0);
public static final TransportVersion ADD_PERSISTENT_TASK_EXCEPTIONS = def(8_619_00_0);
public static final TransportVersion ESQL_REDUCER_NODE_FRAGMENT = def(8_620_00_0);
public static final TransportVersion FAILURE_STORE_ROLLOVER = def(8_621_00_0);
public static final TransportVersion CCR_STATS_API_TIMEOUT_PARAM = def(8_622_00_0);
public static final TransportVersion ESQL_ORDINAL_BLOCK = def(8_623_00_0);
public static final TransportVersion ML_INFERENCE_COHERE_RERANK = def(8_624_00_0);
public static final TransportVersion INDEXING_PRESSURE_DOCUMENT_REJECTIONS_COUNT = def(8_625_00_0);
public static final TransportVersion ALIAS_ACTION_RESULTS = def(8_626_00_0);
public static final TransportVersion HISTOGRAM_AGGS_KEY_SORTED = def(8_627_00_0);
public static final TransportVersion INFERENCE_FIELDS_METADATA = def(8_628_00_0);
public static final TransportVersion ML_INFERENCE_TIMEOUT_ADDED = def(8_629_00_0);
public static final TransportVersion MODIFY_DATA_STREAM_FAILURE_STORES = def(8_630_00_0);
public static final TransportVersion ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT = def(8_631_00_0);
public static final TransportVersion HIGHLIGHTERS_TAGS_ON_FIELD_LEVEL = def(8_632_00_0);
public static final TransportVersion TRACK_FLUSH_TIME_EXCLUDING_WAITING_ON_LOCKS = def(8_633_00_0);
public static final TransportVersion ML_INFERENCE_AZURE_OPENAI_EMBEDDINGS = def(8_634_00_0);
public static final TransportVersion ILM_SHRINK_ENABLE_WRITE = def(8_635_00_0);
public static final TransportVersion GEOIP_CACHE_STATS = def(8_636_00_0);
public static final TransportVersion SHUTDOWN_REQUEST_TIMEOUTS_FIX_8_14 = def(8_636_00_1);
public static final TransportVersion WATERMARK_THRESHOLDS_STATS = def(8_637_00_0);
public static final TransportVersion ENRICH_CACHE_ADDITIONAL_STATS = def(8_638_00_0);
public static final TransportVersion ML_INFERENCE_RATE_LIMIT_SETTINGS_ADDED = def(8_639_00_0);
public static final TransportVersion ML_TRAINED_MODEL_CACHE_METADATA_ADDED = def(8_640_00_0);
public static final TransportVersion TOP_LEVEL_KNN_SUPPORT_QUERY_NAME = def(8_641_00_0);
public static final TransportVersion INDEX_SEGMENTS_VECTOR_FORMATS = def(8_642_00_0);
public static final TransportVersion ADD_RESOURCE_ALREADY_UPLOADED_EXCEPTION = def(8_643_00_0);
public static final TransportVersion ESQL_MV_ORDERING_SORTED_ASCENDING = def(8_644_00_0);
public static final TransportVersion ESQL_PAGE_MAPPING_TO_ITERATOR = def(8_645_00_0);
public static final TransportVersion BINARY_PIT_ID = def(8_646_00_0);
public static final TransportVersion SECURITY_ROLE_MAPPINGS_IN_CLUSTER_STATE = def(8_647_00_0);
public static final TransportVersion ESQL_REQUEST_TABLES = def(8_648_00_0);
public static final TransportVersion ROLE_REMOTE_CLUSTER_PRIVS = def(8_649_00_0);
public static final TransportVersion NO_GLOBAL_RETENTION_FOR_SYSTEM_DATA_STREAMS = def(8_650_00_0);
public static final TransportVersion SHUTDOWN_REQUEST_TIMEOUTS_FIX = def(8_651_00_0);
public static final TransportVersion INDEXING_PRESSURE_REQUEST_REJECTIONS_COUNT = def(8_652_00_0);
public static final TransportVersion ROLLUP_USAGE = def(8_653_00_0);
public static final TransportVersion SECURITY_ROLE_DESCRIPTION = def(8_654_00_0);
public static final TransportVersion ML_INFERENCE_AZURE_OPENAI_COMPLETIONS = def(8_655_00_0);
public static final TransportVersion JOIN_STATUS_AGE_SERIALIZATION = def(8_656_00_0);
public static final TransportVersion ML_RERANK_DOC_OPTIONAL = def(8_657_00_0);
public static final TransportVersion FAILURE_STORE_FIELD_PARITY = def(8_658_00_0);
public static final TransportVersion ML_INFERENCE_AZURE_AI_STUDIO = def(8_659_00_0);
public static final TransportVersion ML_INFERENCE_COHERE_COMPLETION_ADDED = def(8_660_00_0);
public static final TransportVersion ESQL_REMOVE_ES_SOURCE_OPTIONS = def(8_661_00_0);
public static final TransportVersion NODE_STATS_INGEST_BYTES = def(8_662_00_0);
/*
* STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _
* / ___|_ _/ _ \| _ \| | | _ \| ____| / \ | _ \ |_ _| | | |_ _/ ___| | ___|_ _| _ \/ ___|_ _| |
* \___ \ | || | | | |_) | | | |_) | _| / _ \ | | | | | | | |_| || |\___ \ | |_ | || |_) \___ \ | | | |
* ___) || || |_| | __/|_| | _ <| |___ / ___ \| |_| | | | | _ || | ___) | | _| | || _ < ___) || | |_|
* |____/ |_| \___/|_| (_) |_| \_\_____/_/ \_\____/ |_| |_| |_|___|____/ |_| |___|_| \_\____/ |_| (_)
*
* A new transport version should be added EVERY TIME a change is made to the serialization protocol of one or more classes. Each
* transport version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_8_1).
*
* ADDING A TRANSPORT VERSION
* To add a new transport version, add a new constant at the bottom of the list, above this comment. Don't add other lines,
* comments, etc. The version id has the following layout:
*
* M_NNN_SS_P
*
* M - The major version of Elasticsearch
* NNN - The server version part
* SS - The serverless version part. It should always be 00 here, it is used by serverless only.
* P - The patch version part
*
* To determine the id of the next TransportVersion constant, do the following:
* - Use the same major version, unless bumping majors
* - Bump the server version part by 1, unless creating a patch version
* - Leave the serverless part as 00
* - Bump the patch part if creating a patch version
*
* If a patch version is created, it should be placed sorted among the other existing constants.
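*
* A worked example (illustrative, not a real constant): with NODE_STATS_INGEST_BYTES = def(8_662_00_0) as the
* highest id above, the next ordinary server change would use def(8_663_00_0), while a patch release on top of
* it would use def(8_662_00_1).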
*
* REVERTING A TRANSPORT VERSION
*
* If you revert a commit with a transport version change, you MUST ensure there is a NEW transport version representing the reverted
* change. DO NOT let the transport version go backwards, it must ALWAYS be incremented.
*
* DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY
*
* If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the
* transport versions known by a particular release ...
*
* git show v8.11.0:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def'
*
* ... or by a particular branch ...
*
* git show 8.11:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def'
*
* ... and you can see which versions were added in between two versions too ...
*
* git diff v8.11.0..main -- server/src/main/java/org/elasticsearch/TransportVersions.java
*
* In branches 8.7-8.10 see server/src/main/java/org/elasticsearch/TransportVersion.java for the equivalent definitions.
*/
/**
* Reference to the earliest transport version that is compatible with this version of the codebase.
* This should be the transport version used by the highest minor version of the previous major.
*/
public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0;
/**
* Reference to the minimum transport version that can be used with CCS.
* This should be the transport version used by the previous minor release.
*/
public static final TransportVersion MINIMUM_CCS_VERSION = V_8_13_0;
static final NavigableMap<Integer, TransportVersion> VERSION_IDS = getAllVersionIds(TransportVersions.class);
// the highest transport version constant defined in this file, used as a fallback for TransportVersion.current()
static final TransportVersion LATEST_DEFINED;
static {
LATEST_DEFINED = VERSION_IDS.lastEntry().getValue();
// see comment on IDS field
// now we've registered all the transport versions, we can clear the set
IDS = null;
}
public static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) {
Map<Integer, String> versionIdFields = new HashMap<>();
NavigableMap<Integer, TransportVersion> builder = new TreeMap<>();
Set<String> ignore = Set.of("ZERO", "CURRENT", "MINIMUM_COMPATIBLE", "MINIMUM_CCS_VERSION");
for (Field declaredField : cls.getFields()) {
if (declaredField.getType().equals(TransportVersion.class)) {
String fieldName = declaredField.getName();
if (ignore.contains(fieldName)) {
continue;
}
TransportVersion version;
try {
version = (TransportVersion) declaredField.get(null);
} catch (IllegalAccessException e) {
throw new AssertionError(e);
}
builder.put(version.id(), version);
if (Assertions.ENABLED) {
// check the version number is unique
var sameVersionNumber = versionIdFields.put(version.id(), fieldName);
assert sameVersionNumber == null
: "Versions ["
+ sameVersionNumber
+ "] and ["
+ fieldName
+ "] have the same version number ["
+ version.id()
+ "]. Each TransportVersion should have a different version number";
}
}
}
return Collections.unmodifiableNavigableMap(builder);
}
static Collection<TransportVersion> getAllVersions() {
return VERSION_IDS.values();
}
static final IntFunction<String> VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(TransportVersions.class);
// no instance
private TransportVersions() {}
}
// Source: elastic/elasticsearch :: server/src/main/java/org/elasticsearch/TransportVersions.java
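
The constants above are consumed in Writeable implementations as wire-compatibility gates. A hedged sketch follows; the MyStats class and its fields are hypothetical, but the onOrAfter pattern mirrors the one used throughout the codebase (see ClusterInfo below).

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;

class MyStats implements Writeable { // hypothetical example class
    final String nodeName;
    final long ingestBytes;

    MyStats(StreamInput in) throws IOException {
        this.nodeName = in.readString();
        // only read the new field when the sender's protocol knows about it
        this.ingestBytes = in.getTransportVersion().onOrAfter(TransportVersions.NODE_STATS_INGEST_BYTES)
            ? in.readLong()
            : 0L; // default when talking to an older node
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeName);
        // only write the new field when the receiver's protocol knows about it
        if (out.getTransportVersion().onOrAfter(TransportVersions.NODE_STATS_INGEST_BYTES)) {
            out.writeLong(ingestBytes);
        }
    }
}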
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.core.Predicates;
import org.elasticsearch.core.SuppressForbidden;
import java.io.FilePermission;
import java.io.IOException;
import java.net.SocketPermission;
import java.net.URL;
import java.security.CodeSource;
import java.security.Permission;
import java.security.PermissionCollection;
import java.security.Permissions;
import java.security.Policy;
import java.security.ProtectionDomain;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
/** custom policy for union of static and dynamic permissions */
final class ESPolicy extends Policy {
/** template policy file, the one used in tests */
static final String POLICY_RESOURCE = "security.policy";
/** limited policy for scripts */
static final String UNTRUSTED_RESOURCE = "untrusted.policy";
final Policy template;
final Policy untrusted;
final Policy system;
final PermissionCollection dynamic;
final PermissionCollection dataPathPermission;
final PermissionCollection forbiddenFilePermission;
final Map<String, Policy> plugins;
ESPolicy(
Map<String, URL> codebases,
PermissionCollection dynamic,
Map<String, Policy> plugins,
boolean filterBadDefaults,
List<FilePermission> dataPathPermissions,
List<FilePermission> forbiddenFilePermissions
) {
this.template = PolicyUtil.readPolicy(getClass().getResource(POLICY_RESOURCE), codebases);
this.dataPathPermission = createPermission(dataPathPermissions);
this.forbiddenFilePermission = createPermission(forbiddenFilePermissions);
this.untrusted = PolicyUtil.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), Collections.emptyMap());
if (filterBadDefaults) {
this.system = new SystemPolicy(Policy.getPolicy());
} else {
this.system = Policy.getPolicy();
}
this.dynamic = dynamic;
this.plugins = plugins;
}
private static PermissionCollection createPermission(List<FilePermission> permissions) {
PermissionCollection coll = null;
for (FilePermission permission : permissions) {
if (coll == null) {
coll = permission.newPermissionCollection();
}
coll.add(permission);
}
if (coll == null) {
coll = new Permissions();
}
coll.setReadOnly();
return coll;
}
@Override
@SuppressForbidden(reason = "fast equals check is desired")
public boolean implies(ProtectionDomain domain, Permission permission) {
CodeSource codeSource = domain.getCodeSource();
// codesource can be null when reducing privileges via doPrivileged()
if (codeSource == null) {
return false;
}
// completely deny access to specific files that are forbidden
if (forbiddenFilePermission.implies(permission)) {
return false;
}
URL location = codeSource.getLocation();
if (location != null) {
// run scripts with limited permissions
if (BootstrapInfo.UNTRUSTED_CODEBASE.equals(location.getFile())) {
return untrusted.implies(domain, permission);
}
// check for an additional plugin permission: plugin policy is
// only consulted for its codesources.
Policy plugin = plugins.get(location.getFile());
if (plugin != null && plugin.implies(domain, permission)) {
return true;
}
}
// The FilePermission to check access to the path.data is the hottest permission check in
// Elasticsearch, so we explicitly check it here.
if (dataPathPermission.implies(permission)) {
return true;
}
// Special handling for broken Hadoop code: "let me execute or my classes will not load"
// yeah right, REMOVE THIS when hadoop is fixed
if (permission instanceof FilePermission && "<<ALL FILES>>".equals(permission.getName())) {
hadoopHack();
}
// otherwise defer to template + dynamic file permissions
return template.implies(domain, permission) || dynamic.implies(permission) || system.implies(domain, permission);
}
private static void hadoopHack() {
for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
if ("org.apache.hadoop.util.Shell".equals(element.getClassName()) && "runCommand".equals(element.getMethodName())) {
// we found the horrible method: the hack begins!
// force the hadoop code to back down, by throwing an exception that it catches.
rethrow(new IOException("no hadoop, you cannot do this."));
}
}
}
/**
* Classy puzzler to rethrow any checked exception as an unchecked one.
*/
private static class Rethrower<T extends Throwable> {
@SuppressWarnings("unchecked")
private void rethrow(Throwable t) throws T {
throw (T) t;
}
}
/**
* Rethrows <code>t</code> (identical object).
*/
private static void rethrow(Throwable t) {
new Rethrower<Error>().rethrow(t);
}
@Override
public PermissionCollection getPermissions(CodeSource codesource) {
// code should not rely on this method, or at least use it correctly:
// https://bugs.openjdk.java.net/browse/JDK-8014008
// return them a new empty permissions object so jvisualvm etc work
for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
if ("sun.rmi.server.LoaderHandler".equals(element.getClassName()) && "loadClass".equals(element.getMethodName())) {
return new Permissions();
}
}
// return UNSUPPORTED_EMPTY_COLLECTION since it is safe.
return super.getPermissions(codesource);
}
// TODO: remove this hack when insecure defaults are removed from java
/**
* Wraps a bad default permission, applying a pre-implies to any permissions before checking if the wrapped bad default permission
* implies a permission.
*/
private static class BadDefaultPermission extends Permission {
private final Permission badDefaultPermission;
private final Predicate<Permission> preImplies;
/**
* Construct an instance with a pre-implies check to apply to desired permissions.
*
* @param badDefaultPermission the bad default permission to wrap
* @param preImplies a test that is applied to a desired permission before checking if the bad default permission that
* this instance wraps implies the desired permission
*/
BadDefaultPermission(final Permission badDefaultPermission, final Predicate<Permission> preImplies) {
super(badDefaultPermission.getName());
this.badDefaultPermission = badDefaultPermission;
this.preImplies = preImplies;
}
@Override
public final boolean implies(Permission permission) {
return preImplies.test(permission) && badDefaultPermission.implies(permission);
}
@Override
public final boolean equals(Object obj) {
return badDefaultPermission.equals(obj);
}
@Override
public int hashCode() {
return badDefaultPermission.hashCode();
}
@Override
public String getActions() {
return badDefaultPermission.getActions();
}
}
// default policy file states:
// "It is strongly recommended that you either remove this permission
// from this policy file or further restrict it to code sources
// that you specify, because Thread.stop() is potentially unsafe."
// not even sure this method still works...
private static final Permission BAD_DEFAULT_NUMBER_ONE = new BadDefaultPermission(
new RuntimePermission("stopThread"),
Predicates.always()
);
// default policy file states:
// "allows anyone to listen on dynamic ports"
// specified exactly because that is what we want, and fastest since it won't imply any
// expensive checks for the implicit "resolve"
private static final Permission BAD_DEFAULT_NUMBER_TWO = new BadDefaultPermission(
new SocketPermission("localhost:0", "listen"),
// we apply this pre-implies test because some SocketPermission#implies calls do expensive reverse-DNS resolves
p -> p instanceof SocketPermission && p.getActions().contains("listen")
);
/**
* Wraps the Java system policy, filtering out bad default permissions that
* are granted to all domains. Note, before java 8 these were even worse.
*/
static class SystemPolicy extends Policy {
final Policy delegate;
SystemPolicy(Policy delegate) {
this.delegate = delegate;
}
@Override
public boolean implies(ProtectionDomain domain, Permission permission) {
if (BAD_DEFAULT_NUMBER_ONE.implies(permission) || BAD_DEFAULT_NUMBER_TWO.implies(permission)) {
return false;
}
return delegate.implies(domain, permission);
}
}
}
// Source: elastic/elasticsearch :: server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java
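
The "classy puzzler" inside ESPolicy above relies on generic type erasure. Here is the same idiom in isolation, as a standalone sketch (not repository code):

// The unchecked cast to T erases the checked-exception type at compile time, so a
// checked exception such as IOException propagates through a method that declares
// no throws clause - exactly what hadoopHack() needs.
final class SneakyThrowSketch {
    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void sneakyThrow(Throwable t) throws T {
        throw (T) t;
    }

    static void rethrow(Throwable t) {
        SneakyThrowSketch.<RuntimeException>sneakyThrow(t); // the compiler sees an unchecked throw
    }
}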
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import static org.elasticsearch.cluster.routing.ShardRouting.newUnassigned;
import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.REINITIALIZED;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endArray;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
/**
* ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
* and a map of shard ids to shard sizes; see
* <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
* for the key used in the shardSizes map
*/
public class ClusterInfo implements ChunkedToXContent, Writeable {
public static final ClusterInfo EMPTY = new ClusterInfo();
public static final TransportVersion DATA_SET_SIZE_SIZE_VERSION = TransportVersions.V_7_13_0;
public static final TransportVersion DATA_PATH_NEW_KEY_VERSION = TransportVersions.V_8_6_0;
private final Map<String, DiskUsage> leastAvailableSpaceUsage;
private final Map<String, DiskUsage> mostAvailableSpaceUsage;
final Map<String, Long> shardSizes;
final Map<ShardId, Long> shardDataSetSizes;
final Map<NodeAndShard, String> dataPath;
final Map<NodeAndPath, ReservedSpace> reservedSpace;
protected ClusterInfo() {
this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of());
}
/**
* Creates a new ClusterInfo instance.
*
* @param leastAvailableSpaceUsage a node id to disk usage mapping for the path that has the least available space on the node.
* @param mostAvailableSpaceUsage a node id to disk usage mapping for the path that has the most available space on the node.
* @param shardSizes a shard key to size in bytes mapping per shard.
* @param shardDataSetSizes a shard id to data set size in bytes mapping per shard
* @param dataPath the shard routing to datapath mapping
* @param reservedSpace reserved space per shard broken down by node and data path
* @see #shardIdentifierFromRouting
*/
public ClusterInfo(
Map<String, DiskUsage> leastAvailableSpaceUsage,
Map<String, DiskUsage> mostAvailableSpaceUsage,
Map<String, Long> shardSizes,
Map<ShardId, Long> shardDataSetSizes,
Map<NodeAndShard, String> dataPath,
Map<NodeAndPath, ReservedSpace> reservedSpace
) {
this.leastAvailableSpaceUsage = Map.copyOf(leastAvailableSpaceUsage);
this.mostAvailableSpaceUsage = Map.copyOf(mostAvailableSpaceUsage);
this.shardSizes = Map.copyOf(shardSizes);
this.shardDataSetSizes = Map.copyOf(shardDataSetSizes);
this.dataPath = Map.copyOf(dataPath);
this.reservedSpace = Map.copyOf(reservedSpace);
}
public ClusterInfo(StreamInput in) throws IOException {
this.leastAvailableSpaceUsage = in.readImmutableMap(DiskUsage::new);
this.mostAvailableSpaceUsage = in.readImmutableMap(DiskUsage::new);
this.shardSizes = in.readImmutableMap(StreamInput::readLong);
this.shardDataSetSizes = in.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)
? in.readImmutableMap(ShardId::new, StreamInput::readLong)
: Map.of();
this.dataPath = in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)
? in.readImmutableMap(NodeAndShard::new, StreamInput::readString)
: in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString);
this.reservedSpace = in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)
? in.readImmutableMap(NodeAndPath::new, ReservedSpace::new)
: Map.of();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeWriteable);
out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeWriteable);
out.writeMap(this.shardSizes, (o, v) -> o.writeLong(v == null ? -1 : v));
if (out.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
out.writeMap(this.shardDataSetSizes, StreamOutput::writeWriteable, StreamOutput::writeLong);
}
if (out.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
out.writeMap(this.dataPath, StreamOutput::writeWriteable, StreamOutput::writeString);
} else {
out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString);
}
if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) {
out.writeMap(this.reservedSpace);
}
}
/**
* This creates a fake ShardRouting from limited info available in NodeAndShard.
* It will not be the same as the real shard; however, this is fine, as this ClusterInfo is only written
* by TransportClusterAllocationExplainAction when handling an allocation explain request with includeDiskInfo
* during an upgrade, and the result is only presented to the user, never consumed by other code.
*/
private static ShardRouting createFakeShardRoutingFromNodeAndShard(NodeAndShard nodeAndShard) {
return newUnassigned(
nodeAndShard.shardId,
true,
RecoverySource.EmptyStoreRecoverySource.INSTANCE,
new UnassignedInfo(REINITIALIZED, "fake"),
ShardRouting.Role.DEFAULT // ok, this is only used prior to DATA_PATH_NEW_KEY_VERSION which has no other roles
).initialize(nodeAndShard.nodeId, null, 0L).moveToStarted(0L);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(startObject("nodes"), Iterators.map(leastAvailableSpaceUsage.entrySet().iterator(), c -> (builder, p) -> {
builder.startObject(c.getKey());
{ // node
builder.field("node_name", c.getValue().nodeName());
builder.startObject("least_available");
{
c.getValue().toShortXContent(builder);
}
builder.endObject(); // end "least_available"
builder.startObject("most_available");
{
DiskUsage most = this.mostAvailableSpaceUsage.get(c.getKey());
if (most != null) {
most.toShortXContent(builder);
}
}
builder.endObject(); // end "most_available"
}
builder.endObject(); // end $nodename
return builder;
}),
singleChunk(
(builder, p) -> builder.endObject() // end "nodes"
.startObject("shard_sizes")
),
Iterators.map(
shardSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue()))
),
singleChunk(
(builder, p) -> builder.endObject() // end "shard_sizes"
.startObject("shard_data_set_sizes")
),
Iterators.map(
shardDataSetSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(
c.getKey() + "_bytes",
c.getKey().toString(),
ByteSizeValue.ofBytes(c.getValue())
)
),
singleChunk(
(builder, p) -> builder.endObject() // end "shard_data_set_sizes"
.startObject("shard_paths")
),
Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())),
singleChunk(
(builder, p) -> builder.endObject() // end "shard_paths"
.startArray("reserved_sizes")
),
Iterators.map(reservedSpace.entrySet().iterator(), c -> (builder, p) -> {
builder.startObject();
{
builder.field("node_id", c.getKey().nodeId);
builder.field("path", c.getKey().path);
c.getValue().toXContent(builder, params);
}
return builder.endObject(); // NodeAndPath
}),
endArray() // end "reserved_sizes"
);
}
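    // A rough sketch of the chunked output shape, reconstructed from the builder calls above
    // (values elided; illustrative only, not authoritative output):
    // {
    //   "nodes": { "<node-id>": { "node_name": ..., "least_available": {...}, "most_available": {...} } },
    //   "shard_sizes": { "<shard-identifier>_bytes": ... },
    //   "shard_data_set_sizes": { "<shard-id>_bytes": ... },
    //   "shard_paths": { "<node-and-shard>": "<path>" },
    //   "reserved_sizes": [ { "node_id": ..., "path": ..., ... } ]
    // }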
/**
* Returns a node id to disk usage mapping for the path that has the least available space on the node.
* Note that this does not take account of reserved space: there may be another path with less available _and unreserved_ space.
*/
public Map<String, DiskUsage> getNodeLeastAvailableDiskUsages() {
return this.leastAvailableSpaceUsage;
}
/**
* Returns a node id to disk usage mapping for the path that has the most available space on the node.
* Note that this does not take account of reserved space: there may be another path with more available _and unreserved_ space.
*/
public Map<String, DiskUsage> getNodeMostAvailableDiskUsages() {
return this.mostAvailableSpaceUsage;
}
/**
* Returns the shard size for the given shardId or <code>null</code> if that metric is not available.
*/
public Long getShardSize(ShardId shardId, boolean primary) {
return shardSizes.get(shardIdentifierFromRouting(shardId, primary));
}
/**
* Returns the shard size for the given shard routing or <code>null</code> if that metric is not available.
*/
public Long getShardSize(ShardRouting shardRouting) {
return getShardSize(shardRouting.shardId(), shardRouting.primary());
}
/**
     * Returns the shard size for the given shard routing or <code>defaultValue</code> if that metric is not available.
*/
public long getShardSize(ShardRouting shardRouting, long defaultValue) {
Long shardSize = getShardSize(shardRouting);
return shardSize == null ? defaultValue : shardSize;
}
/**
     * Returns the shard size for the given shard routing or <code>defaultValue</code> if that metric is not available.
*/
public long getShardSize(ShardId shardId, boolean primary, long defaultValue) {
Long shardSize = getShardSize(shardId, primary);
return shardSize == null ? defaultValue : shardSize;
}
/**
     * Returns the node's absolute data path that the given shard is allocated on, or <code>null</code> if the information is not available.
*/
public String getDataPath(ShardRouting shardRouting) {
return dataPath.get(NodeAndShard.from(shardRouting));
}
public String getDataPath(NodeAndShard nodeAndShard) {
return dataPath.get(nodeAndShard);
}
public Optional<Long> getShardDataSetSize(ShardId shardId) {
return Optional.ofNullable(shardDataSetSizes.get(shardId));
}
/**
* Returns the reserved space for each shard on the given node/path pair
*/
public ReservedSpace getReservedSpace(String nodeId, String dataPath) {
final ReservedSpace result = reservedSpace.get(new NodeAndPath(nodeId, dataPath));
return result == null ? ReservedSpace.EMPTY : result;
}
/**
* Method that incorporates the ShardId for the shard into a string that
* includes a 'p' or 'r' depending on whether the shard is a primary.
*/
public static String shardIdentifierFromRouting(ShardRouting shardRouting) {
return shardIdentifierFromRouting(shardRouting.shardId(), shardRouting.primary());
}
public static String shardIdentifierFromRouting(ShardId shardId, boolean primary) {
return shardId.toString() + "[" + (primary ? "p" : "r") + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterInfo that = (ClusterInfo) o;
return leastAvailableSpaceUsage.equals(that.leastAvailableSpaceUsage)
&& mostAvailableSpaceUsage.equals(that.mostAvailableSpaceUsage)
&& shardSizes.equals(that.shardSizes)
&& shardDataSetSizes.equals(that.shardDataSetSizes)
&& dataPath.equals(that.dataPath)
&& reservedSpace.equals(that.reservedSpace);
}
@Override
public int hashCode() {
return Objects.hash(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, shardDataSetSizes, dataPath, reservedSpace);
}
@Override
public String toString() {
return Strings.toString(this, true, false);
}
// exposed for tests, computed here rather than exposing all the collections separately
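    // The trailing "+ 6" counts the fixed chunks emitted by toXContentChunked: the opening of
    // "nodes", the four section transitions, and the closing of "reserved_sizes".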
int getChunkCount() {
return leastAvailableSpaceUsage.size() + shardSizes.size() + shardDataSetSizes.size() + dataPath.size() + reservedSpace.size() + 6;
}
public record NodeAndShard(String nodeId, ShardId shardId) implements Writeable {
public NodeAndShard {
Objects.requireNonNull(nodeId);
Objects.requireNonNull(shardId);
}
public NodeAndShard(StreamInput in) throws IOException {
this(in.readString(), new ShardId(in));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
shardId.writeTo(out);
}
public static NodeAndShard from(ShardRouting shardRouting) {
return new NodeAndShard(shardRouting.currentNodeId(), shardRouting.shardId());
}
}
/**
* Represents a data path on a node
*/
public record NodeAndPath(String nodeId, String path) implements Writeable {
public NodeAndPath {
Objects.requireNonNull(nodeId);
Objects.requireNonNull(path);
}
public NodeAndPath(StreamInput in) throws IOException {
this(in.readString(), in.readString());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeString(path);
}
}
/**
* Represents the total amount of "reserved" space on a particular data path, together with the set of shards considered.
*/
public record ReservedSpace(long total, Set<ShardId> shardIds) implements Writeable {
public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>());
ReservedSpace(StreamInput in) throws IOException {
this(in.readVLong(), in.readCollectionAsSet(ShardId::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeCollection(shardIds);
}
public boolean containsShardId(ShardId shardId) {
return shardIds.contains(shardId);
}
void toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.field("total", total);
builder.startArray("shards");
{
for (ShardId shardIdCursor : shardIds) {
shardIdCursor.toXContent(builder, params);
}
}
builder.endArray(); // end "shards"
}
public static class Builder {
private long total;
private HashSet<ShardId> shardIds = new HashSet<>();
public ReservedSpace build() {
assert shardIds != null : "already built";
final ReservedSpace reservedSpace = new ReservedSpace(total, shardIds);
shardIds = null;
return reservedSpace;
}
public Builder add(ShardId shardId, long reservedBytes) {
assert shardIds != null : "already built";
assert reservedBytes >= 0 : reservedBytes;
shardIds.add(shardId);
total += reservedBytes;
return this;
}
}
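        // Typical usage (sketch): new ReservedSpace.Builder().add(shardId1, 512L).add(shardId2, 1024L).build()
        // yields a ReservedSpace with total == 1536 covering both shard ids; the assertions above guard
        // against reuse of a builder after build().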
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java |
448 | // Companion Code to the paper "Generative Trees: Adversarial and Copycat" by R. Nock and M.
// Guillame-Bert, in ICML'22
// class to generate data only
import java.io.*;
import java.util.*;
class FeaturePlus {
Feature feat;
int feat_arc_index;
FeaturePlus(Feature f, int fai) {
feat = f;
feat_arc_index = fai;
}
}
class Generate implements Debuggable {
public static Random R = new Random();
public static int SEPARATION_INDEX = 0;
public static String KEY_HELP = "--help",
KEY_GENERATOR_LINK = "-L",
KEY_NUMBER_EXAMPLES = "-N",
KEY_DIRECTORY = "-D",
KEY_PREFIX = "-P",
KEY_X = "-X",
KEY_Y = "-Y",
SEP = "/",
KEY_FORCE_INTEGER_CODING = "-F",
KEY_UNKNOWN_VALUE_CODING = "-U";
public static String KEY_NODES = "@NODES", KEY_ARCS = "@ARCS";
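  // Sketch of the generator file layout, reconstructed from from_file() below (tokens separated by
  // Dataset.KEY_SEPARATION_STRING[SEPARATION_INDEX]; field names here are illustrative):
  //   @NODES
  //   <name> <parent_name> <parent_children_number> <depth> <p_node> <is_leaf>
  //       [<feature_node_index> <feature_name> (<multi_p> <successor_name>)*]  (internal nodes only)
  //   @ARCS
  //   <begin_node_name> <end_node_name> <feature_arc_index> <fname> <fname> <ftype>
  //       (<modality>* if nominal, else <min> <max>)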
String generator_file,
x_name,
y_name,
statistics_experiment,
directory_data,
prefix_data,
generator_name,
unknown_v_coding;
boolean force_int_coding;
int number_ex, x_index, y_index;
Wrapper wrap;
Generate(
String xn,
String yn,
String dir,
String pref,
String gen_name,
boolean force_int_coding,
String unknown_v_coding) {
statistics_experiment = null;
x_name = xn;
y_name = yn;
directory_data = dir;
prefix_data = pref;
generator_name = gen_name;
generator_file = null;
this.force_int_coding = force_int_coding;
if (unknown_v_coding.equals("")) this.unknown_v_coding = "-1";
else this.unknown_v_coding = unknown_v_coding;
wrap = null;
x_index = y_index = -1;
}
public static String help() {
String ret = "";
ret += KEY_HELP + " : example command line\n\n";
ret +=
"java -Xmx10000m Generate -D Datasets/generate/ -P open_policing_hartford -U NA -F true -N"
+ " 1000 -L example-generator_open_policing_hartford.csv\n\n";
    ret += KEY_DIRECTORY + " (String) :: mandatory -- directory in which to find the resources below\n";
ret +=
KEY_PREFIX
+ " (String) :: mandatory -- prefix of the domain (the .csv datafile must be at"
+ " Datasets/generate/open_policing_hartford.csv)\n";
ret +=
KEY_UNKNOWN_VALUE_CODING
+ " (String) :: optional -- representation of 'unknown' value in dataset -- default:"
+ " \"-1\"\n";
ret +=
KEY_FORCE_INTEGER_CODING
+ " (boolean) :: optional -- if true, recognizes integer variables and codes them as"
+ " such (otherwise, codes them as doubles) -- default: false\n";
ret +=
KEY_GENERATOR_LINK
+ " (String) :: mandatory -- file containing the generator to be used (the file must be"
+ " in directory Datasets/generate/open_policing_hartford/)\n";
ret +=
KEY_NUMBER_EXAMPLES
+ " (integer) :: optional -- #examples to be generated (the file name will be at the"
+ " same location as generator, with name ending in '_GeneratedSample.csv'; if"
+ " unspecified, just displays generator)\n";
ret +=
KEY_X
+ " (String) :: optional -- variable name in x value for xy density plot of generated"
+ " data\n";
ret +=
KEY_Y
+ " (String) :: optional -- variable name in y value for xy density plot of generated"
+ " data\n";
return ret;
}
public static void main(String[] arg) {
int i;
String generator_n = "";
int number_ex = -1;
boolean kF = false;
String kD = "", kP = "", kU = "", xn = null, yn = null;
System.out.println("");
System.out.println(
"////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////");
System.out.println(
"// Companion code to ICML'22 paper \"Generative Trees: Adversarial and Copycat\", by"
+ " Richard Nock and Mathieu Guillame-Bert //");
System.out.println(
"// (generating examples from a generative tree; to *train* a generative tree, try 'java"
+ " Wrapper --help') //");
if (arg.length == 0) {
System.out.println("// *No parameters*. Run 'java Generate --help' for more");
System.exit(0);
}
System.out.println(
"// Help & example run: 'java Generate --help' "
+ " //");
System.out.println(
"////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////");
for (i = 0; i < arg.length; i++) {
if (arg[i].equals(KEY_HELP)) {
Dataset.perror(help());
} else if (arg[i].equals(KEY_GENERATOR_LINK)) {
generator_n = arg[i + 1];
} else if (arg[i].equals(KEY_DIRECTORY)) {
kD = arg[i + 1];
} else if (arg[i].equals(KEY_PREFIX)) {
kP = arg[i + 1];
} else if (arg[i].equals(KEY_UNKNOWN_VALUE_CODING)) {
kU = arg[i + 1];
} else if (arg[i].equals(KEY_FORCE_INTEGER_CODING)) {
kF = Boolean.parseBoolean(arg[i + 1]);
} else if (arg[i].equals(KEY_NUMBER_EXAMPLES)) {
number_ex = Integer.parseInt(arg[i + 1]);
} else if (arg[i].equals(KEY_X)) {
        xn = arg[i + 1];
} else if (arg[i].equals(KEY_Y)) {
        yn = arg[i + 1];
}
}
if (generator_n.equals(""))
Dataset.perror("Generate.class :: no sample to generate, check parameters");
if (kD.equals("")) Dataset.perror("Generate.class :: no directory, check parameters");
if (kP.equals("")) Dataset.perror("Generate.class :: no prefix, check parameters");
if (number_ex <= 0) Dataset.warning("Experiment.class :: no example generation");
Generate ee = new Generate(xn, yn, kD, kP, generator_n, kF, kU);
ee.go(number_ex);
System.out.println("\nBibTex:");
System.out.println("@inproceedings{ngbGT,");
System.out.println(" title={Generative Trees: Adversarial and Copycat},");
System.out.println(" author={R. Nock and M. Guillame-Bert},");
System.out.println(" booktitle={39$^{~th}$ International Conference on Machine Learning},");
System.out.println(" year={2022}");
System.out.println("}\n");
}
public void go(int nex) {
number_ex = nex;
wrap = new Wrapper();
wrap.force_integer_coding = force_int_coding;
Unknown_Feature_Value.S_UNKNOWN = unknown_v_coding;
wrap.path_and_name_of_domain_dataset =
directory_data + prefix_data + SEP + prefix_data + ".csv";
wrap.myDomain = new Domain(wrap);
generator_file = directory_data + prefix_data + SEP + generator_name;
System.out.println("Datafile at " + wrap.path_and_name_of_domain_dataset);
System.out.println("Generator at " + generator_file);
Generator_Tree gt = from_file(wrap.myDomain);
System.out.println("GT loaded:");
System.out.println(gt);
if (number_ex > 0) {
System.out.print("\nGenerating " + number_ex + " examples... ");
Vector<Example> v_gen;
if (((x_name != null) && (!x_name.equals(""))) && ((y_name != null) && (!y_name.equals(""))))
v_gen = gt.generate_sample_with_density(number_ex);
else v_gen = gt.generate_sample(number_ex);
String save_sample_csv =
directory_data + prefix_data + SEP + prefix_data + "_GeneratedSample.csv";
String save_sample_density_plot_csv;
System.out.print("ok.\nSaving generated sample in file " + save_sample_csv + " ...");
to_file(v_gen, save_sample_csv, false, -1, -1);
System.out.println(" ok.");
if ((x_name != null) && (y_name != null)) {
save_sample_density_plot_csv =
directory_data
+ prefix_data
+ SEP
+ prefix_data
+ "_GeneratedSample_DENSITY_X_"
+ x_name
+ "_Y_"
+ y_name
+ ".csv";
System.out.print(
"\nSaving generated sample for density plot in file "
+ save_sample_density_plot_csv
+ " ...");
to_file(v_gen, save_sample_density_plot_csv, true, x_index, y_index);
System.out.println(" ok.");
}
}
System.out.println("Stopping...");
wrap.myDomain.myMemoryMonitor.stop();
}
public void to_file(
Vector<Example> v_gen, String nameFile, boolean save_density, int x_index, int y_index) {
FileWriter f;
int i;
try {
f = new FileWriter(nameFile);
if (save_density) {
f.write(
(wrap.myDomain.myDS.domain_feature(x_index)).name
+ Dataset.KEY_SEPARATION_STRING[Dataset.SEPARATION_INDEX]
+ (wrap.myDomain.myDS.domain_feature(y_index)).name
+ Dataset.KEY_SEPARATION_STRING[Dataset.SEPARATION_INDEX]
+ "generated_density");
} else {
for (i = 0; i < wrap.myDomain.myDS.number_domain_features(); i++) {
f.write((wrap.myDomain.myDS.domain_feature(i)).name);
if (i < wrap.myDomain.myDS.number_domain_features() - 1)
f.write(Dataset.KEY_SEPARATION_STRING[Dataset.SEPARATION_INDEX]);
}
}
f.write("\n");
for (i = 0; i < v_gen.size(); i++)
if (save_density)
f.write(((Example) v_gen.elementAt(i)).toStringSaveDensity(x_index, y_index) + "\n");
else f.write(((Example) v_gen.elementAt(i)).toStringSave() + "\n");
f.close();
} catch (IOException e) {
Dataset.perror("Generate.class :: Saving results error in file " + nameFile);
}
}
public Generator_Tree from_file(Domain md) {
Generator_Tree gt = new Generator_Tree(-1, null, -1);
boolean in_nodes = false, in_arcs = false, skip = false;
HashSet<Generator_Node> leaves = new HashSet<>();
Iterator lit;
FileReader e;
BufferedReader br;
StringTokenizer t;
int name,
myParentGenerator_Node_name,
myParentGenerator_Node_children_number,
depth,
i,
j,
index,
feature_node_index,
gt_depth = -1;
double p_node;
boolean is_leaf;
double[] multi_p;
Hashtable<Integer, Vector> nodes_name_to_node_and_succ_indexes_and_parent_index =
new Hashtable<>();
Generator_Node gn, gn_begin_node, gn_end_node, gn_parent_node;
Integer parent_index;
Vector vbundle, vbundle_parent_node, vbundle_begin_node, vbundle_end_node;
Vector<Vector> arc_list_with_arcs_and_nodes_names = new Vector<>();
Vector current_arc_and_nodes_names;
int begin_node_name, end_node_name, feature_arc_index;
Integer ibegin_node_name, iend_node_name, inode_name, iparent_name;
FeaturePlus fga;
Generator_Arc ga;
Feature ff;
Vector<String> modalities;
String fname1, fname2, ftype, dum, n, dumname;
double dmin, dmax;
Enumeration enu;
Vector<Integer> vsucc;
Vector<Double> vmulti_p;
System.out.print("Loading generator at " + generator_file + "... ");
try {
e = new FileReader(generator_file);
br = new BufferedReader(e);
while ((dum = br.readLine()) != null) {
if ((dum.length() == 1)
|| ((dum.length() > 1)
&& (!dum.substring(0, Dataset.KEY_COMMENT.length()).equals(Dataset.KEY_COMMENT)))) {
t = new StringTokenizer(dum, Dataset.KEY_SEPARATION_STRING[Generate.SEPARATION_INDEX]);
n = t.nextToken();
if (n.equals(Generate.KEY_NODES)) {
in_nodes = true;
in_arcs = false;
skip = true;
} else if (n.equals(Generate.KEY_ARCS)) {
in_arcs = true;
in_nodes = false;
skip = true;
}
if ((!skip) && ((in_nodes) || (in_arcs))) {
if ((in_nodes) && (in_arcs)) Dataset.perror("Generate.java :: generator file mixed up");
if (in_nodes) {
name = Integer.parseInt(n);
n = t.nextToken();
myParentGenerator_Node_name = Integer.parseInt(n);
n = t.nextToken();
myParentGenerator_Node_children_number = Integer.parseInt(n);
n = t.nextToken();
depth = Integer.parseInt(n);
if ((gt_depth == -1) || (depth > gt_depth)) gt_depth = depth;
n = t.nextToken();
p_node = Double.parseDouble(n);
n = t.nextToken();
is_leaf = Boolean.parseBoolean(n);
multi_p = null;
vsucc = null;
vmulti_p = null;
feature_node_index = -1;
if (!is_leaf) {
n = t.nextToken();
feature_node_index = Integer.parseInt(n);
dumname = t.nextToken(); // not used
vsucc = new Vector<>();
vmulti_p = new Vector<>();
while (t.hasMoreTokens()) {
n = t.nextToken();
                vmulti_p.addElement(Double.parseDouble(n)); // autoboxed; avoids the deprecated Double constructor
                n = t.nextToken();
                vsucc.addElement(Integer.parseInt(n)); // autoboxed; avoids the deprecated Integer constructor
}
multi_p = new double[vmulti_p.size()];
for (i = 0; i < multi_p.length; i++)
multi_p[i] = ((Double) vmulti_p.elementAt(i)).doubleValue();
}
gn = new Generator_Node(gt, null, myParentGenerator_Node_children_number);
if (name == 1) gt.root = gn;
gn.name = name;
gn.depth = depth;
gn.p_node = p_node;
gn.is_leaf = is_leaf;
gn.multi_p = multi_p;
if (!is_leaf) {
gn.feature_node_index = feature_node_index;
gn.children_arcs = new Generator_Arc[multi_p.length];
} else {
gn.children_arcs = null;
leaves.add(gn);
}
            parent_index = myParentGenerator_Node_name; // autoboxed; avoids the deprecated Integer constructor
vbundle = new Vector();
vbundle.addElement(gn);
vbundle.addElement(vsucc);
vbundle.addElement(parent_index);
            nodes_name_to_node_and_succ_indexes_and_parent_index.put(name, vbundle); // key autoboxed
}
if (in_arcs) {
begin_node_name = Integer.parseInt(n);
n = t.nextToken();
end_node_name = Integer.parseInt(n);
n = t.nextToken();
feature_arc_index = Integer.parseInt(n);
fname1 = t.nextToken();
fname2 = t.nextToken();
if (!fname1.equals(fname2))
Dataset.perror("Generate.java :: feature names " + fname1 + " != " + fname2);
ftype = t.nextToken();
modalities = null;
dmin = dmax = -1.0;
if (Feature.IS_NOMINAL(ftype)) {
modalities = new Vector<>();
while (t.hasMoreTokens()) modalities.addElement(new String(t.nextToken()));
} else {
n = t.nextToken();
dmin = Double.parseDouble(n);
n = t.nextToken();
dmax = Double.parseDouble(n);
}
ff = new Feature(fname1, ftype, modalities, dmin, dmax, false);
fga = new FeaturePlus(ff, feature_arc_index);
current_arc_and_nodes_names = new Vector();
current_arc_and_nodes_names.addElement(fga);
            current_arc_and_nodes_names.addElement(begin_node_name); // autoboxed
            current_arc_and_nodes_names.addElement(end_node_name); // autoboxed
arc_list_with_arcs_and_nodes_names.addElement(current_arc_and_nodes_names);
}
} else if (skip) skip = false;
}
}
e.close();
} catch (IOException eee) {
      System.out.println(
          "Problem loading " + generator_file + " resource file --- check access to the file");
System.exit(0);
}
System.out.print("ok .\nFilling nodes' parents... ");
// fill parents
enu = nodes_name_to_node_and_succ_indexes_and_parent_index.keys();
while (enu.hasMoreElements()) {
inode_name = (Integer) enu.nextElement();
vbundle = nodes_name_to_node_and_succ_indexes_and_parent_index.get(inode_name);
gn = (Generator_Node) vbundle.elementAt(0);
iparent_name = (Integer) vbundle.elementAt(2);
if (iparent_name.intValue() != -1) {
vbundle_parent_node =
nodes_name_to_node_and_succ_indexes_and_parent_index.get(iparent_name);
gn_parent_node = (Generator_Node) vbundle_parent_node.elementAt(0);
gn.myParentGenerator_Node = gn_parent_node;
}
}
System.out.print("ok .\nFilling arcs... ");
// includes arcs in nodes
for (i = 0; i < arc_list_with_arcs_and_nodes_names.size(); i++) {
current_arc_and_nodes_names = (Vector) arc_list_with_arcs_and_nodes_names.elementAt(i);
fga = (FeaturePlus) current_arc_and_nodes_names.elementAt(0);
ibegin_node_name = (Integer) current_arc_and_nodes_names.elementAt(1);
iend_node_name = (Integer) current_arc_and_nodes_names.elementAt(2);
vbundle_begin_node =
nodes_name_to_node_and_succ_indexes_and_parent_index.get(ibegin_node_name);
gn_begin_node = (Generator_Node) vbundle_begin_node.elementAt(0);
vsucc = (Vector) vbundle_begin_node.elementAt(1);
vbundle_end_node = nodes_name_to_node_and_succ_indexes_and_parent_index.get(iend_node_name);
gn_end_node = (Generator_Node) vbundle_end_node.elementAt(0);
ga = new Generator_Arc(gn_begin_node, gn_end_node, fga.feat, fga.feat_arc_index, i);
// arc now complete
j = 0;
index = -1;
do {
if (((Integer) vsucc.elementAt(j)).equals(iend_node_name)) index = j;
j++;
} while ((index == -1) && (j < vsucc.size()));
if (index == -1)
Dataset.perror(
"Generate.java :: index of node # " + iend_node_name + " not found at arc #" + i);
gn_begin_node.children_arcs[index] = ga;
}
// finishing
gt.leaves = leaves;
gt.depth = gt_depth;
gt.number_nodes = nodes_name_to_node_and_succ_indexes_and_parent_index.size();
gt.myBoost = new Boost(wrap.myDomain);
lit = leaves.iterator();
while (lit.hasNext()) {
gn = (Generator_Node) lit.next();
gn.compute_all_features_domain();
}
if ((x_name != null) && (!x_name.equals(""))) {
i = 0;
while ((i < wrap.myDomain.myDS.number_domain_features())
&& (!(wrap.myDomain.myDS.domain_feature(i)).name.equals(x_name))) i++;
      if (i == wrap.myDomain.myDS.number_domain_features()) // avoids an out-of-range access when no feature matches
Dataset.perror("Generate.class :: no feature named " + x_name + " in dataset");
x_index = i;
}
if ((y_name != null) && (!y_name.equals(""))) {
i = 0;
while ((i < wrap.myDomain.myDS.number_domain_features())
&& (!(wrap.myDomain.myDS.domain_feature(i)).name.equals(y_name))) i++;
      if (i == wrap.myDomain.myDS.number_domain_features()) // avoids an out-of-range access when no feature matches
Dataset.perror("Generate.class :: no feature named " + y_name + " in dataset");
y_index = i;
}
System.out.print("ok.\n");
return gt;
}
}
| google-research/google-research | generative_trees/src/Generate.java |
450 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index.store;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
public class StoreStats implements Writeable, ToXContentFragment {
/**
* Sentinel value for cases where the shard does not yet know its reserved size so we must fall back to an estimate, for instance
* prior to receiving the list of files in a peer recovery.
*/
public static final long UNKNOWN_RESERVED_BYTES = -1L;
public static final TransportVersion RESERVED_BYTES_VERSION = TransportVersions.V_7_9_0;
public static final TransportVersion TOTAL_DATA_SET_SIZE_SIZE_VERSION = TransportVersions.V_7_13_0;
private long sizeInBytes;
private long totalDataSetSizeInBytes;
private long reservedSizeInBytes;
public StoreStats() {
}
public StoreStats(StreamInput in) throws IOException {
sizeInBytes = in.readVLong();
if (in.getTransportVersion().onOrAfter(TOTAL_DATA_SET_SIZE_SIZE_VERSION)) {
totalDataSetSizeInBytes = in.readVLong();
} else {
totalDataSetSizeInBytes = sizeInBytes;
}
if (in.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) {
reservedSizeInBytes = in.readZLong();
} else {
reservedSizeInBytes = UNKNOWN_RESERVED_BYTES;
}
}
/**
* @param sizeInBytes the size of the store in bytes
* @param totalDataSetSizeInBytes the size of the total data set in bytes, can differ from sizeInBytes for shards using shared cache
* storage
* @param reservedSize a prediction of how much larger the store is expected to grow, or {@link StoreStats#UNKNOWN_RESERVED_BYTES}.
*/
public StoreStats(long sizeInBytes, long totalDataSetSizeInBytes, long reservedSize) {
assert reservedSize == UNKNOWN_RESERVED_BYTES || reservedSize >= 0 : reservedSize;
this.sizeInBytes = sizeInBytes;
this.totalDataSetSizeInBytes = totalDataSetSizeInBytes;
this.reservedSizeInBytes = reservedSize;
}
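    // Example (sketch): a shard occupying 100 MiB on disk whose full data set is 1 GiB (as can happen
    // with shared cache storage) and whose reserved size is not yet known:
    //   new StoreStats(100L << 20, 1L << 30, StoreStats.UNKNOWN_RESERVED_BYTES)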
public void add(StoreStats stats) {
if (stats == null) {
return;
}
sizeInBytes += stats.sizeInBytes;
totalDataSetSizeInBytes += stats.totalDataSetSizeInBytes;
reservedSizeInBytes = ignoreIfUnknown(reservedSizeInBytes) + ignoreIfUnknown(stats.reservedSizeInBytes);
}
private static long ignoreIfUnknown(long reservedSize) {
return reservedSize == UNKNOWN_RESERVED_BYTES ? 0L : reservedSize;
}
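    // For illustration: when combining stats, an unknown reserved size contributes zero, so adding a
    // store with reservedSizeInBytes == UNKNOWN_RESERVED_BYTES to one with 512 bytes reserved yields
    // a combined reserved size of 512 rather than -1 + 512.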
public long sizeInBytes() {
return sizeInBytes;
}
public ByteSizeValue size() {
return ByteSizeValue.ofBytes(sizeInBytes);
}
public long totalDataSetSizeInBytes() {
return totalDataSetSizeInBytes;
}
public ByteSizeValue totalDataSetSize() {
return ByteSizeValue.ofBytes(totalDataSetSizeInBytes);
}
public long reservedSizeInBytes() {
return reservedSizeInBytes;
}
/**
* A prediction of how much larger this store will eventually grow. For instance, if we are currently doing a peer recovery or restoring
* a snapshot into this store then we can account for the rest of the recovery using this field. A value of {@code -1B} indicates that
* the reserved size is unknown.
*/
public ByteSizeValue getReservedSize() {
return ByteSizeValue.ofBytes(reservedSizeInBytes);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(sizeInBytes);
if (out.getTransportVersion().onOrAfter(TOTAL_DATA_SET_SIZE_SIZE_VERSION)) {
out.writeVLong(totalDataSetSizeInBytes);
}
if (out.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) {
out.writeZLong(reservedSizeInBytes);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.STORE);
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, size());
builder.humanReadableField(Fields.TOTAL_DATA_SET_SIZE_IN_BYTES, Fields.TOTAL_DATA_SET_SIZE, totalDataSetSize());
builder.humanReadableField(Fields.RESERVED_IN_BYTES, Fields.RESERVED, getReservedSize());
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StoreStats that = (StoreStats) o;
return sizeInBytes == that.sizeInBytes
&& totalDataSetSizeInBytes == that.totalDataSetSizeInBytes
&& reservedSizeInBytes == that.reservedSizeInBytes;
}
@Override
public int hashCode() {
return Objects.hash(sizeInBytes, totalDataSetSizeInBytes, reservedSizeInBytes);
}
static final class Fields {
static final String STORE = "store";
static final String SIZE = "size";
static final String SIZE_IN_BYTES = "size_in_bytes";
static final String TOTAL_DATA_SET_SIZE = "total_data_set_size";
static final String TOTAL_DATA_SET_SIZE_IN_BYTES = "total_data_set_size_in_bytes";
static final String RESERVED = "reserved";
static final String RESERVED_IN_BYTES = "reserved_in_bytes";
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/store/StoreStats.java |
451 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.gateway;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.concurrent.atomic.AtomicBoolean;
public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener {
private static final Logger logger = LogManager.getLogger(GatewayService.class);
public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING = Setting.intSetting(
"gateway.expected_data_nodes",
-1,
-1,
Property.NodeScope
);
public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting(
"gateway.recover_after_time",
TimeValue.timeValueMillis(0),
Property.NodeScope
);
public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_data_nodes",
-1,
-1,
Property.NodeScope
);
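    // Hypothetical settings illustrating how the three values interact (see PendingStateRecovery below):
    // with gateway.recover_after_data_nodes: 3, gateway.expected_data_nodes: 5 and
    // gateway.recover_after_time: 5m, nothing happens until 3 data nodes have joined; recovery then
    // runs immediately once 5 have joined, or after the 5m delay otherwise.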
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(
1,
"state not recovered / initialized",
true,
true,
false,
RestStatus.SERVICE_UNAVAILABLE,
ClusterBlockLevel.ALL
);
static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);
private final ShardRoutingRoleStrategy shardRoutingRoleStrategy;
private final ThreadPool threadPool;
private final RerouteService rerouteService;
private final ClusterService clusterService;
private final TimeValue recoverAfterTime;
private final int recoverAfterDataNodes;
private final int expectedDataNodes;
volatile PendingStateRecovery currentPendingStateRecovery;
@Inject
public GatewayService(
final Settings settings,
final RerouteService rerouteService,
final ClusterService clusterService,
final ShardRoutingRoleStrategy shardRoutingRoleStrategy,
final ThreadPool threadPool
) {
this.rerouteService = rerouteService;
this.clusterService = clusterService;
this.shardRoutingRoleStrategy = shardRoutingRoleStrategy;
this.threadPool = threadPool;
this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings);
if (RECOVER_AFTER_TIME_SETTING.exists(settings)) {
recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings);
} else if (expectedDataNodes >= 0) {
recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
} else {
recoverAfterTime = null;
}
this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings);
}
@Override
protected void doStart() {
if (DiscoveryNode.isMasterNode(clusterService.getSettings())) {
// use post applied so that the state will be visible to the background recovery thread we spawn in performStateRecovery
clusterService.addListener(this);
}
}
@Override
protected void doStop() {
clusterService.removeListener(this);
}
@Override
protected void doClose() {}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
if (lifecycle.stoppedOrClosed()) {
return;
}
final ClusterState state = event.state();
final DiscoveryNodes nodes = state.nodes();
if (nodes.isLocalNodeElectedMaster() == false) {
// not our job to recover
return;
}
if (state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
// already recovered
return;
}
// At this point, we know the state is not recovered and this node is qualified for state recovery
// But we still need to check whether a previous one is running already
final long currentTerm = state.term();
final PendingStateRecovery existingPendingStateRecovery = currentPendingStateRecovery;
// Always start a new state recovery if the master term changes
// If there is a previous one still waiting, both will probably run but at most one of them will
// actually make changes to cluster state because either:
        // 1. The previous one recovers the cluster state and the current one is skipped, or
        // 2. The previous one sees a new cluster term and skips its own execution
if (existingPendingStateRecovery == null || existingPendingStateRecovery.expectedTerm < currentTerm) {
currentPendingStateRecovery = new PendingStateRecovery(currentTerm);
}
currentPendingStateRecovery.onDataNodeSize(nodes.getDataNodes().size());
}
/**
* This class manages the cluster state recovery behaviours. It has two major scenarios depending
* on whether {@code recoverAfterDataNodes} is configured.
*
* <p> <b>When</b> {@code recoverAfterDataNodes} is configured:
* <ol>
* <li>Nothing can happen until it is reached
* <li>When {@code recoverAfterDataNodes} is reached, the cluster either:
* <ul>
* <li>Recover immediately when {@code expectedDataNodes} is reached or
* both {@code expectedDataNodes} and {@code recoverAfterTime} are not configured
* <li>Or schedule a recovery with a delay of {@code recoverAfterTime}
* </ul>
     *     <li>The scheduled recovery can be cancelled if {@code recoverAfterDataNodes} drops below the required number
* before the recovery can happen. When this happens, the process goes back to the beginning (step 1).
* <li>The recovery is scheduled only once each time {@code recoverAfterDataNodes} crosses the required number
* </ol>
*
* <p> <b>When</b> {@code recoverAfterDataNodes} is <b>Not</b> configured, the cluster either:
* <ul>
* <li>Recover immediately when {@code expectedDataNodes} is reached or
* both {@code expectedDataNodes} and {@code recoverAfterTime} are not configured
* <li>Or schedule a recovery with a delay of {@code recoverAfterTime}
* </ul>
*/
class PendingStateRecovery {
private final long expectedTerm;
@Nullable
private Scheduler.ScheduledCancellable scheduledRecovery;
private final AtomicBoolean taskSubmitted = new AtomicBoolean();
PendingStateRecovery(long expectedTerm) {
this.expectedTerm = expectedTerm;
}
void onDataNodeSize(int currentDataNodeSize) {
if (recoverAfterDataNodes != -1 && currentDataNodeSize < recoverAfterDataNodes) {
logger.debug(
"not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
currentDataNodeSize,
recoverAfterDataNodes
);
cancelScheduledRecovery();
} else {
maybePerformOrScheduleRecovery(currentDataNodeSize);
}
}
void maybePerformOrScheduleRecovery(int currentDataNodeSize) {
if (expectedDataNodes != -1 && expectedDataNodes <= currentDataNodeSize) {
logger.debug(
"performing state recovery of term [{}], expected data nodes [{}] is reached",
expectedTerm,
expectedDataNodes
);
cancelScheduledRecovery();
runRecoveryImmediately();
} else if (recoverAfterTime == null) {
logger.debug("performing state recovery of term [{}], no delay time is configured", expectedTerm);
cancelScheduledRecovery();
runRecoveryImmediately();
} else {
if (scheduledRecovery == null) {
logger.info(
"delaying initial state recovery for [{}] of term [{}]. expecting [{}] data nodes, but only have [{}]",
recoverAfterTime,
expectedTerm,
expectedDataNodes,
currentDataNodeSize
);
scheduledRecovery = threadPool.schedule(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
logger.warn("delayed state recovery of term [" + expectedTerm + "] failed", e);
}
@Override
protected void doRun() {
final PendingStateRecovery existingPendingStateRecovery = currentPendingStateRecovery;
if (PendingStateRecovery.this == existingPendingStateRecovery) {
runRecoveryImmediately();
} else {
logger.debug(
"skip scheduled state recovery since a new one of term [{}] has started",
existingPendingStateRecovery.expectedTerm
);
}
}
}, recoverAfterTime, threadPool.generic());
} else {
logger.debug("state recovery is in already scheduled for term [{}]", expectedTerm);
}
}
}
void runRecoveryImmediately() {
if (taskSubmitted.compareAndSet(false, true)) {
submitUnbatchedTask(TASK_SOURCE, new RecoverStateUpdateTask(expectedTerm));
} else {
logger.debug("state recovery task is already submitted");
}
}
void cancelScheduledRecovery() {
if (scheduledRecovery != null) {
scheduledRecovery.cancel();
scheduledRecovery = null;
}
}
}
private static final String TASK_SOURCE = "local-gateway-elected-state";
class RecoverStateUpdateTask extends ClusterStateUpdateTask {
private final long expectedTerm;
RecoverStateUpdateTask(long expectedTerm) {
this.expectedTerm = expectedTerm;
}
@Override
public ClusterState execute(final ClusterState currentState) {
if (currentState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
logger.debug("cluster is already recovered");
return currentState;
}
if (expectedTerm != currentState.term()) {
logger.debug("skip state recovery since current term [{}] != expected term [{}]", currentState.term(), expectedTerm);
return currentState;
}
return ClusterStateUpdaters.removeStateNotRecoveredBlock(
ClusterStateUpdaters.updateRoutingTable(currentState, shardRoutingRoleStrategy)
);
}
@Override
public void clusterStateProcessed(final ClusterState oldState, final ClusterState newState) {
logger.info("recovered [{}] indices into cluster_state", newState.metadata().indices().size());
// reset flag even though state recovery completed, to ensure that if we subsequently become leader again based on a
            // not-recovered state, we perform another state recovery.
rerouteService.reroute("state recovered", Priority.NORMAL, ActionListener.noop());
}
@Override
public void onFailure(final Exception e) {
logger.log(
MasterService.isPublishFailureException(e) ? Level.DEBUG : Level.INFO,
() -> "unexpected failure during [" + TASK_SOURCE + "]",
e
);
}
}
// used for testing
TimeValue recoverAfterTime() {
return recoverAfterTime;
}
@SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) {
clusterService.submitUnbatchedStateUpdateTask(source, task);
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/gateway/GatewayService.java |
452 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.SegmentInfos;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Base64;
import java.util.Map;
import java.util.Objects;
/** A class that returns dynamic information about the last commit point of this shard. */
public final class CommitStats implements Writeable, ToXContentFragment {
private final Map<String, String> userData;
private final long generation;
private final String id; // lucene commit id in base 64;
private final int numDocs;
public CommitStats(SegmentInfos segmentInfos) {
// clone the map to protect against concurrent changes
userData = Map.copyOf(segmentInfos.getUserData());
// lucene calls the current generation, last generation.
generation = segmentInfos.getLastGeneration();
id = Base64.getEncoder().encodeToString(segmentInfos.getId());
numDocs = Lucene.getNumDocs(segmentInfos);
}
CommitStats(StreamInput in) throws IOException {
userData = in.readImmutableMap(StreamInput::readString);
generation = in.readLong();
id = in.readOptionalString();
numDocs = in.readInt();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CommitStats that = (CommitStats) o;
return userData.equals(that.userData) && generation == that.generation && Objects.equals(id, that.id) && numDocs == that.numDocs;
}
@Override
public int hashCode() {
return Objects.hash(userData, generation, id, numDocs);
}
public static CommitStats readOptionalCommitStatsFrom(StreamInput in) throws IOException {
return in.readOptionalWriteable(CommitStats::new);
}
public Map<String, String> getUserData() {
return userData;
}
public long getGeneration() {
return generation;
}
    /** base64 version of the commit id (see {@link SegmentInfos#getId()}) */
public String getId() {
return id;
}
/**
     * Returns the number of documents in this commit
*/
public int getNumDocs() {
return numDocs;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(userData, StreamOutput::writeString);
out.writeLong(generation);
out.writeOptionalString(id);
out.writeInt(numDocs);
}
static final class Fields {
static final String GENERATION = "generation";
static final String USER_DATA = "user_data";
static final String ID = "id";
static final String COMMIT = "commit";
static final String NUM_DOCS = "num_docs";
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.COMMIT);
builder.field(Fields.ID, id);
builder.field(Fields.GENERATION, generation);
builder.field(Fields.USER_DATA, userData);
builder.field(Fields.NUM_DOCS, numDocs);
builder.endObject();
return builder;
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/engine/CommitStats.java |
453 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.sharding;
/**
* Basic data structure for each tuple stored in data shards.
*/
public class Data {
private int key;
private String value;
private DataType type;
/**
* Constructor of Data class.
* @param key data key
   * @param value data value
* @param type data type
*/
public Data(final int key, final String value, final DataType type) {
this.key = key;
this.value = value;
this.type = type;
}
public int getKey() {
return key;
}
public void setKey(final int key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(final String value) {
this.value = value;
}
public DataType getType() {
return type;
}
  public void setType(final DataType type) {
this.type = type;
}
enum DataType {
TYPE_1, TYPE_2, TYPE_3
}
@Override
public String toString() {
return "Data {" + "key="
+ key + ", value='" + value
+ '\'' + ", type=" + type + '}';
}
}
| smedals/java-design-patterns | sharding/src/main/java/com/iluwatar/sharding/Data.java |
454 | // Companion Code to the paper "Generative Trees: Adversarial and Copycat" by R. Nock and M.
// Guillame-Bert, in ICML'22
import java.io.*;
import java.util.*;
class Wrapper implements Debuggable {
public static String DATASET = "--dataset=",
DATASET_SPEC = "--dataset_spec=",
NUM_SAMPLES = "--num_samples=",
WORK_DIR = "--work_dir=",
OUTPUT_SAMPLES = "--output_samples=",
OUTPUT_STATS = "--output_stats=",
X = "--x=",
Y = "--y=",
FLAGS = "--flags",
HELP = "--help",
IMPUTE_MISSING = "--impute_missing=";
public static String[] ALL_FLAGS = {
"iterations",
"unknown_value_coding",
"force_integer_coding",
"number_bins_for_histograms",
"force_binary_coding",
"faster_induction",
"copycat_local_generation"
};
// all flag names recognized in command line in --flags = {"name" : value, ...}
// nodes_in_gt: integer = number of nodes in the GT learned
// unknown_value_coding: String = enforces an "unknown value" different from default
// force_integer_coding: boolean = if true, enforce integer coding of observation variables
// recognizable as integers ("cleaner" GT)
// number_bins_for_histograms = integer, number of bins to compute the GT histograms of marginals
// at the end
public static int ALL_FLAGS_INDEX_ITERATIONS = 0,
ALL_FLAGS_INDEX_UNKNOWN_VALUE_CODING = 1,
ALL_FLAGS_FORCE_INTEGER_CODING = 2,
ALL_FLAGS_NUMBER_BINS_FOR_HISTOGRAMS = 3,
ALL_FLAGS_FORCE_BINARY_CODING = 4,
ALL_FLAGS_FASTER_INDUCTION = 5,
ALL_FLAGS_COPYCAT_LOCAL_GENERATION = 6;
public static String[] DATASET_TOKENS = {
"\"name\":", "\"path\":", "\"label\":", "\"task\":"
}; // spec_name, spec_path, spec_label, spec_task
public static String PREFIX_GENERATOR = "generator_";
public String path_and_name_of_domain_dataset,
path_to_generated_samples,
working_directory,
blueprint_save_name,
spec_name,
prefix_domain,
spec_path,
spec_label,
spec_task,
x_name,
y_name,
output_stats_file,
output_stats_directory,
generator_filename,
densityplot_filename;
// spec_name = prefix name
public String[] flags_values;
int size_generated, number_iterations; // was nums
int index_x_name, index_y_name;
Algorithm myAlgos;
Domain myDomain;
boolean force_integer_coding = false,
force_binary_coding = true,
faster_induction = false,
impute_missing = false,
has_missing_values,
copycat_local_generation = true;
long loading_time,
gt_computation_time,
marginal_computation_time,
saving_generator_time,
saving_stats_time,
generate_examples_time,
saving_generated_sample_time,
saving_density_plot_generated_sample_time,
imputation_time;
Wrapper() {
flags_values = new String[ALL_FLAGS.length];
size_generated = number_iterations = -1;
index_x_name = index_y_name = -1;
densityplot_filename = null;
x_name = y_name = null;
path_and_name_of_domain_dataset = spec_path = null;
loading_time =
gt_computation_time =
marginal_computation_time =
saving_generator_time =
saving_stats_time =
generate_examples_time =
saving_generated_sample_time =
saving_density_plot_generated_sample_time = 0;
has_missing_values = false;
}
public static String help() {
String ret = "";
ret += "Example run\n";
ret += "Java Wrapper --dataset=${ANYDIR}/Datasets/iris/iris.csv\n";
ret +=
" '--dataset_spec={\"name\": \"iris\", \"path\":"
+ " \"${ANYDIR}/Datasets/iris/iris.csv\", \"label\": \"class\", \"task\":"
+ " \"BINARY_CLASSIFICATION\"}'\n";
ret += " --num_samples=10000 \n";
ret += " --work_dir=${ANYDIR}/Datasets/iris/working_dir \n";
ret +=
" "
+ " --output_samples=${ANYDIR}/Datasets/iris/output_samples/iris_gt_generated.csv\n";
ret +=
" --output_stats=${ANYDIR}/Datasets/iris/results/generated_examples.stats \n";
ret += " --x=Sepal.Length --y=Sepal.Width \n";
ret +=
" '--flags={\"iterations\" : \"10\", \"force_integer_coding\" : \"true\","
+ " \"force_binary_coding\" : \"true\", \"faster_induction\" : \"true\","
+ " \"unknown_value_coding\" : \"?\", \"number_bins_for_histograms\" : \"11\"}'\n";
ret += " --impute_missing=true\n\n";
ret += " --dataset: path to access the.csv data file containing variable names in first line\n";
ret += " --dataset_spec: self explanatory\n";
ret += " --num_samples: number of generated samples\n";
ret += " --work_dir: directory where the generator and density plots are saved\n";
ret += " --output_samples: generated samples filename\n";
ret +=
" --output_stats: file to store all data related to run (execution times, GT marginals"
+ " histograms, GT tree node stats, etc)\n";
ret +=
" --x --y: (optional) variables used to save a 2D density plot"
+ " (x,y,denxity_value_at_(x,y))\n";
ret += " --flags: flags...\n";
ret +=
" iterations (mandatory): integer; number of splits in the GT; final number of"
+ " nodes = 2 * iteration + 1\n";
ret +=
" force_integer_coding (optional): boolean; if true, recognizes integer variables"
+ " and codes them as such (otherwise, codes them as doubles) -- default: false\n";
ret +=
" force_binary_coding (optional): boolean; if true, recognizes 0/1/unknown"
+ " variables and codes them as nominal, otherwise treat them as integers or doubles --"
+ " default: true\n";
ret +=
" faster_induction (optional): boolean; if true, optimises training by sampling DT"
+ " splits if too many (i.e. more than "
+ Discriminator_Tree.MAX_SPLITS_BEFORE_RANDOMISATION
+ ") -- default: false\n";
ret +=
" unknown_value_coding (optional): String; representation of 'unknown' value in"
+ " dataset -- default: \"-1\"\n";
ret +=
" number_bins_for_histograms (optional): integer; number of bins for non-nominal"
+ " variables to store the learned GT marginals -- default: 19\n";
ret +=
" copycat_local_generation (optional): boolean; if true, when copycat induction"
+ " used, after a new split in GT, example generation only replaces the affected"
+ " feature for the locally generated examples -- default: true\n";
ret +=
" --impute_missing: if true, uses the generated tree to impute the missing values in the"
+ " training data\n";
return ret;
}
public static void main(String[] arg) {
int i;
Wrapper w = new Wrapper();
System.out.println("");
System.out.println(
"////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////");
System.out.println(
"// Companion code to ICML'22 paper \"Generative Trees: Adversarial and Copycat\", by"
+ " Richard Nock and Mathieu Guillame-Bert //");
System.out.println(
"// (copycat training of generative trees) "
+ " //");
if (arg.length == 0) {
System.out.println("// *No parameters*. Run 'java Wrapper --help' for more");
System.exit(0);
}
System.out.println(
"// Help & example run: 'java Wrapper --help' "
+ " //");
System.out.println(
"////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////");
for (i = 0; i < arg.length; i++) {
if (arg[i].equals(HELP)) {
System.out.println(help());
System.exit(0);
}
w.fit_vars(arg[i]);
}
w.summary();
w.simple_go();
System.out.println("\nBibTex:");
System.out.println("@inproceedings{ngbGT,");
System.out.println(" title={Generative Trees: Adversarial and Copycat},");
System.out.println(" author={R. Nock and M. Guillame-Bert},");
System.out.println(" booktitle={39$^{~th}$ International Conference on Machine Learning},");
System.out.println(" year={2022}");
System.out.println("}\n");
}
public void simple_go() {
Algorithm.INIT();
long b, e;
Vector<Example> v_gen;
System.out.print("Loading stuff start... ");
b = System.currentTimeMillis();
myDomain = new Domain(this);
myAlgos = new Algorithm(myDomain);
e = System.currentTimeMillis();
loading_time = e - b;
System.out.println("Loading stuff ok (time elapsed: " + loading_time + " ms).\n");
String[] parameters = {
"@MatuErr", "1.0", "COPYCAT", number_iterations + "", copycat_local_generation + ""
};
Vector<String> params = new Vector<>(Arrays.asList(parameters));
myAlgos.addAlgorithm(params);
System.out.print("Learning the generator... ");
b = System.currentTimeMillis();
Generator_Tree gt = myAlgos.simple_go();
e = System.currentTimeMillis();
gt_computation_time = e - b;
System.out.println(
"ok (time elapsed: " + gt_computation_time + " ms).\n\nGenerative tree learned: " + gt);
System.out.print("Computing GT marginals histograms... ");
b = System.currentTimeMillis();
gt.compute_generator_histograms();
e = System.currentTimeMillis();
marginal_computation_time = e - b;
System.out.println("ok (time elapsed: " + marginal_computation_time + " ms).");
System.out.print("Saving the GT... ");
b = System.currentTimeMillis();
Generator_Tree.SAVE_GENERATOR_TREE(gt, working_directory + "/" + generator_filename, "");
e = System.currentTimeMillis();
saving_generator_time = e - b;
System.out.println("ok (time elapsed: " + saving_generator_time + " ms).");
System.out.print("Generating " + size_generated + " examples using the GT... ");
b = System.currentTimeMillis();
v_gen = gt.generate_sample_with_density(size_generated);
e = System.currentTimeMillis();
generate_examples_time = e - b;
System.out.println("ok (time elapsed: " + generate_examples_time + " ms).");
if ((has_missing_values) && (impute_missing)) {
System.out.print("Imputing examples using the GT... ");
b = System.currentTimeMillis();
impute_and_save(gt);
e = System.currentTimeMillis();
imputation_time = e - b;
System.out.println("ok (time elapsed: " + imputation_time + " ms).");
}
System.out.print("Saving generated sample... ");
b = System.currentTimeMillis();
save_sample(v_gen);
e = System.currentTimeMillis();
saving_generated_sample_time = e - b;
System.out.println("ok (time elapsed: " + saving_generated_sample_time + " ms).");
if ((x_name != null) && (y_name != null)) {
System.out.print("Saving density plot sample... ");
b = System.currentTimeMillis();
save_density_plot_sample(v_gen, index_x_name, index_y_name);
e = System.currentTimeMillis();
saving_density_plot_generated_sample_time = e - b;
System.out.println(
"ok (time elapsed: " + saving_density_plot_generated_sample_time + " ms).");
}
System.out.print("Saving stats file... ");
b = System.currentTimeMillis();
save_stats(gt);
e = System.currentTimeMillis();
saving_stats_time = e - b;
System.out.println("ok (time elapsed: " + saving_stats_time + " ms).");
System.out.println("All finished. Stopping...");
myDomain.myMemoryMonitor.stop();
}
public void save_density_plot_sample(Vector<Example> v_gen, int x, int y) {
FileWriter f;
int i;
String nameFile = working_directory + "/" + densityplot_filename;
try {
f = new FileWriter(nameFile);
f.write("//" + x_name + "," + y_name + ",density_value\n");
for (i = 0; i < v_gen.size(); i++)
f.write(
((Example) v_gen.elementAt(i)).toStringSaveDensity(index_x_name, index_y_name) + "\n");
f.close();
} catch (IOException e) {
Dataset.perror("Experiments.class :: Saving results error in file " + nameFile);
}
}
public void save_sample(Vector<Example> v_gen) {
FileWriter f;
int i;
String nameFile = path_to_generated_samples + "/" + blueprint_save_name;
try {
f = new FileWriter(nameFile);
for (i = 0; i < myDomain.myDS.features_names_from_file.length; i++) {
f.write(myDomain.myDS.features_names_from_file[i]);
if (i < myDomain.myDS.features_names_from_file.length - 1)
f.write(Dataset.KEY_SEPARATION_STRING[Dataset.SEPARATION_INDEX]);
}
f.write("\n");
for (i = 0; i < v_gen.size(); i++)
f.write(((Example) v_gen.elementAt(i)).toStringSave() + "\n");
f.close();
} catch (IOException e) {
Dataset.perror("Experiments.class :: Saving results error in file " + nameFile);
}
}
public void impute_and_save(Generator_Tree gt) {
FileWriter f;
int i, j;
String nameFile = working_directory + "/" + spec_name + "_imputed.csv";
Example ee, ee_cop;
try {
f = new FileWriter(nameFile);
for (i = 0; i < myDomain.myDS.features_names_from_file.length; i++) {
f.write(myDomain.myDS.features_names_from_file[i]);
if (i < myDomain.myDS.features_names_from_file.length - 1)
f.write(Dataset.KEY_SEPARATION_STRING[Dataset.SEPARATION_INDEX]);
}
f.write("\n");
for (i = 0; i < myDomain.myDS.number_examples_total_from_file; i++) {
// Use a progress step of at least 1 to avoid division by zero on datasets with fewer than 10 examples
int step = Math.max(1, myDomain.myDS.number_examples_total_from_file / 10);
if (i % step == 0)
System.out.print((i / step) * 10 + "% ");
ee = (Example) myDomain.myDS.examples_from_file.elementAt(i);
ee_cop = Example.copyOf(ee);
if (ee.contains_unknown_values()) {
gt.impute_all_values_from_one_leaf(ee_cop);
}
f.write(ee_cop.toStringSave() + "\n");
}
f.close();
} catch (IOException e) {
Dataset.perror("Experiments.class :: Saving results error in file " + nameFile);
}
}
public void save_stats(Generator_Tree gt) {
int i;
FileWriter f = null;
int[] node_counts = new int[myDomain.myDS.features_names_from_file.length];
String output_stats_additional = output_stats_file + "_more.txt";
long time_gt_plus_generation = gt_computation_time + generate_examples_time;
long time_gt_plus_imputation = -1;
if ((has_missing_values) && (impute_missing))
time_gt_plus_imputation = gt_computation_time + imputation_time;
try {
f = new FileWriter(output_stats_file);
f.write("{\n");
f.write(
" \"running_time_seconds\": "
+ DF8.format(((double) gt_computation_time + generate_examples_time) / 1000)
+ ",\n");
f.write(" \"gt_number_nodes\": " + gt.number_nodes + ",\n");
f.write(" \"gt_depth\": " + gt.depth + ",\n");
// Avoid a trailing comma (invalid JSON) when the optional imputation entry is absent
f.write(
" \"running_time_gt_training_plus_exemple_generation\": "
+ DF8.format(((double) time_gt_plus_generation / 1000)));
if ((has_missing_values) && (impute_missing))
f.write(
",\n \"running_time_gt_training_plus_imputation\": "
+ DF8.format(((double) time_gt_plus_imputation / 1000)));
f.write("\n}\n");
f.close();
} catch (IOException e) {
Dataset.perror("Wrapper.class :: Saving results error in file " + output_stats_file);
}
try {
f = new FileWriter(output_stats_additional);
f.write("// flag values used: ");
for (i = 0; i < flags_values.length; i++)
f.write(" [" + ALL_FLAGS[i] + ":" + flags_values[i] + "] ");
f.write("\n\n");
f.write("// Time to learn the generator: " + gt_computation_time + " ms.\n");
f.write("// Time to generate sample: " + generate_examples_time + " ms.\n");
// saves the gt histograms for comparison
f.write("\n// GT Histograms of marginals\n");
for (i = 0; i < gt.gt_histograms.size(); i++) {
f.write((gt.gt_histograms.elementAt(i)).toStringSave());
f.write("//\n");
}
gt.root.recursive_fill_node_counts(node_counts, myDomain.myDS.features_names_from_file);
f.write("\n// GT node counts per feature name\n");
for (i = 0; i < myDomain.myDS.features_names_from_file.length; i++) {
f.write("// " + myDomain.myDS.features_names_from_file[i] + " : " + node_counts[i] + "\n");
}
f.close();
} catch (IOException e) {
Dataset.perror("Wrapper.class :: Saving results error in file " + output_stats_additional);
}
}
public void summary() {
int i;
System.out.println("\nRunning copycat generator training with the following inputs:");
System.out.println(" * dataset path to train generator:" + path_and_name_of_domain_dataset);
System.out.println(" * working directory:" + working_directory);
System.out.println(
" * generated samples ("
+ size_generated
+ " examples) stored in directory "
+ path_to_generated_samples
+ " with filename "
+ blueprint_save_name);
System.out.println(
" * generator (gt) stored in working directory with filename " + generator_filename);
System.out.println(" * stats file at " + output_stats_file);
if (spec_path == null) Dataset.warning(" No path information in --dataset_spec");
else if (!path_and_name_of_domain_dataset.equals(spec_path))
Dataset.warning(" Non identical information in --dataset_spec path vs --dataset\n");
if (impute_missing)
System.out.println(
" * imputed sample saved at filename "
+ working_directory
+ "/"
+ spec_name
+ "_imputed.csv");
if ((x_name != null) && (y_name != null) && (!x_name.equals("")) && (!y_name.equals(""))) {
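// Build the density plot filename by inserting the axis names just before the extension of the sample filename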
densityplot_filename =
blueprint_save_name.substring(0, blueprint_save_name.lastIndexOf('.'))
+ "_2DDensity_plot_X_"
+ x_name
+ "_Y_"
+ y_name
+ blueprint_save_name.substring(
blueprint_save_name.lastIndexOf('.'), blueprint_save_name.length());
System.out.println(
" * 2D density plot for ("
+ x_name
+ ","
+ y_name
+ ") stored in working directory with filename "
+ densityplot_filename);
}
System.out.print(" * flags (non null): ");
for (i = 0; i < flags_values.length; i++)
if (flags_values[i] != null)
System.out.print("[" + ALL_FLAGS[i] + ":" + flags_values[i] + "] ");
System.out.println("");
if (flags_values[ALL_FLAGS_INDEX_UNKNOWN_VALUE_CODING] != null)
Unknown_Feature_Value.S_UNKNOWN = flags_values[ALL_FLAGS_INDEX_UNKNOWN_VALUE_CODING];
if (flags_values[ALL_FLAGS_FORCE_INTEGER_CODING] != null)
force_integer_coding = Boolean.parseBoolean(flags_values[ALL_FLAGS_FORCE_INTEGER_CODING]);
if (flags_values[ALL_FLAGS_FORCE_BINARY_CODING] != null)
force_binary_coding = Boolean.parseBoolean(flags_values[ALL_FLAGS_FORCE_BINARY_CODING]);
if (flags_values[ALL_FLAGS_FASTER_INDUCTION] != null)
faster_induction = Boolean.parseBoolean(flags_values[ALL_FLAGS_FASTER_INDUCTION]);
if (flags_values[ALL_FLAGS_INDEX_ITERATIONS] != null)
number_iterations = Integer.parseInt(flags_values[ALL_FLAGS_INDEX_ITERATIONS]);
if (flags_values[ALL_FLAGS_COPYCAT_LOCAL_GENERATION] != null)
copycat_local_generation =
Boolean.parseBoolean(flags_values[ALL_FLAGS_COPYCAT_LOCAL_GENERATION]);
Boost.COPYCAT_GENERATE_WITH_WHOLE_GT = !copycat_local_generation;
if (flags_values[ALL_FLAGS_NUMBER_BINS_FOR_HISTOGRAMS] != null) {
Histogram.NUMBER_CONTINUOUS_FEATURE_BINS =
Integer.parseInt(flags_values[ALL_FLAGS_NUMBER_BINS_FOR_HISTOGRAMS]);
Histogram.MAX_NUMBER_INTEGER_FEATURE_BINS =
Integer.parseInt(flags_values[ALL_FLAGS_NUMBER_BINS_FOR_HISTOGRAMS]);
}
Dataset.NUMBER_GENERATED_EXAMPLES_DEFAULT = -1;
Discriminator_Tree.USE_OBSERVED_FEATURE_VALUES_FOR_SPLITS = true;
Discriminator_Tree.RANDOMISE_SPLIT_FINDING_WHEN_TOO_MANY_SPLITS = faster_induction;
System.out.println("");
}
public void fit_vars(String s) {
String dummys;
if (s.contains(DATASET)) {
path_and_name_of_domain_dataset = s.substring(DATASET.length(), s.length());
spec_name =
path_and_name_of_domain_dataset.substring(
path_and_name_of_domain_dataset.lastIndexOf('/') + 1,
path_and_name_of_domain_dataset.lastIndexOf('.'));
} else if (s.contains(DATASET_SPEC)) {
spec_path = spec_label = spec_task = null;
int i, begin_ind, end_ind;
String[] values = new String[4];
int[] index_tokens = {0, 0, 0, 0};
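// Each DATASET_TOKENS entry must appear exactly once, and in order; the quoted value after each token is extracted below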
for (i = 0; i < DATASET_TOKENS.length; i++) {
if (s.indexOf(DATASET_TOKENS[i]) != s.lastIndexOf(DATASET_TOKENS[i]))
Dataset.perror(
"Wrapper.class :: more than one occurrence of "
+ DATASET_TOKENS[i]
+ " in string"
+ s);
if (s.indexOf(DATASET_TOKENS[i]) == -1)
Dataset.perror(
"Wrapper.class :: zero occurrence of " + DATASET_TOKENS[i] + " in string" + s);
else index_tokens[i] = s.indexOf(DATASET_TOKENS[i]);
}
for (i = 0; i < DATASET_TOKENS.length - 1; i++)
if (index_tokens[i] > index_tokens[i + 1])
Dataset.perror(
"Wrapper.class :: token "
+ DATASET_TOKENS[i]
+ " should appear before token "
+ DATASET_TOKENS[i + 1]
+ " in string"
+ s);
for (i = 0; i < DATASET_TOKENS.length; i++) {
begin_ind = index_tokens[i] + DATASET_TOKENS[i].length();
if (i == DATASET_TOKENS.length - 1) end_ind = s.length();
else end_ind = index_tokens[i + 1] - 1;
dummys = s.substring(begin_ind, end_ind);
values[i] = dummys.substring(dummys.indexOf('\"') + 1, dummys.lastIndexOf('\"'));
}
prefix_domain = spec_name;
spec_path = values[1];
spec_label = values[2];
spec_task = values[3];
} else if (s.contains(NUM_SAMPLES)) {
size_generated = Integer.parseInt(s.substring(NUM_SAMPLES.length(), s.length()));
} else if (s.contains(WORK_DIR)) {
working_directory = s.substring(WORK_DIR.length(), s.length());
} else if (s.contains(OUTPUT_SAMPLES)) {
dummys = s.substring(OUTPUT_SAMPLES.length(), s.length());
path_to_generated_samples = dummys.substring(0, dummys.lastIndexOf('/'));
blueprint_save_name = dummys.substring(dummys.lastIndexOf('/') + 1, dummys.length());
generator_filename = PREFIX_GENERATOR + blueprint_save_name;
} else if (s.contains(OUTPUT_STATS)) {
output_stats_file = s.substring(OUTPUT_STATS.length(), s.length());
output_stats_directory = s.substring(OUTPUT_STATS.length(), s.lastIndexOf('/'));
} else if (s.contains(X)) {
x_name = s.substring(X.length(), s.length());
} else if (s.contains(Y)) {
y_name = s.substring(Y.length(), s.length());
} else if (s.contains(FLAGS)) {
dummys = ((s.substring(FLAGS.length(), s.length())).replaceAll(" ", "")).replaceAll("=", "");
if (!dummys.substring(0, 1).equals("{"))
Dataset.perror("Wrapper.class :: FLAGS flags does not begin with '{'");
if (!dummys.substring(dummys.length() - 1, dummys.length()).equals("}"))
Dataset.perror("Wrapper.class :: FLAGS flags does not end with '}'");
dummys = (dummys.substring(1, dummys.length() - 1)).replace("\"", "");
int b = 0, e = -1, i;
String subs, tags, vals;
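// Scan comma-separated "tag:value" entries; each tag must match an entry of ALL_FLAGS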
while (e < dummys.length()) {
b = e + 1;
do {
e++;
} while ((e < dummys.length()) && (!dummys.substring(e, e + 1).equals(",")));
subs = dummys.substring(b, e);
if (!subs.contains(":"))
Dataset.perror("Wrapper.class :: flags string " + subs + " not of the syntax tag:value");
tags = subs.substring(0, subs.lastIndexOf(':'));
vals = subs.substring(subs.lastIndexOf(':') + 1, subs.length());
i = 0;
do {
if (!ALL_FLAGS[i].equals(tags)) i++;
} while ((i < ALL_FLAGS.length) && (!ALL_FLAGS[i].equals(tags)));
if (i == ALL_FLAGS.length)
Dataset.perror("Wrapper.class :: flags string " + tags + " not in authorized tags");
flags_values[i] = vals;
}
} else if (s.contains(IMPUTE_MISSING)) {
impute_missing = Boolean.parseBoolean(s.substring(IMPUTE_MISSING.length(), s.length()));
}
if ((x_name != null) && (y_name != null) && (x_name.equals(y_name)))
Dataset.perror(
"Wrapper.class :: density plot requested on the same X and Y variable = " + x_name);
}
}
| google-research/google-research | generative_trees/src/Wrapper.java |
455 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.time.DateMathParser;
import org.elasticsearch.geometry.utils.Geohash;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;
import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils;
import java.io.IOException;
import java.math.BigInteger;
import java.net.InetAddress;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.NumberFormat;
import java.text.ParseException;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.Base64;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.LongSupplier;
/** A formatter for values as returned by the fielddata/doc-values APIs. */
public interface DocValueFormat extends NamedWriteable {
long MASK_2_63 = 0x8000000000000000L;
BigInteger BIGINTEGER_2_64_MINUS_ONE = BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE); // 2^64 -1
/** Format a long value. This is used by terms and histogram aggregations
* to format keys for fields that use longs as a doc value representation
* such as the {@code long} and {@code date} fields. */
default Object format(long value) {
throw new UnsupportedOperationException();
}
/** Format a double value. This is used by terms and stats aggregations
* to format keys for fields that use numbers as a doc value representation
* such as the {@code long}, {@code double} or {@code date} fields. */
default Object format(double value) {
throw new UnsupportedOperationException();
}
/** Format a binary value. This is used by terms aggregations to format
* keys for fields that use binary doc value representations such as the
* {@code keyword} and {@code ip} fields. */
default Object format(BytesRef value) {
throw new UnsupportedOperationException();
}
/** Parse a value that was formatted with {@link #format(long)} back to the
* original long value. */
default long parseLong(String value, boolean roundUp, LongSupplier now) {
throw new UnsupportedOperationException();
}
/** Parse a value that was formatted with {@link #format(double)} back to
* the original double value. */
default double parseDouble(String value, boolean roundUp, LongSupplier now) {
throw new UnsupportedOperationException();
}
/** Parse a value that was formatted with {@link #format(BytesRef)} back
* to the original BytesRef. */
default BytesRef parseBytesRef(Object value) {
throw new UnsupportedOperationException();
}
/**
* Formats a value of a sort field in a search response. This is used by {@link SearchSortValues}
* to avoid sending the internal representation of a value of a sort field in a search response.
* The default implementation formats {@link BytesRef} but leave other types as-is.
*/
default Object formatSortValue(Object value) {
if (value instanceof BytesRef) {
return format((BytesRef) value);
}
return value;
}
DocValueFormat RAW = RawDocValueFormat.INSTANCE;
/**
* Singleton, stateless formatter for "Raw" values, generally taken to mean keywords and other strings.
*/
class RawDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new RawDocValueFormat();
private RawDocValueFormat() {}
@Override
public String getWriteableName() {
return "raw";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public Long format(long value) {
return value;
}
@Override
public Double format(double value) {
return value;
}
@Override
public String format(BytesRef value) {
try {
return value.utf8ToString();
} catch (Exception | AssertionError e) {
throw new IllegalArgumentException("Failed trying to format bytes as UTF8. Possibly caused by a mapping mismatch", e);
}
}
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
try {
// Prefer parsing as a long to avoid losing precision
return Long.parseLong(value);
} catch (NumberFormatException e) {
// retry as a double
}
double d = Double.parseDouble(value);
if (roundUp) {
d = Math.ceil(d);
} else {
d = Math.floor(d);
}
return Math.round(d);
}
@Override
public double parseDouble(String value, boolean roundUp, LongSupplier now) {
return Double.parseDouble(value);
}
@Override
public BytesRef parseBytesRef(Object value) {
return new BytesRef(value.toString());
}
@Override
public String toString() {
return "raw";
}
};
DocValueFormat BINARY = BinaryDocValueFormat.INSTANCE;
/**
* Singleton, stateless formatter, for representing bytes as base64 strings
*/
class BinaryDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new BinaryDocValueFormat();
private BinaryDocValueFormat() {}
@Override
public String getWriteableName() {
return "binary";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String format(BytesRef value) {
return Base64.getEncoder().encodeToString(Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length));
}
@Override
public BytesRef parseBytesRef(Object value) {
return new BytesRef(Base64.getDecoder().decode(value.toString()));
}
};
static DocValueFormat withNanosecondResolution(final DocValueFormat format) {
if (format instanceof DateTime dateTime) {
return new DateTime(dateTime.formatter, dateTime.timeZone, DateFieldMapper.Resolution.NANOSECONDS, dateTime.formatSortValues);
} else {
throw new IllegalArgumentException("trying to convert a known date time formatter to a nanosecond one, wrong field used?");
}
}
static DocValueFormat enableFormatSortValues(DocValueFormat format) {
if (format instanceof DateTime dateTime) {
return new DateTime(dateTime.formatter, dateTime.timeZone, dateTime.resolution, true);
}
throw new IllegalArgumentException("require a date_time formatter; got [" + format.getWriteableName() + "]");
}
final class DateTime implements DocValueFormat {
public static final String NAME = "date_time";
final DateFormatter formatter;
final ZoneId timeZone;
private final DateMathParser parser;
final DateFieldMapper.Resolution resolution;
final boolean formatSortValues;
public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolution resolution) {
this(formatter, timeZone, resolution, false);
}
private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolution resolution, boolean formatSortValues) {
this.timeZone = Objects.requireNonNull(timeZone);
this.formatter = formatter.withZone(timeZone);
this.parser = this.formatter.toDateMathParser();
this.resolution = resolution;
this.formatSortValues = formatSortValues;
}
public DateTime(StreamInput in) throws IOException {
String formatterPattern = in.readString();
String zoneId = in.readString();
this.timeZone = ZoneId.of(zoneId);
this.formatter = DateFormatter.forPattern(formatterPattern).withZone(this.timeZone);
this.parser = formatter.toDateMathParser();
this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt());
if (in.getTransportVersion().between(TransportVersions.V_7_7_0, TransportVersions.V_8_0_0)) {
/* when deserialising from 7.7+ nodes expect a flag indicating if a pattern is of joda style
This is only used to support joda style indices in 7.x, in 8 we no longer support this.
All indices in 8 should use java style pattern. Hence we can ignore this flag.
*/
in.readBoolean();
}
this.formatSortValues = in.readBoolean();
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(formatter.pattern());
out.writeString(timeZone.getId());
out.writeVInt(resolution.ordinal());
if (out.getTransportVersion().between(TransportVersions.V_7_7_0, TransportVersions.V_8_0_0)) {
/* when serializing to 7.7+ send out a flag indicating if a pattern is of joda style
This is only used to support joda style indices in 7.x, in 8 we no longer support this.
All indices in 8 should use java style pattern. Hence this flag is always false.
*/
out.writeBoolean(false);
}
out.writeBoolean(formatSortValues);
}
public DateMathParser getDateMathParser() {
return parser;
}
@Override
public String format(long value) {
return formatter.format(resolution.toInstant(value).atZone(timeZone));
}
@Override
public String format(double value) {
return format((long) value);
}
@Override
public Object formatSortValue(Object value) {
if (formatSortValues) {
if (value instanceof Long) {
return format((Long) value);
}
}
return value;
}
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
return resolution.convert(parser.parse(value, now, roundUp, timeZone));
}
@Override
public double parseDouble(String value, boolean roundUp, LongSupplier now) {
return parseLong(value, roundUp, now);
}
@Override
public String toString() {
return "DocValueFormat.DateTime(" + formatter + ", " + timeZone + ", " + resolution + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DateTime that = (DateTime) o;
return formatter.equals(that.formatter)
&& timeZone.equals(that.timeZone)
&& resolution == that.resolution
&& formatSortValues == that.formatSortValues;
}
@Override
public int hashCode() {
return Objects.hash(formatter, timeZone, resolution, formatSortValues);
}
}
DocValueFormat GEOHASH = GeoHashDocValueFormat.INSTANCE;
/**
* Singleton, stateless formatter for geo hash values
*/
class GeoHashDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new GeoHashDocValueFormat();
private GeoHashDocValueFormat() {}
@Override
public String getWriteableName() {
return "geo_hash";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String format(long value) {
return Geohash.stringEncode(value);
}
@Override
public String format(double value) {
return format((long) value);
}
};
DocValueFormat GEOTILE = GeoTileDocValueFormat.INSTANCE;
class GeoTileDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new GeoTileDocValueFormat();
private GeoTileDocValueFormat() {}
@Override
public String getWriteableName() {
return "geo_tile";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String format(long value) {
return GeoTileUtils.stringEncode(value);
}
@Override
public String format(double value) {
return format((long) value);
}
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
return GeoTileUtils.longEncode(value);
}
};
DocValueFormat BOOLEAN = BooleanDocValueFormat.INSTANCE;
/**
* Stateless, Singleton formatter for boolean values. Parses the strings "true" and "false" as inputs.
*/
class BooleanDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new BooleanDocValueFormat();
private BooleanDocValueFormat() {}
@Override
public String getWriteableName() {
return "bool";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public Boolean format(long value) {
return value != 0;
}
@Override
public Boolean format(double value) {
return value != 0;
}
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
switch (value) {
case "false":
return 0;
case "true":
return 1;
}
throw new IllegalArgumentException("Cannot parse boolean [" + value + "], expected either [true] or [false]");
}
@Override
public double parseDouble(String value, boolean roundUp, LongSupplier now) {
return parseLong(value, roundUp, now);
}
};
IpDocValueFormat IP = IpDocValueFormat.INSTANCE;
/**
* Stateless, singleton formatter for IP address data
*/
class IpDocValueFormat implements DocValueFormat {
public static final IpDocValueFormat INSTANCE = new IpDocValueFormat();
private IpDocValueFormat() {}
@Override
public String getWriteableName() {
return "ip";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String format(BytesRef value) {
try {
byte[] bytes = Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length);
InetAddress inet = InetAddressPoint.decode(bytes);
return NetworkAddress.format(inet);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
"Failed trying to format bytes as IP address. Possibly caused by a mapping mismatch",
e
);
}
}
@Override
public BytesRef parseBytesRef(Object value) {
return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(value.toString())));
}
@Override
public String toString() {
return "ip";
}
};
final class Decimal implements DocValueFormat {
public static final String NAME = "decimal";
private static final DecimalFormatSymbols SYMBOLS = new DecimalFormatSymbols(Locale.ROOT);
final String pattern;
private final NumberFormat format;
public Decimal(String pattern) {
this.pattern = pattern;
this.format = new DecimalFormat(pattern, SYMBOLS);
}
public Decimal(StreamInput in) throws IOException {
this(in.readString());
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(pattern);
}
@Override
public String format(long value) {
return format.format(value);
}
@Override
public String format(double value) {
/*
* Explicitly check for NaN, since it formats to "�" or "NaN" depending on JDK version.
*
* Decimal formatter uses the JRE's default symbol list (via Locale.ROOT above). In JDK8,
* this translates into using {@link sun.util.locale.provider.JRELocaleProviderAdapter}, which loads
* {@link sun.text.resources.FormatData} for symbols. There, `NaN` is defined as `\ufffd` (�)
*
* In JDK9+, {@link sun.util.cldr.CLDRLocaleProviderAdapter} is used instead, which loads
* {@link sun.text.resources.cldr.FormatData}. There, `NaN` is defined as `"NaN"`
*
* Since the character � isn't very useful, and makes the output change depending on JDK version,
* we manually check to see if the value is NaN and return the string directly.
*/
if (Double.isNaN(value)) {
return String.valueOf(Double.NaN);
}
return format.format(value);
}
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
Number n;
try {
n = format.parse(value);
} catch (ParseException e) {
throw new RuntimeException("Cannot parse the value [" + value + "] using the pattern [" + pattern + "]", e);
}
if (format.isParseIntegerOnly()) {
return n.longValue();
} else {
double d = n.doubleValue();
if (roundUp) {
d = Math.ceil(d);
} else {
d = Math.floor(d);
}
return Math.round(d);
}
}
@Override
public double parseDouble(String value, boolean roundUp, LongSupplier now) {
Number n;
try {
n = format.parse(value);
} catch (ParseException e) {
throw new RuntimeException("Cannot parse the value [" + value + "] using the pattern [" + pattern + "]", e);
}
return n.doubleValue();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Decimal that = (Decimal) o;
return Objects.equals(pattern, that.pattern);
}
@Override
public int hashCode() {
return Objects.hash(pattern);
}
@Override
public String toString() {
return pattern;
}
};
DocValueFormat UNSIGNED_LONG_SHIFTED = UnsignedLongShiftedDocValueFormat.INSTANCE;
/**
* DocValues format for unsigned 64 bit long values,
* that are stored as shifted signed 64 bit long values.
*/
class UnsignedLongShiftedDocValueFormat implements DocValueFormat {
public static final DocValueFormat INSTANCE = new UnsignedLongShiftedDocValueFormat();
private UnsignedLongShiftedDocValueFormat() {}
@Override
public String getWriteableName() {
return "unsigned_long_shifted";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String toString() {
return "unsigned_long_shifted";
}
/**
* Formats the unsigned long to the shifted long format
*/
@Override
public long parseLong(String value, boolean roundUp, LongSupplier now) {
long parsedValue = Long.parseUnsignedLong(value);
// subtract 2^63 or 10000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
// equivalent to flipping the first bit
return parsedValue ^ MASK_2_63;
}
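// Worked example of the bit-flip mapping (an illustrative sketch, not from the original source):
// parseLong("0")                    -> 0L ^ MASK_2_63  == Long.MIN_VALUE
// parseLong("18446744073709551615") -> -1L ^ MASK_2_63 == Long.MAX_VALUE
// format(Long.MIN_VALUE)            -> 0L
// format(Long.MAX_VALUE)            -> BigInteger 2^64 - 1, i.e. 18446744073709551615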
/**
* Formats a raw docValue that is stored in the shifted long format to the unsigned long representation.
*/
@Override
public Object format(long value) {
// add 2^63 or 10000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000,
// equivalent to flipping the first bit
long formattedValue = value ^ MASK_2_63;
if (formattedValue >= 0) {
return formattedValue;
} else {
return BigInteger.valueOf(formattedValue).and(BIGINTEGER_2_64_MINUS_ONE);
}
}
@Override
public Object formatSortValue(Object value) {
if (value instanceof Long) {
return format((Long) value);
}
return value;
}
/**
* Double docValues of the unsigned_long field type are already in the formatted representation,
* so we don't need to do anything here
*/
@Override
public Double format(double value) {
return value;
}
@Override
public double parseDouble(String value, boolean roundUp, LongSupplier now) {
return Double.parseDouble(value);
}
};
DocValueFormat TIME_SERIES_ID = new TimeSeriesIdDocValueFormat();
/**
* DocValues format for time series id.
*/
class TimeSeriesIdDocValueFormat implements DocValueFormat {
private static final Base64.Decoder BASE64_DECODER = Base64.getUrlDecoder();
private TimeSeriesIdDocValueFormat() {}
@Override
public String getWriteableName() {
return "tsid";
}
@Override
public void writeTo(StreamOutput out) {}
@Override
public String toString() {
return "tsid";
}
/**
* @param value The TSID as a {@link BytesRef}
* @return the TSID decoded as a map of dimension key/value pairs when possible, otherwise the encoded TSID
*/
@Override
public Object format(BytesRef value) {
try {
// NOTE: if the tsid is a map of dimension key/value pairs (as it was before introducing
// tsid hashing) we just decode the map and return it.
return TimeSeriesIdFieldMapper.decodeTsidAsMap(value);
} catch (Exception e) {
// NOTE: otherwise the _tsid field is just a hash and we can't decode it
return TimeSeriesIdFieldMapper.encodeTsid(value);
}
}
@Override
public BytesRef parseBytesRef(Object value) {
if (value instanceof BytesRef valueAsBytesRef) {
return valueAsBytesRef;
}
if (value instanceof String valueAsString) {
return new BytesRef(BASE64_DECODER.decode(valueAsString));
}
return parseBytesRefMap(value);
}
/**
* After introducing tsid hashing this tsid parsing logic is deprecated.
* Tsid hashing does not allow us to parse the tsid to extract dimension field key/value pairs.
* @param value The Map encoding tsid dimension fields key/value pairs.
*
* @return a {@link BytesRef} representing a map of key/value pairs
*/
private BytesRef parseBytesRefMap(Object value) {
if (value instanceof Map<?, ?> == false) {
throw new IllegalArgumentException("Cannot parse tsid object [" + value + "]");
}
Map<?, ?> m = (Map<?, ?>) value;
TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null);
for (Map.Entry<?, ?> entry : m.entrySet()) {
String f = entry.getKey().toString();
Object v = entry.getValue();
if (v instanceof String s) {
builder.addString(f, s);
} else if (v instanceof Long l) {
builder.addLong(f, l);
} else if (v instanceof Integer i) {
builder.addLong(f, i.longValue());
} else if (v instanceof BigInteger ul) {
long ll = UNSIGNED_LONG_SHIFTED.parseLong(ul.toString(), false, () -> 0L);
builder.addUnsignedLong(f, ll);
} else {
throw new IllegalArgumentException("Unexpected value in tsid object [" + v + "]");
}
}
try {
// NOTE: we can decode the tsid only if it is not hashed (represented as a map)
return builder.buildLegacyTsid().toBytesRef();
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
};
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/search/DocValueFormat.java |
456 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search.sort;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorable;
import org.elasticsearch.common.lucene.ScorerAware;
import org.elasticsearch.common.util.BigArray;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.BitArray;
import org.elasticsearch.common.util.DoubleArray;
import org.elasticsearch.common.util.FloatArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.search.DocValueFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import static java.util.Collections.emptyList;
/**
* Type specialized sort implementations designed for use in aggregations.
* Aggregations have a couple of super interesting characteristics:
* <ul>
* <li>They can have many, many buckets so this implementation backs to
* {@link BigArrays} so it doesn't need to allocate any objects per bucket
* and the circuit breaker in {@linkplain BigArrays} will automatically
* track memory usage and abort execution if it grows too large.</li>
* <li>Its fairly common for a bucket to be collected but not returned so
* these implementations delay as much work as possible until collection</li>
* </ul>
* <p>
* Every bucket is in one of two states: "gathering" or min/max "heap". While
* "gathering" the next empty slot is stored in the "root" offset of the
* bucket and collecting a value is just adding it in the next slot and bumping
* the tracking value at the root. So collecting values is {@code O(1)}.
* Extracting the results in sorted order is {@code O(n * log n)} because,
* well, sorting is {@code O(n * log n)}. When a bucket has collected
* {@link #bucketSize} entries it is converted into a min "heap" in
* {@code O(n)} time, or into a max "heap" if {@link #order} is ascending.
* </p>
* <p>
* Once a "heap", collecting a document is the heap-standard {@code O(log n)}
* worst case. Critically, checking whether a value is competitive at all is a
* very fast {@code O(1)}, and so long as buckets aren't hit in reverse
* order, most values won't be. Extracting results in sorted order is still
* {@code O(n * log n)}.
* </p>
* <p>
* When we first collect a bucket we make sure that we've allocated enough
* slots to hold all sort values for the entire bucket. In other words: the
* storage is "dense" and we don't try to save space when storing partially
* filled buckets.
* </p>
* <p>
* We actually *oversize* the allocations
* (like {@link BigArrays#overSize(long)}) to get amortized linear number
* of allocations and to play well with our paged arrays.
* </p>
*/
public abstract class BucketedSort implements Releasable {
/**
* Callbacks for storing extra data along with competitive sorts.
*/
public interface ExtraData {
/**
* Swap the position of two bits of extra data.
* <p>
* Both parameters will have previously been loaded by
* {@link Loader#loadFromDoc(long, int)} so the implementer shouldn't
* need to grow the underlying storage to implement this.
* </p>
*/
void swap(long lhs, long rhs);
/**
* Prepare to load extra data from a leaf.
*/
Loader loader(LeafReaderContext ctx) throws IOException;
@FunctionalInterface
interface Loader {
/**
* Load extra data from a doc.
* <p>
* Implementers <strong>should</strong> grow their underlying
* storage to fit the {@code index}.
* </p>
*/
void loadFromDoc(long index, int doc) throws IOException;
}
}
/**
* An implementation of {@linkplain ExtraData} that does nothing.
*/
public static final ExtraData NOOP_EXTRA_DATA = new ExtraData() {
@Override
public void swap(long lhs, long rhs) {}
@Override
public Loader loader(LeafReaderContext ctx) {
return (index, doc) -> {};
}
};
protected final BigArrays bigArrays;
private final SortOrder order;
private final DocValueFormat format;
private final int bucketSize;
protected final ExtraData extra;
/**
* {@code true} if the bucket is in heap mode, {@code false} if
* it is still gathering.
*/
private final BitArray heapMode;
protected BucketedSort(BigArrays bigArrays, SortOrder order, DocValueFormat format, int bucketSize, ExtraData extra) {
this.bigArrays = bigArrays;
this.order = order;
this.format = format;
this.bucketSize = bucketSize;
this.extra = extra;
heapMode = new BitArray(1, bigArrays);
}
/**
* The order of the sort.
*/
public final SortOrder getOrder() {
return order;
}
/**
* The format to use when presenting the values.
*/
public final DocValueFormat getFormat() {
return format;
}
/**
* The number of values to store per bucket.
*/
public int getBucketSize() {
return bucketSize;
}
/**
* Used with {@link BucketedSort#getValues(long, ResultBuilder)} to
* build results from the sorting operation.
*/
@FunctionalInterface
public interface ResultBuilder<T> {
T build(long index, SortValue sortValue) throws IOException;
}
/**
* Get the values for a bucket if it has been collected. If it hasn't
* then returns an empty list.
* @param builder builds results. See {@link ExtraData} for how to store
* data along side the sort for this to extract.
*/
public final <T extends Comparable<T>> List<T> getValues(long bucket, ResultBuilder<T> builder) throws IOException {
long rootIndex = bucket * bucketSize;
if (rootIndex >= values().size()) {
// We've never seen this bucket.
return emptyList();
}
long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1);
long end = rootIndex + bucketSize;
List<T> result = new ArrayList<>(bucketSize);
for (long index = start; index < end; index++) {
result.add(builder.build(index, getValue(index)));
}
// TODO we usually have a heap here so we could use that to build the results sorted.
result.sort(order.wrap(Comparator.<T>naturalOrder()));
return result;
}
/**
* Get the values for a bucket if it has been collected. If it hasn't
* then returns an empty list.
*/
public final List<SortValue> getValues(long bucket) throws IOException {
return getValues(bucket, (i, sv) -> sv);
}
/**
* Is this bucket a min heap {@code true} or in gathering mode {@code false}?
*/
public boolean inHeapMode(long bucket) {
return heapMode.get(bucket);
}
/**
* Get the {@linkplain Leaf} implementation that'll do the actual collecting.
* @throws IOException most implementations need to perform IO to prepare for each leaf
*/
public abstract Leaf forLeaf(LeafReaderContext ctx) throws IOException;
/**
* Does this sort need scores? Most don't, but sorting on {@code _score} does.
*/
public abstract boolean needsScores();
/**
* The {@linkplain BigArray} backing this sort.
*/
protected abstract BigArray values();
/**
* Grow the {@linkplain BigArray} backing this sort to account for new buckets.
* This will only be called if the array is too small.
*/
protected abstract void growValues(long minSize);
/**
* Get the next index that should be "gathered" for a bucket rooted
* at {@code rootIndex}.
*/
protected abstract int getNextGatherOffset(long rootIndex);
/**
* Set the next index that should be "gathered" for a bucket rooted
* at {@code rootIndex}.
*/
protected abstract void setNextGatherOffset(long rootIndex, int offset);
/**
* Get the value at an index.
*/
protected abstract SortValue getValue(long index);
/**
* {@code true} if the entry at index {@code lhs} is "better" than
* the entry at {@code rhs}. "Better" here means "lower" for
* {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}.
*/
protected abstract boolean betterThan(long lhs, long rhs);
/**
* Swap the data at two indices.
*/
protected abstract void swap(long lhs, long rhs);
/**
* Initialize the gather offsets after setting up values. Subclasses
* should call this once, after setting up their {@link #values()}.
*/
protected final void initGatherOffsets() {
setNextGatherOffsets(0);
}
/**
* Allocate storage for more buckets and store the "next gather offset"
* for those new buckets.
*/
private void grow(long minSize) {
long oldMax = values().size() - 1;
growValues(minSize);
// Set the next gather offsets for all newly allocated buckets.
setNextGatherOffsets(oldMax - (oldMax % getBucketSize()) + getBucketSize());
}
/**
* Maintain the "next gather offsets" for newly allocated buckets.
*/
private void setNextGatherOffsets(long startingAt) {
int nextOffset = getBucketSize() - 1;
for (long bucketRoot = startingAt; bucketRoot < values().size(); bucketRoot += getBucketSize()) {
setNextGatherOffset(bucketRoot, nextOffset);
}
}
/**
* Heapify a bucket whose entries are in random order.
* <p>
* This works by validating the heap property on each node, iterating
* "upwards", pushing any out of order parents "down". Check out the
* <a href="https://en.wikipedia.org/w/index.php?title=Binary_heap&oldid=940542991#Building_a_heap">wikipedia</a>
* entry on binary heaps for more about this.
* </p>
* <p>
* While this *looks* like it could easily be {@code O(n * log n)}, it is
* a fairly well studied algorithm attributed to Floyd. There's
* been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst
* case.
* </p>
* <ul>
* <li>Hayward, Ryan; McDiarmid, Colin (1991).
* <a href="https://web.archive.org/web/20160205023201/http://www.stats.ox.ac.uk/__data/assets/pdf_file/0015/4173/heapbuildjalg.pdf">
* Average Case Analysis of Heap Building by Repeated Insertion</a> J. Algorithms.</li>
* <li>D.E. Knuth, "The Art of Computer Programming", Vol. 3, Sorting and Searching</li>
* </ul>
* @param rootIndex the index of the start of the bucket
*/
private void heapify(long rootIndex) {
int maxParent = bucketSize / 2 - 1;
for (int parent = maxParent; parent >= 0; parent--) {
downHeap(rootIndex, parent);
}
}
/**
* Correct the heap invariant of a parent and its children. This
* runs in {@code O(log n)} time.
* @param rootIndex index of the start of the bucket
* @param parent Index within the bucket of the parent to check.
* For example, 0 is the "root".
*/
private void downHeap(long rootIndex, int parent) {
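// Sift the entry at "parent" down: keep swapping it with its "worse" child so the worst entry of each subtree ends up at that subtree's root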
while (true) {
long parentIndex = rootIndex + parent;
int worst = parent;
long worstIndex = parentIndex;
int leftChild = parent * 2 + 1;
long leftIndex = rootIndex + leftChild;
if (leftChild < bucketSize) {
if (betterThan(worstIndex, leftIndex)) {
worst = leftChild;
worstIndex = leftIndex;
}
int rightChild = leftChild + 1;
long rightIndex = rootIndex + rightChild;
if (rightChild < bucketSize && betterThan(worstIndex, rightIndex)) {
worst = rightChild;
worstIndex = rightIndex;
}
}
if (worst == parent) {
break;
}
swap(worstIndex, parentIndex);
extra.swap(worstIndex, parentIndex);
parent = worst;
}
}
@Override
public final void close() {
Releasables.close(values(), heapMode);
}
/**
* Performs the actual collection against a {@linkplain LeafReaderContext}.
*/
public abstract class Leaf implements ScorerAware {
private final LeafReaderContext ctx;
private ExtraData.Loader loader = null;
protected Leaf(LeafReaderContext ctx) {
this.ctx = ctx;
}
/**
* Collect this doc into the given bucket if it is competitive.
*/
public final void collect(int doc, long bucket) throws IOException {
if (false == advanceExact(doc)) {
return;
}
long rootIndex = bucket * bucketSize;
if (inHeapMode(bucket)) {
if (docBetterThan(rootIndex)) {
// TODO a "bottom up" insert would save a couple of comparisons. Worth it?
setIndexToDocValue(rootIndex);
loader().loadFromDoc(rootIndex, doc);
downHeap(rootIndex, 0);
}
return;
}
// Gathering mode
long requiredSize = rootIndex + bucketSize;
if (values().size() < requiredSize) {
grow(requiredSize);
}
int next = getNextGatherOffset(rootIndex);
assert 0 <= next && next < bucketSize
: "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]";
long index = next + rootIndex;
setIndexToDocValue(index);
loader().loadFromDoc(index, doc);
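// Gather offsets count down from bucketSize - 1; filling slot 0 means the bucket is full, so switch it to heap mode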
if (next == 0) {
heapMode.set(bucket);
heapify(rootIndex);
} else {
setNextGatherOffset(rootIndex, next - 1);
}
}
/**
* Read the sort value from {@code doc} and return {@code true}
* if there is a value for that document. Otherwise return
* {@code false} and the sort will skip that document.
*/
protected abstract boolean advanceExact(int doc) throws IOException;
/**
* Set the value at the index to the value of the document to which
* we just advanced.
*/
protected abstract void setIndexToDocValue(long index);
/**
* {@code true} if the sort value for the doc is "better" than the
* entry at {@code index}. "Better" here means "lower" for
* {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}.
*/
protected abstract boolean docBetterThan(long index);
/**
* Get the extra data loader, building it if we haven't yet built one for this leaf.
*/
private ExtraData.Loader loader() throws IOException {
if (loader == null) {
loader = extra.loader(ctx);
}
return loader;
}
}
/**
* Superclass for implementations of {@linkplain BucketedSort} for {@code double} keys.
*/
public abstract static class ForDoubles extends BucketedSort {
private DoubleArray values;
@SuppressWarnings("this-escape")
public ForDoubles(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) {
super(bigArrays, sortOrder, format, bucketSize, extra);
boolean success = false;
try {
values = bigArrays.newDoubleArray(getBucketSize(), false);
success = true;
} finally {
if (success == false) {
close();
}
}
initGatherOffsets();
}
@Override
public boolean needsScores() {
return false;
}
@Override
protected final BigArray values() {
return values;
}
@Override
protected final void growValues(long minSize) {
values = bigArrays.grow(values, minSize);
}
@Override
protected final int getNextGatherOffset(long rootIndex) {
// This cast is safe because all ints fit accurately into a double.
return (int) values.get(rootIndex);
}
@Override
protected final void setNextGatherOffset(long rootIndex, int offset) {
values.set(rootIndex, offset);
}
@Override
protected final SortValue getValue(long index) {
return SortValue.from(values.get(index));
}
@Override
protected final boolean betterThan(long lhs, long rhs) {
return getOrder().reverseMul() * Double.compare(values.get(lhs), values.get(rhs)) < 0;
}
@Override
protected final void swap(long lhs, long rhs) {
double tmp = values.get(lhs);
values.set(lhs, values.get(rhs));
values.set(rhs, tmp);
}
protected abstract class Leaf extends BucketedSort.Leaf {
protected Leaf(LeafReaderContext ctx) {
super(ctx);
}
/**
* Return the value of this sort for the document to which
* we just {@link #advanceExact(int) moved}. This should be fast
* because it is called twice per competitive hit when in heap
* mode, once for {@link #docBetterThan(long)} and once
* for {@link #setIndexToDocValue(long)}.
*/
protected abstract double docValue();
@Override
public final void setScorer(Scorable scorer) {}
@Override
protected final void setIndexToDocValue(long index) {
values.set(index, docValue());
}
@Override
protected final boolean docBetterThan(long index) {
return getOrder().reverseMul() * Double.compare(docValue(), values.get(index)) < 0;
}
}
}
/**
* Superclass for implementations of {@linkplain BucketedSort} for {@code float} keys.
*/
public abstract static class ForFloats extends BucketedSort {
/**
* The maximum size of buckets this can store. This is because we
* store the next offset to write to in a float and floats only have
* {@code 23} bits of mantissa so they can't accurately store values
* higher than {@code 2 ^ 24}.
*/
public static final int MAX_BUCKET_SIZE = (int) Math.pow(2, 24);
private FloatArray values;
@SuppressWarnings("this-escape")
public ForFloats(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) {
super(bigArrays, sortOrder, format, bucketSize, extra);
if (bucketSize > MAX_BUCKET_SIZE) {
close();
throw new IllegalArgumentException("bucket size must be less than [2^24] but was [" + bucketSize + "]");
}
boolean success = false;
try {
values = bigArrays.newFloatArray(1, false);
success = true;
} finally {
if (success == false) {
close();
}
}
initGatherOffsets();
}
@Override
protected final BigArray values() {
return values;
}
@Override
protected final void growValues(long minSize) {
values = bigArrays.grow(values, minSize);
}
@Override
protected final int getNextGatherOffset(long rootIndex) {
/*
* This cast will not lose precision because we make sure never
* to write values here that float can't store precisely.
*/
return (int) values.get(rootIndex);
}
@Override
protected final void setNextGatherOffset(long rootIndex, int offset) {
values.set(rootIndex, offset);
}
@Override
protected final SortValue getValue(long index) {
return SortValue.from(values.get(index));
}
@Override
protected final boolean betterThan(long lhs, long rhs) {
return getOrder().reverseMul() * Float.compare(values.get(lhs), values.get(rhs)) < 0;
}
@Override
protected final void swap(long lhs, long rhs) {
float tmp = values.get(lhs);
values.set(lhs, values.get(rhs));
values.set(rhs, tmp);
}
protected abstract class Leaf extends BucketedSort.Leaf {
protected Leaf(LeafReaderContext ctx) {
super(ctx);
}
/**
* Return the value of this sort for the document to which
* we just {@link #advanceExact(int) moved}. This should be fast
* because it is called twice per competitive hit when in heap
* mode, once for {@link #docBetterThan(long)} and once
* for {@link #setIndexToDocValue(long)}.
*/
protected abstract float docValue();
@Override
protected final void setIndexToDocValue(long index) {
values.set(index, docValue());
}
@Override
protected final boolean docBetterThan(long index) {
return getOrder().reverseMul() * Float.compare(docValue(), values.get(index)) < 0;
}
}
}
/**
* Superclass for implementations of {@linkplain BucketedSort} for {@code long} keys.
*/
public abstract static class ForLongs extends BucketedSort {
private LongArray values;
@SuppressWarnings("this-escape")
public ForLongs(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) {
super(bigArrays, sortOrder, format, bucketSize, extra);
boolean success = false;
try {
values = bigArrays.newLongArray(1, false);
success = true;
} finally {
if (success == false) {
close();
}
}
initGatherOffsets();
}
@Override
public final boolean needsScores() {
return false;
}
@Override
protected final BigArray values() {
return values;
}
@Override
protected final void growValues(long minSize) {
values = bigArrays.grow(values, minSize);
}
@Override
protected final int getNextGatherOffset(long rootIndex) {
return (int) values.get(rootIndex);
}
@Override
protected final void setNextGatherOffset(long rootIndex, int offset) {
values.set(rootIndex, offset);
}
@Override
protected final SortValue getValue(long index) {
return SortValue.from(values.get(index));
}
@Override
protected final boolean betterThan(long lhs, long rhs) {
return getOrder().reverseMul() * Long.compare(values.get(lhs), values.get(rhs)) < 0;
}
@Override
protected final void swap(long lhs, long rhs) {
long tmp = values.get(lhs);
values.set(lhs, values.get(rhs));
values.set(rhs, tmp);
}
protected abstract class Leaf extends BucketedSort.Leaf {
protected Leaf(LeafReaderContext ctx) {
super(ctx);
}
/**
* Return the value of this sort for the document to which
* we just {@link #advanceExact(int) moved}. This should be fast
* because it is called twice per competitive hit when in heap
* mode, once for {@link #docBetterThan(long)} and once
* for {@link #setIndexToDocValue(long)}.
*/
protected abstract long docValue();
@Override
public final void setScorer(Scorable scorer) {}
@Override
protected final void setIndexToDocValue(long index) {
values.set(index, docValue());
}
@Override
protected final boolean docBetterThan(long index) {
return getOrder().reverseMul() * Long.compare(docValue(), values.get(index)) < 0;
}
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java |
458 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/*
The MIT License (MIT)
Copyright (c) 2016 Paul Campbell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.iluwatar.databus;
import lombok.Getter;
import lombok.Setter;
/**
* Base for data to send via the Data-Bus.
*
* @author Paul Campbell ([email protected])
*/
@Getter
@Setter
public class AbstractDataType implements DataType {
private DataBus dataBus;
}
| smedals/java-design-patterns | data-bus/src/main/java/com/iluwatar/databus/AbstractDataType.java |
459 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.common;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.LocalTimeOffset.Gap;
import org.elasticsearch.common.LocalTimeOffset.Overlap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.time.DateUtils;
import org.elasticsearch.core.TimeValue;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.temporal.ChronoField;
import java.time.temporal.ChronoUnit;
import java.time.temporal.IsoFields;
import java.time.temporal.TemporalField;
import java.time.temporal.TemporalQueries;
import java.time.zone.ZoneOffsetTransition;
import java.time.zone.ZoneRules;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* A strategy for rounding milliseconds since epoch.
* <p>
* There are two implementations for rounding.
* The first one requires a date time unit and rounds to the supplied date time unit (i.e. quarter of year, day of month).
* The second one allows you to specify an interval to round to.
* <p>
* See <a href="https://davecturner.github.io/2019/04/14/timezone-rounding.html">this</a>
* blog for some background reading. It's super interesting and the links are
* a comedy gold mine. If you like time zones. Or hate them.
*/
public abstract class Rounding implements Writeable {
private static final Logger logger = LogManager.getLogger(Rounding.class);
public enum DateTimeUnit {
WEEK_OF_WEEKYEAR((byte) 1, "week", IsoFields.WEEK_OF_WEEK_BASED_YEAR, true, TimeUnit.DAYS.toMillis(7)) {
private final long extraLocalOffsetLookup = TimeUnit.DAYS.toMillis(7);
long roundFloor(long utcMillis) {
return DateUtils.roundWeekOfWeekYear(utcMillis);
}
@Override
long extraLocalOffsetLookup() {
return extraLocalOffsetLookup;
}
},
YEAR_OF_CENTURY((byte) 2, "year", ChronoField.YEAR_OF_ERA, false, 12) {
private final long extraLocalOffsetLookup = TimeUnit.DAYS.toMillis(366);
long roundFloor(long utcMillis) {
return DateUtils.roundYear(utcMillis);
}
long extraLocalOffsetLookup() {
return extraLocalOffsetLookup;
}
},
QUARTER_OF_YEAR((byte) 3, "quarter", IsoFields.QUARTER_OF_YEAR, false, 3) {
private final long extraLocalOffsetLookup = TimeUnit.DAYS.toMillis(92);
long roundFloor(long utcMillis) {
return DateUtils.roundQuarterOfYear(utcMillis);
}
long extraLocalOffsetLookup() {
return extraLocalOffsetLookup;
}
},
MONTH_OF_YEAR((byte) 4, "month", ChronoField.MONTH_OF_YEAR, false, 1) {
private final long extraLocalOffsetLookup = TimeUnit.DAYS.toMillis(31);
long roundFloor(long utcMillis) {
return DateUtils.roundMonthOfYear(utcMillis);
}
long extraLocalOffsetLookup() {
return extraLocalOffsetLookup;
}
},
DAY_OF_MONTH((byte) 5, "day", ChronoField.DAY_OF_MONTH, true, ChronoField.DAY_OF_MONTH.getBaseUnit().getDuration().toMillis()) {
long roundFloor(long utcMillis) {
return DateUtils.roundFloor(utcMillis, this.ratio);
}
long extraLocalOffsetLookup() {
return ratio;
}
},
HOUR_OF_DAY((byte) 6, "hour", ChronoField.HOUR_OF_DAY, true, ChronoField.HOUR_OF_DAY.getBaseUnit().getDuration().toMillis()) {
long roundFloor(long utcMillis) {
return DateUtils.roundFloor(utcMillis, ratio);
}
long extraLocalOffsetLookup() {
return ratio;
}
},
MINUTES_OF_HOUR(
(byte) 7,
"minute",
ChronoField.MINUTE_OF_HOUR,
true,
ChronoField.MINUTE_OF_HOUR.getBaseUnit().getDuration().toMillis()
) {
long roundFloor(long utcMillis) {
return DateUtils.roundFloor(utcMillis, ratio);
}
long extraLocalOffsetLookup() {
return ratio;
}
},
SECOND_OF_MINUTE(
(byte) 8,
"second",
ChronoField.SECOND_OF_MINUTE,
true,
ChronoField.SECOND_OF_MINUTE.getBaseUnit().getDuration().toMillis()
) {
long roundFloor(long utcMillis) {
return DateUtils.roundFloor(utcMillis, ratio);
}
long extraLocalOffsetLookup() {
return ratio;
}
};
private final byte id;
private final TemporalField field;
private final boolean isMillisBased;
private final String shortName;
/**
* ratio to milliseconds if isMillisBased == true or to month otherwise
*/
protected final long ratio;
DateTimeUnit(byte id, String shortName, TemporalField field, boolean isMillisBased, long ratio) {
this.id = id;
this.shortName = shortName;
this.field = field;
this.isMillisBased = isMillisBased;
this.ratio = ratio;
}
/**
* Rounds the supplied milliseconds since the epoch down to the start of the unit. In order to retain performance this method
* should be as fast as possible and avoid converting dates to java-time objects where possible
*
* @param utcMillis the milliseconds since the epoch
* @return the rounded down milliseconds since the epoch
*/
abstract long roundFloor(long utcMillis);
/**
* When looking up {@link LocalTimeOffset} go this many milliseconds
* in the past from the minimum millis since epoch that we plan to
* look up so that we can see transitions that we might have rounded
* down beyond.
*/
abstract long extraLocalOffsetLookup();
public byte getId() {
return id;
}
public TemporalField getField() {
return field;
}
public String shortName() {
return shortName;
}
public static DateTimeUnit resolve(byte id) {
return switch (id) {
case 1 -> WEEK_OF_WEEKYEAR;
case 2 -> YEAR_OF_CENTURY;
case 3 -> QUARTER_OF_YEAR;
case 4 -> MONTH_OF_YEAR;
case 5 -> DAY_OF_MONTH;
case 6 -> HOUR_OF_DAY;
case 7 -> MINUTES_OF_HOUR;
case 8 -> SECOND_OF_MINUTE;
default -> throw new ElasticsearchException("Unknown date time unit id [" + id + "]");
};
}
}
public abstract void innerWriteTo(StreamOutput out) throws IOException;
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(id());
innerWriteTo(out);
}
public abstract byte id();
/**
* A strategy for rounding milliseconds since epoch.
*/
public interface Prepared {
/**
* Rounds the given value.
*/
long round(long utcMillis);
/**
* Given the rounded value (which was potentially generated by
* {@link #round(long)}), returns the next rounding value. For
* example, with interval based rounding, if the interval is
* 3, {@code nextRoundValue(6) = 9}.
*/
long nextRoundingValue(long utcMillis);
/**
* Given the rounded value, returns the size between this value and the
* next rounded value in specified units if possible.
*/
double roundingSize(long utcMillis, DateTimeUnit timeUnit);
/**
* Returns the size of each rounding bucket in timeUnits.
*/
double roundingSize(DateTimeUnit timeUnit);
/**
* If this rounding mechanism precalculates rounding points then
* this array stores those points; each date rounds down to the
* nearest preceding entry. If the rounding mechanism doesn't
* precalculate points then this is {@code null}.
*/
long[] fixedRoundingPoints();
}
/**
* Prepare to round many times.
*/
public abstract Prepared prepare(long minUtcMillis, long maxUtcMillis);
/**
* Prepare to round many dates over an unknown range. Prefer
* {@link #prepare(long, long)} if you can find the range because
* it'll be much more efficient.
*/
public abstract Prepared prepareForUnknown();
/**
* Prepare rounding forcing the java time implementation. Prefer
* {@link #prepare} or {@link #prepareForUnknown} which can be much
* faster.
*/
public abstract Prepared prepareJavaTime();
/**
* Rounds the given value.
* @deprecated Prefer {@link #prepare} and then {@link Prepared#round(long)}
*/
@Deprecated
public final long round(long utcMillis) {
return prepare(utcMillis, utcMillis).round(utcMillis);
}
/**
* Given the rounded value (which was potentially generated by
* {@link #round(long)}), returns the next rounding value. For
* example, with interval based rounding, if the interval is
* 3, {@code nextRoundValue(6) = 9}.
* @deprecated Prefer {@link #prepare} and then {@link Prepared#nextRoundingValue(long)}
*/
@Deprecated
public final long nextRoundingValue(long utcMillis) {
return prepare(utcMillis, utcMillis).nextRoundingValue(utcMillis);
}
/**
* How "offset" this rounding is from the traditional "start" of the period.
* @deprecated We're in the process of abstracting offset *into* Rounding
* so keep any usage to migratory shims
*/
@Deprecated
public abstract long offset();
/**
* Strip the {@code offset} from these bounds.
*/
public abstract Rounding withoutOffset();
@Override
public abstract boolean equals(Object obj);
@Override
public abstract int hashCode();
public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
}
public static Builder builder(TimeValue interval) {
return new Builder(interval);
}
public static class Builder {
private final DateTimeUnit unit;
private final long interval;
private ZoneId timeZone = ZoneOffset.UTC;
private long offset = 0;
public Builder(DateTimeUnit unit) {
this.unit = unit;
this.interval = -1;
}
public Builder(TimeValue interval) {
this.unit = null;
if (interval.millis() < 1) throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval.millis();
}
public Builder timeZone(ZoneId timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("Setting null as timezone is not supported");
}
this.timeZone = timeZone;
return this;
}
/**
* Sets the offset of this rounding from the normal beginning of the interval. Use this
* to start days at 6am or months on the 15th.
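* <p>For example (illustrative only), days starting at 6am:
* <pre>{@code
* Rounding.builder(DateTimeUnit.DAY_OF_MONTH).offset(TimeUnit.HOURS.toMillis(6)).build();
* }</pre>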
* @param offset the offset, in milliseconds
*/
public Builder offset(long offset) {
this.offset = offset;
return this;
}
public Rounding build() {
Rounding rounding;
if (unit != null) {
rounding = new TimeUnitRounding(unit, timeZone);
} else {
rounding = new TimeIntervalRounding(interval, timeZone);
}
if (offset != 0) {
rounding = new OffsetRounding(rounding, offset);
}
return rounding;
}
}
private abstract static class PreparedRounding implements Prepared {
/**
* Attempt to build a {@link Prepared} implementation that relies on pre-calculated
* "round down" points. If there would be more than {@code max} points then return
* the original implementation, otherwise return the new, faster implementation.
*/
protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) {
long[] values = new long[1];
long rounded = round(minUtcMillis);
int i = 0;
values[i++] = rounded;
while ((rounded = nextRoundingValue(rounded)) <= maxUtcMillis) {
if (i >= max) {
logger.trace(
"can't realize [{}] to fixed rounding points, more than [{}] rounding points between [{}] and [{}]",
this,
max,
minUtcMillis,
maxUtcMillis
);
return this;
}
/*
* We expect a time in the last transition (rounded - 1) to round
* to the last value we calculated. If it doesn't then we're
* probably doing something wrong here....
*/
assert values[i - 1] == round(rounded - 1);
values = ArrayUtil.grow(values, i + 1);
values[i++] = rounded;
}
return new ArrayRounding(values, i, this);
}
@Override
public long[] fixedRoundingPoints() {
return null;
}
}
static class TimeUnitRounding extends Rounding {
static final byte ID = 1;
private final DateTimeUnit unit;
private final ZoneId timeZone;
private final boolean unitRoundsToMidnight;
TimeUnitRounding(DateTimeUnit unit, ZoneId timeZone) {
this.unit = unit;
this.timeZone = timeZone;
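// Units with a base duration longer than an hour (day and above) round down to local midnight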
this.unitRoundsToMidnight = this.unit.field.getBaseUnit().getDuration().toMillis() > 3600000L;
}
TimeUnitRounding(StreamInput in) throws IOException {
this(DateTimeUnit.resolve(in.readByte()), in.readZoneId());
}
@Override
public void innerWriteTo(StreamOutput out) throws IOException {
out.writeByte(unit.getId());
out.writeZoneId(timeZone);
}
@Override
public byte id() {
return ID;
}
private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) {
switch (unit) {
case SECOND_OF_MINUTE:
return localDateTime.withNano(0);
case MINUTES_OF_HOUR:
return LocalDateTime.of(
localDateTime.getYear(),
localDateTime.getMonthValue(),
localDateTime.getDayOfMonth(),
localDateTime.getHour(),
localDateTime.getMinute(),
0,
0
);
case HOUR_OF_DAY:
return LocalDateTime.of(
localDateTime.getYear(),
localDateTime.getMonth(),
localDateTime.getDayOfMonth(),
localDateTime.getHour(),
0,
0
);
case DAY_OF_MONTH:
LocalDate localDate = localDateTime.query(TemporalQueries.localDate());
return localDate.atStartOfDay();
case WEEK_OF_WEEKYEAR:
return LocalDateTime.of(localDateTime.toLocalDate(), LocalTime.MIDNIGHT).with(ChronoField.DAY_OF_WEEK, 1);
case MONTH_OF_YEAR:
return LocalDateTime.of(localDateTime.getYear(), localDateTime.getMonthValue(), 1, 0, 0);
case QUARTER_OF_YEAR:
return LocalDateTime.of(localDateTime.getYear(), localDateTime.getMonth().firstMonthOfQuarter(), 1, 0, 0);
case YEAR_OF_CENTURY:
return LocalDateTime.of(LocalDate.of(localDateTime.getYear(), 1, 1), LocalTime.MIDNIGHT);
default:
throw new IllegalArgumentException("NOT YET IMPLEMENTED for unit " + unit);
}
}
@Override
public Prepared prepare(long minUtcMillis, long maxUtcMillis) {
/*
* 128 is a power of two that isn't huge. We might be able to do
* better if the limit was based on the actual type of prepared
* rounding but this'll do for now.
*/
return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128);
}
private TimeUnitPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) {
/*
minUtcMillis has to be decreased by 2 units.
This is because minUtcMillis can be rounded down by up to unit.extraLocalOffsetLookup,
and that rounded-down value might still fall within a DST gap/overlap.
This means minUtcMillis has to be decreased by an additional unit
so that the transition just before minUtcMillis is applied.
*/
long minLookup = minUtcMillis - 2 * unit.extraLocalOffsetLookup();
long maxLookup = maxUtcMillis;
long unitMillis = 0;
if (false == unitRoundsToMidnight) {
/*
* Units that don't round to midnight may need to look up to two
* units worth of millis in the future to find the
* nextRoundingValue.
*/
unitMillis = unit.field.getBaseUnit().getDuration().toMillis();
maxLookup += 2 * unitMillis;
}
LocalTimeOffset.Lookup lookup = LocalTimeOffset.lookup(timeZone, minLookup, maxLookup);
if (lookup == null) {
// Range too long, just use java.time
return prepareJavaTime();
}
LocalTimeOffset fixedOffset = lookup.fixedInRange(minLookup, maxLookup);
if (fixedOffset != null) {
// The time zone is effectively fixed
if (unitRoundsToMidnight) {
return new FixedToMidnightRounding(fixedOffset);
}
return new FixedNotToMidnightRounding(fixedOffset, unitMillis);
}
if (unitRoundsToMidnight) {
return new ToMidnightRounding(lookup);
}
return new NotToMidnightRounding(lookup, unitMillis);
}
@Override
public Prepared prepareForUnknown() {
LocalTimeOffset offset = LocalTimeOffset.fixedOffset(timeZone);
if (offset != null) {
if (unitRoundsToMidnight) {
return new FixedToMidnightRounding(offset);
}
return new FixedNotToMidnightRounding(offset, unit.field.getBaseUnit().getDuration().toMillis());
}
return prepareJavaTime();
}
@Override
public TimeUnitPreparedRounding prepareJavaTime() {
if (unitRoundsToMidnight) {
return new JavaTimeToMidnightRounding();
}
return new JavaTimeNotToMidnightRounding(unit.field.getBaseUnit().getDuration().toMillis());
}
@Override
public long offset() {
return 0;
}
@Override
public Rounding withoutOffset() {
return this;
}
@Override
public int hashCode() {
return Objects.hash(unit, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone);
}
@Override
public String toString() {
return "Rounding[" + unit + " in " + timeZone + "]";
}
private abstract class TimeUnitPreparedRounding extends PreparedRounding {
@Override
public double roundingSize(long utcMillis, DateTimeUnit timeUnit) {
if (unit.isMillisBased) {
if (timeUnit.isMillisBased) {
return (double) unit.ratio / timeUnit.ratio;
} else {
throw new IllegalArgumentException(
"Cannot use month-based rate unit ["
+ timeUnit.shortName
+ "] with non-month based calendar interval histogram ["
+ unit.shortName
+ "] only week, day, hour, minute and second are supported for this histogram"
);
}
} else {
if (timeUnit.isMillisBased) {
return (double) (nextRoundingValue(utcMillis) - utcMillis) / timeUnit.ratio;
} else {
return (double) unit.ratio / timeUnit.ratio;
}
}
}
@Override
public double roundingSize(DateTimeUnit timeUnit) {
if (unit.isMillisBased) {
if (timeUnit.isMillisBased) {
return (double) unit.ratio / timeUnit.ratio;
} else {
throw new IllegalArgumentException(
"Cannot use month-based rate unit ["
+ timeUnit.shortName
+ "] with non-month based calendar interval histogram ["
+ unit.shortName
+ "] only week, day, hour, minute and second are supported for this histogram"
);
}
} else {
if (timeUnit.isMillisBased) {
throw new IllegalArgumentException(
"Cannot use non month-based rate unit ["
+ timeUnit.shortName
+ "] with calendar interval histogram ["
+ unit.shortName
+ "] only month, quarter and year are supported for this histogram"
);
} else {
return (double) unit.ratio / timeUnit.ratio;
}
}
}
@Override
public abstract String toString();
}
private class FixedToMidnightRounding extends TimeUnitPreparedRounding {
private final LocalTimeOffset offset;
FixedToMidnightRounding(LocalTimeOffset offset) {
this.offset = offset;
}
@Override
public long round(long utcMillis) {
return offset.localToUtcInThisOffset(unit.roundFloor(offset.utcToLocalTime(utcMillis)));
}
@Override
public long nextRoundingValue(long utcMillis) {
// TODO this is used in date range's collect so we should optimize it too
return new JavaTimeToMidnightRounding().nextRoundingValue(utcMillis);
}
@Override
public String toString() {
return TimeUnitRounding.this + "[fixed to midnight]";
}
}
private class FixedNotToMidnightRounding extends TimeUnitPreparedRounding {
private final LocalTimeOffset offset;
private final long unitMillis;
FixedNotToMidnightRounding(LocalTimeOffset offset, long unitMillis) {
this.offset = offset;
this.unitMillis = unitMillis;
}
@Override
public long round(long utcMillis) {
return offset.localToUtcInThisOffset(unit.roundFloor(offset.utcToLocalTime(utcMillis)));
}
@Override
public final long nextRoundingValue(long utcMillis) {
return round(utcMillis + unitMillis);
}
@Override
public String toString() {
return TimeUnitRounding.this + "[fixed to " + unitMillis + "]";
}
}
private class ToMidnightRounding extends TimeUnitPreparedRounding implements LocalTimeOffset.Strategy {
private final LocalTimeOffset.Lookup lookup;
ToMidnightRounding(LocalTimeOffset.Lookup lookup) {
this.lookup = lookup;
}
@Override
public long round(long utcMillis) {
LocalTimeOffset offset = lookup.lookup(utcMillis);
return offset.localToUtc(unit.roundFloor(offset.utcToLocalTime(utcMillis)), this);
}
@Override
public long nextRoundingValue(long utcMillis) {
// TODO this is actually used in date range's collect so we should optimize it
return new JavaTimeToMidnightRounding().nextRoundingValue(utcMillis);
}
@Override
public long inGap(long localMillis, Gap gap) {
return gap.startUtcMillis();
}
@Override
public long beforeGap(long localMillis, Gap gap) {
return gap.previous().localToUtc(localMillis, this);
}
@Override
public long inOverlap(long localMillis, Overlap overlap) {
return overlap.previous().localToUtc(localMillis, this);
}
@Override
public long beforeOverlap(long localMillis, Overlap overlap) {
return overlap.previous().localToUtc(localMillis, this);
}
@Override
protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) {
if (lookup.anyMoveBackToPreviousDay()) {
return this;
}
return super.maybeUseArray(minUtcMillis, maxUtcMillis, max);
}
@Override
public String toString() {
return TimeUnitRounding.this + "[across DST to midnight]";
}
}
private class NotToMidnightRounding extends AbstractNotToMidnightRounding implements LocalTimeOffset.Strategy {
private final LocalTimeOffset.Lookup lookup;
NotToMidnightRounding(LocalTimeOffset.Lookup lookup, long unitMillis) {
super(unitMillis);
this.lookup = lookup;
}
@Override
public long round(long utcMillis) {
LocalTimeOffset offset = lookup.lookup(utcMillis);
long roundedLocalMillis = unit.roundFloor(offset.utcToLocalTime(utcMillis));
return offset.localToUtc(roundedLocalMillis, this);
}
@Override
public long inGap(long localMillis, Gap gap) {
// Round from just before the start of the gap
return gap.previous().localToUtc(unit.roundFloor(gap.firstMissingLocalTime() - 1), this);
}
@Override
public long beforeGap(long localMillis, Gap gap) {
return inGap(localMillis, gap);
}
@Override
public long inOverlap(long localMillis, Overlap overlap) {
// Convert the overlap at this offset because that'll produce the largest result.
return overlap.localToUtcInThisOffset(localMillis);
}
@Override
public long beforeOverlap(long localMillis, Overlap overlap) {
if (overlap.firstNonOverlappingLocalTime() - overlap.firstOverlappingLocalTime() >= unitMillis) {
return overlap.localToUtcInThisOffset(localMillis);
}
return overlap.previous().localToUtc(localMillis, this); // This is mostly for Asia/Lord_Howe
}
@Override
public String toString() {
return TimeUnitRounding.this + "[across DST to " + unitMillis + "]";
}
}
private class JavaTimeToMidnightRounding extends TimeUnitPreparedRounding {
@Override
public long round(long utcMillis) {
LocalDateTime localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcMillis), timeZone);
LocalDateTime localMidnight = truncateLocalDateTime(localDateTime);
return firstTimeOnDay(localMidnight);
}
@Override
public long nextRoundingValue(long utcMillis) {
LocalDateTime localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcMillis), timeZone);
LocalDateTime earlierLocalMidnight = truncateLocalDateTime(localDateTime);
LocalDateTime localMidnight = nextRelevantMidnight(earlierLocalMidnight);
return firstTimeOnDay(localMidnight);
}
@Override
protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) {
// We don't have the right information needed to know if this is safe for this time zone so we always use java rounding
return this;
}
private long firstTimeOnDay(LocalDateTime localMidnight) {
assert localMidnight.toLocalTime().equals(LocalTime.of(0, 0, 0)) : "firstTimeOnDay should only be called at midnight";
// Now work out what localMidnight actually means
final List<ZoneOffset> currentOffsets = timeZone.getRules().getValidOffsets(localMidnight);
if (currentOffsets.isEmpty() == false) {
// There is at least one midnight on this day, so choose the first
final ZoneOffset firstOffset = currentOffsets.get(0);
final OffsetDateTime offsetMidnight = localMidnight.atOffset(firstOffset);
return offsetMidnight.toInstant().toEpochMilli();
} else {
// There were no midnights on this day, so we must have entered the day via an offset transition.
// Use the time of the transition as it is the earliest time on the right day.
ZoneOffsetTransition zoneOffsetTransition = timeZone.getRules().getTransition(localMidnight);
return zoneOffsetTransition.getInstant().toEpochMilli();
}
}
private LocalDateTime nextRelevantMidnight(LocalDateTime localMidnight) {
assert localMidnight.toLocalTime().equals(LocalTime.MIDNIGHT) : "nextRelevantMidnight should only be called at midnight";
return switch (unit) {
case DAY_OF_MONTH -> localMidnight.plus(1, ChronoUnit.DAYS);
case WEEK_OF_WEEKYEAR -> localMidnight.plus(7, ChronoUnit.DAYS);
case MONTH_OF_YEAR -> localMidnight.plus(1, ChronoUnit.MONTHS);
case QUARTER_OF_YEAR -> localMidnight.plus(3, ChronoUnit.MONTHS);
case YEAR_OF_CENTURY -> localMidnight.plus(1, ChronoUnit.YEARS);
default -> throw new IllegalArgumentException("Unknown round-to-midnight unit: " + unit);
};
}
@Override
public String toString() {
return TimeUnitRounding.this + "[java.time to midnight]";
}
}
private class JavaTimeNotToMidnightRounding extends AbstractNotToMidnightRounding {
JavaTimeNotToMidnightRounding(long unitMillis) {
super(unitMillis);
}
@Override
public long round(long utcMillis) {
Instant instant = Instant.ofEpochMilli(utcMillis);
final ZoneRules rules = timeZone.getRules();
while (true) {
final Instant truncatedTime = truncateAsLocalTime(instant, rules);
final ZoneOffsetTransition previousTransition = rules.previousTransition(instant);
if (previousTransition == null) {
// truncateAsLocalTime cannot have failed if there were no previous transitions
return truncatedTime.toEpochMilli();
}
Instant previousTransitionInstant = previousTransition.getInstant();
if (truncatedTime != null && previousTransitionInstant.compareTo(truncatedTime) < 1) {
return truncatedTime.toEpochMilli();
}
// There was a transition in between the input time and the truncated time. Return to the transition time and
// round that down instead.
instant = previousTransitionInstant.minusNanos(1_000_000);
}
}
private Instant truncateAsLocalTime(Instant instant, final ZoneRules rules) {
assert unitRoundsToMidnight == false : "truncateAsLocalTime should not be called if unitRoundsToMidnight";
LocalDateTime localDateTime = LocalDateTime.ofInstant(instant, timeZone);
final LocalDateTime truncatedLocalDateTime = truncateLocalDateTime(localDateTime);
final List<ZoneOffset> currentOffsets = rules.getValidOffsets(truncatedLocalDateTime);
if (currentOffsets.isEmpty() == false) {
// at least one possibility - choose the latest one that's still no later than the input time
for (int offsetIndex = currentOffsets.size() - 1; offsetIndex >= 0; offsetIndex--) {
final Instant result = truncatedLocalDateTime.atOffset(currentOffsets.get(offsetIndex)).toInstant();
if (result.isAfter(instant) == false) {
return result;
}
}
assert false : "rounded time not found for " + instant + " with " + this;
return null;
} else {
// The chosen local time didn't happen. This means we were given a time in an hour (or a minute) whose start
// is missing due to an offset transition, so the time cannot be truncated.
return null;
}
}
@Override
public String toString() {
return TimeUnitRounding.this + "[java.time to " + unitMillis + "]";
}
}
private abstract class AbstractNotToMidnightRounding extends TimeUnitPreparedRounding {
protected final long unitMillis;
AbstractNotToMidnightRounding(long unitMillis) {
this.unitMillis = unitMillis;
}
@Override
public final long nextRoundingValue(long utcMillis) {
final long roundedAfterOneIncrement = round(utcMillis + unitMillis);
if (utcMillis < roundedAfterOneIncrement) {
return roundedAfterOneIncrement;
} else {
return round(utcMillis + 2 * unitMillis);
}
}
}
}
static class TimeIntervalRounding extends Rounding {
static final byte ID = 2;
private final long interval;
private final ZoneId timeZone;
TimeIntervalRounding(long interval, ZoneId timeZone) {
if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}
TimeIntervalRounding(StreamInput in) throws IOException {
this(in.readVLong(), in.readZoneId());
}
@Override
public void innerWriteTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
out.writeZoneId(timeZone);
}
@Override
public byte id() {
return ID;
}
@Override
public Prepared prepare(long minUtcMillis, long maxUtcMillis) {
/*
* 128 is a power of two that isn't huge. We might be able to do
* better if the limit was based on the actual type of prepared
* rounding but this'll do for now.
*/
return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128);
}
private TimeIntervalPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) {
long minLookup = minUtcMillis - interval;
long maxLookup = maxUtcMillis;
LocalTimeOffset.Lookup lookup = LocalTimeOffset.lookup(timeZone, minLookup, maxLookup);
if (lookup == null) {
return prepareJavaTime();
}
LocalTimeOffset fixedOffset = lookup.fixedInRange(minLookup, maxLookup);
if (fixedOffset != null) {
return new FixedRounding(fixedOffset);
}
return new VariableRounding(lookup);
}
@Override
public Prepared prepareForUnknown() {
LocalTimeOffset offset = LocalTimeOffset.fixedOffset(timeZone);
if (offset != null) {
return new FixedRounding(offset);
}
return prepareJavaTime();
}
@Override
public TimeIntervalPreparedRounding prepareJavaTime() {
return new JavaTimeRounding();
}
@Override
public long offset() {
return 0;
}
@Override
public Rounding withoutOffset() {
return this;
}
@Override
public int hashCode() {
return Objects.hash(interval, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone);
}
@Override
public String toString() {
return "Rounding[" + interval + " in " + timeZone + "]";
}
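/**
 * Maps a local millisecond value to its bucket number using floor
 * division, so negative values round towards negative infinity. For
 * example, {@code roundKey(-1, 10) == -1} (the bucket starting at -10),
 * where plain {@code -1 / 10} in Java would truncate to 0.
 */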
private static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
} else {
return value / interval;
}
}
private abstract class TimeIntervalPreparedRounding extends PreparedRounding {
@Override
public double roundingSize(long utcMillis, DateTimeUnit timeUnit) {
return roundingSize(timeUnit);
}
@Override
public double roundingSize(DateTimeUnit timeUnit) {
if (timeUnit.isMillisBased) {
return (double) interval / timeUnit.ratio;
} else {
throw new IllegalArgumentException(
"Cannot use month-based rate unit ["
+ timeUnit.shortName
+ "] with fixed interval based histogram, only week, day, hour, minute and second are supported for "
+ "this histogram"
);
}
}
@Override
public abstract String toString();
}
/**
* Rounds down inside of a time zone that is "effectively fixed".
* A time zone can be "effectively fixed" if:
* <ul>
* <li>It is UTC</li>
* <li>It is a fixed offset from UTC at all times (UTC-5, America/Phoenix)</li>
* <li>It is fixed over the entire range of dates that will be rounded</li>
* </ul>
*/
private class FixedRounding extends TimeIntervalPreparedRounding {
private final LocalTimeOffset offset;
FixedRounding(LocalTimeOffset offset) {
this.offset = offset;
}
@Override
public long round(long utcMillis) {
return offset.localToUtcInThisOffset(roundKey(offset.utcToLocalTime(utcMillis), interval) * interval);
}
@Override
public long nextRoundingValue(long utcMillis) {
// TODO this is used in date range's collect so we should optimize it too
return new JavaTimeRounding().nextRoundingValue(utcMillis);
}
@Override
public String toString() {
return TimeIntervalRounding.this + "[fixed]";
}
}
/**
* Rounds down inside of any time zone, even if it is not
* "effectively fixed". See {@link FixedRounding} for a description of
* "effectively fixed".
*/
private class VariableRounding extends TimeIntervalPreparedRounding implements LocalTimeOffset.Strategy {
private final LocalTimeOffset.Lookup lookup;
VariableRounding(LocalTimeOffset.Lookup lookup) {
this.lookup = lookup;
}
@Override
public long round(long utcMillis) {
LocalTimeOffset offset = lookup.lookup(utcMillis);
return offset.localToUtc(roundKey(offset.utcToLocalTime(utcMillis), interval) * interval, this);
}
@Override
public long nextRoundingValue(long utcMillis) {
// TODO this is used in date range's collect so we should optimize it too
return new JavaTimeRounding().nextRoundingValue(utcMillis);
}
@Override
public long inGap(long localMillis, Gap gap) {
return gap.startUtcMillis();
}
@Override
public long beforeGap(long localMillis, Gap gap) {
return gap.previous().localToUtc(localMillis, this);
}
@Override
public long inOverlap(long localMillis, Overlap overlap) {
// Convert the overlap at this offset because that'll produce the largest result.
return overlap.localToUtcInThisOffset(localMillis);
}
@Override
public long beforeOverlap(long localMillis, Overlap overlap) {
return overlap.previous().localToUtc(roundKey(overlap.firstNonOverlappingLocalTime() - 1, interval) * interval, this);
}
@Override
public String toString() {
return TimeIntervalRounding.this + "[lookup]";
}
}
/**
* Rounds down inside of any time zone using {@link LocalDateTime}
* directly. It'll be slower than {@link VariableRounding} and much
* slower than {@link FixedRounding}. We use it when we don't have an
* "effectively fixed" time zone and we can't get a
* {@link LocalTimeOffset.Lookup}. We might not be able to get one
* because:
* <ul>
* <li>We don't know how to look up the minimum and maximum dates we
* are going to round.</li>
* <li>We expect to round over thousands and thousands of years worth
* of dates with the same {@link Prepared} instance.</li>
* </ul>
*/
class JavaTimeRounding extends TimeIntervalPreparedRounding {
@Override
public long round(long originalUtcMillis) {
/*
* We give up after 5000 attempts and throw an exception. The
* most attempts I could get running locally is 500 - for
* Asia/Tehran with an 80,000 day range. You just can't declare
* ranges much larger than that in ES right now.
*/
return round(originalUtcMillis, 5000);
}
long round(long originalUtcMillis, int maxAttempts) {
long utcMillis = originalUtcMillis;
int attempts = 0;
attempt: while (attempts < maxAttempts) {
final Instant utcInstant = Instant.ofEpochMilli(utcMillis);
final LocalDateTime rawLocalDateTime = LocalDateTime.ofInstant(utcInstant, timeZone);
// a millisecond value with the same local time, in UTC, as `utcMillis` has in `timeZone`
final long localMillis = utcMillis + timeZone.getRules().getOffset(utcInstant).getTotalSeconds() * 1000;
assert localMillis == rawLocalDateTime.toInstant(ZoneOffset.UTC).toEpochMilli();
final long roundedMillis = roundKey(localMillis, interval) * interval;
final LocalDateTime roundedLocalDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(roundedMillis), ZoneOffset.UTC);
// Now work out what roundedLocalDateTime actually means
final List<ZoneOffset> currentOffsets = timeZone.getRules().getValidOffsets(roundedLocalDateTime);
if (currentOffsets.isEmpty() == false) {
// There is at least one instant with the desired local time. In general the desired result is
// the latest rounded time that's no later than the input time, but this could involve rounding across
// a timezone transition, which may yield the wrong result
final ZoneOffsetTransition previousTransition = timeZone.getRules().previousTransition(utcInstant.plusMillis(1));
for (int offsetIndex = currentOffsets.size() - 1; 0 <= offsetIndex; offsetIndex--) {
final OffsetDateTime offsetTime = roundedLocalDateTime.atOffset(currentOffsets.get(offsetIndex));
final Instant offsetInstant = offsetTime.toInstant();
if (previousTransition != null && offsetInstant.isBefore(previousTransition.getInstant())) {
/*
* Rounding down across the transition can yield the
* wrong result. It's best to return to the transition
* time and round that down.
*/
attempts++;
utcMillis = previousTransition.getInstant().toEpochMilli() - 1;
continue attempt;
}
if (utcInstant.isBefore(offsetTime.toInstant()) == false) {
return offsetInstant.toEpochMilli();
}
}
final OffsetDateTime offsetTime = roundedLocalDateTime.atOffset(currentOffsets.get(0));
final Instant offsetInstant = offsetTime.toInstant();
assert false : this + " failed to round " + utcMillis + " down: " + offsetInstant + " is the earliest possible";
return offsetInstant.toEpochMilli(); // TODO or throw something?
} else {
// The desired time isn't valid because within a gap, so just return the start of the gap
ZoneOffsetTransition zoneOffsetTransition = timeZone.getRules().getTransition(roundedLocalDateTime);
return zoneOffsetTransition.getInstant().toEpochMilli();
}
}
throw new IllegalArgumentException(
this
+ " failed to round "
+ utcMillis
+ " down: transitioned backwards through too many daylight savings time transitions"
);
}
@Override
public long nextRoundingValue(long utcMillis) {
/*
* Ok. I'm not proud of this, but it gets the job done. So here is the deal:
* it's super important that nextRoundingValue be *exactly* the next rounding
* value. And I can't come up with a nice way to use the java time API to figure
* it out. Thus, we treat "round" like a black box here and run a kind of whacky
* binary search, newton's method hybrid. We don't have a "slope" so we can't do
* a "real" newton's method, so we just sort of cut the diff in half. As janky
* as it looks, it tends to get the job done in under four iterations. Frankly,
* `round(round(utcMillis) + interval)` is usually a good guess so we mostly get
* it in a single iteration. But daylight savings time and other janky stuff can
* make it less likely.
*/
long prevRound = round(utcMillis);
long increment = interval;
long from = prevRound;
int iterations = 0;
while (++iterations < 100) {
from += increment;
long rounded = round(from);
boolean highEnough = rounded > prevRound;
if (false == highEnough) {
if (increment < 0) {
increment = -increment / 2;
}
continue;
}
long roundedRoundedDown = round(rounded - 1);
boolean tooHigh = roundedRoundedDown > prevRound;
if (tooHigh) {
if (increment > 0) {
increment = -increment / 2;
}
continue;
}
assert highEnough && (false == tooHigh);
assert roundedRoundedDown == prevRound;
if (iterations > 3 && logger.isDebugEnabled()) {
logger.debug("Iterated {} time for {} using {}", iterations, utcMillis, TimeIntervalRounding.this.toString());
}
return rounded;
}
/*
* After 100 iterations we still couldn't settle on something! Crazy!
* The most I've seen in tests is 20 and it's usually 1 or 2. If we're
* not in a test let's log something and round from our best guess.
*/
assert false
: String.format(
Locale.ROOT,
"Expected to find the rounding in 100 iterations but didn't for [%d] with [%s]",
utcMillis,
TimeIntervalRounding.this.toString()
);
logger.debug(
"Expected to find the rounding in 100 iterations but didn't for {} using {}",
utcMillis,
TimeIntervalRounding.this.toString()
);
return round(from);
}
@Override
public String toString() {
return TimeIntervalRounding.this + "[java.time]";
}
}
}
static class OffsetRounding extends Rounding {
static final byte ID = 3;
private final Rounding delegate;
private final long offset;
OffsetRounding(Rounding delegate, long offset) {
this.delegate = delegate;
this.offset = offset;
}
OffsetRounding(StreamInput in) throws IOException {
// Versions before 7.6.0 will never send this type of rounding.
delegate = Rounding.read(in);
offset = in.readZLong();
}
@Override
public void innerWriteTo(StreamOutput out) throws IOException {
delegate.writeTo(out);
out.writeZLong(offset);
}
@Override
public byte id() {
return ID;
}
@Override
public Prepared prepare(long minUtcMillis, long maxUtcMillis) {
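// Shift the range into the delegate's frame of reference; wrapPreparedRounding shifts results back by the offset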
return wrapPreparedRounding(delegate.prepare(minUtcMillis - offset, maxUtcMillis - offset));
}
@Override
public Prepared prepareForUnknown() {
return wrapPreparedRounding(delegate.prepareForUnknown());
}
@Override
public Prepared prepareJavaTime() {
return wrapPreparedRounding(delegate.prepareJavaTime());
}
private Prepared wrapPreparedRounding(Prepared delegatePrepared) {
return new Prepared() {
@Override
public long round(long utcMillis) {
return delegatePrepared.round(utcMillis - offset) + offset;
}
@Override
public long nextRoundingValue(long utcMillis) {
return delegatePrepared.nextRoundingValue(utcMillis - offset) + offset;
}
@Override
public double roundingSize(long utcMillis, DateTimeUnit timeUnit) {
return delegatePrepared.roundingSize(utcMillis, timeUnit);
}
@Override
public double roundingSize(DateTimeUnit timeUnit) {
return delegatePrepared.roundingSize(timeUnit);
}
@Override
public long[] fixedRoundingPoints() {
// TODO we can likely translate here
return null;
}
};
}
@Override
public long offset() {
return offset;
}
@Override
public Rounding withoutOffset() {
return delegate;
}
@Override
public int hashCode() {
return Objects.hash(delegate, offset);
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass()) {
return false;
}
OffsetRounding other = (OffsetRounding) obj;
return delegate.equals(other.delegate) && offset == other.offset;
}
@Override
public String toString() {
return delegate + " offset by " + offset;
}
}
public static Rounding read(StreamInput in) throws IOException {
byte id = in.readByte();
return switch (id) {
case TimeUnitRounding.ID -> new TimeUnitRounding(in);
case TimeIntervalRounding.ID -> new TimeIntervalRounding(in);
case OffsetRounding.ID -> new OffsetRounding(in);
default -> throw new ElasticsearchException("unknown rounding id [" + id + "]");
};
}
/**
* Implementation of {@link Prepared} using pre-calculated "round down" points.
*/
private static class ArrayRounding implements Prepared {
private final long[] values;
private final int max;
private final Prepared delegate;
private ArrayRounding(long[] values, int max, Prepared delegate) {
this.values = values;
this.max = max;
this.delegate = delegate;
}
@Override
public long round(long utcMillis) {
assert values[0] <= utcMillis : utcMillis + " must be after " + values[0];
int idx = Arrays.binarySearch(values, 0, max, utcMillis);
assert idx != -1 : "The insertion point is before the array! This should have tripped the assertion above.";
assert -1 - idx <= values.length : "This insertion point is after the end of the array.";
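// A negative result from binarySearch is (-(insertion point) - 1), so
// -2 - idx is the index of the greatest rounding point <= utcMillis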
if (idx < 0) {
idx = -2 - idx;
}
return values[idx];
}
@Override
public long nextRoundingValue(long utcMillis) {
return delegate.nextRoundingValue(utcMillis);
}
@Override
public double roundingSize(long utcMillis, DateTimeUnit timeUnit) {
return delegate.roundingSize(utcMillis, timeUnit);
}
@Override
public double roundingSize(DateTimeUnit timeUnit) {
return delegate.roundingSize(timeUnit);
}
@Override
public long[] fixedRoundingPoints() {
return Arrays.copyOf(values, max);
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/common/Rounding.java |
460 | package com.thealgorithms.ciphers;
import java.math.BigInteger;
import java.util.Scanner;
/**
* This class is built to demonstrate the application of the AES algorithm on a
* single 128-Bit block of data.
*/
public final class AES {
private AES() {
}
/**
* Precalculated successive powers of x (0x02) in Rijndael's Galois field.
* Used as 'RCON' during the key expansion.
*/
private static final int[] RCON = {
0x8d,
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
0x5e,
0xbc,
0x63,
0xc6,
0x97,
0x35,
0x6a,
0xd4,
0xb3,
0x7d,
0xfa,
0xef,
0xc5,
0x91,
0x39,
0x72,
0xe4,
0xd3,
0xbd,
0x61,
0xc2,
0x9f,
0x25,
0x4a,
0x94,
0x33,
0x66,
0xcc,
0x83,
0x1d,
0x3a,
0x74,
0xe8,
0xcb,
0x8d,
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
0x5e,
0xbc,
0x63,
0xc6,
0x97,
0x35,
0x6a,
0xd4,
0xb3,
0x7d,
0xfa,
0xef,
0xc5,
0x91,
0x39,
0x72,
0xe4,
0xd3,
0xbd,
0x61,
0xc2,
0x9f,
0x25,
0x4a,
0x94,
0x33,
0x66,
0xcc,
0x83,
0x1d,
0x3a,
0x74,
0xe8,
0xcb,
0x8d,
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
0x5e,
0xbc,
0x63,
0xc6,
0x97,
0x35,
0x6a,
0xd4,
0xb3,
0x7d,
0xfa,
0xef,
0xc5,
0x91,
0x39,
0x72,
0xe4,
0xd3,
0xbd,
0x61,
0xc2,
0x9f,
0x25,
0x4a,
0x94,
0x33,
0x66,
0xcc,
0x83,
0x1d,
0x3a,
0x74,
0xe8,
0xcb,
0x8d,
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
0x5e,
0xbc,
0x63,
0xc6,
0x97,
0x35,
0x6a,
0xd4,
0xb3,
0x7d,
0xfa,
0xef,
0xc5,
0x91,
0x39,
0x72,
0xe4,
0xd3,
0xbd,
0x61,
0xc2,
0x9f,
0x25,
0x4a,
0x94,
0x33,
0x66,
0xcc,
0x83,
0x1d,
0x3a,
0x74,
0xe8,
0xcb,
0x8d,
0x01,
0x02,
0x04,
0x08,
0x10,
0x20,
0x40,
0x80,
0x1b,
0x36,
0x6c,
0xd8,
0xab,
0x4d,
0x9a,
0x2f,
0x5e,
0xbc,
0x63,
0xc6,
0x97,
0x35,
0x6a,
0xd4,
0xb3,
0x7d,
0xfa,
0xef,
0xc5,
0x91,
0x39,
0x72,
0xe4,
0xd3,
0xbd,
0x61,
0xc2,
0x9f,
0x25,
0x4a,
0x94,
0x33,
0x66,
0xcc,
0x83,
0x1d,
0x3a,
0x74,
0xe8,
0xcb,
0x8d,
};
/**
* Rijndael S-box Substitution table used for encryption in the subBytes
* step, as well as the key expansion.
*/
private static final int[] SBOX = {
0x63,
0x7C,
0x77,
0x7B,
0xF2,
0x6B,
0x6F,
0xC5,
0x30,
0x01,
0x67,
0x2B,
0xFE,
0xD7,
0xAB,
0x76,
0xCA,
0x82,
0xC9,
0x7D,
0xFA,
0x59,
0x47,
0xF0,
0xAD,
0xD4,
0xA2,
0xAF,
0x9C,
0xA4,
0x72,
0xC0,
0xB7,
0xFD,
0x93,
0x26,
0x36,
0x3F,
0xF7,
0xCC,
0x34,
0xA5,
0xE5,
0xF1,
0x71,
0xD8,
0x31,
0x15,
0x04,
0xC7,
0x23,
0xC3,
0x18,
0x96,
0x05,
0x9A,
0x07,
0x12,
0x80,
0xE2,
0xEB,
0x27,
0xB2,
0x75,
0x09,
0x83,
0x2C,
0x1A,
0x1B,
0x6E,
0x5A,
0xA0,
0x52,
0x3B,
0xD6,
0xB3,
0x29,
0xE3,
0x2F,
0x84,
0x53,
0xD1,
0x00,
0xED,
0x20,
0xFC,
0xB1,
0x5B,
0x6A,
0xCB,
0xBE,
0x39,
0x4A,
0x4C,
0x58,
0xCF,
0xD0,
0xEF,
0xAA,
0xFB,
0x43,
0x4D,
0x33,
0x85,
0x45,
0xF9,
0x02,
0x7F,
0x50,
0x3C,
0x9F,
0xA8,
0x51,
0xA3,
0x40,
0x8F,
0x92,
0x9D,
0x38,
0xF5,
0xBC,
0xB6,
0xDA,
0x21,
0x10,
0xFF,
0xF3,
0xD2,
0xCD,
0x0C,
0x13,
0xEC,
0x5F,
0x97,
0x44,
0x17,
0xC4,
0xA7,
0x7E,
0x3D,
0x64,
0x5D,
0x19,
0x73,
0x60,
0x81,
0x4F,
0xDC,
0x22,
0x2A,
0x90,
0x88,
0x46,
0xEE,
0xB8,
0x14,
0xDE,
0x5E,
0x0B,
0xDB,
0xE0,
0x32,
0x3A,
0x0A,
0x49,
0x06,
0x24,
0x5C,
0xC2,
0xD3,
0xAC,
0x62,
0x91,
0x95,
0xE4,
0x79,
0xE7,
0xC8,
0x37,
0x6D,
0x8D,
0xD5,
0x4E,
0xA9,
0x6C,
0x56,
0xF4,
0xEA,
0x65,
0x7A,
0xAE,
0x08,
0xBA,
0x78,
0x25,
0x2E,
0x1C,
0xA6,
0xB4,
0xC6,
0xE8,
0xDD,
0x74,
0x1F,
0x4B,
0xBD,
0x8B,
0x8A,
0x70,
0x3E,
0xB5,
0x66,
0x48,
0x03,
0xF6,
0x0E,
0x61,
0x35,
0x57,
0xB9,
0x86,
0xC1,
0x1D,
0x9E,
0xE1,
0xF8,
0x98,
0x11,
0x69,
0xD9,
0x8E,
0x94,
0x9B,
0x1E,
0x87,
0xE9,
0xCE,
0x55,
0x28,
0xDF,
0x8C,
0xA1,
0x89,
0x0D,
0xBF,
0xE6,
0x42,
0x68,
0x41,
0x99,
0x2D,
0x0F,
0xB0,
0x54,
0xBB,
0x16,
};
/**
* Inverse Rijndael S-box Substitution table used for decryption in the
* subBytesDec step.
*/
private static final int[] INVERSE_SBOX = {
0x52,
0x09,
0x6A,
0xD5,
0x30,
0x36,
0xA5,
0x38,
0xBF,
0x40,
0xA3,
0x9E,
0x81,
0xF3,
0xD7,
0xFB,
0x7C,
0xE3,
0x39,
0x82,
0x9B,
0x2F,
0xFF,
0x87,
0x34,
0x8E,
0x43,
0x44,
0xC4,
0xDE,
0xE9,
0xCB,
0x54,
0x7B,
0x94,
0x32,
0xA6,
0xC2,
0x23,
0x3D,
0xEE,
0x4C,
0x95,
0x0B,
0x42,
0xFA,
0xC3,
0x4E,
0x08,
0x2E,
0xA1,
0x66,
0x28,
0xD9,
0x24,
0xB2,
0x76,
0x5B,
0xA2,
0x49,
0x6D,
0x8B,
0xD1,
0x25,
0x72,
0xF8,
0xF6,
0x64,
0x86,
0x68,
0x98,
0x16,
0xD4,
0xA4,
0x5C,
0xCC,
0x5D,
0x65,
0xB6,
0x92,
0x6C,
0x70,
0x48,
0x50,
0xFD,
0xED,
0xB9,
0xDA,
0x5E,
0x15,
0x46,
0x57,
0xA7,
0x8D,
0x9D,
0x84,
0x90,
0xD8,
0xAB,
0x00,
0x8C,
0xBC,
0xD3,
0x0A,
0xF7,
0xE4,
0x58,
0x05,
0xB8,
0xB3,
0x45,
0x06,
0xD0,
0x2C,
0x1E,
0x8F,
0xCA,
0x3F,
0x0F,
0x02,
0xC1,
0xAF,
0xBD,
0x03,
0x01,
0x13,
0x8A,
0x6B,
0x3A,
0x91,
0x11,
0x41,
0x4F,
0x67,
0xDC,
0xEA,
0x97,
0xF2,
0xCF,
0xCE,
0xF0,
0xB4,
0xE6,
0x73,
0x96,
0xAC,
0x74,
0x22,
0xE7,
0xAD,
0x35,
0x85,
0xE2,
0xF9,
0x37,
0xE8,
0x1C,
0x75,
0xDF,
0x6E,
0x47,
0xF1,
0x1A,
0x71,
0x1D,
0x29,
0xC5,
0x89,
0x6F,
0xB7,
0x62,
0x0E,
0xAA,
0x18,
0xBE,
0x1B,
0xFC,
0x56,
0x3E,
0x4B,
0xC6,
0xD2,
0x79,
0x20,
0x9A,
0xDB,
0xC0,
0xFE,
0x78,
0xCD,
0x5A,
0xF4,
0x1F,
0xDD,
0xA8,
0x33,
0x88,
0x07,
0xC7,
0x31,
0xB1,
0x12,
0x10,
0x59,
0x27,
0x80,
0xEC,
0x5F,
0x60,
0x51,
0x7F,
0xA9,
0x19,
0xB5,
0x4A,
0x0D,
0x2D,
0xE5,
0x7A,
0x9F,
0x93,
0xC9,
0x9C,
0xEF,
0xA0,
0xE0,
0x3B,
0x4D,
0xAE,
0x2A,
0xF5,
0xB0,
0xC8,
0xEB,
0xBB,
0x3C,
0x83,
0x53,
0x99,
0x61,
0x17,
0x2B,
0x04,
0x7E,
0xBA,
0x77,
0xD6,
0x26,
0xE1,
0x69,
0x14,
0x63,
0x55,
0x21,
0x0C,
0x7D,
};
/**
* Precalculated lookup table for Galois field multiplication by 2 used in
* the MixColumns step during encryption.
*/
private static final int[] MULT2 = {
0x00,
0x02,
0x04,
0x06,
0x08,
0x0a,
0x0c,
0x0e,
0x10,
0x12,
0x14,
0x16,
0x18,
0x1a,
0x1c,
0x1e,
0x20,
0x22,
0x24,
0x26,
0x28,
0x2a,
0x2c,
0x2e,
0x30,
0x32,
0x34,
0x36,
0x38,
0x3a,
0x3c,
0x3e,
0x40,
0x42,
0x44,
0x46,
0x48,
0x4a,
0x4c,
0x4e,
0x50,
0x52,
0x54,
0x56,
0x58,
0x5a,
0x5c,
0x5e,
0x60,
0x62,
0x64,
0x66,
0x68,
0x6a,
0x6c,
0x6e,
0x70,
0x72,
0x74,
0x76,
0x78,
0x7a,
0x7c,
0x7e,
0x80,
0x82,
0x84,
0x86,
0x88,
0x8a,
0x8c,
0x8e,
0x90,
0x92,
0x94,
0x96,
0x98,
0x9a,
0x9c,
0x9e,
0xa0,
0xa2,
0xa4,
0xa6,
0xa8,
0xaa,
0xac,
0xae,
0xb0,
0xb2,
0xb4,
0xb6,
0xb8,
0xba,
0xbc,
0xbe,
0xc0,
0xc2,
0xc4,
0xc6,
0xc8,
0xca,
0xcc,
0xce,
0xd0,
0xd2,
0xd4,
0xd6,
0xd8,
0xda,
0xdc,
0xde,
0xe0,
0xe2,
0xe4,
0xe6,
0xe8,
0xea,
0xec,
0xee,
0xf0,
0xf2,
0xf4,
0xf6,
0xf8,
0xfa,
0xfc,
0xfe,
0x1b,
0x19,
0x1f,
0x1d,
0x13,
0x11,
0x17,
0x15,
0x0b,
0x09,
0x0f,
0x0d,
0x03,
0x01,
0x07,
0x05,
0x3b,
0x39,
0x3f,
0x3d,
0x33,
0x31,
0x37,
0x35,
0x2b,
0x29,
0x2f,
0x2d,
0x23,
0x21,
0x27,
0x25,
0x5b,
0x59,
0x5f,
0x5d,
0x53,
0x51,
0x57,
0x55,
0x4b,
0x49,
0x4f,
0x4d,
0x43,
0x41,
0x47,
0x45,
0x7b,
0x79,
0x7f,
0x7d,
0x73,
0x71,
0x77,
0x75,
0x6b,
0x69,
0x6f,
0x6d,
0x63,
0x61,
0x67,
0x65,
0x9b,
0x99,
0x9f,
0x9d,
0x93,
0x91,
0x97,
0x95,
0x8b,
0x89,
0x8f,
0x8d,
0x83,
0x81,
0x87,
0x85,
0xbb,
0xb9,
0xbf,
0xbd,
0xb3,
0xb1,
0xb7,
0xb5,
0xab,
0xa9,
0xaf,
0xad,
0xa3,
0xa1,
0xa7,
0xa5,
0xdb,
0xd9,
0xdf,
0xdd,
0xd3,
0xd1,
0xd7,
0xd5,
0xcb,
0xc9,
0xcf,
0xcd,
0xc3,
0xc1,
0xc7,
0xc5,
0xfb,
0xf9,
0xff,
0xfd,
0xf3,
0xf1,
0xf7,
0xf5,
0xeb,
0xe9,
0xef,
0xed,
0xe3,
0xe1,
0xe7,
0xe5,
};
/**
* Precalculated lookup table for Galois field multiplication by 3 used in
* the MixColumns step during encryption.
*/
private static final int[] MULT3 = {
0x00,
0x03,
0x06,
0x05,
0x0c,
0x0f,
0x0a,
0x09,
0x18,
0x1b,
0x1e,
0x1d,
0x14,
0x17,
0x12,
0x11,
0x30,
0x33,
0x36,
0x35,
0x3c,
0x3f,
0x3a,
0x39,
0x28,
0x2b,
0x2e,
0x2d,
0x24,
0x27,
0x22,
0x21,
0x60,
0x63,
0x66,
0x65,
0x6c,
0x6f,
0x6a,
0x69,
0x78,
0x7b,
0x7e,
0x7d,
0x74,
0x77,
0x72,
0x71,
0x50,
0x53,
0x56,
0x55,
0x5c,
0x5f,
0x5a,
0x59,
0x48,
0x4b,
0x4e,
0x4d,
0x44,
0x47,
0x42,
0x41,
0xc0,
0xc3,
0xc6,
0xc5,
0xcc,
0xcf,
0xca,
0xc9,
0xd8,
0xdb,
0xde,
0xdd,
0xd4,
0xd7,
0xd2,
0xd1,
0xf0,
0xf3,
0xf6,
0xf5,
0xfc,
0xff,
0xfa,
0xf9,
0xe8,
0xeb,
0xee,
0xed,
0xe4,
0xe7,
0xe2,
0xe1,
0xa0,
0xa3,
0xa6,
0xa5,
0xac,
0xaf,
0xaa,
0xa9,
0xb8,
0xbb,
0xbe,
0xbd,
0xb4,
0xb7,
0xb2,
0xb1,
0x90,
0x93,
0x96,
0x95,
0x9c,
0x9f,
0x9a,
0x99,
0x88,
0x8b,
0x8e,
0x8d,
0x84,
0x87,
0x82,
0x81,
0x9b,
0x98,
0x9d,
0x9e,
0x97,
0x94,
0x91,
0x92,
0x83,
0x80,
0x85,
0x86,
0x8f,
0x8c,
0x89,
0x8a,
0xab,
0xa8,
0xad,
0xae,
0xa7,
0xa4,
0xa1,
0xa2,
0xb3,
0xb0,
0xb5,
0xb6,
0xbf,
0xbc,
0xb9,
0xba,
0xfb,
0xf8,
0xfd,
0xfe,
0xf7,
0xf4,
0xf1,
0xf2,
0xe3,
0xe0,
0xe5,
0xe6,
0xef,
0xec,
0xe9,
0xea,
0xcb,
0xc8,
0xcd,
0xce,
0xc7,
0xc4,
0xc1,
0xc2,
0xd3,
0xd0,
0xd5,
0xd6,
0xdf,
0xdc,
0xd9,
0xda,
0x5b,
0x58,
0x5d,
0x5e,
0x57,
0x54,
0x51,
0x52,
0x43,
0x40,
0x45,
0x46,
0x4f,
0x4c,
0x49,
0x4a,
0x6b,
0x68,
0x6d,
0x6e,
0x67,
0x64,
0x61,
0x62,
0x73,
0x70,
0x75,
0x76,
0x7f,
0x7c,
0x79,
0x7a,
0x3b,
0x38,
0x3d,
0x3e,
0x37,
0x34,
0x31,
0x32,
0x23,
0x20,
0x25,
0x26,
0x2f,
0x2c,
0x29,
0x2a,
0x0b,
0x08,
0x0d,
0x0e,
0x07,
0x04,
0x01,
0x02,
0x13,
0x10,
0x15,
0x16,
0x1f,
0x1c,
0x19,
0x1a,
};
/**
* Precalculated lookup table for Galois field multiplication by 9 used in
* the MixColumns step during decryption.
*/
private static final int[] MULT9 = {
0x00,
0x09,
0x12,
0x1b,
0x24,
0x2d,
0x36,
0x3f,
0x48,
0x41,
0x5a,
0x53,
0x6c,
0x65,
0x7e,
0x77,
0x90,
0x99,
0x82,
0x8b,
0xb4,
0xbd,
0xa6,
0xaf,
0xd8,
0xd1,
0xca,
0xc3,
0xfc,
0xf5,
0xee,
0xe7,
0x3b,
0x32,
0x29,
0x20,
0x1f,
0x16,
0x0d,
0x04,
0x73,
0x7a,
0x61,
0x68,
0x57,
0x5e,
0x45,
0x4c,
0xab,
0xa2,
0xb9,
0xb0,
0x8f,
0x86,
0x9d,
0x94,
0xe3,
0xea,
0xf1,
0xf8,
0xc7,
0xce,
0xd5,
0xdc,
0x76,
0x7f,
0x64,
0x6d,
0x52,
0x5b,
0x40,
0x49,
0x3e,
0x37,
0x2c,
0x25,
0x1a,
0x13,
0x08,
0x01,
0xe6,
0xef,
0xf4,
0xfd,
0xc2,
0xcb,
0xd0,
0xd9,
0xae,
0xa7,
0xbc,
0xb5,
0x8a,
0x83,
0x98,
0x91,
0x4d,
0x44,
0x5f,
0x56,
0x69,
0x60,
0x7b,
0x72,
0x05,
0x0c,
0x17,
0x1e,
0x21,
0x28,
0x33,
0x3a,
0xdd,
0xd4,
0xcf,
0xc6,
0xf9,
0xf0,
0xeb,
0xe2,
0x95,
0x9c,
0x87,
0x8e,
0xb1,
0xb8,
0xa3,
0xaa,
0xec,
0xe5,
0xfe,
0xf7,
0xc8,
0xc1,
0xda,
0xd3,
0xa4,
0xad,
0xb6,
0xbf,
0x80,
0x89,
0x92,
0x9b,
0x7c,
0x75,
0x6e,
0x67,
0x58,
0x51,
0x4a,
0x43,
0x34,
0x3d,
0x26,
0x2f,
0x10,
0x19,
0x02,
0x0b,
0xd7,
0xde,
0xc5,
0xcc,
0xf3,
0xfa,
0xe1,
0xe8,
0x9f,
0x96,
0x8d,
0x84,
0xbb,
0xb2,
0xa9,
0xa0,
0x47,
0x4e,
0x55,
0x5c,
0x63,
0x6a,
0x71,
0x78,
0x0f,
0x06,
0x1d,
0x14,
0x2b,
0x22,
0x39,
0x30,
0x9a,
0x93,
0x88,
0x81,
0xbe,
0xb7,
0xac,
0xa5,
0xd2,
0xdb,
0xc0,
0xc9,
0xf6,
0xff,
0xe4,
0xed,
0x0a,
0x03,
0x18,
0x11,
0x2e,
0x27,
0x3c,
0x35,
0x42,
0x4b,
0x50,
0x59,
0x66,
0x6f,
0x74,
0x7d,
0xa1,
0xa8,
0xb3,
0xba,
0x85,
0x8c,
0x97,
0x9e,
0xe9,
0xe0,
0xfb,
0xf2,
0xcd,
0xc4,
0xdf,
0xd6,
0x31,
0x38,
0x23,
0x2a,
0x15,
0x1c,
0x07,
0x0e,
0x79,
0x70,
0x6b,
0x62,
0x5d,
0x54,
0x4f,
0x46,
};
/**
* Precalculated lookup table for Galois field multiplication by 11 used in
* the MixColumns step during decryption.
*/
private static final int[] MULT11 = {
0x00,
0x0b,
0x16,
0x1d,
0x2c,
0x27,
0x3a,
0x31,
0x58,
0x53,
0x4e,
0x45,
0x74,
0x7f,
0x62,
0x69,
0xb0,
0xbb,
0xa6,
0xad,
0x9c,
0x97,
0x8a,
0x81,
0xe8,
0xe3,
0xfe,
0xf5,
0xc4,
0xcf,
0xd2,
0xd9,
0x7b,
0x70,
0x6d,
0x66,
0x57,
0x5c,
0x41,
0x4a,
0x23,
0x28,
0x35,
0x3e,
0x0f,
0x04,
0x19,
0x12,
0xcb,
0xc0,
0xdd,
0xd6,
0xe7,
0xec,
0xf1,
0xfa,
0x93,
0x98,
0x85,
0x8e,
0xbf,
0xb4,
0xa9,
0xa2,
0xf6,
0xfd,
0xe0,
0xeb,
0xda,
0xd1,
0xcc,
0xc7,
0xae,
0xa5,
0xb8,
0xb3,
0x82,
0x89,
0x94,
0x9f,
0x46,
0x4d,
0x50,
0x5b,
0x6a,
0x61,
0x7c,
0x77,
0x1e,
0x15,
0x08,
0x03,
0x32,
0x39,
0x24,
0x2f,
0x8d,
0x86,
0x9b,
0x90,
0xa1,
0xaa,
0xb7,
0xbc,
0xd5,
0xde,
0xc3,
0xc8,
0xf9,
0xf2,
0xef,
0xe4,
0x3d,
0x36,
0x2b,
0x20,
0x11,
0x1a,
0x07,
0x0c,
0x65,
0x6e,
0x73,
0x78,
0x49,
0x42,
0x5f,
0x54,
0xf7,
0xfc,
0xe1,
0xea,
0xdb,
0xd0,
0xcd,
0xc6,
0xaf,
0xa4,
0xb9,
0xb2,
0x83,
0x88,
0x95,
0x9e,
0x47,
0x4c,
0x51,
0x5a,
0x6b,
0x60,
0x7d,
0x76,
0x1f,
0x14,
0x09,
0x02,
0x33,
0x38,
0x25,
0x2e,
0x8c,
0x87,
0x9a,
0x91,
0xa0,
0xab,
0xb6,
0xbd,
0xd4,
0xdf,
0xc2,
0xc9,
0xf8,
0xf3,
0xee,
0xe5,
0x3c,
0x37,
0x2a,
0x21,
0x10,
0x1b,
0x06,
0x0d,
0x64,
0x6f,
0x72,
0x79,
0x48,
0x43,
0x5e,
0x55,
0x01,
0x0a,
0x17,
0x1c,
0x2d,
0x26,
0x3b,
0x30,
0x59,
0x52,
0x4f,
0x44,
0x75,
0x7e,
0x63,
0x68,
0xb1,
0xba,
0xa7,
0xac,
0x9d,
0x96,
0x8b,
0x80,
0xe9,
0xe2,
0xff,
0xf4,
0xc5,
0xce,
0xd3,
0xd8,
0x7a,
0x71,
0x6c,
0x67,
0x56,
0x5d,
0x40,
0x4b,
0x22,
0x29,
0x34,
0x3f,
0x0e,
0x05,
0x18,
0x13,
0xca,
0xc1,
0xdc,
0xd7,
0xe6,
0xed,
0xf0,
0xfb,
0x92,
0x99,
0x84,
0x8f,
0xbe,
0xb5,
0xa8,
0xa3,
};
/**
* Precalculated lookup table for Galois field multiplication by 13 used in
* the MixColumns step during decryption.
*/
private static final int[] MULT13 = {
0x00,
0x0d,
0x1a,
0x17,
0x34,
0x39,
0x2e,
0x23,
0x68,
0x65,
0x72,
0x7f,
0x5c,
0x51,
0x46,
0x4b,
0xd0,
0xdd,
0xca,
0xc7,
0xe4,
0xe9,
0xfe,
0xf3,
0xb8,
0xb5,
0xa2,
0xaf,
0x8c,
0x81,
0x96,
0x9b,
0xbb,
0xb6,
0xa1,
0xac,
0x8f,
0x82,
0x95,
0x98,
0xd3,
0xde,
0xc9,
0xc4,
0xe7,
0xea,
0xfd,
0xf0,
0x6b,
0x66,
0x71,
0x7c,
0x5f,
0x52,
0x45,
0x48,
0x03,
0x0e,
0x19,
0x14,
0x37,
0x3a,
0x2d,
0x20,
0x6d,
0x60,
0x77,
0x7a,
0x59,
0x54,
0x43,
0x4e,
0x05,
0x08,
0x1f,
0x12,
0x31,
0x3c,
0x2b,
0x26,
0xbd,
0xb0,
0xa7,
0xaa,
0x89,
0x84,
0x93,
0x9e,
0xd5,
0xd8,
0xcf,
0xc2,
0xe1,
0xec,
0xfb,
0xf6,
0xd6,
0xdb,
0xcc,
0xc1,
0xe2,
0xef,
0xf8,
0xf5,
0xbe,
0xb3,
0xa4,
0xa9,
0x8a,
0x87,
0x90,
0x9d,
0x06,
0x0b,
0x1c,
0x11,
0x32,
0x3f,
0x28,
0x25,
0x6e,
0x63,
0x74,
0x79,
0x5a,
0x57,
0x40,
0x4d,
0xda,
0xd7,
0xc0,
0xcd,
0xee,
0xe3,
0xf4,
0xf9,
0xb2,
0xbf,
0xa8,
0xa5,
0x86,
0x8b,
0x9c,
0x91,
0x0a,
0x07,
0x10,
0x1d,
0x3e,
0x33,
0x24,
0x29,
0x62,
0x6f,
0x78,
0x75,
0x56,
0x5b,
0x4c,
0x41,
0x61,
0x6c,
0x7b,
0x76,
0x55,
0x58,
0x4f,
0x42,
0x09,
0x04,
0x13,
0x1e,
0x3d,
0x30,
0x27,
0x2a,
0xb1,
0xbc,
0xab,
0xa6,
0x85,
0x88,
0x9f,
0x92,
0xd9,
0xd4,
0xc3,
0xce,
0xed,
0xe0,
0xf7,
0xfa,
0xb7,
0xba,
0xad,
0xa0,
0x83,
0x8e,
0x99,
0x94,
0xdf,
0xd2,
0xc5,
0xc8,
0xeb,
0xe6,
0xf1,
0xfc,
0x67,
0x6a,
0x7d,
0x70,
0x53,
0x5e,
0x49,
0x44,
0x0f,
0x02,
0x15,
0x18,
0x3b,
0x36,
0x21,
0x2c,
0x0c,
0x01,
0x16,
0x1b,
0x38,
0x35,
0x22,
0x2f,
0x64,
0x69,
0x7e,
0x73,
0x50,
0x5d,
0x4a,
0x47,
0xdc,
0xd1,
0xc6,
0xcb,
0xe8,
0xe5,
0xf2,
0xff,
0xb4,
0xb9,
0xae,
0xa3,
0x80,
0x8d,
0x9a,
0x97,
};
/**
* Precalculated lookup table for Galois field multiplication by 14 used in
* the MixColumns step during decryption.
*/
private static final int[] MULT14 = {
0x00,
0x0e,
0x1c,
0x12,
0x38,
0x36,
0x24,
0x2a,
0x70,
0x7e,
0x6c,
0x62,
0x48,
0x46,
0x54,
0x5a,
0xe0,
0xee,
0xfc,
0xf2,
0xd8,
0xd6,
0xc4,
0xca,
0x90,
0x9e,
0x8c,
0x82,
0xa8,
0xa6,
0xb4,
0xba,
0xdb,
0xd5,
0xc7,
0xc9,
0xe3,
0xed,
0xff,
0xf1,
0xab,
0xa5,
0xb7,
0xb9,
0x93,
0x9d,
0x8f,
0x81,
0x3b,
0x35,
0x27,
0x29,
0x03,
0x0d,
0x1f,
0x11,
0x4b,
0x45,
0x57,
0x59,
0x73,
0x7d,
0x6f,
0x61,
0xad,
0xa3,
0xb1,
0xbf,
0x95,
0x9b,
0x89,
0x87,
0xdd,
0xd3,
0xc1,
0xcf,
0xe5,
0xeb,
0xf9,
0xf7,
0x4d,
0x43,
0x51,
0x5f,
0x75,
0x7b,
0x69,
0x67,
0x3d,
0x33,
0x21,
0x2f,
0x05,
0x0b,
0x19,
0x17,
0x76,
0x78,
0x6a,
0x64,
0x4e,
0x40,
0x52,
0x5c,
0x06,
0x08,
0x1a,
0x14,
0x3e,
0x30,
0x22,
0x2c,
0x96,
0x98,
0x8a,
0x84,
0xae,
0xa0,
0xb2,
0xbc,
0xe6,
0xe8,
0xfa,
0xf4,
0xde,
0xd0,
0xc2,
0xcc,
0x41,
0x4f,
0x5d,
0x53,
0x79,
0x77,
0x65,
0x6b,
0x31,
0x3f,
0x2d,
0x23,
0x09,
0x07,
0x15,
0x1b,
0xa1,
0xaf,
0xbd,
0xb3,
0x99,
0x97,
0x85,
0x8b,
0xd1,
0xdf,
0xcd,
0xc3,
0xe9,
0xe7,
0xf5,
0xfb,
0x9a,
0x94,
0x86,
0x88,
0xa2,
0xac,
0xbe,
0xb0,
0xea,
0xe4,
0xf6,
0xf8,
0xd2,
0xdc,
0xce,
0xc0,
0x7a,
0x74,
0x66,
0x68,
0x42,
0x4c,
0x5e,
0x50,
0x0a,
0x04,
0x16,
0x18,
0x32,
0x3c,
0x2e,
0x20,
0xec,
0xe2,
0xf0,
0xfe,
0xd4,
0xda,
0xc8,
0xc6,
0x9c,
0x92,
0x80,
0x8e,
0xa4,
0xaa,
0xb8,
0xb6,
0x0c,
0x02,
0x10,
0x1e,
0x34,
0x3a,
0x28,
0x26,
0x7c,
0x72,
0x60,
0x6e,
0x44,
0x4a,
0x58,
0x56,
0x37,
0x39,
0x2b,
0x25,
0x0f,
0x01,
0x13,
0x1d,
0x47,
0x49,
0x5b,
0x55,
0x7f,
0x71,
0x63,
0x6d,
0xd7,
0xd9,
0xcb,
0xc5,
0xef,
0xe1,
0xf3,
0xfd,
0xa7,
0xa9,
0xbb,
0xb5,
0x9f,
0x91,
0x83,
0x8d,
};
/**
* Subroutine of the Rijndael key expansion.
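* Rotates the 32-bit word left by one byte, substitutes each byte via the
* S-box, and XORs the round constant {@code RCON[rconCounter]} into the
* first byte.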
*/
public static BigInteger scheduleCore(BigInteger t, int rconCounter) {
StringBuilder rBytes = new StringBuilder(t.toString(16));
// Add zero padding
while (rBytes.length() < 8) {
rBytes.insert(0, "0");
}
// rotate the first 8 bits (one byte) to the back
String rotatingBytes = rBytes.substring(0, 2);
String fixedBytes = rBytes.substring(2);
rBytes = new StringBuilder(fixedBytes + rotatingBytes);
// apply the S-box to each of the four 8-bit substrings
for (int i = 0; i < 4; i++) {
StringBuilder currentByteBits = new StringBuilder(rBytes.substring(i * 2, (i + 1) * 2));
int currentByte = Integer.parseInt(currentByteBits.toString(), 16);
currentByte = SBOX[currentByte];
// add the current RCON value to the first byte
if (i == 0) {
currentByte = currentByte ^ RCON[rconCounter];
}
currentByteBits = new StringBuilder(Integer.toHexString(currentByte));
// Add zero padding
while (currentByteBits.length() < 2) {
currentByteBits.insert(0, '0');
}
// replace bytes in original string
rBytes = new StringBuilder(rBytes.substring(0, i * 2) + currentByteBits + rBytes.substring((i + 1) * 2));
}
return new BigInteger(rBytes.toString(), 16);
}
/**
* Returns an array of 10 + 1 round keys that are calculated by using the
* Rijndael key schedule.
*
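* <p>A minimal sketch (the key below is the FIPS-197 example key, shown
* here for illustration only):
* <pre>{@code
* BigInteger key = new BigInteger("2b7e151628aed2a6abf7158809cf4f3c", 16);
* BigInteger[] roundKeys = AES.keyExpansion(key); // roundKeys[0] equals key
* }</pre>
*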
* @return array of 10 + 1 round keys
*/
public static BigInteger[] keyExpansion(BigInteger initialKey) {
BigInteger[] roundKeys = {
initialKey,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
BigInteger.ZERO,
};
// initialize rcon iteration
int rconCounter = 1;
for (int i = 1; i < 11; i++) {
// get the low 32 bits of the previous round key
BigInteger t = roundKeys[i - 1].remainder(new BigInteger("100000000", 16));
// split the previous 128-bit round key into four 32-bit words (0x100000000 == 2^32)
BigInteger[] prevKey = {
roundKeys[i - 1].remainder(new BigInteger("100000000", 16)),
roundKeys[i - 1].remainder(new BigInteger("10000000000000000", 16)).divide(new BigInteger("100000000", 16)),
roundKeys[i - 1].remainder(new BigInteger("1000000000000000000000000", 16)).divide(new BigInteger("10000000000000000", 16)),
roundKeys[i - 1].divide(new BigInteger("1000000000000000000000000", 16)),
};
// run schedule core
t = scheduleCore(t, rconCounter);
rconCounter += 1;
// Calculate partial round key
BigInteger t0 = t.xor(prevKey[3]);
BigInteger t1 = t0.xor(prevKey[2]);
BigInteger t2 = t1.xor(prevKey[1]);
BigInteger t3 = t2.xor(prevKey[0]);
// Join round key segments
t2 = t2.multiply(new BigInteger("100000000", 16));
t1 = t1.multiply(new BigInteger("10000000000000000", 16));
t0 = t0.multiply(new BigInteger("1000000000000000000000000", 16));
roundKeys[i] = t0.add(t1).add(t2).add(t3);
}
return roundKeys;
}
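// Usage sketch: roundKeys[0] whitens the input and roundKeys[1..10] feed the
// ten AES-128 rounds in encrypt()/decrypt() below. For example, with the
// FIPS-197 sample key:
//   BigInteger[] rk = keyExpansion(new BigInteger("2b7e151628aed2a6abf7158809cf4f3c", 16));
//   // rk.length == 11, and rk[0] equals the input key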
/**
 * Returns the representation of the input 128-bit block as an array of
 * 16 8-bit integers (cells).
 *
 * @param block the 128-bit block to split
 * @return array of 16 8-bit integers
 */
public static int[] splitBlockIntoCells(BigInteger block) {
int[] cells = new int[16];
StringBuilder blockBits = new StringBuilder(block.toString(2));
// Prepend leading 0s to get a full 128-bit string
while (blockBits.length() < 128) {
blockBits.insert(0, '0');
}
// split 128 to 8 bit cells
for (int i = 0; i < cells.length; i++) {
String cellBits = blockBits.substring(8 * i, 8 * (i + 1));
cells[i] = Integer.parseInt(cellBits, 2);
}
return cells;
}
/**
 * Returns the 128-bit BigInteger representation of an array of 16 8-bit
 * integers.
 *
 * @param cells the 16 cells to merge
 * @return block of merged cells
 */
public static BigInteger mergeCellsIntoBlock(int[] cells) {
StringBuilder blockBits = new StringBuilder();
for (int i = 0; i < 16; i++) {
StringBuilder cellBits = new StringBuilder(Integer.toBinaryString(cells[i]));
// Prepend leading 0s to get full 8-bit strings
while (cellBits.length() < 8) {
cellBits.insert(0, '0');
}
blockBits.append(cellBits);
}
return new BigInteger(blockBits.toString(), 2);
}
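// splitBlockIntoCells and mergeCellsIntoBlock are inverses: for any
// non-negative 128-bit block b, mergeCellsIntoBlock(splitBlockIntoCells(b))
// equals b, since both methods use the same big-endian cell order.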
/**
 * Adds (XORs) the round key to the ciphertext.
 *
 * @return ciphertext XOR key
 */
public static BigInteger addRoundKey(BigInteger ciphertext, BigInteger key) {
return ciphertext.xor(key);
}
/**
 * Substitutes each 8-bit cell of the input using the S-box and returns the
 * result.
 *
 * @return substitution output
 */
public static BigInteger subBytes(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
for (int i = 0; i < 16; i++) {
cells[i] = SBOX[cells[i]];
}
return mergeCellsIntoBlock(cells);
}
/**
 * Substitutes each 8-bit cell of the input using the inverse S-box for
 * decryption and returns the result.
 *
 * @return substitution output
 */
public static BigInteger subBytesDec(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
for (int i = 0; i < 16; i++) {
cells[i] = INVERSE_SBOX[cells[i]];
}
return mergeCellsIntoBlock(cells);
}
/**
* Cell permutation step. Shifts cells within the rows of the input and
* returns the result.
*/
public static BigInteger shiftRows(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
int[] output = new int[16];
// do nothing in the first row
output[0] = cells[0];
output[4] = cells[4];
output[8] = cells[8];
output[12] = cells[12];
// shift the second row backwards by one cell
output[1] = cells[5];
output[5] = cells[9];
output[9] = cells[13];
output[13] = cells[1];
// shift the third row backwards by two cells
output[2] = cells[10];
output[6] = cells[14];
output[10] = cells[2];
output[14] = cells[6];
// shift the fourth row backwards by three cells
output[3] = cells[15];
output[7] = cells[3];
output[11] = cells[7];
output[15] = cells[11];
return mergeCellsIntoBlock(output);
}
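// Layout note: if cell i is read as state[i % 4][i / 4] (the standard
// column-major AES state), the index pattern above is exactly ShiftRows:
// row 0 is untouched and rows 1-3 rotate left by 1, 2, and 3 positions.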
/**
* Cell permutation step for decryption . Shifts cells within the rows of
* the input and returns the result.
*/
public static BigInteger shiftRowsDec(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
int[] output = new int[16];
// do nothing in the first row
output[0] = cells[0];
output[4] = cells[4];
output[8] = cells[8];
output[12] = cells[12];
// shift the second row forwards by one cell
output[1] = cells[13];
output[5] = cells[1];
output[9] = cells[5];
output[13] = cells[9];
// shift the third row forwards by two cells
output[2] = cells[10];
output[6] = cells[14];
output[10] = cells[2];
output[14] = cells[6];
// shift the fourth row forwards by three cells
output[3] = cells[7];
output[7] = cells[11];
output[11] = cells[15];
output[15] = cells[3];
return mergeCellsIntoBlock(output);
}
/**
* Applies the Rijndael MixColumns to the input and returns the result.
*/
public static BigInteger mixColumns(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
int[] outputCells = new int[16];
for (int i = 0; i < 4; i++) {
int[] row = {
cells[i * 4],
cells[i * 4 + 1],
cells[i * 4 + 2],
cells[i * 4 + 3],
};
outputCells[i * 4] = MULT2[row[0]] ^ MULT3[row[1]] ^ row[2] ^ row[3];
outputCells[i * 4 + 1] = row[0] ^ MULT2[row[1]] ^ MULT3[row[2]] ^ row[3];
outputCells[i * 4 + 2] = row[0] ^ row[1] ^ MULT2[row[2]] ^ MULT3[row[3]];
outputCells[i * 4 + 3] = MULT3[row[0]] ^ row[1] ^ row[2] ^ MULT2[row[3]];
}
return mergeCellsIntoBlock(outputCells);
}
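// Note: under the column-major layout sketched above, the local variable
// named "row" actually holds one state column, which is what the AES
// MixColumns step operates on.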
/**
* Applies the inverse Rijndael MixColumns for decryption to the input and
* returns the result.
*/
public static BigInteger mixColumnsDec(BigInteger ciphertext) {
int[] cells = splitBlockIntoCells(ciphertext);
int[] outputCells = new int[16];
for (int i = 0; i < 4; i++) {
int[] row = {
cells[i * 4],
cells[i * 4 + 1],
cells[i * 4 + 2],
cells[i * 4 + 3],
};
outputCells[i * 4] = MULT14[row[0]] ^ MULT11[row[1]] ^ MULT13[row[2]] ^ MULT9[row[3]];
outputCells[i * 4 + 1] = MULT9[row[0]] ^ MULT14[row[1]] ^ MULT11[row[2]] ^ MULT13[row[3]];
outputCells[i * 4 + 2] = MULT13[row[0]] ^ MULT9[row[1]] ^ MULT14[row[2]] ^ MULT11[row[3]];
outputCells[i * 4 + 3] = MULT11[row[0]] ^ MULT13[row[1]] ^ MULT9[row[2]] ^ MULT14[row[3]];
}
return mergeCellsIntoBlock(outputCells);
}
/**
 * Encrypts the plaintext with the key and returns the result.
 *
 * @param plainText the plaintext block to encrypt
 * @param key the key used for encryption
 * @return the encrypted block
 */
public static BigInteger encrypt(BigInteger plainText, BigInteger key) {
BigInteger[] roundKeys = keyExpansion(key);
// Initial round
plainText = addRoundKey(plainText, roundKeys[0]);
// Main rounds
for (int i = 1; i < 10; i++) {
plainText = subBytes(plainText);
plainText = shiftRows(plainText);
plainText = mixColumns(plainText);
plainText = addRoundKey(plainText, roundKeys[i]);
}
// Final round
plainText = subBytes(plainText);
plainText = shiftRows(plainText);
plainText = addRoundKey(plainText, roundKeys[10]);
return plainText;
}
/**
 * Decrypts the ciphertext with the key and returns the result.
 *
 * @param cipherText the encrypted block to decrypt
 * @param key the key used for decryption
 * @return the decrypted block
 */
public static BigInteger decrypt(BigInteger cipherText, BigInteger key) {
BigInteger[] roundKeys = keyExpansion(key);
// Invert final round
cipherText = addRoundKey(cipherText, roundKeys[10]);
cipherText = shiftRowsDec(cipherText);
cipherText = subBytesDec(cipherText);
// Invert main rounds
for (int i = 9; i > 0; i--) {
cipherText = addRoundKey(cipherText, roundKeys[i]);
cipherText = mixColumnsDec(cipherText);
cipherText = shiftRowsDec(cipherText);
cipherText = subBytesDec(cipherText);
}
// Invert initial round
cipherText = addRoundKey(cipherText, roundKeys[0]);
return cipherText;
}
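/**
 * Minimal round-trip sketch (illustrative only, not called by {@link #main}):
 * decrypt should invert encrypt for any 128-bit block and key. The sample
 * values below are arbitrary hex constants chosen for illustration.
 */
static void roundTripDemo() {
    BigInteger plaintext = new BigInteger("00112233445566778899aabbccddeeff", 16);
    BigInteger key = new BigInteger("000102030405060708090a0b0c0d0e0f", 16);
    BigInteger ciphertext = encrypt(plaintext, key);
    if (!decrypt(ciphertext, key).equals(plaintext)) {
        throw new AssertionError("AES round trip failed");
    }
}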
public static void main(String[] args) {
try (Scanner input = new Scanner(System.in)) {
System.out.println("Enter (e) letter for encrpyt or (d) letter for decrypt :");
char choice = input.nextLine().charAt(0);
String in;
switch (choice) {
case 'E', 'e' -> {
System.out.println(
"Choose a plaintext block (128-Bit Integer in base 16):"
);
in = input.nextLine();
BigInteger plaintext = new BigInteger(in, 16);
System.out.println(
"Choose a Key (128-Bit Integer in base 16):"
);
in = input.nextLine();
BigInteger encryptionKey = new BigInteger(in, 16);
System.out.println(
"The encrypted message is: \n" +
encrypt(plaintext, encryptionKey).toString(16)
);
}
case 'D', 'd' -> {
System.out.println(
"Enter your ciphertext block (128-Bit Integer in base 16):"
);
in = input.nextLine();
BigInteger ciphertext = new BigInteger(in, 16);
System.out.println(
"Choose a Key (128-Bit Integer in base 16):"
);
in = input.nextLine();
BigInteger decryptionKey = new BigInteger(in, 16);
System.out.println(
"The deciphered message is:\n" +
decrypt(ciphertext, decryptionKey).toString(16)
);
}
default -> System.out.println("** End **");
}
}
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/ciphers/AES.java |
461 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.slob.dbservice;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.sql.DataSource;
import lombok.extern.slf4j.Slf4j;
import org.h2.jdbcx.JdbcDataSource;
/**
* Service to handle database operations.
*/
@Slf4j
public class DatabaseService {
public static final String CREATE_BINARY_SCHEMA_DDL =
"CREATE TABLE IF NOT EXISTS FORESTS (ID NUMBER UNIQUE, NAME VARCHAR(30),FOREST VARBINARY)";
public static final String CREATE_TEXT_SCHEMA_DDL =
"CREATE TABLE IF NOT EXISTS FORESTS (ID NUMBER UNIQUE, NAME VARCHAR(30),FOREST VARCHAR)";
public static final String DELETE_SCHEMA_SQL = "DROP TABLE FORESTS IF EXISTS";
public static final String BINARY_DATA = "BINARY";
private static final String DB_URL = "jdbc:h2:~/test";
private static final String INSERT = "insert into FORESTS (id,name, forest) values (?,?,?)";
private static final String SELECT = "select FOREST from FORESTS where id = ?";
private static final DataSource dataSource = createDataSource();
public String dataTypeDb;
/**
* Constructor initializes {@link DatabaseService#dataTypeDb}.
*
* @param dataTypeDb Type of data that is to be stored in DB can be 'TEXT' or 'BINARY'.
*/
public DatabaseService(String dataTypeDb) {
this.dataTypeDb = dataTypeDb;
}
/**
* Initializes the data source.
*
* @return created data source
*/
private static DataSource createDataSource() {
var dataSource = new JdbcDataSource();
dataSource.setURL(DB_URL);
return dataSource;
}
/**
* Shutdown Sequence executes Query {@link DatabaseService#DELETE_SCHEMA_SQL}.
*
* @throws SQLException if any issue occurs while executing DROP Query
*/
public void shutDownService()
throws SQLException {
try (var connection = dataSource.getConnection();
var statement = connection.createStatement()) {
statement.execute(DELETE_SCHEMA_SQL);
}
}
/**
* Initiates the startup sequence and executes the query
* {@link DatabaseService#CREATE_BINARY_SCHEMA_DDL} if {@link DatabaseService#dataTypeDb} is
* binary, otherwise executes the query {@link DatabaseService#CREATE_TEXT_SCHEMA_DDL}.
*
* @throws SQLException if there are any issues during DDL execution
*/
public void startupService()
throws SQLException {
try (var connection = dataSource.getConnection();
var statement = connection.createStatement()) {
if (dataTypeDb.equals(BINARY_DATA)) {
statement.execute(CREATE_BINARY_SCHEMA_DDL);
} else {
statement.execute(CREATE_TEXT_SCHEMA_DDL);
}
}
}
/**
 * Executes the insert query {@link DatabaseService#INSERT}.
 *
 * @param id   id with which the row is to be inserted
 * @param name name to be added in the row
 * @param data object data to be saved in the row
 * @throws SQLException if there are any issues executing the insert query
 *                      {@link DatabaseService#INSERT}
 */
public void insert(int id, String name, Object data)
throws SQLException {
try (var connection = dataSource.getConnection();
var insert = connection.prepareStatement(INSERT)) {
insert.setInt(1, id);
insert.setString(2, name);
insert.setObject(3, data);
insert.execute();
}
}
/**
 * Runs the select query {@link DatabaseService#SELECT}; from the result set returns an
 * {@link java.io.InputStream} if {@link DatabaseService#dataTypeDb} is 'BINARY', otherwise
 * returns the object as a {@link String}.
 *
 * @param id id of the row to be selected
 * @param columnsName name of the column in which the object is stored
 * @return object found from DB
 * @throws SQLException if there are any issues executing the select query
 *                      {@link DatabaseService#SELECT}
 */
public Object select(final long id, String columnsName) throws SQLException {
ResultSet resultSet = null;
try (var connection = dataSource.getConnection();
var preparedStatement =
connection.prepareStatement(SELECT)
) {
Object result = null;
preparedStatement.setLong(1, id);
resultSet = preparedStatement.executeQuery();
while (resultSet.next()) {
if (dataTypeDb.equals(BINARY_DATA)) {
result = resultSet.getBinaryStream(columnsName);
} else {
result = resultSet.getString(columnsName);
}
}
return result;
} finally {
if (resultSet != null) {
resultSet.close();
}
}
}
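// Usage sketch (assuming the H2 database at DB_URL is reachable; the row
// values are hypothetical):
//   var service = new DatabaseService("TEXT");
//   service.startupService();                        // creates the FORESTS table
//   service.insert(1, "sherwood", "<forest .../>");  // sample row
//   Object stored = service.select(1, "FOREST");     // returns a String for 'TEXT'
//   service.shutDownService();                       // drops the table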
}
| rajprins/java-design-patterns | slob/src/main/java/com/iluwatar/slob/dbservice/DatabaseService.java |
462 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingRunnable;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.AbstractAsyncTask;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.Assertions;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.gateway.WriteStateException;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsAccounting;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.MappingParserContext;
import org.elasticsearch.index.mapper.NodeMappingStats;
import org.elasticsearch.index.mapper.RuntimeField;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.query.SearchIndexNameMatcher;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.shard.GlobalCheckpointSyncer;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.SearchOperationListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.index.shard.ShardNotInPrimaryModeException;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.IndexStorePlugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterAware;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.LongUnaryOperator;
import java.util.function.Supplier;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.core.Strings.format;
public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
private final IndexEventListener eventListener;
private final IndexFieldDataService indexFieldData;
private final BitsetFilterCache bitsetFilterCache;
private final NodeEnvironment nodeEnv;
private final ShardStoreDeleter shardStoreDeleter;
private final IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener;
private final IndexStorePlugin.DirectoryFactory directoryFactory;
private final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory;
private final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier;
private final CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper;
private final Engine.IndexCommitListener indexCommitListener;
private final IndexCache indexCache;
private final MapperService mapperService;
private final XContentParserConfiguration parserConfiguration;
private final NamedWriteableRegistry namedWriteableRegistry;
private final SimilarityService similarityService;
private final EngineFactory engineFactory;
private final IndexWarmer warmer;
private volatile Map<Integer, IndexShard> shards = Map.of();
private final AtomicBoolean closed = new AtomicBoolean(false);
private final AtomicBoolean deleted = new AtomicBoolean(false);
private final IndexSettings indexSettings;
private final IndexAnalyzers indexAnalyzers;
private final List<SearchOperationListener> searchOperationListeners;
private final List<IndexingOperationListener> indexingOperationListeners;
private final BooleanSupplier allowExpensiveQueries;
private volatile AsyncRefreshTask refreshTask;
private volatile AsyncTranslogFSync fsyncTask;
private final AsyncGlobalCheckpointTask globalCheckpointTask;
private final AsyncRetentionLeaseSyncTask retentionLeaseSyncTask;
// don't convert to Setting<> and register... we only set this in tests and register via a plugin
private final String INDEX_TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING = "index.translog.retention.check_interval";
private final AsyncTrimTranslogTask trimTranslogTask;
private final ThreadPool threadPool;
private final BigArrays bigArrays;
private final ScriptService scriptService;
private final ClusterService clusterService;
private final Client client;
private final CircuitBreakerService circuitBreakerService;
private final IndexNameExpressionResolver expressionResolver;
private final Supplier<Sort> indexSortSupplier;
private final ValuesSourceRegistry valuesSourceRegistry;
private final MapperMetrics mapperMetrics;
@SuppressWarnings("this-escape")
public IndexService(
IndexSettings indexSettings,
IndexCreationContext indexCreationContext,
NodeEnvironment nodeEnv,
XContentParserConfiguration parserConfiguration,
SimilarityService similarityService,
ShardStoreDeleter shardStoreDeleter,
IndexAnalyzers indexAnalyzers,
EngineFactory engineFactory,
CircuitBreakerService circuitBreakerService,
BigArrays bigArrays,
ThreadPool threadPool,
ScriptService scriptService,
ClusterService clusterService,
Client client,
QueryCache queryCache,
IndexStorePlugin.DirectoryFactory directoryFactory,
IndexEventListener eventListener,
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> wrapperFactory,
MapperRegistry mapperRegistry,
IndicesFieldDataCache indicesFieldDataCache,
List<SearchOperationListener> searchOperationListeners,
List<IndexingOperationListener> indexingOperationListeners,
NamedWriteableRegistry namedWriteableRegistry,
IdFieldMapper idFieldMapper,
BooleanSupplier allowExpensiveQueries,
IndexNameExpressionResolver expressionResolver,
ValuesSourceRegistry valuesSourceRegistry,
IndexStorePlugin.RecoveryStateFactory recoveryStateFactory,
IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener,
IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier,
Engine.IndexCommitListener indexCommitListener,
MapperMetrics mapperMetrics
) {
super(indexSettings);
assert indexCreationContext != IndexCreationContext.RELOAD_ANALYZERS
: "IndexCreationContext.RELOAD_ANALYZERS should only be used when reloading analysers";
this.allowExpensiveQueries = allowExpensiveQueries;
this.indexSettings = indexSettings;
this.parserConfiguration = parserConfiguration;
this.similarityService = similarityService;
this.namedWriteableRegistry = namedWriteableRegistry;
this.circuitBreakerService = circuitBreakerService;
this.expressionResolver = expressionResolver;
this.valuesSourceRegistry = valuesSourceRegistry;
this.snapshotCommitSupplier = snapshotCommitSupplier;
this.indexAnalyzers = indexAnalyzers;
if (needsMapperService(indexSettings, indexCreationContext)) {
assert indexAnalyzers != null;
this.mapperService = new MapperService(
clusterService,
indexSettings,
indexAnalyzers,
parserConfiguration,
similarityService,
mapperRegistry,
// we parse all percolator queries as they would be parsed on shard 0
() -> newSearchExecutionContext(0, 0, null, System::currentTimeMillis, null, emptyMap()),
idFieldMapper,
scriptService,
mapperMetrics
);
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService);
if (indexSettings.getIndexSortConfig().hasIndexSort()) {
// we delay the actual creation of the sort order for this index because the mapping has not been merged yet.
// The sort order is validated right after the merge of the mapping later in the process.
this.indexSortSupplier = () -> indexSettings.getIndexSortConfig()
.buildIndexSort(
mapperService::fieldType,
(fieldType, searchLookup) -> loadFielddata(fieldType, FieldDataContext.noRuntimeFields("index sort"))
);
} else {
this.indexSortSupplier = () -> null;
}
indexFieldData.setListener(new FieldDataCacheListener(this));
this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool));
this.indexCache = new IndexCache(queryCache, bitsetFilterCache);
} else {
assert indexAnalyzers == null;
this.mapperService = null;
this.indexFieldData = null;
this.indexSortSupplier = () -> null;
this.bitsetFilterCache = null;
this.warmer = null;
this.indexCache = null;
}
this.shardStoreDeleter = shardStoreDeleter;
this.indexFoldersDeletionListener = indexFoldersDeletionListener;
this.bigArrays = bigArrays;
this.threadPool = threadPool;
this.scriptService = scriptService;
this.clusterService = clusterService;
this.client = client;
this.eventListener = eventListener;
this.nodeEnv = nodeEnv;
this.directoryFactory = directoryFactory;
this.recoveryStateFactory = recoveryStateFactory;
this.engineFactory = Objects.requireNonNull(engineFactory);
// initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
this.readerWrapper = wrapperFactory.apply(this);
this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners);
this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners);
this.indexCommitListener = indexCommitListener;
this.mapperMetrics = mapperMetrics;
try (var ignored = threadPool.getThreadContext().clearTraceContext()) {
// kick off async ops for the first shard in this index
this.refreshTask = new AsyncRefreshTask(this);
this.trimTranslogTask = new AsyncTrimTranslogTask(this);
this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this);
this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this);
}
updateFsyncTaskIfNecessary();
}
static boolean needsMapperService(IndexSettings indexSettings, IndexCreationContext indexCreationContext) {
return false == (indexSettings.getIndexMetadata().getState() == IndexMetadata.State.CLOSE
&& indexCreationContext == IndexCreationContext.CREATE_INDEX); // metadata verification needs a mapper service
}
public enum IndexCreationContext {
CREATE_INDEX,
METADATA_VERIFICATION,
RELOAD_ANALYZERS
}
public int numberOfShards() {
return shards.size();
}
public IndexEventListener getIndexEventListener() {
return this.eventListener;
}
@Override
public Iterator<IndexShard> iterator() {
return shards.values().iterator();
}
public boolean hasShard(int shardId) {
return shards.containsKey(shardId);
}
/**
* Return the shard with the provided id, or null if there is no such shard.
*/
@Override
@Nullable
public IndexShard getShardOrNull(int shardId) {
return shards.get(shardId);
}
/**
* Return the shard with the provided id, or throw an exception if it doesn't exist.
*/
public IndexShard getShard(int shardId) {
IndexShard indexShard = getShardOrNull(shardId);
if (indexShard == null) {
throw new ShardNotFoundException(new ShardId(index(), shardId));
}
return indexShard;
}
public NodeMappingStats getNodeMappingStats() {
if (mapperService == null) {
return null;
}
long totalCount = mapperService().mappingLookup().getTotalMapperCount();
long totalEstimatedOverhead = totalCount * 1024L; // 1KiB estimated per mapping
NodeMappingStats indexNodeMappingStats = new NodeMappingStats(totalCount, totalEstimatedOverhead);
return indexNodeMappingStats;
}
public Set<Integer> shardIds() {
return shards.keySet();
}
public IndexCache cache() {
return indexCache;
}
public IndexAnalyzers getIndexAnalyzers() {
return this.mapperService.getIndexAnalyzers();
}
public MapperService mapperService() {
return mapperService;
}
public SimilarityService similarityService() {
return similarityService;
}
public Supplier<Sort> getIndexSortSupplier() {
return indexSortSupplier;
}
public synchronized void close(final String reason, boolean delete, Executor shardCloseExecutor, ActionListener<Void> closeListener) {
if (closed.compareAndSet(false, true)) {
deleted.compareAndSet(false, delete);
try (var refs = new RefCountingRunnable(() -> ActionListener.run(closeListener, l -> {
IOUtils.close(
bitsetFilterCache,
indexCache,
indexFieldData,
indexAnalyzers,
refreshTask,
fsyncTask,
trimTranslogTask,
globalCheckpointTask,
retentionLeaseSyncTask
);
l.onResponse(null);
}))) {
final Set<Integer> shardIds = shardIds();
for (final int shardId : shardIds) {
ActionListener.run(refs.acquireListener().delegateResponse((l, e) -> {
logger.warn("failed to close shard", e);
l.onResponse(null);
}), l -> removeShard(shardId, reason, shardCloseExecutor, l));
}
}
} else {
closeListener.onResponse(null);
}
}
// method is synchronized so that IndexService can't be closed while we're writing out dangling indices information
public synchronized void writeDanglingIndicesInfo() {
if (closed.get()) {
return;
}
try {
IndexMetadata.FORMAT.writeAndCleanup(getMetadata(), nodeEnv.indexPaths(index()));
} catch (WriteStateException e) {
logger.warn(() -> format("failed to write dangling indices state for index %s", index()), e);
}
}
// method is synchronized so that IndexService can't be closed while we're deleting dangling indices information
public synchronized void deleteDanglingIndicesInfo() {
if (closed.get()) {
return;
}
try {
MetadataStateFormat.deleteMetaState(nodeEnv.indexPaths(index()));
} catch (IOException e) {
logger.warn(() -> format("failed to delete dangling indices state for index %s", index()), e);
}
}
public String indexUUID() {
return indexSettings.getUUID();
}
// NOTE: O(numShards) cost, but numShards should be smallish?
private long getAvgShardSizeInBytes() throws IOException {
long sum = 0;
int count = 0;
for (IndexShard indexShard : this) {
sum += indexShard.store().stats(0L, LongUnaryOperator.identity()).sizeInBytes();
count++;
}
if (count == 0) {
return -1L;
} else {
return sum / count;
}
}
public synchronized IndexShard createShard(
final ShardRouting routing,
final GlobalCheckpointSyncer globalCheckpointSyncer,
final RetentionLeaseSyncer retentionLeaseSyncer
) throws IOException {
Objects.requireNonNull(retentionLeaseSyncer);
/*
* TODO: we execute this in parallel but it's a synced method. Yet, we might
* be able to serialize the execution via the cluster state in the future. For now we just
* keep it synced.
*/
if (closed.get()) {
throw new IllegalStateException("Can't create shard " + routing.shardId() + ", closed");
}
final Settings indexSettings = this.indexSettings.getSettings();
final ShardId shardId = routing.shardId();
boolean success = false;
Store store = null;
IndexShard indexShard = null;
ShardLock lock = null;
eventListener.beforeIndexShardCreated(routing, indexSettings);
try {
// Try and acquire the shard lock, but we are on the cluster applier thread so we do not wait if it is unavailable; in that
// case, the IndicesClusterStateService will try again (in the background)
lock = nodeEnv.shardLock(shardId, "starting shard");
ShardPath path;
try {
path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings.customDataPath());
} catch (IllegalStateException ex) {
logger.warn("{} failed to load shard path, trying to remove leftover", shardId);
try {
ShardPath.deleteLeftoverShardDirectory(
logger,
nodeEnv,
lock,
this.indexSettings,
shardPaths -> indexFoldersDeletionListener.beforeShardFoldersDeleted(shardId, this.indexSettings, shardPaths)
);
path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings.customDataPath());
} catch (Exception inner) {
ex.addSuppressed(inner);
throw ex;
}
}
if (path == null) {
// TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
// that's being relocated/replicated we know how large it will become once it's done copying:
// Count up how many shards are currently on each data path:
Map<Path, Integer> dataPathToShardCount = new HashMap<>();
for (IndexShard shard : this) {
Path dataPath = shard.shardPath().getRootStatePath();
Integer curCount = dataPathToShardCount.get(dataPath);
if (curCount == null) {
curCount = 0;
}
dataPathToShardCount.put(dataPath, curCount + 1);
}
path = ShardPath.selectNewPathForShard(
nodeEnv,
shardId,
this.indexSettings,
routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE
? getAvgShardSizeInBytes()
: routing.getExpectedShardSize(),
dataPathToShardCount
);
logger.debug("{} creating using a new path [{}]", shardId, path);
} else {
logger.debug("{} creating using an existing path [{}]", shardId, path);
}
if (shards.containsKey(shardId.id())) {
throw new IllegalStateException(shardId + " already exists");
}
logger.debug("creating shard_id {}", shardId);
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final Engine.Warmer engineWarmer = (reader) -> {
IndexShard shard = getShardOrNull(shardId.getId());
if (shard != null) {
warmer.warm(reader, shard, IndexService.this.indexSettings);
}
};
final Directory directory = directoryFactory.newDirectory(this.indexSettings, path, routing);
store = new Store(
shardId,
this.indexSettings,
directory,
lock,
new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))
);
eventListener.onStoreCreated(shardId);
indexShard = new IndexShard(
routing,
this.indexSettings,
path,
store,
indexSortSupplier,
indexCache,
mapperService,
similarityService,
engineFactory,
eventListener,
readerWrapper,
threadPool,
bigArrays,
engineWarmer,
searchOperationListeners,
indexingOperationListeners,
globalCheckpointSyncer,
retentionLeaseSyncer,
circuitBreakerService,
snapshotCommitSupplier,
System::nanoTime,
indexCommitListener,
mapperMetrics
);
eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
eventListener.afterIndexShardCreated(indexShard);
shards = Maps.copyMapWithAddedEntry(shards, shardId.id(), indexShard);
success = true;
return indexShard;
} finally {
if (success == false) {
if (lock != null) {
IOUtils.closeWhileHandlingException(lock);
}
final var finalStore = store;
final var finalIndexShard = indexShard;
CloseUtils.executeDirectly(
l -> closeShard(
"initialization failed",
shardId,
finalIndexShard,
finalStore,
eventListener,
EsExecutors.DIRECT_EXECUTOR_SERVICE /* closing a shard that failed to start up should be fast enough */,
l
)
);
}
}
}
@Override
public synchronized void removeShard(int shardId, String reason, Executor closeExecutor, ActionListener<Void> closeListener) {
final IndexShard indexShard = shards.get(shardId);
if (indexShard == null) {
closeListener.onResponse(null);
return;
}
logger.debug("[{}] closing... (reason: [{}])", shardId, reason);
final var wrappedListener = logger.isDebugEnabled()
? ActionListener.runBefore(closeListener, () -> logger.debug("[{}] closed (reason: [{}])", shardId, reason))
: closeListener;
shards = Maps.copyMapWithRemovedEntry(shards, shardId);
closeShard(
reason,
indexShard.shardId(),
indexShard,
indexShard.store(),
indexShard.getIndexEventListener(),
closeExecutor,
wrappedListener
);
logger.debug("[{}] removed (reason: [{}])", shardId, reason);
}
private void closeShard(
String reason,
ShardId sId,
IndexShard indexShard,
Store store,
IndexEventListener listener,
Executor closeExecutor,
ActionListener<Void> closeListener
) {
final int shardId = sId.id();
final Settings indexSettings = this.getIndexSettings().getSettings();
if (store != null) {
store.beforeClose();
}
try {
try {
listener.beforeIndexShardClosed(sId, indexShard, indexSettings);
} finally {
// this logic is tricky, we want to close the engine so we rollback the changes done to it
// and close the shard so no operations are allowed to it
if (indexShard == null) {
closeListener.onResponse(null);
} else {
// only flush if we are closed (closed index or shutdown) and if we are not deleted
final boolean flushEngine = deleted.get() == false && closed.get();
// if the store is still open, want to keep it open until afterIndexShardClosed
assert store == null || store.hasReferences() : "store exists but already closed";
final var hasStoreRef = store != null && store.tryIncRef(); // being cautious
ActionListener.run(new ActionListener<Void>() {
@Override
public void onResponse(Void unused) {
try {
// call this before we close the store, so we can release resources for it
listener.afterIndexShardClosed(sId, indexShard, indexSettings);
} finally {
try {
if (hasStoreRef) {
store.decRef();
}
} finally {
closeListener.onResponse(null);
}
}
}
@Override
public void onFailure(Exception e) {
logger.debug(() -> "[" + shardId + "] failed to close index shard", e);
onResponse(null); // otherwise ignore the exception
}
}, l -> indexShard.close(reason, flushEngine, closeExecutor, l));
listener.afterIndexShardClosing(sId, indexShard, indexSettings);
}
}
} finally {
try {
if (store != null) {
store.close();
} else {
logger.trace("[{}] store not initialized prior to closing shard, nothing to close", shardId);
}
} catch (Exception e) {
logger.warn(() -> format("[%s] failed to close store on shard removal (reason: [%s])", shardId, reason), e);
}
}
}
private void onShardClose(ShardLock lock) {
if (deleted.get()) { // we remove that shards content if this index has been deleted
try {
try {
eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
} finally {
shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
}
} catch (IOException e) {
shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
logger.debug(() -> "[" + lock.getShardId().id() + "] failed to delete shard content - scheduled a retry", e);
}
}
}
public RecoveryState createRecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, DiscoveryNode sourceNode) {
return recoveryStateFactory.newRecoveryState(shardRouting, targetNode, sourceNode);
}
@Override
public IndexSettings getIndexSettings() {
return indexSettings;
}
/**
* Creates a new {@link SearchExecutionContext}.
*
* Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make
* {@link IndexReader}-specific optimizations, such as rewriting queries that contain range queries.
*/
public SearchExecutionContext newSearchExecutionContext(
int shardId,
int shardRequestIndex,
IndexSearcher searcher,
LongSupplier nowInMillis,
String clusterAlias,
Map<String, Object> runtimeMappings
) {
return newSearchExecutionContext(shardId, shardRequestIndex, searcher, nowInMillis, clusterAlias, runtimeMappings, null);
}
public SearchExecutionContext newSearchExecutionContext(
int shardId,
int shardRequestIndex,
IndexSearcher searcher,
LongSupplier nowInMillis,
String clusterAlias,
Map<String, Object> runtimeMappings,
Integer requestSize
) {
final SearchIndexNameMatcher indexNameMatcher = new SearchIndexNameMatcher(
index().getName(),
clusterAlias,
clusterService,
expressionResolver
);
return new SearchExecutionContext(
shardId,
shardRequestIndex,
indexSettings,
indexCache.bitsetFilterCache(),
this::loadFielddata,
mapperService(),
mapperService().mappingLookup(),
similarityService(),
scriptService,
parserConfiguration,
namedWriteableRegistry,
client,
searcher,
nowInMillis,
clusterAlias,
indexNameMatcher,
allowExpensiveQueries,
valuesSourceRegistry,
runtimeMappings,
requestSize,
mapperMetrics
);
}
/**
* Creates a new {@link QueryRewriteContext}.
* This class is used to rewrite queries in case access to the index is not required, since we can
* decide rewriting based on mappings alone. This saves the cost of pulling an index searcher as
* well as the associated cost of refreshing idle shards.
*/
public QueryRewriteContext newQueryRewriteContext(
final LongSupplier nowInMillis,
final Map<String, Object> runtimeMappings,
final String clusterAlias
) {
final SearchIndexNameMatcher indexNameMatcher = new SearchIndexNameMatcher(
index().getName(),
clusterAlias,
clusterService,
expressionResolver
);
final MapperService mapperService = mapperService();
final MappingLookup mappingLookup = mapperService().mappingLookup();
return new QueryRewriteContext(
parserConfiguration,
client,
nowInMillis,
mapperService,
mappingLookup,
parseRuntimeMappings(runtimeMappings, mapperService, indexSettings, mappingLookup),
indexSettings,
new Index(
RemoteClusterAware.buildRemoteIndexName(clusterAlias, indexSettings.getIndex().getName()),
indexSettings.getIndex().getUUID()
),
indexNameMatcher,
namedWriteableRegistry,
valuesSourceRegistry,
allowExpensiveQueries,
scriptService,
null
);
}
/**
* The {@link ThreadPool} to use for this index.
*/
public ThreadPool getThreadPool() {
return threadPool;
}
/**
* The {@link BigArrays} to use for this index.
*/
public BigArrays getBigArrays() {
return bigArrays;
}
/**
* The {@link ScriptService} to use for this index.
*/
public ScriptService getScriptService() {
return scriptService;
}
List<IndexingOperationListener> getIndexOperationListeners() { // pkg private for testing
return indexingOperationListeners;
}
List<SearchOperationListener> getSearchOperationListener() { // pkg private for testing
return searchOperationListeners;
}
public void updateMapping(final IndexMetadata currentIndexMetadata, final IndexMetadata newIndexMetadata) {
if (mapperService != null) {
mapperService.updateMapping(currentIndexMetadata, newIndexMetadata);
}
}
private class StoreCloseListener implements Store.OnClose {
private final ShardId shardId;
private final Closeable[] toClose;
StoreCloseListener(ShardId shardId, Closeable... toClose) {
this.shardId = shardId;
this.toClose = toClose;
}
@Override
public void accept(ShardLock lock) {
try {
assert lock.getShardId().equals(shardId) : "shard id mismatch, expected: " + shardId + " but got: " + lock.getShardId();
onShardClose(lock);
} finally {
try {
IOUtils.close(toClose);
} catch (IOException ex) {
logger.debug("failed to close resource", ex);
}
}
}
}
private static final class BitsetCacheListener implements BitsetFilterCache.Listener {
final IndexService indexService;
private BitsetCacheListener(IndexService indexService) {
this.indexService = indexService;
}
@Override
public void onCache(ShardId shardId, Accountable accountable) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
shard.shardBitsetFilterCache().onCached(ramBytesUsed);
}
}
}
@Override
public void onRemoval(ShardId shardId, Accountable accountable) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
}
}
}
}
private static final class FieldDataCacheListener implements IndexFieldDataCache.Listener {
final IndexService indexService;
FieldDataCacheListener(IndexService indexService) {
this.indexService = indexService;
}
@Override
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
shard.fieldData().onCache(shardId, fieldName, ramUsage);
}
}
}
@Override
public void onCache(ShardId shardId, String fieldName, GlobalOrdinalsAccounting info) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
shard.fieldData().onCache(shardId, fieldName, info);
}
}
}
@Override
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes);
}
}
}
}
public IndexMetadata getMetadata() {
return indexSettings.getIndexMetadata();
}
private final CopyOnWriteArrayList<Consumer<IndexMetadata>> metadataListeners = new CopyOnWriteArrayList<>();
public void addMetadataListener(Consumer<IndexMetadata> listener) {
metadataListeners.add(listener);
}
@Override
public synchronized void updateMetadata(final IndexMetadata currentIndexMetadata, final IndexMetadata newIndexMetadata) {
final boolean updateIndexSettings = indexSettings.updateIndexMetadata(newIndexMetadata);
if (Assertions.ENABLED && currentIndexMetadata != null) {
final long currentSettingsVersion = currentIndexMetadata.getSettingsVersion();
final long newSettingsVersion = newIndexMetadata.getSettingsVersion();
if (currentSettingsVersion == newSettingsVersion) {
assert updateIndexSettings == false : "No index updates are expected as index settings version has not changed";
} else {
assert updateIndexSettings : "Index updates are expected as index settings version has changed";
assert currentSettingsVersion < newSettingsVersion
: "expected current settings version ["
+ currentSettingsVersion
+ "] "
+ "to be less than new settings version ["
+ newSettingsVersion
+ "]";
}
}
if (updateIndexSettings) {
for (final IndexShard shard : this.shards.values()) {
try {
shard.onSettingsChanged();
} catch (Exception e) {
logger.warn(() -> "[" + shard.shardId().id() + "] failed to notify shard about setting change", e);
}
}
if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
// once we change the refresh interval we schedule yet another refresh
// to ensure we are in a clean and predictable state.
// it doesn't matter if we move from or to <code>-1</code> in both cases we want
// docs to become visible immediately. This also flushes all pending indexing / search requests
// that are waiting for a refresh.
threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
logger.warn("forced refresh failed after interval change", e);
}
@Override
protected void doRun() {
maybeRefreshEngine(true);
}
@Override
public boolean isForceExecution() {
return true;
}
});
rescheduleRefreshTasks();
}
updateFsyncTaskIfNecessary();
}
metadataListeners.forEach(c -> c.accept(newIndexMetadata));
}
private void updateFsyncTaskIfNecessary() {
if (indexSettings.getTranslogDurability() == Translog.Durability.REQUEST) {
try {
if (fsyncTask != null) {
fsyncTask.close();
}
} finally {
fsyncTask = null;
}
} else if (fsyncTask == null) {
fsyncTask = new AsyncTranslogFSync(this);
} else {
fsyncTask.updateIfNeeded();
}
}
private void rescheduleRefreshTasks() {
try {
refreshTask.close();
} finally {
refreshTask = new AsyncRefreshTask(this);
}
}
public static Function<String, String> dateMathExpressionResolverAt() {
return expression -> IndexNameExpressionResolver.resolveDateMathExpression(expression, System.currentTimeMillis());
}
public static Function<String, String> dateMathExpressionResolverAt(long instant) {
return expression -> IndexNameExpressionResolver.resolveDateMathExpression(expression, instant);
}
public interface ShardStoreDeleter {
void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException;
void addPendingDelete(ShardId shardId, IndexSettings indexSettings);
}
public final EngineFactory getEngineFactory() {
return engineFactory;
}
final CheckedFunction<DirectoryReader, DirectoryReader, IOException> getReaderWrapper() {
return readerWrapper;
} // pkg private for testing
final IndexStorePlugin.DirectoryFactory getDirectoryFactory() {
return directoryFactory;
} // pkg private for testing
private void maybeFSyncTranslogs() {
if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) {
for (IndexShard shard : this.shards.values()) {
try {
if (shard.isSyncNeeded()) {
shard.sync();
}
} catch (AlreadyClosedException ex) {
// fine - continue;
} catch (IOException e) {
logger.warn("failed to sync translog", e);
}
}
}
}
private void maybeRefreshEngine(boolean force) {
if (indexSettings.getRefreshInterval().millis() > 0 || force) {
for (IndexShard shard : this.shards.values()) {
shard.scheduledRefresh(new ActionListener<>() {
@Override
public void onResponse(Boolean ignored) {}
@Override
public void onFailure(Exception e) {
if (e instanceof IndexShardClosedException == false && e instanceof AlreadyClosedException == false) {
logger.warn("unexpected exception while performing scheduled refresh", e);
}
}
});
}
}
}
private void maybeTrimTranslog() {
for (IndexShard shard : this.shards.values()) {
switch (shard.state()) {
case CREATED:
case RECOVERING:
case CLOSED:
continue;
case POST_RECOVERY:
case STARTED:
try {
shard.trimTranslog();
} catch (IndexShardClosedException | AlreadyClosedException ex) {
// fine - continue;
}
continue;
default:
throw new IllegalStateException("unknown state: " + shard.state());
}
}
}
private void maybeSyncGlobalCheckpoints() {
sync(is -> is.maybeSyncGlobalCheckpoint("background"), "global checkpoint");
}
private void syncRetentionLeases() {
sync(IndexShard::syncRetentionLeases, "retention lease");
}
private void sync(final Consumer<IndexShard> sync, final String source) {
for (final IndexShard shard : this.shards.values()) {
if (shard.routingEntry().active() && shard.routingEntry().primary()) {
switch (shard.state()) {
case CLOSED:
case CREATED:
case RECOVERING:
continue;
case POST_RECOVERY:
assert false : "shard " + shard.shardId() + " is in post-recovery but marked as active";
continue;
case STARTED:
try {
shard.runUnderPrimaryPermit(() -> sync.accept(shard), e -> {
if (e instanceof AlreadyClosedException == false
&& e instanceof IndexShardClosedException == false
&& e instanceof ShardNotInPrimaryModeException == false) {
logger.warn(() -> format("%s failed to execute %s sync", shard.shardId(), source), e);
}
}, EsExecutors.DIRECT_EXECUTOR_SERVICE);
} catch (final AlreadyClosedException | IndexShardClosedException e) {
// the shard was closed concurrently, continue
}
continue;
default:
throw new IllegalStateException("unknown state [" + shard.state() + "]");
}
}
}
}
abstract static class BaseAsyncTask extends AbstractAsyncTask {
protected final IndexService indexService;
BaseAsyncTask(final IndexService indexService, final Executor executor, final TimeValue interval) {
super(indexService.logger, indexService.threadPool, executor, interval, true);
this.indexService = indexService;
rescheduleIfNecessary();
}
@Override
protected boolean mustReschedule() {
// don't re-schedule if the IndexService instance is closed or if the index is closed
return indexService.closed.get() == false
&& indexService.indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN;
}
}
/**
* FSyncs the translog for all shards of this index in a defined interval.
*/
static final class AsyncTranslogFSync extends BaseAsyncTask {
AsyncTranslogFSync(IndexService indexService) {
super(
indexService,
indexService.threadPool.executor(ThreadPool.Names.FLUSH),
indexService.getIndexSettings().getTranslogSyncInterval()
);
}
@Override
protected void runInternal() {
indexService.maybeFSyncTranslogs();
}
void updateIfNeeded() {
final TimeValue newInterval = indexService.getIndexSettings().getTranslogSyncInterval();
if (newInterval.equals(getInterval()) == false) {
setInterval(newInterval);
}
}
@Override
public String toString() {
return "translog_sync";
}
}
static final class AsyncRefreshTask extends BaseAsyncTask {
AsyncRefreshTask(IndexService indexService) {
super(
indexService,
indexService.threadPool.executor(ThreadPool.Names.REFRESH),
indexService.getIndexSettings().getRefreshInterval()
);
}
@Override
protected void runInternal() {
indexService.maybeRefreshEngine(false);
}
@Override
public String toString() {
return "refresh";
}
}
final class AsyncTrimTranslogTask extends BaseAsyncTask {
AsyncTrimTranslogTask(IndexService indexService) {
super(
indexService,
threadPool.generic(),
indexService.getIndexSettings()
.getSettings()
.getAsTime(INDEX_TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING, TimeValue.timeValueMinutes(10))
);
}
@Override
protected boolean mustReschedule() {
return indexService.closed.get() == false;
}
@Override
protected void runInternal() {
indexService.maybeTrimTranslog();
}
@Override
public String toString() {
return "trim_translog";
}
}
// this setting is intentionally not registered, it is only used in tests
public static final Setting<TimeValue> GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING = Setting.timeSetting(
"index.global_checkpoint_sync.interval",
new TimeValue(30, TimeUnit.SECONDS),
new TimeValue(0, TimeUnit.MILLISECONDS),
Property.Dynamic,
Property.IndexScope
);
// this setting is intentionally not registered, it is only used in tests
public static final Setting<TimeValue> RETENTION_LEASE_SYNC_INTERVAL_SETTING = Setting.timeSetting(
"index.soft_deletes.retention_lease.sync_interval",
new TimeValue(30, TimeUnit.SECONDS),
new TimeValue(0, TimeUnit.MILLISECONDS),
Property.Dynamic,
Property.IndexScope
);
/**
* Background task that syncs the global checkpoint to replicas.
*/
private static final class AsyncGlobalCheckpointTask extends BaseAsyncTask {
AsyncGlobalCheckpointTask(final IndexService indexService) {
// index.global_checkpoint_sync_interval is not a real setting, it is only registered in tests
super(
indexService,
indexService.getThreadPool().generic(),
GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings())
);
}
@Override
protected void runInternal() {
indexService.maybeSyncGlobalCheckpoints();
}
@Override
public String toString() {
return "global_checkpoint_sync";
}
}
private static final class AsyncRetentionLeaseSyncTask extends BaseAsyncTask {
AsyncRetentionLeaseSyncTask(final IndexService indexService) {
super(
indexService,
indexService.threadPool.executor(ThreadPool.Names.MANAGEMENT),
RETENTION_LEASE_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings())
);
}
@Override
protected void runInternal() {
indexService.syncRetentionLeases();
}
@Override
public String toString() {
return "retention_lease_sync";
}
}
AsyncRefreshTask getRefreshTask() { // for tests
return refreshTask;
}
AsyncTranslogFSync getFsyncTask() { // for tests
return fsyncTask;
}
AsyncTrimTranslogTask getTrimTranslogTask() { // for tests
return trimTranslogTask;
}
/**
* Clears the caches for the given shard id if the shard is still allocated on this node
*/
public boolean clearCaches(boolean queryCache, boolean fieldDataCache, String... fields) {
boolean clearedAtLeastOne = false;
if (queryCache) {
clearedAtLeastOne = true;
indexCache.query().clear("api");
}
if (fieldDataCache) {
clearedAtLeastOne = true;
if (fields.length == 0) {
indexFieldData.clear();
} else {
for (String field : fields) {
indexFieldData.clearField(field);
}
}
}
if (clearedAtLeastOne == false) {
if (fields.length == 0) {
indexCache.clear("api");
indexFieldData.clear();
} else {
// only clear caches relating to the specified fields
for (String field : fields) {
indexFieldData.clearField(field);
}
}
}
return clearedAtLeastOne;
}
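// e.g. clearCaches(true, false) clears only the query cache for this index,
// while clearCaches(false, true, "user.name") clears field data for the
// (hypothetical) field "user.name" only.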
public static Map<String, MappedFieldType> parseRuntimeMappings(
Map<String, Object> runtimeMappings,
MapperService mapperService,
IndexSettings indexSettings,
MappingLookup lookup
) {
if (runtimeMappings.isEmpty()) {
return Collections.emptyMap();
}
// TODO add specific tests to SearchExecutionTests similar to the ones in FieldTypeLookupTests
MappingParserContext parserContext = mapperService.parserContext();
Map<String, RuntimeField> runtimeFields = RuntimeField.parseRuntimeFields(new HashMap<>(runtimeMappings), parserContext, false);
Map<String, MappedFieldType> runtimeFieldTypes = RuntimeField.collectFieldTypes(runtimeFields.values());
if (false == indexSettings.getIndexMetadata().getRoutingPaths().isEmpty()) {
for (String r : runtimeMappings.keySet()) {
if (Regex.simpleMatch(indexSettings.getIndexMetadata().getRoutingPaths(), r)) {
throw new IllegalArgumentException("runtime fields may not match [routing_path] but [" + r + "] matched");
}
}
}
runtimeFieldTypes.keySet().forEach(lookup::validateDoesNotShadow);
return runtimeFieldTypes;
}
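// Illustrative shape of the runtimeMappings argument (hypothetical field and
// script, following the standard runtime-field mapping structure):
//   Map.of("day_of_week", Map.of(
//       "type", "keyword",
//       "script", Map.of("source", "emit(doc['timestamp'].value.dayOfWeekEnum.toString())")))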
public IndexFieldData<?> loadFielddata(MappedFieldType fieldType, FieldDataContext fieldDataContext) {
return indexFieldData.getForField(fieldType, fieldDataContext);
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/IndexService.java |
464 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.indices;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.MinimizationOperations;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateResponse.ResetFeatureStateStatus;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
import org.elasticsearch.action.support.RefCountingListener;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.client.internal.OriginSettingClient;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.TriConsumer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Predicates;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
import org.elasticsearch.plugins.SystemIndexPlugin;
import org.elasticsearch.snapshots.SnapshotsService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.elasticsearch.core.Strings.format;
import static org.elasticsearch.synonyms.SynonymsManagementAPIService.SYNONYMS_DESCRIPTOR;
import static org.elasticsearch.synonyms.SynonymsManagementAPIService.SYNONYMS_FEATURE_NAME;
import static org.elasticsearch.tasks.TaskResultsService.TASKS_DESCRIPTOR;
import static org.elasticsearch.tasks.TaskResultsService.TASKS_FEATURE_NAME;
/**
* Provides information about system-owned indices and data streams for Elasticsearch and Elasticsearch plugins.
*
* <p>Various Elasticsearch features such as Security or Watcher keep their state in their own indices. We keep these indices separate
* from the user index space for a few reasons. In some cases, the indices contain information that should be hidden from users. But,
* more generally, we want to protect these indices and data streams from being inadvertently modified or deleted.
*
* <p>The system resources are grouped by feature, using the {@link SystemIndices.Feature} class. Most features will be loaded from
* instances of {@link SystemIndexPlugin}; any other features will be described in this class. Features may be retrieved by name or
* iterated over (see {@link #getFeature(String)} and {@link #getFeatures()}). Each Feature provides collections of
* {@link SystemIndexDescriptor}s or {@link SystemDataStreamDescriptor}s. These descriptors define their resources by means of patterns.
 * Any index name or data stream name that matches one of these patterns is considered a system resource for that descriptor, and user
* access to it will be restricted. These patterns are gathered and validated so that the SystemIndices object can provide information
* about system resources: for example, whether a particular string will be considered a “system name” (see {@link #isSystemIndex(String)}).
*
* <p>For more information about the expected behavior of system indices, see {@link SystemIndexDescriptor}. For more information about
* the expected behavior of system data streams, see {@link SystemDataStreamDescriptor}.
*
* <p>The SystemIndices object is constructed during {@link org.elasticsearch.node.Node} startup, and is not modified after construction.
* In other words, the set of system resources will be consistent over the lifetime of a node.
*
* <p>System resources will specify thread pools for reads, writes, and searches. This can ensure that system-critical operations, such
* as user authentication, are not blocked by heavy thread contention from user activities. {@link #getExecutorSelector()} provides an
* object for convenient look-ups of these thread pools.
*
* <p>There are a few requirements for system features, collectively:
* <ol>
* <li>The feature names must be distinct.
* <li>System index patterns must not overlap.
* <li>Aliases for system indices must be distinct.
* <li>Feature names must not be reserved names. Right now, the only
* reserved name is “none”.
* </ol>
*
* <p>System index access is currently controlled by Security role index permissions. However, we have deprecated general rest access to
* system indices. This class provides checks for system index “access levels” (see {@link #getSystemIndexAccessLevel(ThreadContext)}).
* If a request has the wrong access level for a system index it is targeting, then we will issue a deprecation warning. In the future,
* we will block access. The non-deprecated way to access certain external system indices is to use the correct request headers. This
* behavior is already in place in {@link SystemDataStreamDescriptor} and “net-new” system indices (see
* {@link SystemIndexDescriptor#isNetNew()}).
*
* <p>The implementation of the system index name checks makes heavy use of the Lucene {@link Automaton} class. At a high level, an
* automaton is a kind of matcher that can be created from a regex. Lucene Automata give us the ability to check for overlapping
* patterns, and to create efficient unions of patterns.
*/
public class SystemIndices {
public static final String SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_system_index_access_allowed";
public static final String EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_external_system_index_access_origin";
public static final String UPGRADED_INDEX_SUFFIX = "-reindexed-for-8";
private static final Automaton EMPTY = Automata.makeEmpty();
private static final Logger logger = LogManager.getLogger(SystemIndices.class);
/**
* This is the source for non-plugin system features.
*/
private static final Map<String, Feature> SERVER_SYSTEM_FEATURE_DESCRIPTORS = Stream.of(
new Feature(TASKS_FEATURE_NAME, "Manages task results", List.of(TASKS_DESCRIPTOR)),
new Feature(SYNONYMS_FEATURE_NAME, "Manages synonyms", List.of(SYNONYMS_DESCRIPTOR))
).collect(Collectors.toUnmodifiableMap(Feature::getName, Function.identity()));
public static final Map<String, SystemIndexDescriptor.MappingsVersion> SERVER_SYSTEM_MAPPINGS_VERSIONS =
SERVER_SYSTEM_FEATURE_DESCRIPTORS.values()
.stream()
.flatMap(feature -> feature.getIndexDescriptors().stream())
.filter(SystemIndexDescriptor::isAutomaticallyManaged)
.collect(Collectors.toMap(SystemIndexDescriptor::getIndexPattern, SystemIndexDescriptor::getMappingsVersion));
/**
* The node's full list of system features is stored here. The map is keyed
* on the value of {@link Feature#getName()}, and is used for fast lookup of
* feature objects via {@link #getFeature(String)}.
*/
private final Map<String, Feature> featureDescriptors;
private final Automaton systemNameAutomaton;
private final CharacterRunAutomaton netNewSystemIndexAutomaton;
private final CharacterRunAutomaton systemNameRunAutomaton;
private final CharacterRunAutomaton systemIndexRunAutomaton;
private final CharacterRunAutomaton systemDataStreamIndicesRunAutomaton;
private final Predicate<String> systemDataStreamPredicate;
private final SystemIndexDescriptor[] indexDescriptors;
private final Map<String, SystemDataStreamDescriptor> dataStreamDescriptors;
private final Map<String, CharacterRunAutomaton> productToSystemIndicesMatcher;
private final ExecutorSelector executorSelector;
/**
* Initialize the SystemIndices object
* @param pluginAndModuleFeatures A list of features from which we will load system indices.
* These features come from plugins and modules. Non-plugin system
* features such as Tasks will be added automatically.
*/
@SuppressWarnings("this-escape")
public SystemIndices(List<Feature> pluginAndModuleFeatures) {
featureDescriptors = buildFeatureMap(pluginAndModuleFeatures);
indexDescriptors = featureDescriptors.values()
.stream()
.flatMap(f -> f.getIndexDescriptors().stream())
.toArray(SystemIndexDescriptor[]::new);
dataStreamDescriptors = featureDescriptors.values()
.stream()
.flatMap(f -> f.getDataStreamDescriptors().stream())
.collect(Collectors.toUnmodifiableMap(SystemDataStreamDescriptor::getDataStreamName, Function.identity()));
checkForOverlappingPatterns(featureDescriptors);
ensurePatternsAllowSuffix(featureDescriptors);
checkForDuplicateAliases(this.getSystemIndexDescriptors());
Automaton systemIndexAutomata = buildIndexAutomaton(featureDescriptors);
this.systemIndexRunAutomaton = new CharacterRunAutomaton(systemIndexAutomata);
Automaton systemDataStreamIndicesAutomata = buildDataStreamBackingIndicesAutomaton(featureDescriptors);
this.systemDataStreamIndicesRunAutomaton = new CharacterRunAutomaton(systemDataStreamIndicesAutomata);
this.systemDataStreamPredicate = buildDataStreamNamePredicate(featureDescriptors);
this.netNewSystemIndexAutomaton = buildNetNewIndexCharacterRunAutomaton(featureDescriptors);
this.productToSystemIndicesMatcher = getProductToSystemIndicesMap(featureDescriptors);
this.executorSelector = new ExecutorSelector(this);
this.systemNameAutomaton = MinimizationOperations.minimize(
Operations.union(List.of(systemIndexAutomata, systemDataStreamIndicesAutomata, buildDataStreamAutomaton(featureDescriptors))),
Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
);
this.systemNameRunAutomaton = new CharacterRunAutomaton(systemNameAutomaton);
}
static void ensurePatternsAllowSuffix(Map<String, Feature> featureDescriptors) {
String suffixPattern = "*" + UPGRADED_INDEX_SUFFIX;
final List<String> descriptorsWithNoRoomForSuffix = featureDescriptors.values()
.stream()
.flatMap(
feature -> feature.getIndexDescriptors()
.stream()
                    // The below filter & map are inside the enclosing flatMap so that we have access to both the feature and the descriptor
.filter(descriptor -> overlaps(descriptor.getIndexPattern(), suffixPattern) == false)
.map(descriptor -> format("pattern [%s] from feature [%s]", descriptor.getIndexPattern(), feature.getName()))
)
.toList();
if (descriptorsWithNoRoomForSuffix.isEmpty() == false) {
throw new IllegalStateException(
format(
"the following system index patterns do not allow suffix [%s] required to allow upgrades: [%s]",
UPGRADED_INDEX_SUFFIX,
descriptorsWithNoRoomForSuffix
)
);
}
}
private static void checkForDuplicateAliases(Collection<SystemIndexDescriptor> descriptors) {
final Map<String, Integer> aliasCounts = new HashMap<>();
for (SystemIndexDescriptor descriptor : descriptors) {
final String aliasName = descriptor.getAliasName();
if (aliasName != null) {
aliasCounts.compute(aliasName, (alias, existingCount) -> 1 + (existingCount == null ? 0 : existingCount));
}
}
final List<String> duplicateAliases = aliasCounts.entrySet()
.stream()
.filter(entry -> entry.getValue() > 1)
.map(Map.Entry::getKey)
.sorted()
.toList();
if (duplicateAliases.isEmpty() == false) {
throw new IllegalStateException("Found aliases associated with multiple system index descriptors: " + duplicateAliases + "");
}
}
private static Map<String, CharacterRunAutomaton> getProductToSystemIndicesMap(Map<String, Feature> featureDescriptors) {
Map<String, Automaton> productToSystemIndicesMap = new HashMap<>();
for (Feature feature : featureDescriptors.values()) {
feature.getIndexDescriptors().forEach(systemIndexDescriptor -> {
if (systemIndexDescriptor.isExternal()) {
systemIndexDescriptor.getAllowedElasticProductOrigins()
.forEach(origin -> productToSystemIndicesMap.compute(origin, (key, value) -> {
Automaton automaton = SystemIndexDescriptor.buildAutomaton(
systemIndexDescriptor.getIndexPattern(),
systemIndexDescriptor.getAliasName()
);
return value == null ? automaton : Operations.union(value, automaton);
}));
}
});
feature.getDataStreamDescriptors().forEach(dataStreamDescriptor -> {
if (dataStreamDescriptor.isExternal()) {
dataStreamDescriptor.getAllowedElasticProductOrigins()
.forEach(origin -> productToSystemIndicesMap.compute(origin, (key, value) -> {
Automaton automaton = SystemIndexDescriptor.buildAutomaton(
dataStreamDescriptor.getBackingIndexPattern(),
dataStreamDescriptor.getDataStreamName()
);
return value == null ? automaton : Operations.union(value, automaton);
}));
}
});
}
return productToSystemIndicesMap.entrySet()
.stream()
.collect(
Collectors.toUnmodifiableMap(
Entry::getKey,
entry -> new CharacterRunAutomaton(
MinimizationOperations.minimize(entry.getValue(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)
)
)
);
}
/**
* Checks whether the given name matches a reserved name or pattern that is intended for use by a system component. The name
* is checked against index names, aliases, data stream names, and the names of indices that back a system data stream.
*/
public boolean isSystemName(String name) {
return systemNameRunAutomaton.run(name);
}
/**
* Determines whether a given index is a system index by comparing its name to the collection of loaded {@link SystemIndexDescriptor}s
* @param index the {@link Index} object to check against loaded {@link SystemIndexDescriptor}s
* @return true if the {@link Index}'s name matches a pattern from a {@link SystemIndexDescriptor}
*/
public boolean isSystemIndex(Index index) {
return isSystemIndex(index.getName());
}
/**
* Determines whether a given index is a system index by comparing its name to the collection of loaded {@link SystemIndexDescriptor}s.
* This will also match alias names that belong to system indices.
* @param indexName the index name to check against loaded {@link SystemIndexDescriptor}s
* @return true if the index name matches a pattern from a {@link SystemIndexDescriptor}
*/
public boolean isSystemIndex(String indexName) {
return systemIndexRunAutomaton.run(indexName);
}
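    // Illustrative sketch (not part of the original source): assuming only the built-in server features
    // are loaded and an instance named "systemIndices", the task-results index is recognized here.
    //
    //   systemIndices.isSystemIndex(".tasks");     // true: matches the tasks descriptor's pattern
    //   systemIndices.isSystemIndex("my-index");   // false: no descriptor pattern matches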
/**
* Determines whether the provided name matches that of a system data stream that has been defined by a
* {@link SystemDataStreamDescriptor}
*/
public boolean isSystemDataStream(String name) {
return systemDataStreamPredicate.test(name);
}
/**
* Determines whether the provided name matches that of an index that backs a system data stream. Backing indices
* for system data streams are marked as "system" in their metadata (see {@link
* org.elasticsearch.cluster.metadata.SystemIndexMetadataUpgradeService}) and receive the same protections as the
* system data stream.
*/
public boolean isSystemIndexBackingDataStream(String name) {
return systemDataStreamIndicesRunAutomaton.run(name);
}
/**
* The Elasticsearch security plugin can use the automaton that matches all
* system resource names to efficiently authorize requests.
*
* @return An {@link Automaton} that tests whether strings are names of system indices, aliases, or
* data streams.
*/
public Automaton getSystemNameAutomaton() {
return systemNameAutomaton;
}
/**
* Checks whether an index is a net-new system index, meaning we can apply non-BWC behavior to it.
* See {@link SystemIndexDescriptor#isNetNew()}.
* @param indexName The index name to check.
* @return {@code true} if the given index is covered by a net-new system index descriptor, {@code false} otherwise.
*/
public boolean isNetNewSystemIndex(String indexName) {
return netNewSystemIndexAutomaton.run(indexName);
}
/**
* Used to determine which executor should be used for operations on this index. See {@link ExecutorSelector} docs for
* details.
*/
public ExecutorSelector getExecutorSelector() {
return executorSelector;
}
/**
* Finds a single matching {@link SystemIndexDescriptor}, if any, for the given index name.
* @param name the name of the index
* @return The matching {@link SystemIndexDescriptor} or {@code null} if no descriptor is found
*/
public @Nullable SystemIndexDescriptor findMatchingDescriptor(String name) {
return findMatchingDescriptor(indexDescriptors, name);
}
@Nullable
static SystemIndexDescriptor findMatchingDescriptor(SystemIndexDescriptor[] indexDescriptors, String name) {
SystemIndexDescriptor matchingDescriptor = null;
for (SystemIndexDescriptor systemIndexDescriptor : indexDescriptors) {
if (systemIndexDescriptor.matchesIndexPattern(name)) {
matchingDescriptor = systemIndexDescriptor;
break;
}
}
return matchingDescriptor;
}
/**
* Finds a single matching {@link SystemDataStreamDescriptor}, if any, for the given DataStream name.
* @param name the name of the DataStream
* @return The matching {@link SystemDataStreamDescriptor} or {@code null} if no descriptor is found
*/
public @Nullable SystemDataStreamDescriptor findMatchingDataStreamDescriptor(String name) {
return dataStreamDescriptors.get(name);
}
/**
* Builds a predicate that tests whether a system index should be accessible for a given ThreadContext. We allow guaranteed (that is,
* non-deprecated) external access to system indices based on special request headers in addition to security roles. If those
* headers are present and provide a product name with access to the index, they will be added to the thread context and checked
* here. Without these headers, we will add deprecation warnings to the response. In future versions, we will deny access altogether.
* @param threadContext the threadContext containing headers used for system index access
* @return Predicate to check external system index names with
*/
public Predicate<String> getProductSystemIndexNamePredicate(ThreadContext threadContext) {
final String product = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY);
if (product == null) {
return Predicates.never();
}
final CharacterRunAutomaton automaton = productToSystemIndicesMatcher.get(product);
if (automaton == null) {
return Predicates.never();
}
return automaton::run;
}
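    // Illustrative sketch (not part of the original source): resolving the predicate for a hypothetical
    // product origin, assuming an instance named "systemIndices".
    //
    //   ThreadContext ctx = new ThreadContext(Settings.EMPTY);
    //   ctx.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "some-product");
    //   Predicate<String> allowed = systemIndices.getProductSystemIndexNamePredicate(ctx);
    //   // "allowed" matches only external system indices whose descriptors list "some-product"
    //   // as an allowed origin; if no descriptor does, it matches nothing.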
/**
* Get a set of feature names. This is useful for checking whether particular
* features are present on the node.
* @return A set of all feature names
*/
public Set<String> getFeatureNames() {
return Set.copyOf(featureDescriptors.keySet());
}
/**
* Get a feature by name.
* @param name Name of a feature.
* @return The corresponding feature if it exists on this node, null otherwise.
*/
public Feature getFeature(String name) {
return featureDescriptors.get(name);
}
/**
* Get a collection of the Features this SystemIndices object is managing.
* @return A collection of Features.
*/
public Collection<Feature> getFeatures() {
return List.copyOf(featureDescriptors.values());
}
private static Automaton buildIndexAutomaton(Map<String, Feature> featureDescriptors) {
Optional<Automaton> automaton = featureDescriptors.values()
.stream()
.map(SystemIndices::featureToIndexAutomaton)
.reduce(Operations::union);
return MinimizationOperations.minimize(automaton.orElse(EMPTY), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
}
private static CharacterRunAutomaton buildNetNewIndexCharacterRunAutomaton(Map<String, Feature> featureDescriptors) {
Optional<Automaton> automaton = featureDescriptors.values()
.stream()
.flatMap(feature -> feature.getIndexDescriptors().stream())
.filter(SystemIndexDescriptor::isAutomaticallyManaged)
.filter(SystemIndexDescriptor::isNetNew)
.map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getIndexPattern(), descriptor.getAliasName()))
.reduce(Operations::union);
return new CharacterRunAutomaton(
MinimizationOperations.minimize(automaton.orElse(EMPTY), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)
);
}
private static Automaton featureToIndexAutomaton(Feature feature) {
Optional<Automaton> systemIndexAutomaton = feature.getIndexDescriptors()
.stream()
.map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getIndexPattern(), descriptor.getAliasName()))
.reduce(Operations::union);
return systemIndexAutomaton.orElse(EMPTY);
}
private static Automaton buildDataStreamAutomaton(Map<String, Feature> featureDescriptors) {
Optional<Automaton> automaton = featureDescriptors.values()
.stream()
.flatMap(feature -> feature.getDataStreamDescriptors().stream())
.map(SystemDataStreamDescriptor::getDataStreamName)
.map(dsName -> SystemIndexDescriptor.buildAutomaton(dsName, null))
.reduce(Operations::union);
return automaton.isPresent() ? MinimizationOperations.minimize(automaton.get(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT) : EMPTY;
}
private static Predicate<String> buildDataStreamNamePredicate(Map<String, Feature> featureDescriptors) {
CharacterRunAutomaton characterRunAutomaton = new CharacterRunAutomaton(buildDataStreamAutomaton(featureDescriptors));
return characterRunAutomaton::run;
}
private static Automaton buildDataStreamBackingIndicesAutomaton(Map<String, Feature> featureDescriptors) {
Optional<Automaton> automaton = featureDescriptors.values()
.stream()
.map(SystemIndices::featureToDataStreamBackingIndicesAutomaton)
.reduce(Operations::union);
return MinimizationOperations.minimize(automaton.orElse(EMPTY), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
}
private static Automaton featureToDataStreamBackingIndicesAutomaton(Feature feature) {
Optional<Automaton> systemDataStreamAutomaton = feature.getDataStreamDescriptors()
.stream()
.map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getBackingIndexPattern(), null))
.reduce(Operations::union);
return systemDataStreamAutomaton.orElse(EMPTY);
}
public SystemDataStreamDescriptor validateDataStreamAccess(String dataStreamName, ThreadContext threadContext) {
if (systemDataStreamPredicate.test(dataStreamName)) {
SystemDataStreamDescriptor dataStreamDescriptor = featureDescriptors.values()
.stream()
.flatMap(feature -> feature.getDataStreamDescriptors().stream())
.filter(descriptor -> descriptor.getDataStreamName().equals(dataStreamName))
.findFirst()
.orElseThrow(() -> new IllegalStateException("system data stream descriptor not found for [" + dataStreamName + "]"));
if (dataStreamDescriptor.isExternal()) {
final SystemIndexAccessLevel accessLevel = getSystemIndexAccessLevel(threadContext);
assert accessLevel != SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY : "BACKWARDS_COMPATIBLE access level is leaking";
if (accessLevel == SystemIndexAccessLevel.NONE) {
throw dataStreamAccessException(null, dataStreamName);
} else if (accessLevel == SystemIndexAccessLevel.RESTRICTED) {
if (getProductSystemIndexNamePredicate(threadContext).test(dataStreamName) == false) {
throw dataStreamAccessException(
threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY),
dataStreamName
);
} else {
return dataStreamDescriptor;
}
} else {
assert accessLevel == SystemIndexAccessLevel.ALL || accessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY;
return dataStreamDescriptor;
}
} else {
return dataStreamDescriptor;
}
} else {
return null;
}
}
public static IllegalArgumentException dataStreamAccessException(ThreadContext threadContext, Collection<String> names) {
return dataStreamAccessException(
threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY),
names.toArray(Strings.EMPTY_ARRAY)
);
}
public static IllegalArgumentException netNewSystemIndexAccessException(ThreadContext threadContext, Collection<String> names) {
final String product = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY);
if (product == null) {
return new IllegalArgumentException(
"Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " use and access is reserved for system operations"
);
} else {
return new IllegalArgumentException(
"Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " may not be accessed by product [" + product + "]"
);
}
}
static IllegalArgumentException dataStreamAccessException(@Nullable String product, String... dataStreamNames) {
if (product == null) {
return new IllegalArgumentException(
"Data stream(s) " + Arrays.toString(dataStreamNames) + " use and access is reserved for system operations"
);
} else {
return new IllegalArgumentException(
"Data stream(s) " + Arrays.toString(dataStreamNames) + " may not be accessed by product [" + product + "]"
);
}
}
/**
* Determines what level of system index access should be allowed in the current context. For system data streams and "net-new" system
* indices (see {@link SystemIndexDescriptor#isNetNew()}), access levels should be used to reject requests entirely. For non-net-new,
* backwards-compatible system indices, these access levels should be used for deprecation warnings.
*
* @param threadContext the current thread context that has headers associated with the current request
* @return {@link SystemIndexAccessLevel#ALL} if unrestricted system index access should be allowed,
* {@link SystemIndexAccessLevel#RESTRICTED} if a subset of system index access should be allowed, or
* {@link SystemIndexAccessLevel#NONE} if no system index access should be allowed.
*/
public static SystemIndexAccessLevel getSystemIndexAccessLevel(ThreadContext threadContext) {
// This method intentionally cannot return BACKWARDS_COMPATIBLE_ONLY - that access level should only be used manually
// in known special cases.
final String headerValue = threadContext.getHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY);
final String productHeaderValue = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY);
final boolean allowed = Booleans.parseBoolean(headerValue, true);
if (allowed) {
if (productHeaderValue != null) {
return SystemIndexAccessLevel.RESTRICTED;
} else {
return SystemIndexAccessLevel.ALL;
}
} else {
return SystemIndexAccessLevel.NONE;
}
}
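    // Illustrative sketch (not part of the original source): header combinations and the access level
    // they resolve to, starting from a fresh ThreadContext.
    //
    //   ThreadContext ctx = new ThreadContext(Settings.EMPTY);
    //   getSystemIndexAccessLevel(ctx);                                       // ALL: no headers set
    //   ctx.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "x");  // product origin present
    //   getSystemIndexAccessLevel(ctx);                                       // RESTRICTED
    //   ctx.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "false");
    //   getSystemIndexAccessLevel(ctx);                                       // NONE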
/**
* In a future release, these access levels will be used to allow or deny requests for system resources. Currently, the behavior
* differs for different types of system resources.
*
* <ol>
* <li>For a system index whose descriptor returns false for {@link SystemIndexDescriptor#isNetNew()}: if a request is
* determined to have an access level of NONE or if it accesses indices belonging to another product at a level of RESTRICTED,
 * we issue a deprecation warning.
* <li>For a system index whose descriptor returns true for {@link SystemIndexDescriptor#isNetNew()} or any system data stream:
* if a request is determined to have an access level of NONE or if it accesses indices belonging to another product at a level of
* RESTRICTED, we deny access to the system resource.
* </ol>
*/
public enum SystemIndexAccessLevel {
/** Access level that skips system resource access checks. */
ALL,
/**
* Access level that should deny access to net-new system indices and system data streams, and issue deprecation warnings for
* backwards-compatible system indices.
*/
NONE,
/**
* At this access level, check the value of the {@link SystemIndices#EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY}. If the
* request has an allowed product origin, allow access. If not, deny access to net-new system indices and system data streams, and
* issue deprecation warnings for backwards-compatible system indices.
*/
RESTRICTED,
/**
* This value exists because there was a desire for "net-new" system indices to opt in to the post-8.0 behavior of having
* access blocked in most cases, but this caused problems with certain APIs (see
* <a href="https://github.com/elastic/elasticsearch/issues/74687">issue #74687</a>), so this access level was added as a
* workaround. Once we no longer have to support accessing existing system indices, this can and should be removed, along with the
* net-new property of system indices in general.
*/
BACKWARDS_COMPATIBLE_ONLY
}
/**
* Given a collection of {@link SystemIndexDescriptor}s and their sources, checks to see if the index patterns of the listed
* descriptors overlap with any of the other patterns. If any do, throws an exception.
*
* @param featureDescriptors A map of feature names to the Features that will provide SystemIndexDescriptors
* @throws IllegalStateException Thrown if any of the index patterns overlaps with another.
*/
static void checkForOverlappingPatterns(Map<String, Feature> featureDescriptors) {
List<Tuple<String, SystemIndexDescriptor>> sourceDescriptorPair = featureDescriptors.values()
.stream()
.flatMap(feature -> feature.getIndexDescriptors().stream().map(descriptor -> new Tuple<>(feature.getName(), descriptor)))
.sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message
.toList();
List<Tuple<String, SystemDataStreamDescriptor>> sourceDataStreamDescriptorPair = featureDescriptors.values()
.stream()
.filter(feature -> feature.getDataStreamDescriptors().isEmpty() == false)
.flatMap(feature -> feature.getDataStreamDescriptors().stream().map(descriptor -> new Tuple<>(feature.getName(), descriptor)))
.sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getDataStreamName())) // Consistent ordering -> consistent error message
.toList();
// This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the
// automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states
// per pattern should be low as well. If these assumptions change, this might need to be reworked.
sourceDescriptorPair.forEach(descriptorToCheck -> {
List<Tuple<String, SystemIndexDescriptor>> descriptorsMatchingThisPattern = sourceDescriptorPair.stream()
.filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked
.filter(
d -> overlaps(descriptorToCheck.v2(), d.v2())
|| (d.v2().getAliasName() != null && descriptorToCheck.v2().matchesIndexPattern(d.v2().getAliasName()))
)
.toList();
if (descriptorsMatchingThisPattern.isEmpty() == false) {
throw new IllegalStateException(
"a system index descriptor ["
+ descriptorToCheck.v2()
+ "] from ["
+ descriptorToCheck.v1()
+ "] overlaps with other system index descriptors: ["
+ descriptorsMatchingThisPattern.stream()
.map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]")
                        .collect(Collectors.joining(", "))
                        + "]"
                );
}
List<Tuple<String, SystemDataStreamDescriptor>> dataStreamsMatching = sourceDataStreamDescriptorPair.stream()
.filter(
dsTuple -> descriptorToCheck.v2().matchesIndexPattern(dsTuple.v2().getDataStreamName())
|| overlaps(descriptorToCheck.v2().getIndexPattern(), dsTuple.v2().getBackingIndexPattern())
)
.toList();
if (dataStreamsMatching.isEmpty() == false) {
throw new IllegalStateException(
"a system index descriptor ["
+ descriptorToCheck.v2()
+ "] from ["
+ descriptorToCheck.v1()
+ "] overlaps with one or more data stream descriptors: ["
+ dataStreamsMatching.stream()
.map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]")
                        .collect(Collectors.joining(", "))
                        + "]"
                );
}
});
}
private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) {
return overlaps(a1.getIndexPattern(), a2.getIndexPattern());
}
private static boolean overlaps(String pattern1, String pattern2) {
Automaton a1Automaton = SystemIndexDescriptor.buildAutomaton(pattern1, null);
Automaton a2Automaton = SystemIndexDescriptor.buildAutomaton(pattern2, null);
return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false;
}
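    // Illustrative sketch (not part of the original source): the same intersection test applied to two
    // hypothetical patterns that share matching strings.
    //
    //   Automaton a = SystemIndexDescriptor.buildAutomaton(".feature-a*", null);
    //   Automaton b = SystemIndexDescriptor.buildAutomaton(".feature*", null);
    //   Operations.isEmpty(Operations.intersection(a, b));  // false: ".feature-a1" matches both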
private static Map<String, Feature> buildFeatureMap(List<Feature> features) {
final Map<String, Feature> map = Maps.newMapWithExpectedSize(features.size() + SERVER_SYSTEM_FEATURE_DESCRIPTORS.size());
features.forEach(feature -> map.put(feature.getName(), feature));
        // put the server items last since we expect fewer of them
SERVER_SYSTEM_FEATURE_DESCRIPTORS.forEach((source, feature) -> {
if (map.putIfAbsent(source, feature) != null) {
throw new IllegalArgumentException(
"plugin or module attempted to define the same source [" + source + "] as a built-in system index"
);
}
});
return Map.copyOf(map);
}
Collection<SystemIndexDescriptor> getSystemIndexDescriptors() {
return this.featureDescriptors.values().stream().flatMap(f -> f.getIndexDescriptors().stream()).toList();
}
public Map<String, SystemIndexDescriptor.MappingsVersion> getMappingsVersions() {
return getSystemIndexDescriptors().stream()
.filter(SystemIndexDescriptor::isAutomaticallyManaged)
.collect(Collectors.toMap(SystemIndexDescriptor::getPrimaryIndex, SystemIndexDescriptor::getMappingsVersion));
}
/**
* Check that a feature name is not reserved
* @param name Name of feature
* @param plugin Name of plugin providing the feature
*/
public static void validateFeatureName(String name, String plugin) {
if (SnapshotsService.NO_FEATURE_STATES_VALUE.equalsIgnoreCase(name)) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"feature name cannot be reserved name [\"%s\"], but was for plugin [%s]",
SnapshotsService.NO_FEATURE_STATES_VALUE,
plugin
)
);
}
}
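    // Illustrative sketch (not part of the original source): "none" is the reserved value used to
    // request "no feature states" during snapshot operations, so it can never name a feature.
    //
    //   SystemIndices.validateFeatureName("tasks", "my-plugin");  // ok
    //   SystemIndices.validateFeatureName("none", "my-plugin");   // throws IllegalArgumentException
    //   SystemIndices.validateFeatureName("NONE", "my-plugin");   // also throws: the check ignores case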
/**
* Describes an Elasticsearch system feature that keeps state in protected indices and data streams.
*
* <p>This is an internal class that closely follows the model of {@link SystemIndexPlugin}. See that class’s documents for high-level
* details about what constitutes a system feature.
*
* <p>This class has a static {@link #cleanUpFeature(Collection, Collection, String, ClusterService, Client, ActionListener)} method
* that is the default implementation for resetting feature state.
*/
public static class Feature {
private final String name;
private final String description;
private final Collection<SystemIndexDescriptor> indexDescriptors;
private final Collection<SystemDataStreamDescriptor> dataStreamDescriptors;
private final Collection<AssociatedIndexDescriptor> associatedIndexDescriptors;
private final TriConsumer<ClusterService, Client, ActionListener<ResetFeatureStateStatus>> cleanUpFunction;
private final MigrationPreparationHandler preMigrationFunction;
private final MigrationCompletionHandler postMigrationFunction;
/**
* Construct a Feature with a custom cleanup function
* @param name The name of the feature
* @param description Description of the feature
* @param indexDescriptors Collection of objects describing system indices for this feature
* @param dataStreamDescriptors Collection of objects describing system data streams for this feature
* @param associatedIndexDescriptors Collection of objects describing associated indices for this feature
* @param cleanUpFunction A function that will clean up the feature's state
* @param preMigrationFunction A function that will be called prior to upgrading any of this plugin's system indices
* @param postMigrationFunction A function that will be called after upgrading all of this plugin's system indices
*/
public Feature(
String name,
String description,
Collection<SystemIndexDescriptor> indexDescriptors,
Collection<SystemDataStreamDescriptor> dataStreamDescriptors,
Collection<AssociatedIndexDescriptor> associatedIndexDescriptors,
TriConsumer<ClusterService, Client, ActionListener<ResetFeatureStateStatus>> cleanUpFunction,
MigrationPreparationHandler preMigrationFunction,
MigrationCompletionHandler postMigrationFunction
) {
this.name = name;
this.description = description;
this.indexDescriptors = indexDescriptors;
this.dataStreamDescriptors = dataStreamDescriptors;
this.associatedIndexDescriptors = associatedIndexDescriptors;
this.cleanUpFunction = cleanUpFunction;
this.preMigrationFunction = preMigrationFunction;
this.postMigrationFunction = postMigrationFunction;
}
/**
* Construct a Feature using the default clean-up function
* @param name Name of the feature, used in logging
* @param description Description of the feature
* @param indexDescriptors Patterns describing system indices for this feature
*/
public Feature(String name, String description, Collection<SystemIndexDescriptor> indexDescriptors) {
this(
name,
description,
indexDescriptors,
Collections.emptyList(),
Collections.emptyList(),
(clusterService, client, listener) -> cleanUpFeature(
indexDescriptors,
Collections.emptyList(),
name,
clusterService,
client,
listener
),
Feature::noopPreMigrationFunction,
Feature::noopPostMigrationFunction
);
}
/**
* Construct a Feature using the default clean-up function
* @param name Name of the feature, used in logging
* @param description Description of the feature
* @param indexDescriptors Patterns describing system indices for this feature
* @param dataStreamDescriptors Collection of objects describing system data streams for this feature
*/
public Feature(
String name,
String description,
Collection<SystemIndexDescriptor> indexDescriptors,
Collection<SystemDataStreamDescriptor> dataStreamDescriptors
) {
this(
name,
description,
indexDescriptors,
dataStreamDescriptors,
Collections.emptyList(),
(clusterService, client, listener) -> cleanUpFeature(
indexDescriptors,
Collections.emptyList(),
name,
clusterService,
client,
listener
),
Feature::noopPreMigrationFunction,
Feature::noopPostMigrationFunction
);
}
/**
* Creates a {@link Feature} from a {@link SystemIndexPlugin}.
* @param plugin The {@link SystemIndexPlugin} that adds this feature.
* @param settings Node-level settings, as this may impact the descriptors returned by the plugin.
* @return A {@link Feature} which represents the feature added by the given plugin.
*/
public static Feature fromSystemIndexPlugin(SystemIndexPlugin plugin, Settings settings) {
return new Feature(
plugin.getFeatureName(),
plugin.getFeatureDescription(),
plugin.getSystemIndexDescriptors(settings),
plugin.getSystemDataStreamDescriptors(),
plugin.getAssociatedIndexDescriptors(),
plugin::cleanUpFeature,
plugin::prepareForIndicesMigration,
plugin::indicesMigrationComplete
);
}
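        // Illustrative sketch (not part of the original source): constructing a Feature directly for a
        // hypothetical plugin. The builder calls below are assumptions about SystemIndexDescriptor's
        // builder API rather than verbatim usage from this file.
        //
        //   SystemIndexDescriptor descriptor = SystemIndexDescriptor.builder()
        //       .setIndexPattern(".my-feature*")
        //       .setDescription("stores my feature's state")
        //       .build();
        //   Feature feature = new Feature("my_feature", "Demo feature", List.of(descriptor));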
public String getDescription() {
return description;
}
public Collection<SystemIndexDescriptor> getIndexDescriptors() {
return indexDescriptors;
}
public Collection<SystemDataStreamDescriptor> getDataStreamDescriptors() {
return dataStreamDescriptors;
}
public Collection<AssociatedIndexDescriptor> getAssociatedIndexDescriptors() {
return associatedIndexDescriptors;
}
public TriConsumer<ClusterService, Client, ActionListener<ResetFeatureStateStatus>> getCleanUpFunction() {
return cleanUpFunction;
}
public String getName() {
return name;
}
public MigrationPreparationHandler getPreMigrationFunction() {
return preMigrationFunction;
}
public MigrationCompletionHandler getPostMigrationFunction() {
return postMigrationFunction;
}
private static void cleanUpFeatureForIndices(
String name,
Client client,
String[] indexNames,
final ActionListener<ResetFeatureStateStatus> listener
) {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();
deleteIndexRequest.indices(indexNames);
client.execute(TransportDeleteIndexAction.TYPE, deleteIndexRequest, new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
listener.onResponse(ResetFeatureStateStatus.success(name));
}
@Override
public void onFailure(Exception e) {
listener.onResponse(ResetFeatureStateStatus.failure(name, e));
}
});
}
/**
* Clean up the state of a feature
* @param indexDescriptors List of descriptors of a feature's system indices
* @param associatedIndexDescriptors List of descriptors of a feature's associated indices
* @param name Name of the feature, used in logging
* @param clusterService A clusterService, for retrieving cluster metadata
* @param client A client, for issuing delete requests
* @param listener A listener to return success or failure of cleanup
*/
public static void cleanUpFeature(
Collection<SystemIndexDescriptor> indexDescriptors,
Collection<? extends IndexPatternMatcher> associatedIndexDescriptors,
String name,
ClusterService clusterService,
Client client,
final ActionListener<ResetFeatureStateStatus> listener
) {
Metadata metadata = clusterService.state().getMetadata();
final List<Exception> exceptions = new ArrayList<>();
final CheckedConsumer<ResetFeatureStateStatus, Exception> handleResponse = resetFeatureStateStatus -> {
if (resetFeatureStateStatus.getStatus() == ResetFeatureStateStatus.Status.FAILURE) {
synchronized (exceptions) {
exceptions.add(resetFeatureStateStatus.getException());
}
}
};
try (var listeners = new RefCountingListener(listener.map(ignored -> {
if (exceptions.isEmpty()) {
return ResetFeatureStateStatus.success(name);
} else {
for (final var exception : exceptions) {
logger.warn(() -> "error while resetting feature [" + name + "]", exception);
}
return ResetFeatureStateStatus.failure(
name,
new Exception(exceptions.stream().map(Exception::getMessage).collect(Collectors.joining(", ", "[", "]")))
);
}
}))) {
                // Send cleanup for the associated indices; they don't need a special origin since they are not protected
String[] associatedIndices = associatedIndexDescriptors.stream()
.flatMap(descriptor -> descriptor.getMatchingIndices(metadata).stream())
.toArray(String[]::new);
if (associatedIndices.length > 0) {
cleanUpFeatureForIndices(name, client, associatedIndices, listeners.acquire(handleResponse));
}
// One descriptor at a time, create an originating client and clean up the feature
for (final var indexDescriptor : indexDescriptors) {
List<String> matchingIndices = indexDescriptor.getMatchingIndices(metadata);
if (matchingIndices.isEmpty() == false) {
final Client clientWithOrigin = (indexDescriptor.getOrigin() == null)
? client
: new OriginSettingClient(client, indexDescriptor.getOrigin());
cleanUpFeatureForIndices(
name,
clientWithOrigin,
matchingIndices.toArray(Strings.EMPTY_ARRAY),
listeners.acquire(handleResponse)
);
}
}
}
}
// No-op pre-migration function to be used as the default in case none are provided.
private static void noopPreMigrationFunction(
ClusterService clusterService,
Client client,
ActionListener<Map<String, Object>> listener
) {
listener.onResponse(Collections.emptyMap());
}
        // No-op post-migration function to be used as the default in case none are provided.
private static void noopPostMigrationFunction(
Map<String, Object> preUpgradeMetadata,
ClusterService clusterService,
Client client,
ActionListener<Boolean> listener
) {
listener.onResponse(true);
}
/**
* Type for the handler that's invoked prior to migrating a Feature's system indices.
* See {@link SystemIndexPlugin#prepareForIndicesMigration(ClusterService, Client, ActionListener)}.
*/
@FunctionalInterface
public interface MigrationPreparationHandler {
void prepareForIndicesMigration(ClusterService clusterService, Client client, ActionListener<Map<String, Object>> listener);
}
/**
* Type for the handler that's invoked when all of a feature's system indices have been migrated.
* See {@link SystemIndexPlugin#indicesMigrationComplete(Map, ClusterService, Client, ActionListener)}.
*/
@FunctionalInterface
public interface MigrationCompletionHandler {
void indicesMigrationComplete(
Map<String, Object> preUpgradeMetadata,
ClusterService clusterService,
Client client,
ActionListener<Boolean> listener
);
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/indices/SystemIndices.java |
465 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.env;
import org.elasticsearch.Build;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Objects;
/**
* Metadata associated with this node: its persistent node ID and its version.
* The metadata is persisted in the data folder of this node and is reused across restarts.
*/
public final class NodeMetadata {
static final String NODE_ID_KEY = "node_id";
static final String NODE_VERSION_KEY = "node_version";
static final String OLDEST_INDEX_VERSION_KEY = "oldest_index_version";
private final String nodeId;
private final BuildVersion nodeVersion;
private final BuildVersion previousNodeVersion;
private final IndexVersion oldestIndexVersion;
@UpdateForV9 // version should be non-null in the node metadata from v9 onwards
private NodeMetadata(
final String nodeId,
final BuildVersion buildVersion,
final BuildVersion previousBuildVersion,
final IndexVersion oldestIndexVersion
) {
this.nodeId = Objects.requireNonNull(nodeId);
this.nodeVersion = Objects.requireNonNull(buildVersion);
this.previousNodeVersion = Objects.requireNonNull(previousBuildVersion);
this.oldestIndexVersion = Objects.requireNonNull(oldestIndexVersion);
}
public NodeMetadata(final String nodeId, final BuildVersion buildVersion, final IndexVersion oldestIndexVersion) {
this(nodeId, buildVersion, buildVersion, oldestIndexVersion);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NodeMetadata that = (NodeMetadata) o;
return nodeId.equals(that.nodeId)
&& nodeVersion.equals(that.nodeVersion)
&& oldestIndexVersion.equals(that.oldestIndexVersion)
&& Objects.equals(previousNodeVersion, that.previousNodeVersion);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, nodeVersion, previousNodeVersion, oldestIndexVersion);
}
@Override
public String toString() {
return "NodeMetadata{"
+ "nodeId='"
+ nodeId
+ '\''
+ ", nodeVersion="
+ nodeVersion
+ ", previousNodeVersion="
+ previousNodeVersion
+ ", oldestIndexVersion="
+ oldestIndexVersion
+ '}';
}
public String nodeId() {
return nodeId;
}
public BuildVersion nodeVersion() {
return nodeVersion;
}
/**
     * When a node starts we read the existing node metadata from disk (see {@code NodeEnvironment#loadNodeMetadata}), store a reference
     * to the node version that we read from there in {@code previousNodeVersion}, and then upgrade the version to the current version of
     * the node ({@link NodeMetadata#upgradeToCurrentVersion()}) before storing the node metadata again on disk. As a result,
     * {@code previousNodeVersion} refers to the version that this node was last started on.
*/
public BuildVersion previousNodeVersion() {
return previousNodeVersion;
}
public IndexVersion oldestIndexVersion() {
return oldestIndexVersion;
}
@UpdateForV9
public void verifyUpgradeToCurrentVersion() {
// Enable the following assertion for V9:
// assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards";
if (nodeVersion.onOrAfterMinimumCompatible() == false) {
throw new IllegalStateException(
"cannot upgrade a node from version ["
+ nodeVersion
+ "] directly to version ["
+ Build.current().version()
+ "], "
+ "upgrade to version ["
+ Build.current().minWireCompatVersion()
+ "] first."
);
}
if (nodeVersion.isFutureVersion()) {
throw new IllegalStateException(
"cannot downgrade a node from version [" + nodeVersion + "] to version [" + Build.current().version() + "]"
);
}
}
public NodeMetadata upgradeToCurrentVersion() {
verifyUpgradeToCurrentVersion();
return nodeVersion.equals(BuildVersion.current())
? this
: new NodeMetadata(nodeId, BuildVersion.current(), nodeVersion, oldestIndexVersion);
}
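    // Illustrative sketch (not part of the original source): upgrading records the version the node was
    // previously started on, assuming "someOlderBuildVersion" (a hypothetical variable) is still
    // wire-compatible so that verifyUpgradeToCurrentVersion() passes.
    //
    //   NodeMetadata onDisk = new NodeMetadata("node-id", someOlderBuildVersion, IndexVersions.ZERO);
    //   NodeMetadata upgraded = onDisk.upgradeToCurrentVersion();
    //   upgraded.nodeVersion();          // BuildVersion.current()
    //   upgraded.previousNodeVersion();  // someOlderBuildVersion, as read from disk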
private static class Builder {
String nodeId;
BuildVersion nodeVersion;
BuildVersion previousNodeVersion;
IndexVersion oldestIndexVersion;
public void setNodeId(String nodeId) {
this.nodeId = nodeId;
}
public void setNodeVersionId(int nodeVersionId) {
this.nodeVersion = BuildVersion.fromVersionId(nodeVersionId);
}
public void setOldestIndexVersion(int oldestIndexVersion) {
this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion);
}
@UpdateForV9 // version is required in the node metadata from v9 onwards
public NodeMetadata build() {
final IndexVersion oldestIndexVersion;
if (this.nodeVersion == null) {
nodeVersion = BuildVersion.fromVersionId(0);
}
if (this.previousNodeVersion == null) {
previousNodeVersion = nodeVersion;
}
if (this.oldestIndexVersion == null) {
oldestIndexVersion = IndexVersions.ZERO;
} else {
oldestIndexVersion = this.oldestIndexVersion;
}
return new NodeMetadata(nodeId, nodeVersion, previousNodeVersion, oldestIndexVersion);
}
}
static class NodeMetadataStateFormat extends MetadataStateFormat<NodeMetadata> {
private ObjectParser<Builder, Void> objectParser;
/**
* @param ignoreUnknownFields whether to ignore unknown fields or not. Normally we are strict about this, but
* {@link OverrideNodeVersionCommand} is lenient.
*/
NodeMetadataStateFormat(boolean ignoreUnknownFields) {
super("node-");
objectParser = new ObjectParser<>("node_meta_data", ignoreUnknownFields, Builder::new);
objectParser.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY));
objectParser.declareInt(Builder::setNodeVersionId, new ParseField(NODE_VERSION_KEY));
objectParser.declareInt(Builder::setOldestIndexVersion, new ParseField(OLDEST_INDEX_VERSION_KEY));
}
@Override
protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException {
XContentBuilder xContentBuilder = super.newXContentBuilder(type, stream);
xContentBuilder.prettyPrint();
return xContentBuilder;
}
@Override
public void toXContent(XContentBuilder builder, NodeMetadata nodeMetadata) throws IOException {
builder.field(NODE_ID_KEY, nodeMetadata.nodeId);
builder.field(NODE_VERSION_KEY, nodeMetadata.nodeVersion.id());
builder.field(OLDEST_INDEX_VERSION_KEY, nodeMetadata.oldestIndexVersion.id());
}
@Override
public NodeMetadata fromXContent(XContentParser parser) throws IOException {
return objectParser.apply(parser, null).build();
}
}
public static final MetadataStateFormat<NodeMetadata> FORMAT = new NodeMetadataStateFormat(false);
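    // Illustrative sketch (not part of the original source): loading the persisted metadata from a node's
    // data paths via the loadLatestState method inherited from MetadataStateFormat; the "logger",
    // "xContentRegistry" and "dataPaths" variables are hypothetical.
    //
    //   NodeMetadata metadata = NodeMetadata.FORMAT.loadLatestState(logger, xContentRegistry, dataPaths);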
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/env/NodeMetadata.java |
467 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.util.Strings;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRouting;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.time.DateUtils;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.node.Node;
import java.time.Instant;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_VERSION_CREATED;
import static org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING;
/**
* This class encapsulates all index level settings and handles settings updates.
* It's created per index and available to all index level classes and allows them to retrieve
* the latest updated settings instance. Classes that need to listen to settings updates can register
* a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will
* be called for each settings update.
*/
public final class IndexSettings {
public static final Setting<List<String>> DEFAULT_FIELD_SETTING = Setting.stringListSetting(
"index.query.default_field",
Collections.singletonList("*"),
Property.IndexScope,
Property.Dynamic,
Property.ServerlessPublic
);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = Setting.boolSetting(
"index.query_string.lenient",
false,
Property.IndexScope,
Property.ServerlessPublic
);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting(
"indices.query.query_string.analyze_wildcard",
false,
Property.NodeScope
);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting(
"indices.query.query_string.allowLeadingWildcard",
true,
Property.NodeScope
);
public static final Setting<Boolean> ALLOW_UNMAPPED = Setting.boolSetting(
"index.query.parse.allow_unmapped_fields",
true,
Property.IndexScope
);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting(
"index.translog.sync_interval",
TimeValue.timeValueSeconds(5),
TimeValue.timeValueMillis(100),
Property.Dynamic,
Property.IndexScope
);
public static final Setting<TimeValue> INDEX_SEARCH_IDLE_AFTER = Setting.timeSetting(
"index.search.idle.after",
TimeValue.timeValueSeconds(30),
TimeValue.timeValueMinutes(0),
Property.IndexScope,
Property.Dynamic
);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = Setting.enumSetting(
Translog.Durability.class,
"index.translog.durability",
Translog.Durability.REQUEST,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting(
"index.warmer.enabled",
true,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
return switch (s) {
case "false", "true", "checksum" -> s;
default -> throw new IllegalArgumentException(
"unknown value for [index.shard.check_on_startup] must be one of " + "[true, false, checksum] but was: " + s
);
};
}, Property.IndexScope);
/**
* Index setting describing the maximum value of from + size on a query.
     * The default maximum value of from + size on a query is 10,000. This was chosen as
     * a conservative default as it is unlikely to cause trouble. Users can
     * certainly profile their cluster and decide to set it to 100,000
     * safely. 1,000,000 is probably way too high for any cluster to set
* safely.
*/
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = Setting.intSetting(
"index.max_result_window",
10000,
1,
Property.Dynamic,
Property.IndexScope
);
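    // Illustrative sketch (not part of the original source): because the setting carries Property.Dynamic,
    // it can be changed on a live index, e.g. when building a settings update:
    //
    //   Settings update = Settings.builder()
    //       .put(MAX_RESULT_WINDOW_SETTING.getKey(), 50_000)
    //       .build();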
/**
* Index setting describing the maximum value of from + size on an individual inner hit definition or
* top hits aggregation. The default maximum of 100 is defensive for the reason that the number of inner hit responses
* and number of top hits buckets returned is unbounded. Profile your cluster when increasing this setting.
*/
public static final Setting<Integer> MAX_INNER_RESULT_WINDOW_SETTING = Setting.intSetting(
"index.max_inner_result_window",
100,
1,
Property.Dynamic,
Property.IndexScope
);
/**
     * Index setting describing the maximum number of allowed `script_fields` that can be retrieved
* per search request. The default maximum of 32 is defensive for the reason that retrieving
* script fields is a costly operation.
*/
public static final Setting<Integer> MAX_SCRIPT_FIELDS_SETTING = Setting.intSetting(
"index.max_script_fields",
32,
0,
Property.Dynamic,
Property.IndexScope
);
/**
     * A setting describing the maximum number of tokens that can be
     * produced using the _analyze API. The default maximum of 10000 is defensive
     * to prevent generating too many token objects.
*/
public static final Setting<Integer> MAX_TOKEN_COUNT_SETTING = Setting.intSetting(
"index.analyze.max_token_count",
10000,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* A setting describing the maximum number of characters that will be analyzed for a highlight request.
* This setting is only applicable when highlighting is requested on a text that was indexed without
* offsets or term vectors.
     * The default maximum of 1M characters is defensive, as indexing with offsets or term
     * vectors is recommended for highlighting larger texts.
*/
public static final Setting<Integer> MAX_ANALYZED_OFFSET_SETTING = Setting.intSetting(
"index.highlight.max_analyzed_offset",
1000000,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting to enable/disable the {@link UnifiedHighlighter.HighlightFlag#WEIGHT_MATCHES}
* mode of the unified highlighter.
*/
public static final Setting<Boolean> WEIGHT_MATCHES_MODE_ENABLED_SETTING = Setting.boolSetting(
"index.highlight.weight_matches_mode.enabled",
true,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum number of terms that can be used in Terms Query.
     * The default maximum of 65536 terms is defensive, as extra processing and memory is involved
     * for each additional term, and a large number of terms degrades cluster performance.
*/
public static final Setting<Integer> MAX_TERMS_COUNT_SETTING = Setting.intSetting(
"index.max_terms_count",
65536,
1,
Property.Dynamic,
Property.IndexScope
);
/**
     * Index setting describing, for NGramTokenizer and NGramTokenFilter,
     * the maximum difference between
     * max_gram (maximum length of characters in a gram) and
     * min_gram (minimum length of characters in a gram).
     * The default value is 1, as this is the default difference in NGramTokenizer,
     * and is defensive as it prevents generating too many index terms.
*/
public static final Setting<Integer> MAX_NGRAM_DIFF_SETTING = Setting.intSetting(
"index.max_ngram_diff",
1,
0,
Property.Dynamic,
Property.IndexScope
);
/**
     * Index setting describing, for ShingleTokenFilter,
     * the maximum difference between
     * max_shingle_size and min_shingle_size.
     * The default value of 3 is defensive as it prevents generating too many tokens.
*/
public static final Setting<Integer> MAX_SHINGLE_DIFF_SETTING = Setting.intSetting(
"index.max_shingle_diff",
3,
0,
Property.Dynamic,
Property.IndexScope
);
/**
     * Index setting describing the maximum number of `docvalue_fields` that can be retrieved
     * per search request. The default maximum of 100 is defensive because retrieving
     * doc values might incur a per-field per-document seek.
*/
public static final Setting<Integer> MAX_DOCVALUE_FIELDS_SEARCH_SETTING = Setting.intSetting(
"index.max_docvalue_fields_search",
100,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* Index setting describing the maximum size of the rescore window. Defaults to {@link #MAX_RESULT_WINDOW_SETTING}
* because they both do the same thing: control the size of the heap of hits.
*/
public static final Setting<Integer> MAX_RESCORE_WINDOW_SETTING = Setting.intSetting(
"index.max_rescore_window",
MAX_RESULT_WINDOW_SETTING,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* Only intended for stateless.
*/
public static final Setting<Boolean> INDEX_FAST_REFRESH_SETTING = Setting.boolSetting(
"index.fast_refresh",
false,
Property.Final,
Property.IndexScope
);
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final Setting<TimeValue> NODE_DEFAULT_REFRESH_INTERVAL_SETTING = Setting.timeSetting(
"node._internal.default_refresh_interval",
DEFAULT_REFRESH_INTERVAL,
TimeValue.MINUS_ONE,
Property.NodeScope
); // TODO: remove setting
public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(15); // TODO: this value is still not final
public static TimeValue STATELESS_MIN_NON_FAST_REFRESH_INTERVAL = TimeValue.timeValueSeconds(5);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", (settings) -> {
if (EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless") && INDEX_FAST_REFRESH_SETTING.get(settings) == false) {
return STATELESS_DEFAULT_REFRESH_INTERVAL;
}
return DEFAULT_REFRESH_INTERVAL;
}, new RefreshIntervalValidator(), Property.Dynamic, Property.IndexScope, Property.ServerlessPublic);
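    // Default-selection sketch for the setting above: a stateless index without fast refresh
    // defaults to STATELESS_DEFAULT_REFRESH_INTERVAL (currently 15s); every other index falls
    // back to DEFAULT_REFRESH_INTERVAL (1s).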
static class RefreshIntervalValidator implements Setting.Validator<TimeValue> {
@Override
public void validate(TimeValue value) {}
@Override
public void validate(final TimeValue value, final Map<Setting<?>, Object> settings) {
final String existingShardsAllocator = (String) settings.get(EXISTING_SHARDS_ALLOCATOR_SETTING);
final Boolean fastRefresh = (Boolean) settings.get(INDEX_FAST_REFRESH_SETTING);
final IndexVersion indexVersion = (IndexVersion) settings.get(SETTING_INDEX_VERSION_CREATED);
if (existingShardsAllocator.equals("stateless")
&& fastRefresh == false
&& value.compareTo(TimeValue.ZERO) > 0
&& value.compareTo(STATELESS_MIN_NON_FAST_REFRESH_INTERVAL) < 0
&& indexVersion.after(IndexVersions.V_8_10_0)) {
throw new IllegalArgumentException(
"index setting ["
+ IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()
+ "="
+ value
+ "] should be either "
+ TimeValue.MINUS_ONE
+ " or equal to or greater than "
+ STATELESS_MIN_NON_FAST_REFRESH_INTERVAL
);
}
}
@Override
public Iterator<Setting<?>> settings() {
return REFRESH_INTERVAL_VALIDATOR_SETTINGS_LIST.iterator();
}
}
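    // Validation sketch: on a stateless index without fast refresh that was created after 8.10,
    // a positive interval below STATELESS_MIN_NON_FAST_REFRESH_INTERVAL is rejected, e.g.
    // "index.refresh_interval=2s" throws, while "-1" (refresh disabled) and any value >= 5s pass.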
private static final List<Setting<?>> REFRESH_INTERVAL_VALIDATOR_SETTINGS_LIST = List.of(
EXISTING_SHARDS_ALLOCATOR_SETTING,
INDEX_FAST_REFRESH_SETTING,
SETTING_INDEX_VERSION_CREATED
);
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
"index.translog.flush_threshold_size",
/*
* Prevent the translog from growing over 10GB or 20% of the recommended shard size of 50GB. This helps bound the maximum disk usage
* overhead of translogs.
*/
new ByteSizeValue(10, ByteSizeUnit.GB),
/*
* An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread
* can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing.
* However, small thresholds are useful for testing so we do not add a large lower bound here.
*/
ByteSizeValue.ofBytes(Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1),
ByteSizeValue.ofBytes(Long.MAX_VALUE),
Property.Dynamic,
Property.IndexScope
);
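    // Hypothetical dynamic update of the threshold above via the settings infrastructure:
    //   Settings.builder().put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "512mb").build();
    // Values at or below the empty-translog header size (55 bytes) are rejected by the lower bound.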
public static final Setting<TimeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_AGE_SETTING = Setting.timeSetting(
"index.translog.flush_threshold_age",
/*
* Flush at least every minute by default. This gives a first order approximation of the maximum time it takes to replay translogs
* of about one minute as well. In practice, this is not exactly true since replaying translogs is not as concurrent as indexing,
* especially as Elasticsearch bounds the maximum number of concurrent replays of translogs, but it should still be a good enough
* approximation.
*/
new TimeValue(1, TimeUnit.MINUTES),
new TimeValue(1, TimeUnit.SECONDS),
new TimeValue(1, TimeUnit.HOURS),
Property.Dynamic,
Property.IndexScope
);
/**
* The minimum size of a merge that triggers a flush in order to free resources
*/
public static final Setting<ByteSizeValue> INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
"index.flush_after_merge",
new ByteSizeValue(512, ByteSizeUnit.MB),
ByteSizeValue.ZERO, // always flush after merge
ByteSizeValue.ofBytes(Long.MAX_VALUE), // never flush after merge
Property.Dynamic,
Property.IndexScope
);
/**
* The maximum size of a translog generation. This is independent of the maximum size of
* translog operations that have not been flushed.
*/
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
"index.translog.generation_threshold_size",
new ByteSizeValue(64, ByteSizeUnit.MB),
/*
* An empty translog occupies 55 bytes on disk. If the generation threshold is
* below this, the flush thread can get stuck in an infinite loop repeatedly
* rolling the generation as every new generation will already exceed the
* generation threshold. However, small thresholds are useful for testing so we
* do not add a large lower bound here.
*/
ByteSizeValue.ofBytes(Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1),
ByteSizeValue.ofBytes(Long.MAX_VALUE),
Property.Dynamic,
Property.IndexScope
);
/**
     * Index setting to enable/disable garbage collection of deletes.
     * This setting can be updated in real time.
*/
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING = Setting.timeSetting(
"index.gc_deletes",
DEFAULT_GC_DELETES,
new TimeValue(-1, TimeUnit.MILLISECONDS),
Property.Dynamic,
Property.IndexScope
);
/**
* Specifies if the index should use soft-delete instead of hard-delete for update/delete operations.
     * Soft-deletes are enabled by default for 7.0 indices and mandatory for 8.0 indices.
*/
public static final Setting<Boolean> INDEX_SOFT_DELETES_SETTING = Setting.boolSetting(
"index.soft_deletes.enabled",
true,
Property.IndexScope,
Property.Final
);
/**
* Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted
* documents increases the chance of operation-based recoveries and allows querying a longer history of documents.
     * If soft-deletes are enabled, an engine by default will retain all operations up to the global checkpoint.
**/
public static final Setting<Long> INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING = Setting.longSetting(
"index.soft_deletes.retention.operations",
0,
0,
Property.IndexScope,
Property.Dynamic
);
/**
* Controls how long translog files that are no longer needed for persistence reasons
* will be kept around before being deleted. Keeping more files is useful to increase
* the chance of ops based recoveries for indices with soft-deletes disabled.
* TODO: Remove this setting in 9.0.
**/
public static final Setting<TimeValue> INDEX_TRANSLOG_RETENTION_AGE_SETTING = Setting.timeSetting(
"index.translog.retention.age",
TimeValue.MINUS_ONE,
TimeValue.MINUS_ONE,
Property.Dynamic,
Property.IndexScope
);
/**
* Controls how many translog files that are no longer needed for persistence reasons
* will be kept around before being deleted. Keeping more files is useful to increase
* the chance of ops based recoveries for indices with soft-deletes disabled.
* TODO: Remove this setting in 9.0.
**/
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_RETENTION_SIZE_SETTING = Setting.byteSizeSetting(
"index.translog.retention.size",
settings -> "-1",
Property.Dynamic,
Property.IndexScope
);
/**
* Controls the maximum length of time since a retention lease is created or renewed before it is considered expired.
*/
public static final Setting<TimeValue> INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING = Setting.timeSetting(
"index.soft_deletes.retention_lease.period",
TimeValue.timeValueHours(12),
TimeValue.ZERO,
Property.Dynamic,
Property.IndexScope
);
/**
     * The maximum number of refresh listeners allowed on this shard.
*/
public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting(
"index.max_refresh_listeners",
1000,
0,
Property.Dynamic,
Property.IndexScope
);
/**
* The maximum number of slices allowed in a scroll request
*/
public static final Setting<Integer> MAX_SLICES_PER_SCROLL = Setting.intSetting(
"index.max_slices_per_scroll",
1024,
1,
Property.Dynamic,
Property.IndexScope
);
/**
* The maximum length of regex string allowed in a regexp query.
*/
public static final Setting<Integer> MAX_REGEX_LENGTH_SETTING = Setting.intSetting(
"index.max_regex_length",
1000,
1,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<String> DEFAULT_PIPELINE = new Setting<>(
"index.default_pipeline",
IngestService.NOOP_PIPELINE_NAME,
Function.identity(),
Property.Dynamic,
Property.IndexScope,
Property.ServerlessPublic
);
public static final Setting<String> FINAL_PIPELINE = new Setting<>(
"index.final_pipeline",
IngestService.NOOP_PIPELINE_NAME,
Function.identity(),
Property.Dynamic,
Property.IndexScope,
Property.ServerlessPublic
);
/**
     * Marks an index as search-throttled. This means that no more than one shard of such an index will be searched concurrently
*/
public static final Setting<Boolean> INDEX_SEARCH_THROTTLED = Setting.boolSetting(
"index.search.throttled",
false,
Property.IndexScope,
Property.PrivateIndex,
Property.Dynamic
);
/**
* Determines a balance between file-based and operations-based peer recoveries. The number of operations that will be used in an
* operations-based peer recovery is limited to this proportion of the total number of documents in the shard (including deleted
* documents) on the grounds that a file-based peer recovery may copy all of the documents in the shard over to the new peer, but is
* significantly faster than replaying the missing operations on the peer, so once a peer falls far enough behind the primary it makes
* more sense to copy all the data over again instead of replaying history.
*
* Defaults to retaining history for up to 10% of the documents in the shard. This can only be changed in tests, since this setting is
* intentionally unregistered.
*/
public static final Setting<Double> FILE_BASED_RECOVERY_THRESHOLD_SETTING = Setting.doubleSetting(
"index.recovery.file_based_threshold",
0.1d,
0.0d,
Setting.Property.IndexScope
);
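    // Worked example: with the default threshold of 0.1, a shard holding 1,000,000 documents
    // (including deleted ones) prefers an operations-based recovery only while the peer is missing
    // at most ~100,000 operations; beyond that a full file-based copy is considered cheaper.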
/**
* This index setting is intentionally undocumented and should be used as an escape hatch to disable BloomFilter of the
* _id field of non-data-stream indices, which is enabled by default. This setting doesn't affect data-stream indices.
*/
public static final Setting<Boolean> BLOOM_FILTER_ID_FIELD_ENABLED_SETTING = Setting.boolSetting(
"index.bloom_filter_for_id_field.enabled",
true,
Setting.Property.Dynamic,
Setting.Property.IndexScope,
Property.DeprecatedWarning
);
public static final String LIFECYCLE_ORIGINATION_DATE = "index.lifecycle.origination_date";
public static final Setting<Long> LIFECYCLE_ORIGINATION_DATE_SETTING = Setting.longSetting(
LIFECYCLE_ORIGINATION_DATE,
-1,
-1,
Property.Dynamic,
Property.IndexScope,
Property.ServerlessPublic
);
public static final String LIFECYCLE_PARSE_ORIGINATION_DATE = "index.lifecycle.parse_origination_date";
public static final Setting<Boolean> LIFECYCLE_PARSE_ORIGINATION_DATE_SETTING = Setting.boolSetting(
LIFECYCLE_PARSE_ORIGINATION_DATE,
false,
Property.Dynamic,
Property.IndexScope
);
public static final String PREFER_ILM = "index.lifecycle.prefer_ilm";
public static final Setting<Boolean> PREFER_ILM_SETTING = Setting.boolSetting(PREFER_ILM, true, Property.Dynamic, Property.IndexScope);
/**
     * In time series mode, the start time of the index; document timestamps must be larger than start_time
*/
public static final Setting<Instant> TIME_SERIES_START_TIME = Setting.dateSetting(
"index.time_series.start_time",
Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999),
v -> {},
Property.IndexScope,
Property.Final,
Property.ServerlessPublic
);
/**
     * In time series mode, the end time of the index; document timestamps must be smaller than end_time
*/
public static final Setting<Instant> TIME_SERIES_END_TIME = Setting.dateSetting(
"index.time_series.end_time",
Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_9999),
new Setting.Validator<>() {
@Override
public void validate(Instant value) {}
@Override
public void validate(Instant value, Map<Setting<?>, Object> settings) {
@SuppressWarnings("unchecked")
Instant startTime = (Instant) settings.get(TIME_SERIES_START_TIME);
if (startTime.toEpochMilli() > value.toEpochMilli()) {
throw new IllegalArgumentException("index.time_series.end_time must be larger than index.time_series.start_time");
}
}
@Override
public Iterator<Setting<?>> settings() {
List<Setting<?>> settings = List.of(TIME_SERIES_START_TIME);
return settings.iterator();
}
},
Property.IndexScope,
Property.Dynamic,
Property.ServerlessPublic
);
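    // Validation sketch for the two bounds above: end_time must be greater than or equal to
    // start_time, and since TIME_SERIES_END_TIME is Dynamic while TIME_SERIES_START_TIME is Final,
    // the accepted @timestamp range can only be extended forward on a live index.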
public static final Setting<Boolean> TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING = Setting.boolSetting(
"index.time_series.es87tsdb_codec.enabled",
true,
Property.IndexScope,
Property.Final
);
/**
* Returns <code>true</code> if TSDB encoding is enabled. The default is <code>true</code>
*/
public boolean isES87TSDBCodecEnabled() {
return es87TSDBCodecEnabled;
}
/**
* The {@link IndexMode "mode"} of the index.
*/
public static final Setting<IndexMode> MODE = Setting.enumSetting(
IndexMode.class,
"index.mode",
IndexMode.STANDARD,
new Setting.Validator<>() {
@Override
public void validate(IndexMode value) {}
@Override
public void validate(IndexMode value, Map<Setting<?>, Object> settings) {
value.validateWithOtherSettings(settings);
}
@Override
public Iterator<Setting<?>> settings() {
return IndexMode.VALIDATE_WITH_SETTINGS.iterator();
}
},
Property.IndexScope,
Property.Final,
Property.ServerlessPublic
);
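    // Cross-setting validation sketch: the validator above delegates to
    // IndexMode#validateWithOtherSettings, so each mode can veto incompatible settings; e.g.
    // (hedged, per the Elasticsearch docs) time-series mode requires routing dimensions such as
    // "index.routing_path", while STANDARD imposes no extra constraints.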
/**
     * Legacy index setting, kept for 7.x backwards compatibility (BWC). This setting has no effect in 8.x. Do not use.
* TODO: Remove in 9.0
*/
@Deprecated
public static final Setting<Integer> MAX_ADJACENCY_MATRIX_FILTERS_SETTING = Setting.intSetting(
"index.max_adjacency_matrix_filters",
100,
2,
Property.Dynamic,
Property.IndexScope,
Property.IndexSettingDeprecatedInV7AndRemovedInV8
);
private final Index index;
private final IndexVersion version;
private final Logger logger;
private final String nodeName;
private final Settings nodeSettings;
private final int numberOfShards;
/**
* The {@link IndexMode "mode"} of the index.
*/
private final IndexMode mode;
/**
* The bounds for {@code @timestamp} on this index or
* {@code null} if there are no bounds.
*/
private volatile TimestampBounds timestampBounds;
// volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock
private volatile Settings settings;
private volatile IndexMetadata indexMetadata;
private volatile List<String> defaultFields;
private final boolean queryStringLenient;
private final boolean queryStringAnalyzeWildcard;
private final boolean queryStringAllowLeadingWildcard;
private final boolean defaultAllowUnmappedFields;
private volatile Translog.Durability durability;
private volatile TimeValue syncInterval;
private volatile TimeValue refreshInterval;
private final boolean fastRefresh;
private volatile ByteSizeValue flushThresholdSize;
private volatile TimeValue flushThresholdAge;
private volatile ByteSizeValue generationThresholdSize;
private volatile ByteSizeValue flushAfterMergeThresholdSize;
private final MergeSchedulerConfig mergeSchedulerConfig;
private final MergePolicyConfig mergePolicyConfig;
private final IndexSortConfig indexSortConfig;
private final IndexScopedSettings scopedSettings;
private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
private final boolean softDeleteEnabled;
private volatile long softDeleteRetentionOperations;
private final boolean es87TSDBCodecEnabled;
private volatile long retentionLeaseMillis;
/**
* The maximum age of a retention lease before it is considered expired.
*
* @return the maximum age
*/
public long getRetentionLeaseMillis() {
return retentionLeaseMillis;
}
private void setRetentionLeaseMillis(final TimeValue retentionLease) {
this.retentionLeaseMillis = retentionLease.millis();
}
private volatile boolean warmerEnabled;
private volatile int maxResultWindow;
private volatile int maxInnerResultWindow;
private volatile int maxRescoreWindow;
private volatile int maxDocvalueFields;
private volatile int maxScriptFields;
private volatile int maxTokenCount;
private volatile int maxNgramDiff;
private volatile int maxShingleDiff;
private volatile TimeValue searchIdleAfter;
private volatile int maxAnalyzedOffset;
private volatile boolean weightMatchesEnabled;
private volatile int maxTermsCount;
private volatile String defaultPipeline;
private volatile String requiredPipeline;
private volatile boolean searchThrottled;
private volatile long mappingNestedFieldsLimit;
private volatile long mappingNestedDocsLimit;
private volatile long mappingTotalFieldsLimit;
private volatile boolean ignoreDynamicFieldsBeyondLimit;
private volatile long mappingDepthLimit;
private volatile long mappingFieldNameLengthLimit;
private volatile long mappingDimensionFieldsLimit;
/**
     * The maximum number of refresh listeners allowed on this shard.
*/
private volatile int maxRefreshListeners;
/**
* The maximum number of slices allowed in a scroll request.
*/
private volatile int maxSlicesPerScroll;
/**
* The maximum length of regex string allowed in a regexp query.
*/
private volatile int maxRegexLength;
private final IndexRouting indexRouting;
/**
* Returns the default search fields for this index.
*/
public List<String> getDefaultFields() {
return defaultFields;
}
private void setDefaultFields(List<String> defaultFields) {
this.defaultFields = defaultFields;
}
/**
* Returns <code>true</code> if query string parsing should be lenient. The default is <code>false</code>
*/
public boolean isQueryStringLenient() {
return queryStringLenient;
}
/**
* Returns <code>true</code> if the query string should analyze wildcards. The default is <code>false</code>
*/
public boolean isQueryStringAnalyzeWildcard() {
return queryStringAnalyzeWildcard;
}
/**
* Returns <code>true</code> if the query string parser should allow leading wildcards. The default is <code>true</code>
*/
public boolean isQueryStringAllowLeadingWildcard() {
return queryStringAllowLeadingWildcard;
}
/**
* Returns <code>true</code> if queries should be lenient about unmapped fields. The default is <code>true</code>
*/
public boolean isDefaultAllowUnmappedFields() {
return defaultAllowUnmappedFields;
}
/**
* Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
* while index level settings will overwrite node settings.
*
* @param indexMetadata the index metadata this settings object is associated with
     * @param nodeSettings the settings of the node this index is allocated on.
*/
public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSettings) {
this(indexMetadata, nodeSettings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
}
/**
* Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
* while index level settings will overwrite node settings.
*
* @param indexMetadata the index metadata this settings object is associated with
     * @param nodeSettings the settings of the node this index is allocated on.
*/
public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSettings, IndexScopedSettings indexScopedSettings) {
scopedSettings = indexScopedSettings.copy(nodeSettings, indexMetadata);
this.nodeSettings = nodeSettings;
this.settings = Settings.builder().put(nodeSettings).put(indexMetadata.getSettings()).build();
this.index = indexMetadata.getIndex();
version = SETTING_INDEX_VERSION_CREATED.get(settings);
logger = Loggers.getLogger(getClass(), index);
nodeName = Node.NODE_NAME_SETTING.get(settings);
this.indexMetadata = indexMetadata;
numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null);
mode = scopedSettings.get(MODE);
this.timestampBounds = mode.getTimestampBound(indexMetadata);
if (timestampBounds != null) {
scopedSettings.addSettingsUpdateConsumer(IndexSettings.TIME_SERIES_END_TIME, endTime -> {
this.timestampBounds = TimestampBounds.updateEndTime(this.timestampBounds, endTime);
});
}
this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings);
this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
defaultFields = scopedSettings.get(DEFAULT_FIELD_SETTING);
syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING);
fastRefresh = scopedSettings.get(INDEX_FAST_REFRESH_SETTING);
if (fastRefresh) {
if (DiscoveryNode.isStateless(nodeSettings) == false) {
throw new IllegalArgumentException(INDEX_FAST_REFRESH_SETTING.getKey() + " is allowed only in stateless");
}
if (indexMetadata.isSystem() == false) {
throw new IllegalArgumentException(INDEX_FAST_REFRESH_SETTING.getKey() + " is allowed only for system indices");
}
}
flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING);
flushThresholdAge = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_AGE_SETTING);
generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING);
flushAfterMergeThresholdSize = scopedSettings.get(INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING);
mergeSchedulerConfig = new MergeSchedulerConfig(this);
gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis();
softDeleteEnabled = scopedSettings.get(INDEX_SOFT_DELETES_SETTING);
assert softDeleteEnabled || version.before(IndexVersions.V_8_0_0) : "soft deletes must be enabled in version " + version;
softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING);
retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING).millis();
warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING);
maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING);
maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING);
maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING);
maxDocvalueFields = scopedSettings.get(MAX_DOCVALUE_FIELDS_SEARCH_SETTING);
maxScriptFields = scopedSettings.get(MAX_SCRIPT_FIELDS_SETTING);
maxTokenCount = scopedSettings.get(MAX_TOKEN_COUNT_SETTING);
maxNgramDiff = scopedSettings.get(MAX_NGRAM_DIFF_SETTING);
maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING);
maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING);
weightMatchesEnabled = scopedSettings.get(WEIGHT_MATCHES_MODE_ENABLED_SETTING);
maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING);
maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING);
this.mergePolicyConfig = new MergePolicyConfig(logger, this);
this.indexSortConfig = new IndexSortConfig(this);
searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER);
defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE);
mappingNestedFieldsLimit = scopedSettings.get(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING);
mappingNestedDocsLimit = scopedSettings.get(INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING);
mappingTotalFieldsLimit = scopedSettings.get(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
ignoreDynamicFieldsBeyondLimit = scopedSettings.get(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING);
mappingDepthLimit = scopedSettings.get(INDEX_MAPPING_DEPTH_LIMIT_SETTING);
mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING);
mappingDimensionFieldsLimit = scopedSettings.get(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING);
indexRouting = IndexRouting.fromIndexMetadata(indexMetadata);
es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING,
mergePolicyConfig::setCompoundFormatThreshold
);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_TYPE_SETTING, mergePolicyConfig::setMergePolicyType);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING,
mergePolicyConfig::setDeletesPctAllowed
);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING,
mergePolicyConfig::setExpungeDeletesAllowed
);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING,
mergePolicyConfig::setFloorSegmentSetting
);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING,
mergePolicyConfig::setMaxMergesAtOnce
);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, ignored -> {});
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING,
mergePolicyConfig::setMaxMergedSegment
);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING,
mergePolicyConfig::setSegmentsPerTier
);
scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING,
mergePolicyConfig::setMergeFactor
);
scopedSettings.addSettingsUpdateConsumer(
MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING,
MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
mergeSchedulerConfig::setMaxThreadAndMergeCount
);
scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, this::setTranslogSyncInterval);
scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow);
scopedSettings.addSettingsUpdateConsumer(MAX_INNER_RESULT_WINDOW_SETTING, this::setMaxInnerResultWindow);
scopedSettings.addSettingsUpdateConsumer(MAX_RESCORE_WINDOW_SETTING, this::setMaxRescoreWindow);
scopedSettings.addSettingsUpdateConsumer(MAX_DOCVALUE_FIELDS_SEARCH_SETTING, this::setMaxDocvalueFields);
scopedSettings.addSettingsUpdateConsumer(MAX_SCRIPT_FIELDS_SETTING, this::setMaxScriptFields);
scopedSettings.addSettingsUpdateConsumer(MAX_TOKEN_COUNT_SETTING, this::setMaxTokenCount);
scopedSettings.addSettingsUpdateConsumer(MAX_NGRAM_DIFF_SETTING, this::setMaxNgramDiff);
scopedSettings.addSettingsUpdateConsumer(MAX_SHINGLE_DIFF_SETTING, this::setMaxShingleDiff);
scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer);
scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_AGE_SETTING, this::setTranslogFlushThresholdAge);
scopedSettings.addSettingsUpdateConsumer(INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING, this::setFlushAfterMergeThresholdSize);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, this::setGenerationThresholdSize);
scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset);
scopedSettings.addSettingsUpdateConsumer(WEIGHT_MATCHES_MODE_ENABLED_SETTING, this::setWeightMatchesEnabled);
scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount);
scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields);
scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter);
scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength);
scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline);
scopedSettings.addSettingsUpdateConsumer(FINAL_PIPELINE, this::setRequiredPipeline);
scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations);
scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled);
scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, this::setMappingNestedFieldsLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING, this::setMappingNestedDocsLimit);
scopedSettings.addSettingsUpdateConsumer(
INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING,
this::setIgnoreDynamicFieldsBeyondLimit
);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, this::setMappingTotalFieldsLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DEPTH_LIMIT_SETTING, this::setMappingDepthLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING, this::setMappingDimensionFieldsLimit);
}
private void setSearchIdleAfter(TimeValue searchIdleAfter) {
this.searchIdleAfter = searchIdleAfter;
}
private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
this.flushThresholdSize = byteSizeValue;
}
private void setTranslogFlushThresholdAge(TimeValue timeValue) {
this.flushThresholdAge = timeValue;
}
private void setFlushAfterMergeThresholdSize(ByteSizeValue byteSizeValue) {
this.flushAfterMergeThresholdSize = byteSizeValue;
}
private void setGenerationThresholdSize(final ByteSizeValue generationThresholdSize) {
this.generationThresholdSize = generationThresholdSize;
}
private void setGCDeletes(TimeValue timeValue) {
this.gcDeletesInMillis = timeValue.getMillis();
}
private void setRefreshInterval(TimeValue timeValue) {
this.refreshInterval = timeValue;
}
/**
* Returns the settings for this index. These settings contain the node and index level settings where
* settings that are specified on both index and node level are overwritten by the index settings.
*/
public Settings getSettings() {
return settings;
}
/**
* Returns the index this settings object belongs to
*/
public Index getIndex() {
return index;
}
/**
     * Returns the index's UUID
*/
public String getUUID() {
return getIndex().getUUID();
}
/**
* Returns <code>true</code> if the index has a custom data path
*/
public boolean hasCustomDataPath() {
return Strings.isNotEmpty(customDataPath());
}
/**
     * Returns the customDataPath for this index if configured, otherwise an empty string <code>""</code>.
*/
public String customDataPath() {
return IndexMetadata.INDEX_DATA_PATH_SETTING.get(settings);
}
/**
* Returns the version the index was created on.
* @see IndexMetadata#SETTING_VERSION_CREATED
*/
public IndexVersion getIndexVersionCreated() {
return version;
}
/**
* Returns the current node name
*/
public String getNodeName() {
return nodeName;
}
/**
* Returns the current IndexMetadata for this index
*/
public IndexMetadata getIndexMetadata() {
return indexMetadata;
}
/**
* Returns the number of shards this index has.
*/
public int getNumberOfShards() {
return numberOfShards;
}
/**
* Returns the number of replicas this index has.
*/
public int getNumberOfReplicas() {
return settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null);
}
/**
* "Mode" that controls which behaviors and settings an index supports.
*/
public IndexMode getMode() {
return mode;
}
/**
* Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
* index settings and the node settings where node settings are overwritten by index settings.
*/
public Settings getNodeSettings() {
return nodeSettings;
}
/**
* Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one
* setting has changed.
*
* @return <code>true</code> iff any setting has been updated otherwise <code>false</code>.
*/
public synchronized boolean updateIndexMetadata(IndexMetadata indexMetadata) {
final Settings newSettings = indexMetadata.getSettings();
IndexVersion newIndexVersion = SETTING_INDEX_VERSION_CREATED.get(newSettings);
if (version.equals(newIndexVersion) == false) {
throw new IllegalArgumentException(
"version mismatch on settings update expected: "
+ version.toReleaseVersion()
+ " but was: "
+ newIndexVersion.toReleaseVersion()
);
}
IndexVersion newCompatibilityVersion = IndexMetadata.SETTING_INDEX_VERSION_COMPATIBILITY.get(newSettings);
IndexVersion compatibilityVersion = IndexMetadata.SETTING_INDEX_VERSION_COMPATIBILITY.get(settings);
if (compatibilityVersion.equals(newCompatibilityVersion) == false) {
throw new IllegalArgumentException(
"compatibility version mismatch on settings update expected: "
+ compatibilityVersion.toReleaseVersion()
+ " but was: "
+ newCompatibilityVersion.toReleaseVersion()
);
}
final String newUUID = newSettings.get(IndexMetadata.SETTING_INDEX_UUID, IndexMetadata.INDEX_UUID_NA_VALUE);
if (newUUID.equals(getUUID()) == false) {
throw new IllegalArgumentException("uuid mismatch on settings update expected: " + getUUID() + " but was: " + newUUID);
}
final String newRestoreUUID = newSettings.get(IndexMetadata.SETTING_HISTORY_UUID, IndexMetadata.INDEX_UUID_NA_VALUE);
final String restoreUUID = this.settings.get(IndexMetadata.SETTING_HISTORY_UUID, IndexMetadata.INDEX_UUID_NA_VALUE);
if (newRestoreUUID.equals(restoreUUID) == false) {
throw new IllegalArgumentException("uuid mismatch on settings update expected: " + restoreUUID + " but was: " + newRestoreUUID);
}
this.indexMetadata = indexMetadata;
final Settings newIndexSettings = Settings.builder().put(nodeSettings).put(newSettings).build();
if (same(this.settings, newIndexSettings)) {
// nothing to update, same settings
return false;
}
scopedSettings.applySettings(newSettings);
this.settings = newIndexSettings;
return true;
}
/**
* Compare the specified settings for equality.
*
* @param left the left settings
* @param right the right settings
* @return true if the settings are the same, otherwise false
*/
public static boolean same(final Settings left, final Settings right) {
if (left.equals(right)) {
return true;
}
return left.getByPrefix(IndexMetadata.INDEX_SETTING_PREFIX).equals(right.getByPrefix(IndexMetadata.INDEX_SETTING_PREFIX))
&& left.getByPrefix(AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX)
.equals(right.getByPrefix(AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX));
}
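    // Equality sketch for same() above: two Settings objects are considered the "same" when their
    // "index.*" and archived subsets match, so differences in unrelated node-level keys that were
    // merged into the object do not count as an index-settings change.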
/**
* Returns the translog durability for this index.
*/
public Translog.Durability getTranslogDurability() {
return durability;
}
private void setTranslogDurability(Translog.Durability durability) {
this.durability = durability;
}
/**
     * Returns <code>true</code> if index warmers are enabled, otherwise <code>false</code>
*/
public boolean isWarmerEnabled() {
return warmerEnabled;
}
private void setEnableWarmer(boolean enableWarmer) {
this.warmerEnabled = enableWarmer;
}
/**
     * Returns the translog sync interval. This is the interval in which the transaction log is asynchronously fsynced unless
     * the transaction log is fsynced on every operation
*/
public TimeValue getTranslogSyncInterval() {
return syncInterval;
}
public void setTranslogSyncInterval(TimeValue translogSyncInterval) {
this.syncInterval = translogSyncInterval;
}
/**
     * Returns the interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled.
*/
public TimeValue getRefreshInterval() {
return refreshInterval;
}
/**
* Only intended for stateless.
*/
public boolean isFastRefresh() {
return fastRefresh;
}
/**
     * Returns the transaction log size threshold at which the index is forcefully flushed and the transaction log cleared.
*/
public ByteSizeValue getFlushThresholdSize(ByteSizeValue totalDiskSpace) {
// Never return more than 1% of the total disk space as a protection for small instances that may not have much disk space.
long onePercentOfTotalDiskSpace = totalDiskSpace.getBytes() / 100;
if (onePercentOfTotalDiskSpace <= ByteSizeUnit.MB.toBytes(10)) {
// Paranoia: total disk usage should always be at least in the GBs. Make sure the translog is always allowed at least 10MB.
onePercentOfTotalDiskSpace = ByteSizeUnit.MB.toBytes(10);
}
assert onePercentOfTotalDiskSpace > Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
if (onePercentOfTotalDiskSpace < flushThresholdSize.getBytes()) {
return new ByteSizeValue(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES);
} else {
return flushThresholdSize;
}
}
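    // Worked example for the clamping above: on a 500gb disk, 1% is 5gb, which is below the 10gb
    // default threshold, so 5gb is returned; on a 100mb disk, 1% is 1mb, which is first raised to
    // the 10mb floor and then returned, keeping the translog bounded but never absurdly small.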
/**
     * Returns the transaction log age threshold at which the index is forcefully flushed and the transaction log cleared.
*/
public TimeValue getFlushThresholdAge() {
return flushThresholdAge;
}
/**
     * Returns the merge size threshold at which the index is forcefully flushed after a merge to free resources.
*/
public ByteSizeValue getFlushAfterMergeThresholdSize() {
return flushAfterMergeThresholdSize;
}
/**
* Returns the generation threshold size. As sequence numbers can cause multiple generations to
* be preserved for rollback purposes, we want to keep the size of individual generations from
* growing too large to avoid excessive disk space consumption. Therefore, the translog is
* automatically rolled to a new generation when the current generation exceeds this generation
* threshold size.
*
* @return the generation threshold size
*/
public ByteSizeValue getGenerationThresholdSize() {
return generationThresholdSize;
}
/**
* Returns the {@link MergeSchedulerConfig}
*/
public MergeSchedulerConfig getMergeSchedulerConfig() {
return mergeSchedulerConfig;
}
/**
* Returns the max result window for search requests, describing the maximum value of from + size on a query.
*/
public int getMaxResultWindow() {
return this.maxResultWindow;
}
private void setMaxResultWindow(int maxResultWindow) {
this.maxResultWindow = maxResultWindow;
}
/**
* Returns the max result window for an individual inner hit definition or top hits aggregation.
*/
public int getMaxInnerResultWindow() {
return maxInnerResultWindow;
}
private void setMaxInnerResultWindow(int maxInnerResultWindow) {
this.maxInnerResultWindow = maxInnerResultWindow;
}
/**
* Returns the maximum rescore window for search requests.
*/
public int getMaxRescoreWindow() {
return maxRescoreWindow;
}
private void setMaxRescoreWindow(int maxRescoreWindow) {
this.maxRescoreWindow = maxRescoreWindow;
}
/**
* Returns the maximum number of allowed docvalue_fields to retrieve in a search request
*/
public int getMaxDocvalueFields() {
return this.maxDocvalueFields;
}
private void setMaxDocvalueFields(int maxDocvalueFields) {
this.maxDocvalueFields = maxDocvalueFields;
}
/**
* Returns the maximum number of tokens that can be produced
*/
public int getMaxTokenCount() {
return maxTokenCount;
}
private void setMaxTokenCount(int maxTokenCount) {
this.maxTokenCount = maxTokenCount;
}
/**
* Returns the maximum allowed difference between max and min length of ngram
*/
public int getMaxNgramDiff() {
return this.maxNgramDiff;
}
private void setMaxNgramDiff(int maxNgramDiff) {
this.maxNgramDiff = maxNgramDiff;
}
/**
* Returns the maximum allowed difference between max and min shingle_size
*/
public int getMaxShingleDiff() {
return this.maxShingleDiff;
}
private void setMaxShingleDiff(int maxShingleDiff) {
this.maxShingleDiff = maxShingleDiff;
}
/**
* Returns the maximum number of chars that will be analyzed in a highlight request
*/
public int getHighlightMaxAnalyzedOffset() {
return this.maxAnalyzedOffset;
}
private void setHighlightMaxAnalyzedOffset(int maxAnalyzedOffset) {
this.maxAnalyzedOffset = maxAnalyzedOffset;
}
public boolean isWeightMatchesEnabled() {
return this.weightMatchesEnabled;
}
private void setWeightMatchesEnabled(boolean value) {
this.weightMatchesEnabled = value;
}
/**
* Returns the maximum number of terms that can be used in a Terms Query request
*/
public int getMaxTermsCount() {
return this.maxTermsCount;
}
private void setMaxTermsCount(int maxTermsCount) {
this.maxTermsCount = maxTermsCount;
}
/**
* Returns the maximum number of allowed script_fields to retrieve in a search request
*/
public int getMaxScriptFields() {
return this.maxScriptFields;
}
private void setMaxScriptFields(int maxScriptFields) {
this.maxScriptFields = maxScriptFields;
}
/**
* Returns the GC deletes cycle in milliseconds.
*/
public long getGcDeletesInMillis() {
return gcDeletesInMillis;
}
/**
* Returns the merge policy that should be used for this index.
*/
public MergePolicy getMergePolicy(boolean isTimeBasedIndex) {
return mergePolicyConfig.getMergePolicy(isTimeBasedIndex);
}
public <T> T getValue(Setting<T> setting) {
return scopedSettings.get(setting);
}
/**
     * The maximum number of refresh listeners allowed on this shard.
*/
public int getMaxRefreshListeners() {
return maxRefreshListeners;
}
private void setMaxRefreshListeners(int maxRefreshListeners) {
this.maxRefreshListeners = maxRefreshListeners;
}
/**
* The maximum number of slices allowed in a scroll request.
*/
public int getMaxSlicesPerScroll() {
return maxSlicesPerScroll;
}
private void setMaxSlicesPerScroll(int value) {
this.maxSlicesPerScroll = value;
}
/**
* The maximum length of regex string allowed in a regexp query.
*/
public int getMaxRegexLength() {
return maxRegexLength;
}
private void setMaxRegexLength(int maxRegexLength) {
this.maxRegexLength = maxRegexLength;
}
/**
* Returns the index sort config that should be used for this index.
*/
public IndexSortConfig getIndexSortConfig() {
return indexSortConfig;
}
public IndexScopedSettings getScopedSettings() {
return scopedSettings;
}
/**
* Returns true iff the refresh setting exists or in other words is explicitly set.
*/
public boolean isExplicitRefresh() {
return INDEX_REFRESH_INTERVAL_SETTING.exists(settings);
}
/**
     * Returns the time after which an index shard becomes search-idle unless it is accessed in the meantime
*/
public TimeValue getSearchIdleAfter() {
return searchIdleAfter;
}
public String getDefaultPipeline() {
return defaultPipeline;
}
public void setDefaultPipeline(String defaultPipeline) {
this.defaultPipeline = defaultPipeline;
}
public String getRequiredPipeline() {
return requiredPipeline;
}
public void setRequiredPipeline(final String requiredPipeline) {
this.requiredPipeline = requiredPipeline;
}
/**
* Returns <code>true</code> if soft-delete is enabled.
*/
public boolean isSoftDeleteEnabled() {
return softDeleteEnabled;
}
private void setSoftDeleteRetentionOperations(long ops) {
this.softDeleteRetentionOperations = ops;
}
/**
     * Returns the number of extra operations (i.e. soft-deleted documents) to be kept for recovery and history purposes.
*/
public long getSoftDeleteRetentionOperations() {
return this.softDeleteRetentionOperations;
}
/**
     * Returns true if this index should be search-throttled, i.e. searched using the
* {@link org.elasticsearch.threadpool.ThreadPool.Names#SEARCH_THROTTLED} thread-pool
*/
public boolean isSearchThrottled() {
return searchThrottled;
}
private void setSearchThrottled(boolean searchThrottled) {
this.searchThrottled = searchThrottled;
}
public long getMappingNestedFieldsLimit() {
return mappingNestedFieldsLimit;
}
private void setMappingNestedFieldsLimit(long value) {
this.mappingNestedFieldsLimit = value;
}
public long getMappingNestedDocsLimit() {
return mappingNestedDocsLimit;
}
private void setMappingNestedDocsLimit(long value) {
this.mappingNestedDocsLimit = value;
}
public long getMappingTotalFieldsLimit() {
return mappingTotalFieldsLimit;
}
private void setMappingTotalFieldsLimit(long value) {
this.mappingTotalFieldsLimit = value;
}
private void setIgnoreDynamicFieldsBeyondLimit(boolean ignoreDynamicFieldsBeyondLimit) {
this.ignoreDynamicFieldsBeyondLimit = ignoreDynamicFieldsBeyondLimit;
}
public boolean isIgnoreDynamicFieldsBeyondLimit() {
return ignoreDynamicFieldsBeyondLimit;
}
public long getMappingDepthLimit() {
return mappingDepthLimit;
}
private void setMappingDepthLimit(long value) {
this.mappingDepthLimit = value;
}
public long getMappingFieldNameLengthLimit() {
return mappingFieldNameLengthLimit;
}
private void setMappingFieldNameLengthLimit(long value) {
this.mappingFieldNameLengthLimit = value;
}
public long getMappingDimensionFieldsLimit() {
return mappingDimensionFieldsLimit;
}
private void setMappingDimensionFieldsLimit(long value) {
this.mappingDimensionFieldsLimit = value;
}
/**
* The bounds for {@code @timestamp} on this index or
* {@code null} if there are no bounds.
*/
public TimestampBounds getTimestampBounds() {
return timestampBounds;
}
/**
* The way that documents are routed on the coordinating
* node when being sent to shards of this index.
*/
public IndexRouting getIndexRouting() {
return indexRouting;
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/IndexSettings.java |
468 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.env;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.util.Strings;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.ReferenceDocs;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Predicates;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.gateway.CorruptStateException;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.gateway.PersistedClusterStateService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.FsDirectoryFactory;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.HotThreads;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.elasticsearch.core.Strings.format;
/**
* A component that holds all data paths for a single node.
*/
public final class NodeEnvironment implements Closeable {
public static class DataPath {
/* ${data.paths} */
public final Path path;
/* ${data.paths}/indices */
public final Path indicesPath;
/** Cached FileStore from path */
public final FileStore fileStore;
public final int majorDeviceNumber;
public final int minorDeviceNumber;
public DataPath(Path path) throws IOException {
this.path = path;
this.indicesPath = path.resolve(INDICES_FOLDER);
this.fileStore = Environment.getFileStore(path);
if (fileStore.supportsFileAttributeView("lucene")) {
this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
} else {
this.majorDeviceNumber = -1;
this.minorDeviceNumber = -1;
}
}
/**
         * Resolves the given shard's directory against this DataPath:
         * ${data.paths}/indices/{index.uuid}/{shard.id}
*/
public Path resolve(ShardId shardId) {
return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
}
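        // Resolution sketch (hypothetical values): for a data path of /var/data, an index with
        // UUID "wFzA9yXdQ..." and shard id 3, this resolves to /var/data/indices/wFzA9yXdQ.../3.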
/**
         * Resolves the index directory against this DataPath:
         * ${data.paths}/indices/{index.uuid}
*/
public Path resolve(Index index) {
return resolve(index.getUUID());
}
Path resolve(String uuid) {
return indicesPath.resolve(uuid);
}
@Override
public String toString() {
return "DataPath{"
+ "path="
+ path
+ ", indicesPath="
+ indicesPath
+ ", fileStore="
+ fileStore
+ ", majorDeviceNumber="
+ majorDeviceNumber
+ ", minorDeviceNumber="
+ minorDeviceNumber
+ '}';
}
}
private final Logger logger = LogManager.getLogger(NodeEnvironment.class);
private final DataPath[] dataPaths;
private final Path sharedDataPath;
private final Lock[] locks;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();
private final NodeMetadata nodeMetadata;
/**
     * Seed for determining a persisted unique uuid of this node. If the node already has a persisted uuid on disk,
* this seed will be ignored and the uuid from disk will be reused.
*/
public static final Setting<Long> NODE_ID_SEED_SETTING = Setting.longSetting("node.id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);
/**
     * If true, the [verbose] SegmentInfos.infoStream logging is sent to System.out.
*/
public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting.boolSetting(
"node.enable_lucene_segment_infos_trace",
false,
Property.NodeScope
);
public static final String INDICES_FOLDER = "indices";
public static final String NODE_LOCK_FILENAME = "node.lock";
/**
* Searchable snapshot's Lucene index directory.
*/
private static final String SNAPSHOT_CACHE_FOLDER = "snapshot_cache";
/**
* Searchable snapshot's shared cache file
*/
static final String SEARCHABLE_SHARED_CACHE_FILE = "shared_snapshot_cache";
public static final class NodeLock implements Releasable {
private final Lock[] locks;
private final DataPath[] dataPaths;
public NodeLock(final Logger logger, final Environment environment, final CheckedFunction<Path, Boolean, IOException> pathFunction)
throws IOException {
this(logger, environment, pathFunction, Function.identity());
}
/**
     * Tries to acquire a node lock, throwing an {@code IOException} if it cannot be acquired
* @param pathFunction function to check node path before attempt of acquiring a node lock
*/
public NodeLock(
final Logger logger,
final Environment environment,
final CheckedFunction<Path, Boolean, IOException> pathFunction,
final Function<Path, Path> subPathMapping
) throws IOException {
dataPaths = new DataPath[environment.dataFiles().length];
locks = new Lock[dataPaths.length];
try {
final Path[] dataPaths = environment.dataFiles();
for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) {
Path dataDir = dataPaths[dirIndex];
Path dir = subPathMapping.apply(dataDir);
if (pathFunction.apply(dir) == false) {
continue;
}
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
this.dataPaths[dirIndex] = new DataPath(dir);
} catch (IOException e) {
logger.trace(() -> format("failed to obtain node lock on %s", dir.toAbsolutePath()), e);
// release all the ones that were obtained up until now
throw (e instanceof LockObtainFailedException
? e
: new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e));
}
}
} catch (IOException e) {
close();
throw e;
}
}
public DataPath[] getDataPaths() {
return dataPaths;
}
@Override
public void close() {
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {
IOUtils.closeWhileHandlingException(locks[i]);
}
locks[i] = null;
}
}
}
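    /**
     * Illustrative usage sketch (not part of the original source): acquiring a node lock
     * across all data paths and releasing it via try-with-resources. The logger and
     * environment values are assumed to be supplied by the caller; {@code dir -> true}
     * accepts every path, mirroring the constructor call used elsewhere in this class.
     */
    @SuppressWarnings("unused")
    private static void exampleNodeLockUsage(Logger logger, Environment environment) throws IOException {
        try (NodeLock nodeLock = new NodeLock(logger, environment, dir -> true)) {
            DataPath[] paths = nodeLock.getDataPaths(); // entries may be null for paths the pathFunction skipped
            assert paths.length == environment.dataFiles().length;
        }
    }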
/**
* Setup the environment.
* @param settings settings from elasticsearch.yml
* @param environment global environment
*/
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
boolean success = false;
try {
sharedDataPath = environment.sharedDataFile();
for (Path path : environment.dataFiles()) {
if (Files.exists(path)) {
// Call to toRealPath required to resolve symlinks.
// We let it fall through to create directories to ensure the symlink
// isn't a file instead of a directory.
path = path.toRealPath();
}
Files.createDirectories(path);
}
final NodeLock nodeLock;
try {
nodeLock = new NodeLock(logger, environment, dir -> true);
} catch (IOException e) {
final String message = String.format(
Locale.ROOT,
"failed to obtain node locks, tried %s;"
+ " maybe these locations are not writable or multiple nodes were started on the same data path?",
Arrays.toString(environment.dataFiles())
);
throw new IllegalStateException(message, e);
}
this.locks = nodeLock.locks;
this.dataPaths = nodeLock.dataPaths;
logger.debug("using node location {}", Arrays.toString(dataPaths));
maybeLogPathDetails();
maybeLogHeapDetails();
applySegmentInfosTrace(settings);
assertCanWrite();
ensureAtomicMoveSupported(dataPaths);
if (upgradeLegacyNodeFolders(logger, settings, environment, nodeLock)) {
assertCanWrite();
}
// versions 7.x and earlier put their data under ${path.data}/nodes/; leave a file at that location to prevent downgrades
for (Path dataPath : environment.dataFiles()) {
final Path legacyNodesPath = dataPath.resolve("nodes");
if (Files.isRegularFile(legacyNodesPath) == false) {
final String content = "written by Elasticsearch "
+ Build.current().version()
+ " to prevent a downgrade to a version prior to v8.0.0 which would result in data loss";
Files.writeString(legacyNodesPath, content);
IOUtils.fsync(legacyNodesPath, false);
IOUtils.fsync(dataPath, true);
}
}
if (DiscoveryNode.canContainData(settings) == false) {
if (DiscoveryNode.isMasterNode(settings) == false) {
ensureNoIndexMetadata(dataPaths);
}
ensureNoShardData(dataPaths);
}
this.nodeMetadata = loadNodeMetadata(settings, logger, dataPaths);
success = true;
} finally {
if (success == false) {
close();
}
}
}
/**
* Upgrades all data paths that have been written to by an older ES version to the 8.0+ compatible folder layout,
* removing the "nodes/${lockId}" folder prefix
*/
private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings, Environment environment, NodeLock nodeLock)
throws IOException {
boolean upgradeNeeded = false;
// check if we can do an auto-upgrade
for (Path path : environment.dataFiles()) {
final Path nodesFolderPath = path.resolve("nodes");
if (Files.isDirectory(nodesFolderPath)) {
final List<Integer> nodeLockIds = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(nodesFolderPath)) {
for (Path nodeLockIdPath : stream) {
String fileName = nodeLockIdPath.getFileName().toString();
if (Files.isDirectory(nodeLockIdPath) && fileName.chars().allMatch(Character::isDigit)) {
int nodeLockId = Integer.parseInt(fileName);
nodeLockIds.add(nodeLockId);
} else if (FileSystemUtils.isDesktopServicesStore(nodeLockIdPath) == false) {
throw new IllegalStateException(
"unexpected file/folder encountered during data folder upgrade: " + nodeLockIdPath
);
}
}
}
if (nodeLockIds.isEmpty() == false) {
upgradeNeeded = true;
if (nodeLockIds.equals(Arrays.asList(0)) == false) {
throw new IllegalStateException(
"data path "
+ nodesFolderPath
+ " cannot be upgraded automatically because it "
+ "contains data from nodes with ordinals "
+ nodeLockIds
+ ", due to previous use of the now obsolete "
+ "[node.max_local_storage_nodes] setting. Please check the breaking changes docs for the current version "
+ "of Elasticsearch to find an upgrade path"
);
}
}
}
}
if (upgradeNeeded == false) {
logger.trace("data folder upgrade not required");
return false;
}
logger.info("upgrading legacy data folders: {}", Arrays.toString(environment.dataFiles()));
// acquire locks on legacy path for duration of upgrade (to ensure there is no older ES version running on this path)
final NodeLock legacyNodeLock;
try {
legacyNodeLock = new NodeLock(logger, environment, dir -> true, path -> path.resolve("nodes").resolve("0"));
} catch (IOException e) {
final String message = String.format(
Locale.ROOT,
"failed to obtain legacy node locks, tried %s;"
+ " maybe these locations are not writable or multiple nodes were started on the same data path?",
Arrays.toString(environment.dataFiles())
);
throw new IllegalStateException(message, e);
}
// move contents from legacy path to new path
assert nodeLock.getDataPaths().length == legacyNodeLock.getDataPaths().length;
try {
// first check if we are upgrading from an index compatible version
checkForIndexCompatibility(logger, legacyNodeLock.getDataPaths());
final List<CheckedRunnable<IOException>> upgradeActions = new ArrayList<>();
for (int i = 0; i < legacyNodeLock.getDataPaths().length; i++) {
final DataPath legacyDataPath = legacyNodeLock.getDataPaths()[i];
final DataPath dataPath = nodeLock.getDataPaths()[i];
// determine folders to move and check that there are no extra files/folders
final Set<String> folderNames = new HashSet<>();
final Set<String> expectedFolderNames = new HashSet<>(
Arrays.asList(
// node state directory, containing MetadataStateFormat-based node metadata as well as cluster state
MetadataStateFormat.STATE_DIR_NAME,
// indices
INDICES_FOLDER,
// searchable snapshot cache Lucene index
SNAPSHOT_CACHE_FOLDER
)
);
final Set<String> ignoredFileNames = new HashSet<>(
Arrays.asList(
NODE_LOCK_FILENAME,
TEMP_FILE_NAME,
TEMP_FILE_NAME + ".tmp",
TEMP_FILE_NAME + ".final",
SEARCHABLE_SHARED_CACHE_FILE
)
);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(legacyDataPath.path)) {
for (Path subFolderPath : stream) {
final String fileName = subFolderPath.getFileName().toString();
if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) {
// ignore
} else if (FileSystemUtils.isAccessibleDirectory(subFolderPath, logger)) {
if (expectedFolderNames.contains(fileName) == false) {
throw new IllegalStateException(
"unexpected folder encountered during data folder upgrade: " + subFolderPath
);
}
final Path targetSubFolderPath = dataPath.path.resolve(fileName);
if (Files.exists(targetSubFolderPath)) {
throw new IllegalStateException(
"target folder already exists during data folder upgrade: " + targetSubFolderPath
);
}
folderNames.add(fileName);
} else if (ignoredFileNames.contains(fileName) == false) {
throw new IllegalStateException(
"unexpected file/folder encountered during data folder upgrade: " + subFolderPath
);
}
}
}
assert Sets.difference(folderNames, expectedFolderNames).isEmpty()
: "expected indices and/or state dir folder but was " + folderNames;
upgradeActions.add(() -> {
for (String folderName : folderNames) {
final Path sourceSubFolderPath = legacyDataPath.path.resolve(folderName);
final Path targetSubFolderPath = dataPath.path.resolve(folderName);
Files.move(sourceSubFolderPath, targetSubFolderPath, StandardCopyOption.ATOMIC_MOVE);
logger.info("data folder upgrade: moved from [{}] to [{}]", sourceSubFolderPath, targetSubFolderPath);
}
IOUtils.fsync(dataPath.path, true);
});
}
// now do the actual upgrade
for (CheckedRunnable<IOException> upgradeAction : upgradeActions) {
upgradeAction.run();
}
} finally {
legacyNodeLock.close();
}
// upgrade successfully completed, remove legacy nodes folders
IOUtils.rm(Stream.of(environment.dataFiles()).map(path -> path.resolve("nodes")).toArray(Path[]::new));
return true;
}
/**
* Checks to see if we can upgrade to this version based on the existing index state. Upgrading
* from older versions can cause irreversible changes if allowed.
*/
static void checkForIndexCompatibility(Logger logger, DataPath... dataPaths) throws IOException {
final Path[] paths = Arrays.stream(dataPaths).map(np -> np.path).toArray(Path[]::new);
NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(paths);
// We are upgrading the cluster, but we didn't find any previous metadata. Corrupted state or incompatible version.
if (metadata == null) {
throw new CorruptStateException(
"Format version is not supported. Upgrading to ["
+ Build.current().version()
+ "] is only supported from version ["
+ Build.current().minWireCompatVersion()
+ "]."
);
}
metadata.verifyUpgradeToCurrentVersion();
logger.info("oldest index version recorded in NodeMetadata {}", metadata.oldestIndexVersion());
if (metadata.oldestIndexVersion().isLegacyIndexVersion()) {
String bestDowngradeVersion = getBestDowngradeVersion(metadata.previousNodeVersion().toString());
throw new IllegalStateException(
"Cannot start this node because it holds metadata for indices with version ["
+ metadata.oldestIndexVersion().toReleaseVersion()
+ "] with which this node of version ["
+ Build.current().version()
+ "] is incompatible. Revert this node to version ["
+ bestDowngradeVersion
+ "] and delete any indices with versions earlier than ["
+ IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion()
+ "] before upgrading to version ["
+ Build.current().version()
+ "]. If all such indices have already been deleted, revert this node to version ["
+ bestDowngradeVersion
+ "] and wait for it to join the cluster to clean up any older indices from its metadata."
);
}
}
private void maybeLogPathDetails() throws IOException {
// We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
if (logger.isDebugEnabled()) {
// Log one line per path.data:
StringBuilder sb = new StringBuilder();
for (DataPath dataPath : dataPaths) {
sb.append('\n').append(" -> ").append(dataPath.path.toAbsolutePath());
FsInfo.Path fsPath = FsProbe.getFSInfo(dataPath);
sb.append(", free_space [")
.append(fsPath.getFree())
.append("], usable_space [")
.append(fsPath.getAvailable())
.append("], total_space [")
.append(fsPath.getTotal())
.append("], mount [")
.append(fsPath.getMount())
.append("], type [")
.append(fsPath.getType())
.append(']');
}
logger.debug("node data locations details:{}", sb);
} else if (logger.isInfoEnabled()) {
FsInfo.Path totFSPath = new FsInfo.Path();
Set<String> allTypes = new HashSet<>();
Set<String> allMounts = new HashSet<>();
for (DataPath dataPath : dataPaths) {
FsInfo.Path fsPath = FsProbe.getFSInfo(dataPath);
String mount = fsPath.getMount();
if (allMounts.contains(mount) == false) {
allMounts.add(mount);
String type = fsPath.getType();
if (type != null) {
allTypes.add(type);
}
totFSPath.add(fsPath);
}
}
// Just log a 1-line summary:
logger.info(
"using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], types [{}]",
dataPaths.length,
allMounts,
totFSPath.getAvailable(),
totFSPath.getTotal(),
toString(allTypes)
);
}
}
private void maybeLogHeapDetails() {
JvmInfo jvmInfo = JvmInfo.jvmInfo();
ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
String useCompressedOops = jvmInfo.useCompressedOops();
logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
}
/**
     * Scans the node paths and loads the existing metadata file. If none is found, new metadata is generated.
*/
private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, DataPath... dataPaths) throws IOException {
final Path[] paths = Arrays.stream(dataPaths).map(np -> np.path).toArray(Path[]::new);
NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(paths);
if (metadata == null) {
// load legacy metadata
final Set<String> nodeIds = new HashSet<>();
for (final Path path : paths) {
final NodeMetadata oldStyleMetadata = NodeMetadata.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path);
if (oldStyleMetadata != null) {
nodeIds.add(oldStyleMetadata.nodeId());
}
}
if (nodeIds.size() > 1) {
throw new IllegalStateException("data paths " + Arrays.toString(paths) + " belong to multiple nodes with IDs " + nodeIds);
}
// load legacy metadata
final NodeMetadata legacyMetadata = NodeMetadata.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths);
if (legacyMetadata == null) {
assert nodeIds.isEmpty() : nodeIds;
// If we couldn't find legacy metadata, we set the latest index version to this version. This happens
// when we are starting a new node and there are no indices to worry about.
metadata = new NodeMetadata(generateNodeId(settings), BuildVersion.current(), IndexVersion.current());
} else {
assert nodeIds.equals(Collections.singleton(legacyMetadata.nodeId())) : nodeIds + " doesn't match " + legacyMetadata;
metadata = legacyMetadata;
}
}
metadata = metadata.upgradeToCurrentVersion();
assert metadata.nodeVersion().equals(BuildVersion.current()) : metadata.nodeVersion() + " != " + Build.current();
return metadata;
}
public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
return UUIDs.randomBase64UUID(random);
}
@SuppressForbidden(reason = "System.out.*")
static void applySegmentInfosTrace(Settings settings) {
if (ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.get(settings)) {
SegmentInfos.setInfoStream(System.out);
}
}
private static String toString(Collection<String> items) {
StringBuilder b = new StringBuilder();
for (String item : items) {
if (b.length() > 0) {
b.append(", ");
}
b.append(item);
}
return b.toString();
}
/**
     * Deletes a shard data directory iff the shard's locks were successfully acquired.
     *
     * @param shardId the id of the shard to delete
* @throws IOException if an IOException occurs
*/
public void deleteShardDirectorySafe(ShardId shardId, IndexSettings indexSettings, Consumer<Path[]> listener) throws IOException,
ShardLockObtainFailedException {
final Path[] paths = availableShardPaths(shardId);
logger.trace("deleting shard {} directory, paths: [{}]", shardId, paths);
try (ShardLock lock = shardLock(shardId, "shard deletion under lock")) {
deleteShardDirectoryUnderLock(lock, indexSettings, listener);
}
}
/**
* Acquires, then releases, all {@code write.lock} files in the given
* shard paths. The "write.lock" file is assumed to be under the shard
* path's "index" directory as used by Elasticsearch.
*
* @throws LockObtainFailedException if any of the locks could not be acquired
*/
public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... shardPaths) throws IOException {
Lock[] locks = new Lock[shardPaths.length];
Directory[] dirs = new Directory[shardPaths.length];
try {
for (int i = 0; i < shardPaths.length; i++) {
// resolve the directory the shard actually lives in
Path p = shardPaths[i].resolve("index");
// open a directory (will be immediately closed) on the shard's location
dirs[i] = new NIOFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING));
// create a lock for the "write.lock" file
try {
locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
} catch (IOException ex) {
throw new LockObtainFailedException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p, ex);
}
}
} finally {
IOUtils.closeWhileHandlingException(locks);
IOUtils.closeWhileHandlingException(dirs);
}
}
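    /**
     * Hedged usage sketch (illustration only, not in the original source): before a destructive
     * operation, verify that no other process holds the Lucene write locks on a shard's paths.
     * The settings and paths are assumed to be supplied by the caller.
     */
    @SuppressWarnings("unused")
    private static void exampleAcquireFSLock(IndexSettings indexSettings, Path... shardPaths) throws IOException {
        // throws LockObtainFailedException if any write.lock is currently held elsewhere
        acquireFSLockForPaths(indexSettings, shardPaths);
    }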
/**
* Deletes a shard data directory. Note: this method assumes that the shard
* lock is acquired. This method will also attempt to acquire the write
* locks for the shard's paths before deleting the data, but this is best
* effort, as the lock is released before the deletion happens in order to
     * allow the folder to be deleted.
     *
     * @param lock the shard's lock
* @throws IOException if an IOException occurs
* @throws ElasticsearchException if the write.lock is not acquirable
*/
public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings, Consumer<Path[]> listener) throws IOException {
final ShardId shardId = lock.getShardId();
assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
final Path[] paths = availableShardPaths(shardId);
logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
acquireFSLockForPaths(indexSettings, paths);
listener.accept(paths);
IOUtils.rm(paths);
if (indexSettings.hasCustomDataPath()) {
Path customLocation = resolveCustomLocation(indexSettings.customDataPath(), shardId);
logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
acquireFSLockForPaths(indexSettings, customLocation);
logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
listener.accept(new Path[] { customLocation });
IOUtils.rm(customLocation);
}
logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
assert assertPathsDoNotExist(paths);
}
private static boolean assertPathsDoNotExist(final Path[] paths) {
Set<Path> existingPaths = Stream.of(paths).filter(FileSystemUtils::exists).filter(leftOver -> {
            // Relaxed assertion for the special case where only the empty state directory exists after deleting
            // the shard directory, because it was concurrently recreated by a metadata read action.
try (DirectoryStream<Path> children = Files.newDirectoryStream(leftOver)) {
Iterator<Path> iter = children.iterator();
if (iter.hasNext() == false) {
return true;
}
Path maybeState = iter.next();
if (iter.hasNext() || maybeState.equals(leftOver.resolve(MetadataStateFormat.STATE_DIR_NAME)) == false) {
return true;
}
try (DirectoryStream<Path> stateChildren = Files.newDirectoryStream(maybeState)) {
return stateChildren.iterator().hasNext();
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}).collect(Collectors.toSet());
assert existingPaths.size() == 0 : "Paths exist that should have been deleted: " + existingPaths;
return existingPaths.size() == 0;
}
private boolean isShardLocked(ShardId id) {
try {
shardLock(id, "checking if shard is locked").close();
return false;
} catch (ShardLockObtainFailedException ex) {
return true;
}
}
/**
     * Deletes an index's data directory recursively iff all of the index's
     * shard locks were successfully acquired. If any of the index's shard directories can't be locked,
     * none of the shards will be deleted.
*
* @param index the index to delete
* @param lockTimeoutMS how long to wait for acquiring the indices shard locks
* @param indexSettings settings for the index being deleted
* @throws IOException if any of the shards data directories can't be locked or deleted
*/
public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSettings indexSettings, Consumer<Path[]> listener)
throws IOException, ShardLockObtainFailedException {
final List<ShardLock> locks = lockAllForIndex(index, indexSettings, "deleting index directory", lockTimeoutMS);
try {
deleteIndexDirectoryUnderLock(index, indexSettings, listener);
} finally {
IOUtils.closeWhileHandlingException(locks);
}
}
/**
     * Deletes an index's data directory recursively.
* Note: this method assumes that the shard lock is acquired
*
* @param index the index to delete
* @param indexSettings settings for the index being deleted
*/
public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings, Consumer<Path[]> listener) throws IOException {
final Path[] indexPaths = indexPaths(index);
logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths);
listener.accept(indexPaths);
IOUtils.rm(indexPaths);
if (indexSettings.hasCustomDataPath()) {
Path customLocation = resolveIndexCustomLocation(indexSettings.customDataPath(), index.getUUID());
logger.trace("deleting custom index {} directory [{}]", index, customLocation);
listener.accept(new Path[] { customLocation });
IOUtils.rm(customLocation);
}
}
/**
* Tries to lock all local shards for the given index. If any of the shard locks can't be acquired
* a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are released.
*
* @param index the index to lock shards for
* @param lockTimeoutMS how long to wait for acquiring the indices shard locks
* @return the {@link ShardLock} instances for this index.
*/
public List<ShardLock> lockAllForIndex(
final Index index,
final IndexSettings settings,
final String lockDetails,
final long lockTimeoutMS
) throws ShardLockObtainFailedException {
final int numShards = settings.getNumberOfShards();
if (numShards <= 0) {
throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards");
}
logger.trace("locking all shards for index {} - [{}]", index, numShards);
List<ShardLock> allLocks = new ArrayList<>(numShards);
boolean success = false;
long startTimeNS = System.nanoTime();
try {
for (int i = 0; i < numShards; i++) {
long timeoutLeftMS = Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
allLocks.add(shardLock(new ShardId(index, i), lockDetails, timeoutLeftMS));
}
success = true;
} finally {
if (success == false) {
logger.trace("unable to lock all shards for index {}", index);
IOUtils.closeWhileHandlingException(allLocks);
}
}
return allLocks;
}
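    /**
     * Hedged usage sketch (not part of the original source): lock every local shard of an index,
     * do exclusive work, then release all locks. The index and settings values are hypothetical
     * caller-supplied examples.
     */
    @SuppressWarnings("unused")
    private void exampleLockAllForIndex(Index index, IndexSettings settings) throws ShardLockObtainFailedException {
        final List<ShardLock> locks = lockAllForIndex(index, settings, "example exclusive work", TimeUnit.SECONDS.toMillis(5));
        try {
            // all local shards of the index are exclusively locked here
        } finally {
            IOUtils.closeWhileHandlingException(locks);
        }
    }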
/**
     * Tries to lock the given shard's ID. A shard lock is required to perform any kind of
     * write operation on a shard's data directory, like deleting files, creating a new index writer
     * or recovering from a different shard instance into it. If the shard lock cannot be acquired,
* a {@link ShardLockObtainFailedException} is thrown.
*
* Note: this method will return immediately if the lock can't be acquired.
*
* @param id the shard ID to lock
* @param details information about why the shard is being locked
* @return the shard lock. Call {@link ShardLock#close()} to release the lock
*/
public ShardLock shardLock(ShardId id, final String details) throws ShardLockObtainFailedException {
return shardLock(id, details, 0);
}
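    /**
     * Hedged usage sketch (illustration only): a shard lock acquired here is released
     * automatically via try-with-resources. The shard id and details are made-up examples.
     */
    @SuppressWarnings("unused")
    private void exampleShardLockUsage(ShardId exampleShardId) throws ShardLockObtainFailedException {
        try (ShardLock lock = shardLock(exampleShardId, "example usage", 5000)) {
            // exclusive access to the shard's data directory is held within this block
        }
    }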
/**
     * Tries to lock the given shard's ID. A shard lock is required to perform any kind of
     * write operation on a shard's data directory, like deleting files, creating a new index writer
     * or recovering from a different shard instance into it. If the shard lock cannot be acquired,
     * a {@link ShardLockObtainFailedException} is thrown.
* @param shardId the shard ID to lock
* @param details information about why the shard is being locked
* @param lockTimeoutMS the lock timeout in milliseconds
* @return the shard lock. Call {@link ShardLock#close()} to release the lock
*/
public ShardLock shardLock(final ShardId shardId, final String details, final long lockTimeoutMS)
throws ShardLockObtainFailedException {
logger.trace("acquiring node shardlock on [{}], timeout [{}], details [{}]", shardId, lockTimeoutMS, details);
final InternalShardLock shardLock;
final boolean acquired;
synchronized (shardLocks) {
final InternalShardLock found = shardLocks.get(shardId);
if (found != null) {
shardLock = found;
shardLock.incWaitCount();
acquired = false;
} else {
shardLock = new InternalShardLock(shardId, details);
shardLocks.put(shardId, shardLock);
acquired = true;
}
}
if (acquired == false) {
boolean success = false;
try {
shardLock.acquire(lockTimeoutMS, details);
success = true;
} finally {
if (success == false) {
shardLock.decWaitCount();
}
}
}
logger.trace("successfully acquired shardlock for [{}]", shardId);
return new ShardLock(shardId) { // new instance prevents double closing
@Override
protected void closeInternal() {
shardLock.release();
logger.trace("released shard lock for [{}]", shardId);
}
@Override
public void setDetails(String details) {
shardLock.setDetails(details);
}
};
}
/**
* A functional interface that people can use to reference {@link #shardLock(ShardId, String, long)}
*/
@FunctionalInterface
public interface ShardLocker {
ShardLock lock(ShardId shardId, String lockDetails, long lockTimeoutMS) throws ShardLockObtainFailedException;
}
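    /**
     * Illustrative note (not in the original source): the interface above is satisfied by a
     * method reference to {@link #shardLock(ShardId, String, long)}.
     */
    @SuppressWarnings("unused")
    private ShardLocker exampleShardLocker() {
        return this::shardLock;
    }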
/**
     * Returns all currently locked shards.
     *
     * Note: the returned shard ids do not contain a valid index UUID.
*/
public Set<ShardId> lockedShards() {
synchronized (shardLocks) {
return Set.copyOf(shardLocks.keySet());
}
}
// throttle the hot-threads calls: no more than one per minute
private final Semaphore shardLockHotThreadsPermit = new Semaphore(1);
private long nextShardLockHotThreadsNanos = Long.MIN_VALUE;
private void maybeLogThreadDump(ShardId shardId, String message) {
if (logger.isDebugEnabled() == false) {
return;
}
final var prefix = format("hot threads while failing to obtain shard lock for %s: %s", shardId, message);
if (shardLockHotThreadsPermit.tryAcquire()) {
try {
final var now = System.nanoTime();
if (now <= nextShardLockHotThreadsNanos) {
return;
}
nextShardLockHotThreadsNanos = now + TimeUnit.SECONDS.toNanos(60);
HotThreads.logLocalHotThreads(logger, Level.DEBUG, prefix, ReferenceDocs.SHARD_LOCK_TROUBLESHOOTING);
} catch (Exception e) {
logger.error(format("could not obtain %s", prefix), e);
} finally {
shardLockHotThreadsPermit.release();
}
}
}
private final class InternalShardLock {
/*
         * This class holds a mutex for exclusive access and timeout / wait semantics
         * and a reference count to clean up the shard lock instance from the internal data
         * structure if nobody is waiting for it. The wait count is guarded by the same lock
         * that is used to mutate the map holding the shard locks, to ensure exclusive access.
*/
private final Semaphore mutex = new Semaphore(1);
private int waitCount = 1; // guarded by shardLocks
private final ShardId shardId;
private volatile Tuple<Long, String> lockDetails;
InternalShardLock(final ShardId shardId, final String details) {
this.shardId = shardId;
mutex.acquireUninterruptibly();
lockDetails = Tuple.tuple(System.nanoTime(), details);
}
private void release() {
mutex.release();
decWaitCount();
}
void incWaitCount() {
synchronized (shardLocks) {
assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
waitCount++;
}
}
private void decWaitCount() {
synchronized (shardLocks) {
assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
--waitCount;
logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount);
if (waitCount == 0) {
logger.trace("last shard lock wait decremented, removing lock for {}", shardId);
InternalShardLock remove = shardLocks.remove(shardId);
assert remove != null : "Removed lock was null";
}
}
}
void acquire(long timeoutInMillis, final String details) throws ShardLockObtainFailedException {
try {
if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS)) {
setDetails(details);
} else {
final Tuple<Long, String> lockDetails = this.lockDetails; // single volatile read
final var message = format(
"obtaining shard lock for [%s] timed out after [%dms], lock already held for [%s] with age [%dms]",
details,
timeoutInMillis,
lockDetails.v2(),
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lockDetails.v1())
);
maybeLogThreadDump(shardId, message);
throw new ShardLockObtainFailedException(shardId, message);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ShardLockObtainFailedException(shardId, "thread interrupted while trying to obtain shard lock", e);
}
}
public void setDetails(String details) {
lockDetails = Tuple.tuple(System.nanoTime(), details);
}
}
public boolean hasNodeFile() {
return dataPaths != null && locks != null;
}
/**
     * Returns an array of all of the node's data locations.
* @throws IllegalStateException if the node is not configured to store local locations
*/
public Path[] nodeDataPaths() {
assertEnvIsLocked();
Path[] paths = new Path[dataPaths.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = dataPaths[i].path;
}
return paths;
}
/**
* Returns shared data path for this node environment
*/
public Path sharedDataPath() {
return sharedDataPath;
}
/**
     * Returns the unique uuid describing this node. The uuid is persisted in the data folder of this node
     * and remains the same across restarts.
**/
public String nodeId() {
// we currently only return the ID and hide the underlying nodeMetadata implementation in order to avoid
        // confusion with other "metadata" like node settings found in elasticsearch.yml. In the future
// we can encapsulate both (and more) in one NodeMetadata (or NodeSettings) object ala IndexSettings
return nodeMetadata.nodeId();
}
/**
* Returns the loaded NodeMetadata for this node
*/
public NodeMetadata nodeMetadata() {
return nodeMetadata;
}
/**
* Returns an array of all of the {@link DataPath}s.
*/
public DataPath[] dataPaths() {
assertEnvIsLocked();
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
return dataPaths;
}
/**
* Returns all index paths.
*/
public Path[] indexPaths(Index index) {
assertEnvIsLocked();
Path[] indexPaths = new Path[dataPaths.length];
for (int i = 0; i < dataPaths.length; i++) {
indexPaths[i] = dataPaths[i].resolve(index);
}
return indexPaths;
}
/**
     * Returns all shard paths excluding the custom shard path. Note: Shards are only allocated on one of the
* returned paths. The returned array may contain paths to non-existing directories.
*
* @see IndexSettings#hasCustomDataPath()
* @see #resolveCustomLocation(String, ShardId)
*
*/
public Path[] availableShardPaths(ShardId shardId) {
assertEnvIsLocked();
final DataPath[] dataPaths = dataPaths();
final Path[] shardLocations = new Path[dataPaths.length];
for (int i = 0; i < dataPaths.length; i++) {
shardLocations[i] = dataPaths[i].resolve(shardId);
}
return shardLocations;
}
/**
     * Returns all folder names in the ${data.paths}/indices folder.
*/
public Set<String> availableIndexFolders() throws IOException {
return availableIndexFolders(Predicates.never());
}
/**
     * Returns folder names in the ${data.paths}/indices folder that don't match the given predicate.
* @param excludeIndexPathIdsPredicate folder names to exclude
*/
public Set<String> availableIndexFolders(Predicate<String> excludeIndexPathIdsPredicate) throws IOException {
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assertEnvIsLocked();
Set<String> indexFolders = new HashSet<>();
for (DataPath dataPath : dataPaths) {
indexFolders.addAll(availableIndexFoldersForPath(dataPath, excludeIndexPathIdsPredicate));
}
return indexFolders;
}
/**
* Return all directory names in the indices directory for the given node path.
*
* @param dataPath the path
* @return all directories that could be indices for the given node path.
* @throws IOException if an I/O exception occurs traversing the filesystem
*/
public Set<String> availableIndexFoldersForPath(final DataPath dataPath) throws IOException {
return availableIndexFoldersForPath(dataPath, Predicates.never());
}
/**
* Return directory names in the indices directory for the given node path that don't match the given predicate.
*
* @param dataPath the path
* @param excludeIndexPathIdsPredicate folder names to exclude
* @return all directories that could be indices for the given node path.
* @throws IOException if an I/O exception occurs traversing the filesystem
*/
public Set<String> availableIndexFoldersForPath(final DataPath dataPath, Predicate<String> excludeIndexPathIdsPredicate)
throws IOException {
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assertEnvIsLocked();
final Set<String> indexFolders = new HashSet<>();
Path indicesLocation = dataPath.indicesPath;
if (Files.isDirectory(indicesLocation)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
for (Path index : stream) {
final String fileName = index.getFileName().toString();
if (excludeIndexPathIdsPredicate.test(fileName) == false && Files.isDirectory(index)) {
indexFolders.add(fileName);
}
}
}
}
return indexFolders;
}
/**
* Resolves all existing paths to <code>indexFolderName</code> in ${data.paths}/indices
*/
public Path[] resolveIndexFolder(String indexFolderName) {
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assertEnvIsLocked();
List<Path> paths = new ArrayList<>(dataPaths.length);
for (DataPath dataPath : dataPaths) {
Path indexFolder = dataPath.indicesPath.resolve(indexFolderName);
if (Files.exists(indexFolder)) {
paths.add(indexFolder);
}
}
return paths.toArray(Path[]::new);
}
/**
* Tries to find all allocated shards for the given index
     * on the current node. NOTE: This method is prone to race conditions on the filesystem layer since it might not
* see directories created concurrently or while it's traversing.
* @param index the index to filter shards
* @return a set of shard IDs
* @throws IOException if an IOException occurs
*/
public Set<ShardId> findAllShardIds(final Index index) throws IOException {
assert index != null;
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assertEnvIsLocked();
final Set<ShardId> shardIds = new HashSet<>();
final String indexUniquePathId = index.getUUID();
for (final DataPath dataPath : dataPaths) {
shardIds.addAll(findAllShardsForIndex(dataPath.indicesPath.resolve(indexUniquePathId), index));
}
return shardIds;
}
/**
* Find all the shards for this index, returning a map of the {@code DataPath} to the number of shards on that path
* @param index the index by which to filter shards
* @return a map of DataPath to count of the shards for the index on that path
* @throws IOException if an IOException occurs
*/
public Map<DataPath, Long> shardCountPerPath(final Index index) throws IOException {
assert index != null;
if (dataPaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assertEnvIsLocked();
final Map<DataPath, Long> shardCountPerPath = new HashMap<>();
final String indexUniquePathId = index.getUUID();
for (final DataPath dataPath : dataPaths) {
Path indexLocation = dataPath.indicesPath.resolve(indexUniquePathId);
if (Files.isDirectory(indexLocation)) {
shardCountPerPath.put(dataPath, (long) findAllShardsForIndex(indexLocation, index).size());
}
}
return shardCountPerPath;
}
private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index) throws IOException {
assert indexPath.getFileName().toString().equals(index.getUUID());
Set<ShardId> shardIds = new HashSet<>();
if (Files.isDirectory(indexPath)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
for (Path shardPath : stream) {
String fileName = shardPath.getFileName().toString();
if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
int shardId = Integer.parseInt(fileName);
ShardId id = new ShardId(index, shardId);
shardIds.add(id);
}
}
}
}
return shardIds;
}
@Override
public void close() {
if (closed.compareAndSet(false, true) && locks != null) {
synchronized (locks) {
for (Lock lock : locks) {
try {
logger.trace("releasing lock [{}]", lock);
lock.close();
} catch (IOException e) {
logger.trace(() -> "failed to release lock [" + lock + "]", e);
}
}
}
}
}
private void assertEnvIsLocked() {
if (closed.get() == false && locks != null) {
synchronized (locks) {
if (closed.get()) return; // raced with close() - we lost
for (Lock lock : locks) {
try {
lock.ensureValid();
} catch (IOException e) {
logger.warn("lock assertion failed", e);
throw new IllegalStateException("environment is not locked", e);
}
}
}
}
}
/**
     * This method tries to write an empty file and move it using an atomic move operation.
* This method throws an {@link IllegalStateException} if this operation is
* not supported by the filesystem. This test is executed on each of the data directories.
* This method cleans up all files even in the case of an error.
*/
private static void ensureAtomicMoveSupported(final DataPath[] dataPaths) throws IOException {
for (DataPath dataPath : dataPaths) {
assert Files.isDirectory(dataPath.path) : dataPath.path + " is not a directory";
final Path src = dataPath.path.resolve(TEMP_FILE_NAME + ".tmp");
final Path target = dataPath.path.resolve(TEMP_FILE_NAME + ".final");
try {
Files.deleteIfExists(src);
Files.createFile(src);
Files.move(src, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
} catch (AtomicMoveNotSupportedException ex) {
throw new IllegalStateException(
"atomic_move is not supported by the filesystem on path ["
+ dataPath.path
+ "] atomic_move is required for elasticsearch to work correctly.",
ex
);
} finally {
try {
Files.deleteIfExists(src);
} finally {
Files.deleteIfExists(target);
}
}
}
}
private static void ensureNoShardData(final DataPath[] dataPaths) throws IOException {
List<Path> shardDataPaths = collectShardDataPaths(dataPaths);
if (shardDataPaths.isEmpty() == false) {
final String message = String.format(
Locale.ROOT,
"node does not have the %s role but has shard data: %s. Use 'elasticsearch-node repurpose' tool to clean up",
DiscoveryNodeRole.DATA_ROLE.roleName(),
shardDataPaths
);
throw new IllegalStateException(message);
}
}
private static void ensureNoIndexMetadata(final DataPath[] dataPaths) throws IOException {
List<Path> indexMetadataPaths = collectIndexMetadataPaths(dataPaths);
if (indexMetadataPaths.isEmpty() == false) {
final String message = String.format(
Locale.ROOT,
"node does not have the %s and %s roles but has index metadata: %s. Use 'elasticsearch-node repurpose' tool to clean up",
DiscoveryNodeRole.DATA_ROLE.roleName(),
DiscoveryNodeRole.MASTER_ROLE.roleName(),
indexMetadataPaths
);
throw new IllegalStateException(message);
}
}
/**
* Collect the paths containing shard data in the indicated node paths. The returned paths will point to the shard data folder.
*/
static List<Path> collectShardDataPaths(DataPath[] dataPaths) throws IOException {
return collectIndexSubPaths(dataPaths, NodeEnvironment::isShardPath);
}
/**
* Collect the paths containing index meta data in the indicated node paths. The returned paths will point to the
* {@link MetadataStateFormat#STATE_DIR_NAME} folder
*/
static List<Path> collectIndexMetadataPaths(DataPath[] dataPaths) throws IOException {
return collectIndexSubPaths(dataPaths, NodeEnvironment::isIndexMetadataPath);
}
private static List<Path> collectIndexSubPaths(DataPath[] dataPaths, Predicate<Path> subPathPredicate) throws IOException {
List<Path> indexSubPaths = new ArrayList<>();
for (DataPath dataPath : dataPaths) {
Path indicesPath = dataPath.indicesPath;
if (Files.isDirectory(indicesPath)) {
try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(indicesPath)) {
for (Path indexPath : indexStream) {
if (Files.isDirectory(indexPath)) {
try (Stream<Path> shardStream = Files.list(indexPath)) {
shardStream.filter(subPathPredicate).map(Path::toAbsolutePath).forEach(indexSubPaths::add);
}
}
}
}
}
}
return indexSubPaths;
}
private static boolean isShardPath(Path path) {
return Files.isDirectory(path) && path.getFileName().toString().chars().allMatch(Character::isDigit);
}
private static boolean isIndexMetadataPath(Path path) {
return Files.isDirectory(path) && path.getFileName().toString().equals(MetadataStateFormat.STATE_DIR_NAME);
}
/**
     * Resolve the custom path for an index's shard.
*/
public static Path resolveBaseCustomLocation(String customDataPath, Path sharedDataPath) {
if (Strings.isNotEmpty(customDataPath)) {
// This assert is because this should be caught by MetadataCreateIndexService
assert sharedDataPath != null;
return sharedDataPath.resolve(customDataPath).resolve("0");
} else {
throw new IllegalArgumentException("no custom " + IndexMetadata.SETTING_DATA_PATH + " setting available");
}
}
/**
     * Resolve the custom path for an index's shard.
* Uses the {@code IndexMetadata.SETTING_DATA_PATH} setting to determine
* the root path for the index.
*
* @param customDataPath the custom data path
*/
private Path resolveIndexCustomLocation(String customDataPath, String indexUUID) {
return resolveIndexCustomLocation(customDataPath, indexUUID, sharedDataPath);
}
private static Path resolveIndexCustomLocation(String customDataPath, String indexUUID, Path sharedDataPath) {
return resolveBaseCustomLocation(customDataPath, sharedDataPath).resolve(indexUUID);
}
/**
     * Resolve the custom path for an index's shard.
* Uses the {@code IndexMetadata.SETTING_DATA_PATH} setting to determine
* the root path for the index.
*
* @param customDataPath the custom data path
* @param shardId shard to resolve the path to
*/
public Path resolveCustomLocation(String customDataPath, final ShardId shardId) {
return resolveCustomLocation(customDataPath, shardId, sharedDataPath);
}
public static Path resolveCustomLocation(String customDataPath, final ShardId shardId, Path sharedDataPath) {
return resolveIndexCustomLocation(customDataPath, shardId.getIndex().getUUID(), sharedDataPath).resolve(
Integer.toString(shardId.id())
);
}
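    // Worked example (illustrative, not part of the original source): with a shared data path of
    // /shared, a relative customDataPath of "custom", index uuid "abc123" and shard 1, the methods
    // above resolve to /shared/custom/0/abc123/1; the "0" segment is the fixed legacy suffix added
    // by resolveBaseCustomLocation.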
/**
* Returns the {@code DataPath.path} for this shard.
*/
public static Path shardStatePathToDataPath(Path shardPath) {
int count = shardPath.getNameCount();
// Sanity check:
assert Integer.parseInt(shardPath.getName(count - 1).toString()) >= 0;
assert "indices".equals(shardPath.getName(count - 3).toString());
return shardPath.getParent().getParent().getParent();
}
/**
     * This is a best-effort check to ensure that we actually have write permissions in all our data directories.
     * This prevents disasters if nodes are started under the wrong username, etc.
*/
private void assertCanWrite() throws IOException {
for (Path path : nodeDataPaths()) { // check node-paths are writable
tryWriteTempFile(path);
}
for (String indexFolderName : this.availableIndexFolders()) {
for (Path indexPath : this.resolveIndexFolder(indexFolderName)) { // check index paths are writable
Path indexStatePath = indexPath.resolve(MetadataStateFormat.STATE_DIR_NAME);
tryWriteTempFile(indexStatePath);
tryWriteTempFile(indexPath);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
for (Path shardPath : stream) {
String fileName = shardPath.getFileName().toString();
if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
Path statePath = shardPath.resolve(MetadataStateFormat.STATE_DIR_NAME);
Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
tryWriteTempFile(indexDir);
tryWriteTempFile(translogDir);
tryWriteTempFile(statePath);
tryWriteTempFile(shardPath);
}
}
}
}
}
}
// package private for testing
static final String TEMP_FILE_NAME = ".es_temp_file";
private static void tryWriteTempFile(Path path) throws IOException {
if (Files.exists(path)) {
Path resolve = path.resolve(TEMP_FILE_NAME);
try {
// delete any lingering file from a previous failure
Files.deleteIfExists(resolve);
Files.createFile(resolve);
Files.delete(resolve);
} catch (IOException ex) {
throw new IOException("failed to test writes in data directory [" + path + "] write permission is required", ex);
}
}
}
/**
* Get a useful version string to direct a user's downgrade operation
*
* <p>If a user is trying to install 8.0 but has incompatible indices, the user should
* downgrade to 7.17.x. We return 7.17.0, unless the user is trying to upgrade from
* a 7.17.x release, in which case we return the last installed version.
* @return Version to downgrade to
*/
// visible for testing
static String getBestDowngradeVersion(String previousNodeVersion) {
// this method should only be called in the context of an upgrade to 8.x
assert Build.current().version().startsWith("9.") == false;
Pattern pattern = Pattern.compile("^7\\.(\\d+)\\.\\d+$");
Matcher matcher = pattern.matcher(previousNodeVersion);
if (matcher.matches()) {
try {
int minorVersion = Integer.parseInt(matcher.group(1));
if (minorVersion >= 17) {
return previousNodeVersion;
}
} catch (NumberFormatException e) {
// continue and return default
}
}
return "7.17.0";
}
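    /**
     * Illustrative check (not part of the original source) of the logic above: 7.17.x inputs are
     * returned verbatim, anything else maps to "7.17.0".
     */
    @SuppressWarnings("unused")
    private static void exampleBestDowngradeVersion() {
        assert getBestDowngradeVersion("7.17.5").equals("7.17.5");
        assert getBestDowngradeVersion("7.16.2").equals("7.17.0");
        assert getBestDowngradeVersion("not-a-version").equals("7.17.0");
    }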
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/env/NodeEnvironment.java |
469 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.caching.database;
import static com.iluwatar.caching.constants.CachingConstants.ADD_INFO;
import static com.iluwatar.caching.constants.CachingConstants.USER_ACCOUNT;
import static com.iluwatar.caching.constants.CachingConstants.USER_ID;
import static com.iluwatar.caching.constants.CachingConstants.USER_NAME;
import com.iluwatar.caching.UserAccount;
import com.iluwatar.caching.constants.CachingConstants;
import com.mongodb.MongoClient;
import com.mongodb.MongoClientOptions;
import com.mongodb.MongoCredential;
import com.mongodb.ServerAddress;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.UpdateOptions;
import lombok.extern.slf4j.Slf4j;
import org.bson.Document;
/**
 * Implementation of DbManager.
 * Implements base methods to work with MongoDB.
*/
@Slf4j
public class MongoDb implements DbManager {
private static final String DATABASE_NAME = "admin";
private static final String MONGO_USER = "root";
private static final String MONGO_PASSWORD = "rootpassword";
private MongoClient client;
private MongoDatabase db;
void setDb(MongoDatabase db) {
this.db = db;
}
/**
   * Connect to the DB and check the connection.
*/
@Override
public void connect() {
MongoCredential mongoCredential = MongoCredential.createCredential(MONGO_USER,
DATABASE_NAME,
MONGO_PASSWORD.toCharArray());
MongoClientOptions options = MongoClientOptions.builder().build();
client = new MongoClient(new ServerAddress(), mongoCredential, options);
db = client.getDatabase(DATABASE_NAME);
}
@Override
public void disconnect() {
client.close();
}
/**
* Read data from DB.
*
* @param userId {@link String}
* @return {@link UserAccount}
*/
@Override
public UserAccount readFromDb(final String userId) {
    Document doc = db
        .getCollection(CachingConstants.USER_ACCOUNT)
        .find(new Document(USER_ID, userId))
        .first();
    if (doc == null) {
      return null;
    }
    String userName = doc.getString(USER_NAME);
    String appInfo = doc.getString(ADD_INFO);
    return new UserAccount(userId, userName, appInfo);
}
/**
* Write data to DB.
*
* @param userAccount {@link UserAccount}
* @return {@link UserAccount}
*/
@Override
public UserAccount writeToDb(final UserAccount userAccount) {
db.getCollection(USER_ACCOUNT).insertOne(
new Document(USER_ID, userAccount.getUserId())
.append(USER_NAME, userAccount.getUserName())
.append(ADD_INFO, userAccount.getAdditionalInfo())
);
return userAccount;
}
/**
* Update DB.
*
* @param userAccount {@link UserAccount}
* @return {@link UserAccount}
*/
@Override
public UserAccount updateDb(final UserAccount userAccount) {
Document id = new Document(USER_ID, userAccount.getUserId());
Document dataSet = new Document(USER_NAME, userAccount.getUserName())
.append(ADD_INFO, userAccount.getAdditionalInfo());
db.getCollection(CachingConstants.USER_ACCOUNT)
.updateOne(id, new Document("$set", dataSet));
return userAccount;
}
/**
* Update data if exists.
*
* @param userAccount {@link UserAccount}
* @return {@link UserAccount}
*/
@Override
public UserAccount upsertDb(final UserAccount userAccount) {
String userId = userAccount.getUserId();
String userName = userAccount.getUserName();
String additionalInfo = userAccount.getAdditionalInfo();
db.getCollection(CachingConstants.USER_ACCOUNT).updateOne(
new Document(USER_ID, userId),
new Document("$set",
new Document(USER_ID, userId)
.append(USER_NAME, userName)
.append(ADD_INFO, additionalInfo)
),
new UpdateOptions().upsert(true)
);
return userAccount;
}
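  /**
   * Hedged end-to-end usage sketch (illustration only; not part of the original class):
   * connect, upsert a hypothetical account, read it back, and disconnect. Requires a
   * reachable MongoDB instance matching the credentials above; the account values are
   * made-up examples.
   */
  @SuppressWarnings("unused")
  private static void exampleUsage() {
    MongoDb mongoDb = new MongoDb();
    mongoDb.connect();
    mongoDb.upsertDb(new UserAccount("001", "John", "example additional info"));
    UserAccount account = mongoDb.readFromDb("001"); // null if the id is absent
    mongoDb.disconnect();
  }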
}
| tyrellbaker-blip/java-design-patterns | caching/src/main/java/com/iluwatar/caching/database/MongoDb.java |
470 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.datamapper;
import java.io.Serial;
/**
 * Uses a RuntimeException to avoid depending on implementation-specific exceptions. This helps in
 * decoupling.
*
* @author amit.dixit
*/
public final class DataMapperException extends RuntimeException {
@Serial
private static final long serialVersionUID = 1L;
/**
* Constructs a new runtime exception with the specified detail message. The cause is not
* initialized, and may subsequently be initialized by a call to {@link #initCause}.
*
* @param message the detail message. The detail message is saved for later retrieval by the
* {@link #getMessage()} method.
*/
public DataMapperException(final String message) {
super(message);
}
}
| rajprins/java-design-patterns | data-mapper/src/main/java/com/iluwatar/datamapper/DataMapperException.java |
471 | package com.thealgorithms.maths;
import java.math.BigDecimal;
import java.util.Arrays;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.stream.IntStream;
/**
 * @author caos321
 * @date 31 October 2021 (Sunday)
*/
public final class MatrixUtil {
private MatrixUtil() {
}
public static boolean isValid(final BigDecimal[][] matrix) {
return matrix != null && matrix.length > 0 && matrix[0].length > 0;
}
public static boolean hasEqualSizes(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
return (isValid(matrix1) && isValid(matrix2) && matrix1.length == matrix2.length && matrix1[0].length == matrix2[0].length);
}
public static boolean canMultiply(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
return (isValid(matrix1) && isValid(matrix2) && matrix1[0].length == matrix2.length);
}
public static Optional<BigDecimal[][]> operate(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2, final BiFunction<BigDecimal, BigDecimal, BigDecimal> operation) {
if (!hasEqualSizes(matrix1, matrix2)) {
return Optional.empty();
}
final int rowSize = matrix1.length;
final int columnSize = matrix1[0].length;
final BigDecimal[][] result = new BigDecimal[rowSize][columnSize];
IntStream.range(0, rowSize).forEach(rowIndex -> IntStream.range(0, columnSize).forEach(columnIndex -> {
final BigDecimal value1 = matrix1[rowIndex][columnIndex];
final BigDecimal value2 = matrix2[rowIndex][columnIndex];
result[rowIndex][columnIndex] = operation.apply(value1, value2);
}));
return Optional.of(result);
}
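    /**
     * Illustrative sketch (not part of the original file): operate(...) above accepts any
     * element-wise BiFunction, so new operations compose for free. For example, an
     * element-wise maximum:
     */
    public static Optional<BigDecimal[][]> elementWiseMax(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
        return operate(matrix1, matrix2, BigDecimal::max);
    }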
public static Optional<BigDecimal[][]> add(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
return operate(matrix1, matrix2, BigDecimal::add);
}
public static Optional<BigDecimal[][]> subtract(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
return operate(matrix1, matrix2, BigDecimal::subtract);
}
public static Optional<BigDecimal[][]> multiply(final BigDecimal[][] matrix1, final BigDecimal[][] matrix2) {
if (!canMultiply(matrix1, matrix2)) {
return Optional.empty();
}
final int size = matrix1[0].length;
final int matrix1RowSize = matrix1.length;
final int matrix2ColumnSize = matrix2[0].length;
final BigDecimal[][] result = new BigDecimal[matrix1RowSize][matrix2ColumnSize];
IntStream.range(0, matrix1RowSize)
.forEach(rowIndex
-> IntStream.range(0, matrix2ColumnSize)
.forEach(columnIndex
-> result[rowIndex][columnIndex] = IntStream.range(0, size)
.mapToObj(index -> {
final BigDecimal value1 = matrix1[rowIndex][index];
final BigDecimal value2 = matrix2[index][columnIndex];
return value1.multiply(value2);
})
.reduce(BigDecimal.ZERO, BigDecimal::add)));
return Optional.of(result);
}
public static void assertThat(final BigDecimal[][] actual, final BigDecimal[][] expected) {
if (!Objects.deepEquals(actual, expected)) {
throw new AssertionError(String.format("expected=%s but was actual=%s", Arrays.deepToString(expected), Arrays.deepToString(actual)));
}
}
public static void main(final String[] args) {
{
final BigDecimal[][] matrix1 = {
{new BigDecimal(3), new BigDecimal(2)},
{new BigDecimal(0), new BigDecimal(1)},
};
final BigDecimal[][] matrix2 = {
{new BigDecimal(1), new BigDecimal(3)},
{new BigDecimal(2), new BigDecimal(0)},
};
final BigDecimal[][] actual = add(matrix1, matrix2).orElseThrow(() -> new AssertionError("Could not compute matrix!"));
final BigDecimal[][] expected = {
{new BigDecimal(4), new BigDecimal(5)},
{new BigDecimal(2), new BigDecimal(1)},
};
assertThat(actual, expected);
}
{
final BigDecimal[][] matrix1 = {
{new BigDecimal(1), new BigDecimal(4)},
{new BigDecimal(5), new BigDecimal(6)},
};
final BigDecimal[][] matrix2 = {
{new BigDecimal(2), new BigDecimal(0)},
{new BigDecimal(-2), new BigDecimal(-3)},
};
final BigDecimal[][] actual = subtract(matrix1, matrix2).orElseThrow(() -> new AssertionError("Could not compute matrix!"));
final BigDecimal[][] expected = {
{new BigDecimal(-1), new BigDecimal(4)},
{new BigDecimal(7), new BigDecimal(9)},
};
assertThat(actual, expected);
}
{
final BigDecimal[][] matrix1 = {
{new BigDecimal(1), new BigDecimal(2), new BigDecimal(3)},
{new BigDecimal(4), new BigDecimal(5), new BigDecimal(6)},
{new BigDecimal(7), new BigDecimal(8), new BigDecimal(9)},
};
final BigDecimal[][] matrix2 = {
{new BigDecimal(1), new BigDecimal(2)},
{new BigDecimal(3), new BigDecimal(4)},
{new BigDecimal(5), new BigDecimal(6)},
};
final BigDecimal[][] actual = multiply(matrix1, matrix2).orElseThrow(() -> new AssertionError("Could not compute matrix!"));
final BigDecimal[][] expected = {
{new BigDecimal(22), new BigDecimal(28)},
{new BigDecimal(49), new BigDecimal(64)},
{new BigDecimal(76), new BigDecimal(100)},
};
assertThat(actual, expected);
}
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/maths/MatrixUtil.java |
472 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.indices;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader.CacheHelper;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ResolvedIndices;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.RefCountAwareThreadedActionListener;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.gateway.MetadataStateFormat;
import org.elasticsearch.index.CloseUtils;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.SlowLogFieldProvider;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.bulk.stats.BulkStats;
import org.elasticsearch.index.cache.request.ShardRequestCache;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.NoOpEngine;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.CoordinatorRewriteContextProvider;
import org.elasticsearch.index.query.DataRewriteContext;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.seqno.RetentionLeaseStats;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.shard.GlobalCheckpointSyncer;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.store.CompositeIndexFoldersDeletionListener;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.FieldPredicate;
import org.elasticsearch.plugins.IndexStorePlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.query.QueryPhase;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.core.Strings.format;
import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX;
import static org.elasticsearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION;
import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery;
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
public class IndicesService extends AbstractLifecycleComponent
implements
IndicesClusterStateService.AllocatedIndices<IndexShard, IndexService>,
IndexService.ShardStoreDeleter {
private static final Logger logger = LogManager.getLogger(IndicesService.class);
public static final Setting<TimeValue> INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting(
"indices.cache.cleanup_interval",
TimeValue.timeValueMinutes(1),
Property.NodeScope
);
public static final Setting<Boolean> INDICES_ID_FIELD_DATA_ENABLED_SETTING = Setting.boolSetting(
"indices.id_field_data.enabled",
false,
Property.Dynamic,
Property.NodeScope
);
public static final Setting<Boolean> WRITE_DANGLING_INDICES_INFO_SETTING = Setting.boolSetting(
"gateway.write_dangling_indices_info",
true,
Setting.Property.NodeScope
);
static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported");
/**
* The node's settings.
*/
private final Settings settings;
private final PluginsService pluginsService;
private final NodeEnvironment nodeEnv;
private final XContentParserConfiguration parserConfig;
private final AnalysisRegistry analysisRegistry;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndexScopedSettings indexScopedSettings;
private final IndicesFieldDataCache indicesFieldDataCache;
private final CacheCleaner cacheCleaner;
private final ThreadPool threadPool;
private final CircuitBreakerService circuitBreakerService;
private final BigArrays bigArrays;
private final ScriptService scriptService;
private final ClusterService clusterService;
private final Client client;
private final FeatureService featureService;
private volatile Map<String, IndexService> indices = Map.of();
private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();
private final AtomicInteger numUncompletedDeletes = new AtomicInteger();
private final OldShardsStats oldShardsStats = new OldShardsStats();
private final MapperRegistry mapperRegistry;
private final NamedWriteableRegistry namedWriteableRegistry;
private final Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers;
private final IndexingMemoryController indexingMemoryController;
private final TimeValue cleanInterval;
final IndicesRequestCache indicesRequestCache; // pkg-private for testing
private final IndicesQueryCache indicesQueryCache;
private final MetaStateService metaStateService;
private final Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders;
private final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories;
private final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories;
private final IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListeners;
final AbstractRefCounted indicesRefCount; // pkg-private for testing
private final CountDownLatch closeLatch = new CountDownLatch(1);
private volatile boolean idFieldDataEnabled;
private volatile boolean allowExpensiveQueries;
private final Function<IndexMode, IdFieldMapper> idFieldMappers;
@Nullable
private final EsThreadPoolExecutor danglingIndicesThreadPoolExecutor;
private final Set<Index> danglingIndicesToWrite = ConcurrentCollections.newConcurrentSet();
private final boolean nodeWriteDanglingIndicesInfo;
private final ValuesSourceRegistry valuesSourceRegistry;
private final TimestampFieldMapperService timestampFieldMapperService;
private final CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator;
private final MapperMetrics mapperMetrics;
@Override
protected void doStart() {
// Start thread that will manage cleaning the field data cache periodically
threadPool.schedule(this.cacheCleaner, this.cleanInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE);
// Start watching for timestamp fields
clusterService.addStateApplier(timestampFieldMapperService);
}
@SuppressWarnings("this-escape")
IndicesService(IndicesServiceBuilder builder) {
this.settings = builder.settings;
this.threadPool = builder.threadPool;
this.pluginsService = builder.pluginsService;
this.nodeEnv = builder.nodeEnv;
this.parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE)
.withRegistry(builder.xContentRegistry);
this.valuesSourceRegistry = builder.valuesSourceRegistry;
this.analysisRegistry = builder.analysisRegistry;
this.indexNameExpressionResolver = builder.indexNameExpressionResolver;
this.indicesRequestCache = new IndicesRequestCache(settings);
this.indicesQueryCache = new IndicesQueryCache(settings);
this.mapperRegistry = builder.mapperRegistry;
this.namedWriteableRegistry = builder.namedWriteableRegistry;
indexingMemoryController = new IndexingMemoryController(
settings,
threadPool,
// ensure we pull an iter with new shards - flatten makes a copy
() -> Iterables.flatten(this).iterator()
);
this.indexScopedSettings = builder.indexScopedSettings;
this.circuitBreakerService = builder.circuitBreakerService;
this.bigArrays = builder.bigArrays;
this.scriptService = builder.scriptService;
this.clusterService = builder.clusterService;
this.client = builder.client;
this.featureService = builder.featureService;
this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings());
clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled);
this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
@Override
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
assert sizeInBytes >= 0
: "When reducing circuit breaker, it should be adjusted with a number higher or "
+ "equal to 0 and not ["
+ sizeInBytes
+ "]";
circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes);
}
});
this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings);
this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, threadPool, this.cleanInterval);
this.metaStateService = builder.metaStateService;
this.engineFactoryProviders = builder.engineFactoryProviders;
// do not allow any plugin-provided index store type to conflict with a built-in type
for (final String indexStoreType : builder.directoryFactories.keySet()) {
if (IndexModule.isBuiltinType(indexStoreType)) {
throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type");
}
}
this.directoryFactories = builder.directoryFactories;
this.recoveryStateFactories = builder.recoveryStateFactories;
this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners);
this.snapshotCommitSuppliers = builder.snapshotCommitSuppliers;
this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator;
this.mapperMetrics = builder.mapperMetrics;
// doClose() is called when shutting down a node, yet there might still be ongoing requests
// that we need to wait for before closing some resources such as the caches. In order to
// avoid closing these resources while ongoing requests are still being processed, we use a
// ref count which will only close them when both this service and all index services are
// actually closed
indicesRefCount = AbstractRefCounted.of(() -> {
try {
IOUtils.close(
analysisRegistry,
indexingMemoryController,
indicesFieldDataCache,
cacheCleaner,
indicesRequestCache,
indicesQueryCache
);
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
closeLatch.countDown();
}
});
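        // build one IdFieldMapper per IndexMode up front; the supplier lets each mapper observe
        // dynamic updates to the indices.id_field_data.enabled setting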
Map<IndexMode, IdFieldMapper> idFieldMappers = new EnumMap<>(IndexMode.class);
for (IndexMode mode : IndexMode.values()) {
idFieldMappers.put(mode, mode.buildIdFieldMapper(() -> idFieldDataEnabled));
}
this.idFieldMappers = idFieldMappers::get;
final String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings));
nodeWriteDanglingIndicesInfo = WRITE_DANGLING_INDICES_INFO_SETTING.get(settings);
danglingIndicesThreadPoolExecutor = nodeWriteDanglingIndicesInfo
? EsExecutors.newScaling(
nodeName + "/" + DANGLING_INDICES_UPDATE_THREAD_NAME,
1,
1,
0,
TimeUnit.MILLISECONDS,
true,
daemonThreadFactory(nodeName, DANGLING_INDICES_UPDATE_THREAD_NAME),
threadPool.getThreadContext()
)
: null;
this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings());
clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries);
this.timestampFieldMapperService = new TimestampFieldMapperService(settings, threadPool, this);
}
private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask";
public ClusterService clusterService() {
return clusterService;
}
@Override
protected void doStop() {
clusterService.removeApplier(timestampFieldMapperService);
timestampFieldMapperService.doStop();
ThreadPool.terminate(danglingIndicesThreadPoolExecutor, 10, TimeUnit.SECONDS);
ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, daemonThreadFactory(settings, "indices_shutdown"));
// Copy indices because we modify it asynchronously in the body of the loop
final Set<Index> indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet());
final CountDownLatch latch = new CountDownLatch(indices.size());
for (final Index index : indices) {
indicesStopExecutor.execute(
() -> ActionListener.run(
ActionListener.assertOnce(ActionListener.<Void>releasing(latch::countDown)),
l -> removeIndex(
index,
IndexRemovalReason.SHUTDOWN,
"shutdown",
EsExecutors.DIRECT_EXECUTOR_SERVICE /* node shutdown can be blocking */,
l
)
)
);
}
try {
latch.await();
} catch (InterruptedException e) {
// continue with shutdown
Thread.currentThread().interrupt();
} finally {
indicesStopExecutor.shutdown();
}
}
@Override
protected void doClose() throws IOException {
indicesRefCount.decRef();
}
/**
* Wait for this {@link IndicesService} to be effectively closed. When this returns {@code true}, all shards and shard stores
* are closed and all shard {@link CacheHelper#addClosedListener(org.apache.lucene.index.IndexReader.ClosedListener) closed
* listeners} have run. However some {@link IndexEventListener#onStoreClosed(ShardId) shard closed listeners} might not have
* run.
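     * <p>A typical shutdown sequence (a sketch, not mandated by this class) is {@code stop()}, then
     * {@code close()}, then {@code awaitClose(30, TimeUnit.SECONDS)}.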
* @return true if all shards closed within the given timeout, false otherwise
* @throws InterruptedException if the current thread got interrupted while waiting for shards to close
*/
public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {
return closeLatch.await(timeout, timeUnit);
}
public NodeIndicesStats stats(CommonStatsFlags flags, boolean includeShardsStats) {
CommonStats commonStats = new CommonStats(flags);
// the cumulative statistics also account for shards that are no longer on this node, which is tracked by oldShardsStats
for (Flag flag : flags.getFlags()) {
switch (flag) {
case Get -> commonStats.get.add(oldShardsStats.getStats);
case Indexing -> commonStats.indexing.add(oldShardsStats.indexingStats);
case Search -> commonStats.search.add(oldShardsStats.searchStats);
case Merge -> commonStats.merge.add(oldShardsStats.mergeStats);
case Refresh -> commonStats.refresh.add(oldShardsStats.refreshStats);
case Recovery -> commonStats.recoveryStats.add(oldShardsStats.recoveryStats);
case Flush -> commonStats.flush.add(oldShardsStats.flushStats);
case Bulk -> commonStats.bulk.add(oldShardsStats.bulkStats);
}
}
return new NodeIndicesStats(commonStats, statsByIndex(this, flags), statsByShard(this, flags), includeShardsStats);
}
static Map<Index, CommonStats> statsByIndex(final IndicesService indicesService, final CommonStatsFlags flags) {
        // Currently the Mappings flag is the only possible index-level flag.
if (flags.isSet(CommonStatsFlags.Flag.Mappings) == false) {
return Map.of();
}
final Map<Index, CommonStats> statsByIndex = Maps.newHashMapWithExpectedSize(indicesService.indices.size());
for (final IndexService indexService : indicesService) {
Index index = indexService.index();
CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE);
commonStats.nodeMappings = indexService.getNodeMappingStats();
var existing = statsByIndex.putIfAbsent(index, commonStats);
assert existing == null;
}
return statsByIndex;
}
static Map<Index, List<IndexShardStats>> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) {
final Map<Index, List<IndexShardStats>> statsByShard = new HashMap<>();
for (final IndexService indexService : indicesService) {
for (final IndexShard indexShard : indexService) {
try {
final IndexShardStats indexShardStats = indicesService.indexShardStats(indicesService, indexShard, flags);
if (indexShardStats == null) {
continue;
}
if (statsByShard.containsKey(indexService.index()) == false) {
statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats));
} else {
statsByShard.get(indexService.index()).add(indexShardStats);
}
} catch (IllegalIndexShardStateException | AlreadyClosedException e) {
// we can safely ignore illegal state on ones that are closing for example
logger.trace(() -> format("%s ignoring shard stats", indexShard.shardId()), e);
}
}
}
return statsByShard;
}
IndexShardStats indexShardStats(final IndicesService indicesService, final IndexShard indexShard, final CommonStatsFlags flags) {
if (indexShard.routingEntry() == null) {
return null;
}
CommitStats commitStats;
SeqNoStats seqNoStats;
RetentionLeaseStats retentionLeaseStats;
try {
commitStats = indexShard.commitStats();
seqNoStats = indexShard.seqNoStats();
retentionLeaseStats = indexShard.getRetentionLeaseStats();
} catch (AlreadyClosedException e) {
// shard is closed - no stats is fine
commitStats = null;
seqNoStats = null;
retentionLeaseStats = null;
}
return new IndexShardStats(
indexShard.shardId(),
new ShardStats[] {
new ShardStats(
indexShard.routingEntry(),
indexShard.shardPath(),
CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, flags),
commitStats,
seqNoStats,
retentionLeaseStats,
indexShard.isSearchIdle(),
indexShard.searchIdleTime()
) }
);
}
/**
* Checks if changes (adding / removing) indices, shards and so on are allowed.
*
* @throws IllegalStateException if no changes allowed.
*/
private void ensureChangesAllowed() {
if (lifecycle.started() == false) {
throw new IllegalStateException("Can't make changes to indices service, node is closed");
}
}
@Override
public Iterator<IndexService> iterator() {
return indices.values().iterator();
}
public boolean hasIndex(Index index) {
return indices.containsKey(index.getUUID());
}
/**
     * Returns an IndexService for the specified index if it exists, otherwise returns <code>null</code>.
*/
@Override
@Nullable
public IndexService indexService(Index index) {
return indices.get(index.getUUID());
}
/**
     * Returns an IndexService for the specified index if it exists, otherwise an {@link IndexNotFoundException} is thrown.
*/
public IndexService indexServiceSafe(Index index) {
IndexService indexService = indices.get(index.getUUID());
if (indexService == null) {
throw new IndexNotFoundException(index);
}
assert indexService.indexUUID().equals(index.getUUID())
: "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID();
return indexService;
}
/**
* Creates a new {@link IndexService} for the given metadata.
*
* @param indexMetadata the index metadata to create the index for
* @param builtInListeners a list of built-in lifecycle {@link IndexEventListener} that should be used alongside with the
* per-index listeners
* @throws ResourceAlreadyExistsException if the index already exists.
*/
@Override
public synchronized IndexService createIndex(
final IndexMetadata indexMetadata,
final List<IndexEventListener> builtInListeners,
final boolean writeDanglingIndices
) throws IOException {
ensureChangesAllowed();
if (indexMetadata.getIndexUUID().equals(IndexMetadata.INDEX_UUID_NA_VALUE)) {
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetadata.getIndexUUID() + "]");
}
final Index index = indexMetadata.getIndex();
if (hasIndex(index)) {
throw new ResourceAlreadyExistsException(index);
}
List<IndexEventListener> finalListeners = new ArrayList<>(builtInListeners);
final IndexEventListener onStoreClose = new IndexEventListener() {
@Override
public void onStoreCreated(ShardId shardId) {
indicesRefCount.incRef();
}
@Override
public void onStoreClosed(ShardId shardId) {
try {
indicesQueryCache.onClose(shardId);
} finally {
indicesRefCount.decRef();
}
}
};
final IndexEventListener beforeIndexShardRecovery = new IndexEventListener() {
volatile boolean reloaded;
@Override
public void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings, ActionListener<Void> listener) {
try {
if (indexShard.mapperService() != null) {
// we need to reload once, not on every shard recovery in case multiple shards are on the same node
if (reloaded == false) {
synchronized (indexShard.mapperService()) {
if (reloaded == false) {
// we finish loading analyzers from resources here
// during shard recovery in the generic thread pool,
// as this may require longer running operations and blocking calls
indexShard.mapperService().reloadSearchAnalyzers(getAnalysis(), null, false);
}
reloaded = true;
}
}
}
listener.onResponse(null);
} catch (Exception e) {
listener.onFailure(e);
}
}
};
finalListeners.add(onStoreClose);
finalListeners.add(oldShardsStats);
finalListeners.add(beforeIndexShardRecovery);
IndexService indexService;
try (var ignored = threadPool.getThreadContext().newStoredContext()) {
indexService = createIndexService(
CREATE_INDEX,
indexMetadata,
indicesQueryCache,
indicesFieldDataCache,
finalListeners,
indexingMemoryController
);
}
boolean success = false;
try {
if (writeDanglingIndices && nodeWriteDanglingIndicesInfo) {
indexService.addMetadataListener(imd -> updateDanglingIndicesInfo(index));
}
indexService.getIndexEventListener().afterIndexCreated(indexService);
indices = Maps.copyMapWithAddedEntry(indices, index.getUUID(), indexService);
if (writeDanglingIndices) {
if (nodeWriteDanglingIndicesInfo) {
updateDanglingIndicesInfo(index);
} else {
indexService.deleteDanglingIndicesInfo();
}
}
success = true;
return indexService;
} finally {
if (success == false) {
CloseUtils.executeDirectly(l -> indexService.close("plugins_failed", true, CloseUtils.NO_SHARDS_CREATED_EXECUTOR, l));
}
}
}
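    /**
     * Creates a short-lived, unregistered {@link IndexService} for the given metadata, applies the given
     * function to it, and closes it again. The listeners installed below assert that no shards or stores
     * are ever created for such a temporary index.
     */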
public <T, E extends Exception> T withTempIndexService(
final IndexMetadata indexMetadata,
CheckedFunction<IndexService, T, E> indexServiceConsumer
) throws IOException, E {
final Index index = indexMetadata.getIndex();
if (hasIndex(index)) {
throw new ResourceAlreadyExistsException(index);
}
List<IndexEventListener> finalListeners = List.of(
// double check that shard is not created.
new IndexEventListener() {
@Override
public void beforeIndexShardCreated(ShardRouting shardRouting, Settings indexSettings) {
assert false : "temp index should not trigger shard creation";
throw new ElasticsearchException("temp index should not trigger shard creation [{}]", index);
}
@Override
public void onStoreCreated(ShardId shardId) {
assert false : "temp index should not trigger store creation";
throw new ElasticsearchException("temp index should not trigger store creation [{}]", index);
}
}
);
final IndexService indexService = createIndexService(
CREATE_INDEX,
indexMetadata,
indicesQueryCache,
indicesFieldDataCache,
finalListeners,
indexingMemoryController
);
try (
Closeable ignored = () -> CloseUtils.executeDirectly(
l -> indexService.close("temp", false, CloseUtils.NO_SHARDS_CREATED_EXECUTOR, l)
)
) {
return indexServiceConsumer.apply(indexService);
}
}
/**
* This creates a new IndexService without registering it
*/
private synchronized IndexService createIndexService(
IndexService.IndexCreationContext indexCreationContext,
IndexMetadata indexMetadata,
IndicesQueryCache indicesQueryCache,
IndicesFieldDataCache indicesFieldDataCache,
List<IndexEventListener> builtInListeners,
IndexingOperationListener... indexingOperationListeners
) throws IOException {
final IndexSettings idxSettings = new IndexSettings(indexMetadata, settings, indexScopedSettings);
// we ignore private settings since they are not registered settings
indexScopedSettings.validate(indexMetadata.getSettings(), true, true, true);
logger.debug(
"creating Index [{}], shards [{}]/[{}] - reason [{}]",
indexMetadata.getIndex(),
idxSettings.getNumberOfShards(),
idxSettings.getNumberOfReplicas(),
indexCreationContext
);
final IndexModule indexModule = new IndexModule(
idxSettings,
analysisRegistry,
getEngineFactory(idxSettings),
directoryFactories,
() -> allowExpensiveQueries,
indexNameExpressionResolver,
recoveryStateFactories,
loadSlowLogFieldProvider(),
mapperMetrics
);
for (IndexingOperationListener operationListener : indexingOperationListeners) {
indexModule.addIndexOperationListener(operationListener);
}
pluginsService.forEach(p -> p.onIndexModule(indexModule));
for (IndexEventListener listener : builtInListeners) {
indexModule.addIndexEventListener(listener);
}
return indexModule.newIndexService(
indexCreationContext,
nodeEnv,
parserConfig,
this,
circuitBreakerService,
bigArrays,
threadPool,
scriptService,
clusterService,
client,
indicesQueryCache,
mapperRegistry,
indicesFieldDataCache,
namedWriteableRegistry,
idFieldMappers.apply(idxSettings.getMode()),
valuesSourceRegistry,
indexFoldersDeletionListeners,
snapshotCommitSuppliers
);
}
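    /**
     * Selects the engine factory for an index: a {@link NoOpEngine} for closed indices, otherwise the
     * single applicable plugin-provided factory, falling back to {@link InternalEngineFactory}. More
     * than one applicable plugin factory is treated as an illegal state.
     */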
private EngineFactory getEngineFactory(final IndexSettings idxSettings) {
final IndexMetadata indexMetadata = idxSettings.getIndexMetadata();
if (indexMetadata != null && indexMetadata.getState() == IndexMetadata.State.CLOSE) {
// NoOpEngine takes precedence as long as the index is closed
return NoOpEngine::new;
}
final List<Optional<EngineFactory>> engineFactories = engineFactoryProviders.stream()
.map(engineFactoryProvider -> engineFactoryProvider.apply(idxSettings))
.filter(maybe -> Objects.requireNonNull(maybe).isPresent())
.toList();
if (engineFactories.isEmpty()) {
return new InternalEngineFactory();
} else if (engineFactories.size() == 1) {
assert engineFactories.get(0).isPresent();
return engineFactories.get(0).get();
} else {
final String message = String.format(
Locale.ROOT,
"multiple engine factories provided for %s: %s",
idxSettings.getIndex(),
engineFactories.stream().map(t -> {
assert t.isPresent();
return "[" + t.get().getClass().getName() + "]";
}).collect(Collectors.joining(","))
);
throw new IllegalStateException(message);
}
}
/**
     * Creates a new mapper service for the given index, in order to do administrative work like mapping updates.
* This *should not* be used for document parsing. Doing so will result in an exception.
*
* Note: the returned {@link MapperService} should be closed when unneeded.
*/
public synchronized MapperService createIndexMapperServiceForValidation(IndexMetadata indexMetadata) throws IOException {
final IndexSettings idxSettings = new IndexSettings(indexMetadata, this.settings, indexScopedSettings);
final IndexModule indexModule = new IndexModule(
idxSettings,
analysisRegistry,
getEngineFactory(idxSettings),
directoryFactories,
() -> allowExpensiveQueries,
indexNameExpressionResolver,
recoveryStateFactories,
loadSlowLogFieldProvider(),
mapperMetrics
);
pluginsService.forEach(p -> p.onIndexModule(indexModule));
return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService);
}
/**
* This method verifies that the given {@code metadata} holds sane values to create an {@link IndexService}.
     * This method tries to update the metadata of the created {@link IndexService} if the given {@code metadataUpdate}
* is different from the given {@code metadata}.
* This method will throw an exception if the creation or the update fails.
* The created {@link IndexService} will not be registered and will be closed immediately.
*/
public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetadata metadataUpdate) throws IOException {
final List<Closeable> closeables = new ArrayList<>();
try {
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
});
closeables.add(indicesFieldDataCache);
IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings);
closeables.add(indicesQueryCache);
// this will also fail if some plugin fails etc. which is nice since we can verify that early
final IndexService service = createIndexService(
METADATA_VERIFICATION,
metadata,
indicesQueryCache,
indicesFieldDataCache,
emptyList()
);
closeables.add(
() -> CloseUtils.executeDirectly(
l -> service.close("metadata verification", false, CloseUtils.NO_SHARDS_CREATED_EXECUTOR, l)
)
);
service.mapperService().merge(metadata, MapperService.MergeReason.MAPPING_RECOVERY);
if (metadata.equals(metadataUpdate) == false) {
service.updateMetadata(metadata, metadataUpdate);
}
} finally {
IOUtils.close(closeables);
}
}
@Override
public void createShard(
final ShardRouting shardRouting,
final PeerRecoveryTargetService recoveryTargetService,
final PeerRecoveryTargetService.RecoveryListener recoveryListener,
final RepositoriesService repositoriesService,
final Consumer<IndexShard.ShardFailure> onShardFailure,
final GlobalCheckpointSyncer globalCheckpointSyncer,
final RetentionLeaseSyncer retentionLeaseSyncer,
final DiscoveryNode targetNode,
final DiscoveryNode sourceNode,
long clusterStateVersion
) throws IOException {
Objects.requireNonNull(retentionLeaseSyncer);
ensureChangesAllowed();
IndexService indexService = indexService(shardRouting.index());
assert indexService != null;
RecoveryState recoveryState = indexService.createRecoveryState(shardRouting, targetNode, sourceNode);
IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer);
indexShard.addShardFailureCallback(onShardFailure);
indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, (mapping, listener) -> {
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS
: "mapping update consumer only required by local shards recovery";
            AcknowledgedRequest<PutMappingRequest> putMappingRequestAcknowledgedRequest = new PutMappingRequest()
                // concrete index - no name clash, it uses uuid
                .setConcreteIndex(shardRouting.index())
                .source(mapping.source().string(), XContentType.JSON);
client.execute(
featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT)
? TransportAutoPutMappingAction.TYPE
: TransportPutMappingAction.TYPE,
putMappingRequestAcknowledgedRequest.ackTimeout(TimeValue.MAX_VALUE).masterNodeTimeout(TimeValue.MAX_VALUE),
new RefCountAwareThreadedActionListener<>(threadPool.generic(), listener.map(ignored -> null))
);
}, this, clusterStateVersion);
}
@Override
public void removeIndex(
final Index index,
final IndexRemovalReason reason,
final String extraInfo,
Executor shardCloseExecutor,
ActionListener<Void> shardsClosedListener
) {
final String indexName = index.getName();
ActionListener.run(ActionListener.assertOnce(shardsClosedListener.delegateResponse((l, e) -> {
logger.warn(() -> format("failed to remove index %s ([%s][%s])", index, reason, extraInfo), e);
l.onResponse(null);
})), l -> {
final IndexService indexService;
final IndexEventListener listener;
synchronized (this) {
if (hasIndex(index)) {
logger.debug("[{}] closing ... (reason [{}])", indexName, reason);
indexService = indices.get(index.getUUID());
assert indexService != null : "IndexService is null for index: " + index;
indices = Maps.copyMapWithRemovedEntry(indices, index.getUUID());
listener = indexService.getIndexEventListener();
} else {
indexService = null;
listener = null;
}
}
assert (indexService == null) == (listener == null) : indexService + " vs " + listener;
if (indexService == null) {
l.onResponse(null);
return;
}
listener.beforeIndexRemoved(indexService, reason);
logger.debug("{} closing index service (reason [{}][{}])", index, reason, extraInfo);
indexService.close(extraInfo, reason == IndexRemovalReason.DELETED, shardCloseExecutor, ActionListener.runBefore(l, () -> {
logger.debug("{} closed... (reason [{}][{}])", index, reason, extraInfo);
final IndexSettings indexSettings = indexService.getIndexSettings();
listener.afterIndexRemoved(indexService.index(), indexSettings, reason);
if (reason == IndexRemovalReason.DELETED) {
// now we are done - try to wipe data on disk if possible
deleteIndexStore(extraInfo, indexService.index(), indexSettings);
}
}));
});
}
public IndicesFieldDataCache getIndicesFieldDataCache() {
return indicesFieldDataCache;
}
public CircuitBreakerService getCircuitBreakerService() {
return circuitBreakerService;
}
public IndicesQueryCache getIndicesQueryCache() {
return indicesQueryCache;
}
static class OldShardsStats implements IndexEventListener {
final SearchStats searchStats = new SearchStats();
final GetStats getStats = new GetStats();
final IndexingStats indexingStats = new IndexingStats();
final MergeStats mergeStats = new MergeStats();
final RefreshStats refreshStats = new RefreshStats();
final FlushStats flushStats = new FlushStats();
final RecoveryStats recoveryStats = new RecoveryStats();
final BulkStats bulkStats = new BulkStats();
@Override
public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
if (indexShard != null) {
getStats.addTotals(indexShard.getStats());
indexingStats.addTotals(indexShard.indexingStats());
// if this index was closed or deleted, we should eliminate the effect of the current scroll for this shard
searchStats.addTotalsForClosingShard(indexShard.searchStats());
mergeStats.addTotals(indexShard.mergeStats());
refreshStats.addTotals(indexShard.refreshStats());
flushStats.addTotals(indexShard.flushStats());
recoveryStats.addTotals(indexShard.recoveryStats());
bulkStats.addTotals(indexShard.bulkStats());
}
}
@Override
public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) {
}
}
/**
* Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
     * but does not deal with in-memory structures. For those, call {@link #removeIndex}
*/
@Override
public void deleteUnassignedIndex(String reason, IndexMetadata oldIndexMetadata, ClusterState clusterState) {
if (nodeEnv.hasNodeFile()) {
Index index = oldIndexMetadata.getIndex();
try {
if (clusterState.metadata().hasIndex(index)) {
final IndexMetadata currentMetadata = clusterState.metadata().index(index);
throw new IllegalStateException(
"Can't delete unassigned index store for ["
+ index.getName()
+ "] - it's still part "
+ "of the cluster state ["
+ currentMetadata.getIndexUUID()
+ "] ["
+ oldIndexMetadata.getIndexUUID()
+ "]"
);
}
deleteIndexStore(reason, oldIndexMetadata);
} catch (Exception e) {
logger.warn(() -> format("[%s] failed to delete unassigned index (reason [%s])", oldIndexMetadata.getIndex(), reason), e);
}
}
}
/**
     * Deletes the index store, trying to acquire all shard locks for this index.
* This method will delete the metadata for the index even if the actual shards can't be locked.
*
* Package private for testing
*/
void deleteIndexStore(String reason, IndexMetadata metadata) throws IOException {
if (nodeEnv.hasNodeFile()) {
synchronized (this) {
Index index = metadata.getIndex();
if (hasIndex(index)) {
                String localUUID = indexService(index).indexUUID();
                throw new IllegalStateException(
                    "Can't delete index store for ["
                        + index.getName()
                        + "] - it's still part of the indices service ["
                        + localUUID
+ "] ["
+ metadata.getIndexUUID()
+ "]"
);
}
}
final IndexSettings indexSettings = buildIndexSettings(metadata);
deleteIndexStore(reason, indexSettings.getIndex(), indexSettings);
}
}
private void deleteIndexStore(String reason, Index index, IndexSettings indexSettings) throws IOException {
deleteIndexStoreIfDeletionAllowed(reason, index, indexSettings, DEFAULT_INDEX_DELETION_PREDICATE);
}
private void deleteIndexStoreIfDeletionAllowed(
final String reason,
final Index index,
final IndexSettings indexSettings,
final IndexDeletionAllowedPredicate predicate
) throws IOException {
boolean success = false;
try {
            // we are trying to delete the index store here - not a big deal if the lock can't be obtained,
            // since the store metadata gets wiped anyway even without the lock. This is just best effort,
            // as every shard deletes its content under the shard lock it owns.
logger.debug("{} deleting index store reason [{}]", index, reason);
if (predicate.apply(index, indexSettings)) {
                // it's safe to delete all index metadata and shard data
nodeEnv.deleteIndexDirectorySafe(
index,
0,
indexSettings,
paths -> indexFoldersDeletionListeners.beforeIndexFoldersDeleted(index, indexSettings, paths)
);
}
success = true;
} catch (ShardLockObtainFailedException ex) {
logger.debug(() -> format("%s failed to delete index store - at least one shards is still locked", index), ex);
} catch (Exception ex) {
logger.warn(() -> format("%s failed to delete index", index), ex);
} finally {
if (success == false) {
addPendingDelete(index, indexSettings);
}
// this is a pure protection to make sure this index doesn't get re-imported as a dangling index.
            // in the future we should rather write a tombstone than wipe the metadata.
MetadataStateFormat.deleteMetaState(nodeEnv.indexPaths(index));
}
}
/**
* Deletes the shard with an already acquired shard lock.
* @param reason the reason for the shard deletion
* @param lock the lock of the shard to delete
* @param indexSettings the shards index settings.
* @throws IOException if an IOException occurs
*/
@Override
public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException {
ShardId shardId = lock.getShardId();
logger.trace("{} deleting shard reason [{}]", shardId, reason);
nodeEnv.deleteShardDirectoryUnderLock(
lock,
indexSettings,
paths -> indexFoldersDeletionListeners.beforeShardFoldersDeleted(shardId, indexSettings, paths)
);
}
/**
     * This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deletion
     * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)}
     * or if the shard's lock can not be acquired.
*
* On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove
* the index folder as well.
*
* @param reason the reason for the shard deletion
* @param shardId the shards ID to delete
     * @param clusterState the current cluster state. This is required to access the index settings etc.
* @throws IOException if an IOException occurs
*/
public void deleteShardStore(String reason, ShardId shardId, ClusterState clusterState) throws IOException,
ShardLockObtainFailedException {
final IndexMetadata metadata = clusterState.getMetadata().indices().get(shardId.getIndexName());
final IndexSettings indexSettings = buildIndexSettings(metadata);
ShardDeletionCheckResult shardDeletionCheckResult = canDeleteShardContent(shardId, indexSettings);
if (shardDeletionCheckResult != ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE) {
throw new IllegalStateException("Can't delete shard " + shardId + " (cause: " + shardDeletionCheckResult + ")");
}
nodeEnv.deleteShardDirectorySafe(
shardId,
indexSettings,
paths -> indexFoldersDeletionListeners.beforeShardFoldersDeleted(shardId, indexSettings, paths)
);
logger.debug("{} deleted shard reason [{}]", shardId, reason);
if (canDeleteIndexContents(shardId.getIndex())) {
if (nodeEnv.findAllShardIds(shardId.getIndex()).isEmpty()) {
try {
// note that deleteIndexStore have more safety checks and may throw an exception if index was concurrently created.
deleteIndexStore("no longer used", metadata);
} catch (Exception e) {
// wrap the exception to indicate we already deleted the shard
throw new ElasticsearchException("failed to delete unused index after deleting its last shard (" + shardId + ")", e);
}
} else {
logger.trace("[{}] still has shard stores, leaving as is", shardId.getIndex());
}
}
}
/**
* This method returns true if the current node is allowed to delete the given index.
* This is the case if the index is deleted in the metadata or there is no allocation
* on the local node and the index isn't on a shared file system.
* @param index {@code Index} to check whether deletion is allowed
* @return true if the index can be deleted on this node
*/
public boolean canDeleteIndexContents(Index index) {
        // index contents can be deleted if it's an already closed index (so all its resources have
// already been relinquished)
final IndexService indexService = indexService(index);
return indexService == null && nodeEnv.hasNodeFile();
}
/**
* Verify that the contents on disk for the given index is deleted; if not, delete the contents.
* This method assumes that an index is already deleted in the cluster state and/or explicitly
* through index tombstones.
* @param index {@code Index} to make sure its deleted from disk
* @param clusterState {@code ClusterState} to ensure the index is not part of it
* @return IndexMetadata for the index loaded from disk
*/
@Override
@Nullable
public IndexMetadata verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
// this method should only be called when we know the index (name + uuid) is not part of the cluster state
if (clusterState.metadata().index(index) != null) {
throw new IllegalStateException("Cannot delete index [" + index + "], it is still part of the cluster state.");
}
if (nodeEnv.hasNodeFile() && FileSystemUtils.exists(nodeEnv.indexPaths(index))) {
final IndexMetadata metadata;
try {
metadata = metaStateService.loadIndexState(index);
if (metadata == null) {
return null;
}
} catch (Exception e) {
logger.warn(
() -> format("[%s] failed to load state file from a stale deleted index, " + "folders will be left on disk", index),
e
);
return null;
}
final IndexSettings indexSettings = buildIndexSettings(metadata);
try {
deleteIndexStoreIfDeletionAllowed("stale deleted index", index, indexSettings, ALWAYS_TRUE);
} catch (Exception e) {
// we just warn about the exception here because if deleteIndexStoreIfDeletionAllowed
// throws an exception, it gets added to the list of pending deletes to be tried again
logger.warn(() -> "[" + metadata.getIndex() + "] failed to delete index on disk", e);
}
return metadata;
}
return null;
}
/**
     * Result type returned by {@link #canDeleteShardContent(ShardId, IndexSettings)}, signaling different reasons why a shard
     * can / cannot be deleted.
*/
public enum ShardDeletionCheckResult {
FOLDER_FOUND_CAN_DELETE, // shard data exists and can be deleted
STILL_ALLOCATED, // the shard is still allocated / active on this node
NO_FOLDER_FOUND // the shards data locations do not exist
}
/**
* Returns <code>ShardDeletionCheckResult</code> signaling whether the shards content for the given shard can be deleted.
*
* @param shardId the shard to delete.
     * @param indexSettings the shard's relevant {@link IndexSettings}. This is required to access the index settings etc.
*/
public ShardDeletionCheckResult canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
assert shardId.getIndex().equals(indexSettings.getIndex());
final IndexService indexService = indexService(shardId.getIndex());
final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id());
if (isAllocated) {
return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard
} else if (indexSettings.hasCustomDataPath()) {
            // let's see if it's on a custom path (return NO_FOLDER_FOUND if the shard doesn't exist)
// we don't need to delete anything that is not there
return Files.exists(nodeEnv.resolveCustomLocation(indexSettings.customDataPath(), shardId))
? ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE
: ShardDeletionCheckResult.NO_FOLDER_FOUND;
} else {
            // let's see if its path is available (return NO_FOLDER_FOUND if the shard doesn't exist)
// we don't need to delete anything that is not there
return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId))
? ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE
: ShardDeletionCheckResult.NO_FOLDER_FOUND;
}
}
private IndexSettings buildIndexSettings(IndexMetadata metadata) {
// play safe here and make sure that we take node level settings into account.
// we might run on nodes where we use shard FS and then in the future don't delete
// actual content.
return new IndexSettings(metadata, settings);
}
/**
* Adds a pending delete for the given index shard.
*/
@Override
public void addPendingDelete(ShardId shardId, IndexSettings settings) {
if (shardId == null) {
throw new IllegalArgumentException("shardId must not be null");
}
if (settings == null) {
throw new IllegalArgumentException("settings must not be null");
}
PendingDelete pendingDelete = new PendingDelete(shardId, settings);
addPendingDelete(shardId.getIndex(), pendingDelete);
}
/**
* Adds a pending delete for the given index.
*/
public void addPendingDelete(Index index, IndexSettings settings) {
PendingDelete pendingDelete = new PendingDelete(index, settings);
addPendingDelete(index, pendingDelete);
}
private void addPendingDelete(Index index, PendingDelete pendingDelete) {
synchronized (pendingDeletes) {
pendingDeletes.computeIfAbsent(index, k -> new ArrayList<>()).add(pendingDelete);
numUncompletedDeletes.incrementAndGet();
}
}
private static final class PendingDelete implements Comparable<PendingDelete> {
final Index index;
final int shardId;
final IndexSettings settings;
final boolean deleteIndex;
/**
         * Creates a new pending delete of a shard
*/
PendingDelete(ShardId shardId, IndexSettings settings) {
this.index = shardId.getIndex();
this.shardId = shardId.getId();
this.settings = settings;
this.deleteIndex = false;
}
/**
         * Creates a new pending delete of an index
*/
PendingDelete(Index index, IndexSettings settings) {
this.index = index;
this.shardId = -1;
this.settings = settings;
this.deleteIndex = true;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[").append(index).append("]");
if (shardId != -1) {
sb.append("[").append(shardId).append("]");
}
return sb.toString();
}
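        // shardId is -1 for index-level deletes, so ascending order puts index deletes before
        // shard deletes; the timSort in processPendingDeletes relies on this ordering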
@Override
public int compareTo(PendingDelete o) {
return Integer.compare(shardId, o.shardId);
}
}
/**
* Processes all pending deletes for the given index. This method will acquire all locks for the given index and will
* process all pending deletes for this index. Pending deletes might occur if the OS doesn't allow deletion of files because
     * they are used by a different process, e.g. on Windows where files might still be open by a virus scanner. On a shared
     * filesystem a replica might not have been closed when the primary is deleted, causing problems on delete calls, so we
     * schedule their deletes for later.
* @param index the index to process the pending deletes for
* @param timeout the timeout used for processing pending deletes
*/
@Override
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout) throws IOException, InterruptedException,
ShardLockObtainFailedException {
logger.debug("{} processing pending deletes", index);
final long startTimeNS = System.nanoTime();
final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, "process pending deletes", timeout.millis());
int numRemoved = 0;
try {
Map<ShardId, ShardLock> locks = new HashMap<>();
for (ShardLock lock : shardLocks) {
locks.put(lock.getShardId(), lock);
}
final List<PendingDelete> remove;
synchronized (pendingDeletes) {
remove = pendingDeletes.remove(index);
}
if (remove != null && remove.isEmpty() == false) {
numRemoved = remove.size();
CollectionUtil.timSort(remove); // make sure we delete indices first
final long maxSleepTimeMs = 10 * 1000; // ensure we retry after 10 sec
long sleepTime = 10;
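                // exponential backoff between retries: start at 10 ms and double each round,
                // capped at maxSleepTimeMs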
do {
if (remove.isEmpty()) {
break;
}
Iterator<PendingDelete> iterator = remove.iterator();
while (iterator.hasNext()) {
PendingDelete delete = iterator.next();
if (delete.deleteIndex) {
assert delete.shardId == -1;
logger.debug("{} deleting index store reason [{}]", index, "pending delete");
try {
nodeEnv.deleteIndexDirectoryUnderLock(
index,
indexSettings,
paths -> indexFoldersDeletionListeners.beforeIndexFoldersDeleted(index, indexSettings, paths)
);
iterator.remove();
} catch (IOException ex) {
logger.debug(() -> format("%s retry pending delete", index), ex);
}
} else {
assert delete.shardId != -1;
final ShardId shardId = new ShardId(delete.index, delete.shardId);
final ShardLock shardLock = locks.get(shardId);
if (shardLock != null) {
try {
deleteShardStore("pending delete", shardLock, delete.settings);
iterator.remove();
} catch (IOException ex) {
logger.debug(() -> format("%s retry pending delete", shardLock.getShardId()), ex);
}
} else {
logger.warn("{} no shard lock for pending delete", delete.shardId);
iterator.remove();
}
}
}
if (remove.isEmpty() == false) {
logger.warn("{} still pending deletes present for shards {} - retrying", index, remove.toString());
Thread.sleep(sleepTime);
sleepTime = Math.min(maxSleepTimeMs, sleepTime * 2); // increase the sleep time gradually
logger.debug("{} schedule pending delete retry after {} ms", index, sleepTime);
}
} while ((System.nanoTime() - startTimeNS) < timeout.nanos());
}
} finally {
IOUtils.close(shardLocks);
if (numRemoved > 0) {
int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved);
assert remainingUncompletedDeletes >= 0;
}
}
}
int numPendingDeletes(Index index) {
synchronized (pendingDeletes) {
List<PendingDelete> deleteList = pendingDeletes.get(index);
if (deleteList == null) {
return 0;
}
return deleteList.size();
}
}
// pkg-private for testing
SlowLogFieldProvider loadSlowLogFieldProvider() {
List<? extends SlowLogFieldProvider> slowLogFieldProviders = pluginsService.loadServiceProviders(SlowLogFieldProvider.class);
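        // Note: the merges below use Collectors.toMap, which throws an IllegalStateException on
        // duplicate keys, so two providers contributing the same field name would fail the merge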
return new SlowLogFieldProvider() {
@Override
public void init(IndexSettings indexSettings) {
slowLogFieldProviders.forEach(provider -> provider.init(indexSettings));
}
@Override
public Map<String, String> indexSlowLogFields() {
return slowLogFieldProviders.stream()
.flatMap(provider -> provider.indexSlowLogFields().entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
@Override
public Map<String, String> searchSlowLogFields() {
return slowLogFieldProviders.stream()
.flatMap(provider -> provider.searchSlowLogFields().entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
};
}
/**
* Checks if all pending deletes have completed. Used by tests to ensure we don't check directory contents
     * while deletion is still ongoing. The reason is that, on Windows, browsing the directory contents can interfere
* with the deletion process and delay it unnecessarily.
*/
public boolean hasUncompletedPendingDeletes() {
return numUncompletedDeletes.get() > 0;
}
public AnalysisRegistry getAnalysis() {
return analysisRegistry;
}
/**
* FieldDataCacheCleaner is a scheduled Runnable used to clean a Guava cache
* periodically. In this case it is the field data cache, because a cache that
* has an entry invalidated may not clean up the entry if it is not read from
* or written to after invalidation.
*/
private static final class CacheCleaner implements Runnable, Releasable {
private final IndicesFieldDataCache cache;
private final ThreadPool threadPool;
private final TimeValue interval;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final IndicesRequestCache requestCache;
CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, ThreadPool threadPool, TimeValue interval) {
this.cache = cache;
this.requestCache = requestCache;
this.threadPool = threadPool;
this.interval = interval;
}
@Override
public void run() {
long startTimeNS = System.nanoTime();
if (logger.isTraceEnabled()) {
logger.trace("running periodic field data cache cleanup");
}
try {
this.cache.getCache().refresh();
} catch (Exception e) {
logger.warn("Exception during periodic field data cache cleanup:", e);
}
if (logger.isTraceEnabled()) {
logger.trace(
"periodic field data cache cleanup finished in {} milliseconds",
TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)
);
}
try {
this.requestCache.cleanCache();
} catch (Exception e) {
logger.warn("Exception during periodic request cache cleanup:", e);
}
// Reschedule itself to run again if not closed
if (closed.get() == false) {
threadPool.scheduleUnlessShuttingDown(interval, EsExecutors.DIRECT_EXECUTOR_SERVICE, this);
}
}
@Override
public void close() {
closed.compareAndSet(false, true);
}
}
/**
* Can the shard request be cached at all?
*/
public static boolean canCache(ShardSearchRequest request, SearchContext context) {
// Queries that create a scroll context cannot use the cache.
// They modify the search context during their execution so using the cache
// may invalidate the scroll for the next query.
if (request.scroll() != null) {
return false;
}
// We cannot cache with DFS because results depend not only on the content of the index but also
// on the overridden statistics. So if you ran two queries on the same index with different stats
// (because another shard was updated) you would get wrong results because of the scores
// (think about top_hits aggs or scripts using the score)
if (SearchType.QUERY_THEN_FETCH != context.searchType()) {
return false;
}
// Profiled queries should not use the cache
if (request.source() != null && request.source().profile()) {
return false;
}
IndexSettings settings = context.indexShard().indexSettings();
// if the cache preference is not explicitly set in the request, fall back to the index setting; otherwise honour the request
if (request.requestCache() == null) {
if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {
return false;
} else if (context.size() != 0) {
// If no request cache query parameter and shard request cache
// is enabled in settings don't cache for requests with size > 0
return false;
}
} else if (request.requestCache() == false) {
return false;
}
// We use the cacheKey of the index reader as a part of a key of the IndicesRequestCache.
assert context.searcher().getIndexReader().getReaderCacheHelper() != null;
// if "now in millis" is used (or, in the future, a more generic "isDeterministic" flag),
// then we can't cache based on the "now" key within the search request, as it is not deterministic
if (context.getSearchExecutionContext().isCacheable() == false) {
return false;
}
return true;
}
/**
* Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
* to have a single load operation that will cause other requests with the same key to wait until it is loaded and then
* reuse the same cache entry.
*/
public void loadIntoContext(ShardSearchRequest request, SearchContext context) throws Exception {
assert canCache(request, context);
final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
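// single-element array used as a mutable flag the cache loader below can write to, since local
// variables captured by a lambda must be effectively final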
boolean[] loadedFromCache = new boolean[] { true };
BytesReference cacheKey = request.cacheKey(requestCacheKeyDifferentiator);
BytesReference bytesReference = cacheShardLevelResult(
context.indexShard(),
context.getSearchExecutionContext().mappingCacheKey(),
directoryReader,
cacheKey,
out -> {
QueryPhase.execute(context);
context.queryResult().writeToNoId(out);
loadedFromCache[0] = false;
}
);
if (loadedFromCache[0]) {
// restore the cached query result into the context
final QuerySearchResult result = context.queryResult();
StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);
result.readFromWithId(context.id(), in);
result.setSearchShardTarget(context.shardTarget());
} else if (context.queryResult().searchTimedOut()) {
// we have to invalidate the cache entry if we cached a query result from a request that timed out.
// we can't really throw exceptions in the loading part to signal a timed out search to the outside world since if there are
// multiple requests that wait for the cache entry to be calculated they'd all fail with the same exception.
// instead we allow caching such a result for the time being, return the timed out result for all other searches with that cache
// key, and invalidate the result in the thread that caused the timeout. This ends up being simpler and eventually correct since
// running a search that times out concurrently will likely time out again if it's run while we have this `stale` result in the
// cache. One other option is to not cache requests with a timeout at all...
indicesRequestCache.invalidate(
new IndexShardCacheEntity(context.indexShard()),
context.getSearchExecutionContext().mappingCacheKey(),
directoryReader,
cacheKey
);
if (logger.isTraceEnabled()) {
logger.trace(
"Query timed out, invalidating cache entry for request on shard [{}]:\n {}",
request.shardId(),
request.source()
);
}
}
}
public ByteSizeValue getTotalIndexingBufferBytes() {
return indexingMemoryController.indexingBufferSize();
}
/**
* Cache something calculated at the shard level.
* @param shard the shard this item is part of
* @param reader a reader for this shard. Used to invalidate the cache when there are changes.
* @param cacheKey key for the thing being cached within this shard
* @param loader loads the data into the cache if needed
* @return the contents of the cache or the result of calling the loader
*/
private BytesReference cacheShardLevelResult(
IndexShard shard,
MappingLookup.CacheKey mappingCacheKey,
DirectoryReader reader,
BytesReference cacheKey,
CheckedConsumer<StreamOutput, IOException> loader
) throws Exception {
IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard);
CheckedSupplier<BytesReference, IOException> supplier = () -> {
/* BytesStreamOutput allows passing the expected size, but by default uses
* BigArrays.PAGE_SIZE_IN_BYTES, which is 16k. A common cached result, e.g.
* a date histogram with 3 buckets, is ~100 bytes, so 16k might be very wasteful
* since we don't shrink to the actual size once we are done serializing.
* By passing 512 as the expected size we will resize the byte array in the stream
* slowly until we hit the page size and don't waste too much memory for small query
* results.*/
final int expectedSizeInBytes = 512;
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
loader.accept(out);
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
// the memory properly paged instead of having varied sized bytes
return out.bytes();
}
};
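// getOrCompute runs the supplier only on a cache miss; concurrent requests for the same key wait
// for that single computation and then share the cached bytes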
return indicesRequestCache.getOrCompute(cacheEntity, supplier, mappingCacheKey, reader, cacheKey);
}
static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class);
private final IndexShard indexShard;
protected IndexShardCacheEntity(IndexShard indexShard) {
this.indexShard = indexShard;
}
@Override
protected ShardRequestCache stats() {
return indexShard.requestCache();
}
@Override
public boolean isOpen() {
return indexShard.state() != IndexShardState.CLOSED;
}
@Override
public Object getCacheIdentity() {
return indexShard;
}
@Override
public long ramBytesUsed() {
// No need to take the IndexShard into account since it is shared
// across many entities
return BASE_RAM_BYTES_USED;
}
}
@FunctionalInterface
interface IndexDeletionAllowedPredicate {
boolean apply(Index index, IndexSettings indexSettings);
}
private final IndexDeletionAllowedPredicate DEFAULT_INDEX_DELETION_PREDICATE = (
Index index,
IndexSettings indexSettings) -> canDeleteIndexContents(index);
private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true;
public AliasFilter buildAliasFilter(ClusterState state, String index, Set<String> resolvedExpressions) {
/* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
* of dependencies we pass in a function that can perform the parsing. */
CheckedFunction<BytesReference, QueryBuilder, IOException> filterParser = bytes -> {
try (
XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytes, XContentHelper.xContentType(bytes))
) {
return parseTopLevelQuery(parser);
}
};
String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, resolvedExpressions);
if (aliases == null) {
return AliasFilter.EMPTY;
}
Metadata metadata = state.metadata();
IndexAbstraction ia = state.metadata().getIndicesLookup().get(index);
DataStream dataStream = ia.getParentDataStream();
if (dataStream != null) {
String dataStreamName = dataStream.getName();
List<QueryBuilder> filters = Arrays.stream(aliases)
.map(name -> metadata.dataStreamAliases().get(name))
.filter(dataStreamAlias -> dataStreamAlias.getFilter(dataStreamName) != null)
.map(dataStreamAlias -> {
try {
return filterParser.apply(dataStreamAlias.getFilter(dataStreamName).uncompressed());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
})
.toList();
if (filters.isEmpty()) {
return AliasFilter.of(null, aliases);
} else {
if (filters.size() == 1) {
return AliasFilter.of(filters.get(0), aliases);
} else {
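// multiple alias filters are OR'ed together: a document matches if it satisfies the
// filter of at least one of the filtering aliases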
BoolQueryBuilder bool = new BoolQueryBuilder();
for (QueryBuilder filter : filters) {
bool.should(filter);
}
return AliasFilter.of(bool, aliases);
}
}
} else {
IndexMetadata indexMetadata = metadata.index(index);
return AliasFilter.of(ShardSearchRequest.parseAliasFilter(filterParser, indexMetadata, aliases), aliases);
}
}
/**
* Returns a new {@link QueryRewriteContext} with the given {@code now} provider
*/
public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis, ResolvedIndices resolvedIndices) {
return new QueryRewriteContext(parserConfig, client, nowInMillis, resolvedIndices);
}
public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) {
return new DataRewriteContext(parserConfig, client, nowInMillis);
}
public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) {
return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType);
}
/**
* Clears the caches for the given shard id if the shard is still allocated on this node
*/
public void clearIndexShardCache(ShardId shardId, boolean queryCache, boolean fieldDataCache, boolean requestCache, String... fields) {
final IndexService service = indexService(shardId.getIndex());
if (service != null) {
IndexShard shard = service.getShardOrNull(shardId.id());
final boolean clearedAtLeastOne = service.clearCaches(queryCache, fieldDataCache, fields);
if ((requestCache || (clearedAtLeastOne == false && fields.length == 0)) && shard != null) {
indicesRequestCache.clear(new IndexShardCacheEntity(shard));
}
}
}
/**
* Returns a function which, given an index name, returns a predicate that fields must match in order to be returned by the get
* mappings, get index, get field mappings and field capabilities APIs. Useful to filter the fields that such APIs return.
* The predicate receives the field name as its input argument. In case multiple plugins register a field filter through
* {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be
* returned by the get mappings, get index, get field mappings and field capabilities APIs.
*/
public Function<String, FieldPredicate> getFieldFilter() {
return mapperRegistry.getFieldFilter();
}
private void setIdFieldDataEnabled(boolean value) {
this.idFieldDataEnabled = value;
}
private void updateDanglingIndicesInfo(Index index) {
assert DiscoveryNode.canContainData(settings) : "dangling indices information should only be persisted on data nodes";
assert nodeWriteDanglingIndicesInfo : "writing dangling indices info is not enabled";
assert danglingIndicesThreadPoolExecutor != null : "executor for dangling indices info is not available";
if (danglingIndicesToWrite.add(index)) {
logger.trace("triggered dangling indices update for {}", index);
final long triggeredTimeMillis = threadPool.relativeTimeInMillis();
try {
danglingIndicesThreadPoolExecutor.execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
logger.warn(() -> format("failed to write dangling indices state for index %s", index), e);
}
@Override
protected void doRun() {
final boolean exists = danglingIndicesToWrite.remove(index);
assert exists : "removed non-existing item for " + index;
final IndexService indexService = indices.get(index.getUUID());
if (indexService != null) {
final long executedTimeMillis = threadPool.relativeTimeInMillis();
logger.trace(
"writing out dangling indices state for index {}, triggered {} ago",
index,
TimeValue.timeValueMillis(Math.max(0L, executedTimeMillis - triggeredTimeMillis))
);
indexService.writeDanglingIndicesInfo();
final long completedTimeMillis = threadPool.relativeTimeInMillis();
logger.trace(
"writing out of dangling indices state for index {} completed after {}",
index,
TimeValue.timeValueMillis(Math.max(0L, completedTimeMillis - executedTimeMillis))
);
} else {
logger.trace("omit writing dangling indices state for index {} as index is deallocated on this node", index);
}
}
});
} catch (EsRejectedExecutionException e) {
// ignore cases where we are shutting down; there is really nothing interesting to be done here
assert danglingIndicesThreadPoolExecutor.isShutdown();
}
} else {
logger.trace("dangling indices update already pending for {}", index);
}
}
private void setAllowExpensiveQueries(Boolean allowExpensiveQueries) {
this.allowExpensiveQueries = allowExpensiveQueries;
}
// visible for testing
public boolean allPendingDanglingIndicesWritten() {
return nodeWriteDanglingIndicesInfo == false
|| (danglingIndicesToWrite.isEmpty() && danglingIndicesThreadPoolExecutor.getActiveCount() == 0);
}
/**
* @return the field type of the {@code @timestamp} field of the given index, or {@code null} if:
* - the index is not found,
* - the field is not found, or
* - the field is not a timestamp field.
*/
@Nullable
public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) {
return timestampFieldMapperService.getTimestampFieldType(index);
}
public IndexScopedSettings getIndexScopedSettings() {
return indexScopedSettings;
}
// TODO move this?
public BigArrays getBigArrays() {
return bigArrays;
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/indices/IndicesService.java |
473 | // Companion Code to the paper "Generative Forests" by R. Nock and M. Guillame-Bert.
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.geom.Ellipse2D;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Vector;
import javax.imageio.ImageIO;
class WeightedRectangle2D implements Debuggable {
Rectangle2D.Double rectangle;
double weight, density; // weight = unnormalized (just in case it is useful)
public static double NUMERICAL_PRECISION_ERROR;
WeightedRectangle2D(Rectangle2D.Double r, double w){
rectangle = r;
weight = w;
density = -1.0;
}
WeightedRectangle2D(Rectangle2D.Double r, double w, double d){
this(r, w);
density = d;
}
public double surface(){
return (rectangle.width * rectangle.height);
}
public boolean strictly_contains(double x, double y){
// handling numerical precision errors
return ( (x > rectangle.x + NUMERICAL_PRECISION_ERROR) && (x < rectangle.x + rectangle.width - NUMERICAL_PRECISION_ERROR)
&& (y > rectangle.y + NUMERICAL_PRECISION_ERROR) && (y < rectangle.y + rectangle.height - NUMERICAL_PRECISION_ERROR) );
}
public boolean contains_X(double v){
// handling numerical precision errors
return ( (v >= rectangle.x - (NUMERICAL_PRECISION_ERROR/1000.0)) && (v <= rectangle.x + rectangle.width + (NUMERICAL_PRECISION_ERROR/1000.0)) );
}
public boolean contains_Y(double v){
// handling numerical precision errors
return ( (v >= rectangle.y - (NUMERICAL_PRECISION_ERROR/1000.0)) && (v <= rectangle.y + rectangle.height + (NUMERICAL_PRECISION_ERROR/1000.0)) );
}
public boolean matches(WeightedRectangle2D r){
return (rectangle.equals(r.rectangle));
}
public String toString(){
return rectangle + "{" + weight + ":" + density + "}";
}
}
public class Plotting implements Debuggable{
// class managing all plotting stuff
public static int IMAGE_SIZE_MAX = 500;
public static boolean SQUARE_IMAGE = true;
public static boolean EMBED_DATA = true;
public static int DATA_SIZE = 4;
public static double OFFSET = 1;
public static int GRID_SIZE = 101;
public static String LOWER_LEFT = "LOWER_LEFT",
UPPER_LEFT = "UPPER_LEFT",
LOWER_RIGHT = "LOWER_RIGHT",
UPPER_RIGHT = "UPPER_RIGHT";
public static String LINES = "LINES", RECTANGLES = "RECTANGLES";
int feature_index_x, feature_index_y, width, height;
double x_min, x_max, y_min, y_max, length_x, length_y;
Vector <Line2D.Double> all_lines;
Vector <WeightedRectangle2D> all_rectangles;
public static double GET_MIN(Dataset ds, int feature_index){
if (Feature.IS_CONTINUOUS(ds.features.elementAt(feature_index).type))
return ds.features.elementAt(feature_index).dmin;
else Dataset.perror("Plotting.class :: plotting non continuous variables");
return -1.0;
}
public static double GET_FEATURE_VALUE(Dataset ds, Observation o, int feature_index){
if (Feature.IS_CONTINUOUS(ds.features.elementAt(feature_index).type))
return o.typed_features.elementAt(feature_index).dv;
else Dataset.perror("Plotting.class :: plotting non continuous variables");
return -1.0;
}
public static double GET_MAX(Dataset ds, int feature_index){
if (Feature.IS_CONTINUOUS(ds.features.elementAt(feature_index).type))
return ds.features.elementAt(feature_index).dmax;
else Dataset.perror("Plotting.class :: plotting non continuous variables");
return -1.0;
}
public static double GET_MAX(Feature f, double dmax){
double d;
if (Feature.IS_CONTINUOUS(f.type))
return f.dmax;
else
Dataset.perror("Plotting.class :: plotting non continuous variables");
return -1.0;
}
public static double GET_MIN(Feature f, double dmin){
double d;
if (Feature.IS_CONTINUOUS(f.type))
return f.dmin;
else
Dataset.perror("Plotting.class :: plotting non continuous variables");
return -1.0;
}
public static double MAP_COORDINATE_TO_IMAGE(double c, int size, double vmin, double vmax){
double vuse;
if ( (c < vmin) && (c > vmin - WeightedRectangle2D.NUMERICAL_PRECISION_ERROR) )
vuse = vmin;
else if ( (c > vmax) && (c < vmax + WeightedRectangle2D.NUMERICAL_PRECISION_ERROR) )
vuse = vmax;
else
vuse = c;
if ((c <= vmin - WeightedRectangle2D.NUMERICAL_PRECISION_ERROR)
|| (c >= vmax + WeightedRectangle2D.NUMERICAL_PRECISION_ERROR))
Dataset.perror(
"Plotting.class :: value out of bounds ("
+ c
+ " not in ["
+ vmin
+ ","
+ vmax
+ "] +/- "
+ WeightedRectangle2D.NUMERICAL_PRECISION_ERROR
+ ")");
return (((vuse - vmin)/(vmax - vmin)) * ((double) size));
}
public static double MAP_X_TO_IMAGE(double c, int size, double vmin, double vmax){
return MAP_COORDINATE_TO_IMAGE(c, size, vmin, vmax);
}
public static double MAP_Y_TO_IMAGE(double c, int size, double vmin, double vmax){
return ((double) size - MAP_COORDINATE_TO_IMAGE(c, size, vmin, vmax));
}
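// Example: with size = 500 and [vmin, vmax] = [0, 10], c = 2.5 maps to x-pixel 125, while
// MAP_Y_TO_IMAGE flips it to 375 because image y coordinates grow downwards.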
Plotting(){
feature_index_x = feature_index_y = width = height = -1;
x_min = x_max = y_min = y_max = length_x = length_y = -1.0;
all_lines = null;
all_rectangles = null;
}
public void init_vectors(){
all_lines = new Vector<>();
all_rectangles = new Vector<>();
}
public void init(GenerativeModelBasedOnEnsembleOfTrees geot, int f_index_x, int f_index_y){
feature_index_x = f_index_x;
feature_index_y = f_index_y;
x_min = GET_MIN(geot.myDS, feature_index_x);
x_max = GET_MAX(geot.myDS, feature_index_x);
y_min = GET_MIN(geot.myDS, feature_index_y);
y_max = GET_MAX(geot.myDS, feature_index_y);
length_x = x_max - x_min;
length_y = y_max - y_min;
if ( (SQUARE_IMAGE) || (length_x == length_y) ){
width = height = IMAGE_SIZE_MAX;
}else{
if (length_x > length_y){
width = IMAGE_SIZE_MAX;
height = (int) ((length_y / length_x) * (double) IMAGE_SIZE_MAX);
}else{
width = (int) ((length_x / length_y) * (double) IMAGE_SIZE_MAX);
height = IMAGE_SIZE_MAX;
}
}
}
public void compute_geometric_objects(GenerativeModelBasedOnEnsembleOfTrees geot, String which_object){ //Plotting.LINES
if ( (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_x).type)) || (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_y).type)) )
Dataset.perror("Plotting.class :: plotting non continuous / integer variables");
double xmic, xmac, ymic, ymac, xc, yc;
if (length_x < length_y)
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_x / 10000.0;
else
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_y / 10000.0;
MeasuredSupportAtTupleOfNodes init_mds = new MeasuredSupportAtTupleOfNodes(geot, true, false, -1), cur_mds;
init_mds.generative_support.local_empirical_measure.init_proportions(1.0);
Vector <MeasuredSupportAtTupleOfNodes> m = new Vector<>();
m.addElement(init_mds);
init_vectors();
GenerativeModelBasedOnEnsembleOfTrees.ALL_SUPPORTS_WITH_POSITIVE_MEASURE_GET(m, this, which_object);
}
public void add_line(MeasuredSupportAtTupleOfNodes cur_mds){
Line2D.Double ld;
double xmic, xmac, ymic, ymac, xc, yc;
xmic = Plotting.MAP_X_TO_IMAGE(Plotting.GET_MIN(cur_mds.generative_support.support.feature(feature_index_x), x_min), width, x_min, x_max);
xmac = Plotting.MAP_X_TO_IMAGE(Plotting.GET_MAX(cur_mds.generative_support.support.feature(feature_index_x), x_max), width, x_min, x_max);
ymic = Plotting.MAP_Y_TO_IMAGE(Plotting.GET_MIN(cur_mds.generative_support.support.feature(feature_index_y), y_min), height, y_min, y_max);
ymac = Plotting.MAP_Y_TO_IMAGE(Plotting.GET_MAX(cur_mds.generative_support.support.feature(feature_index_y), y_max), height, y_min, y_max);
ld = new Line2D.Double(xmic, ymic, xmac, ymic);
if (!all_lines.contains(ld))
all_lines.add(ld);
ld = new Line2D.Double(xmac, ymic, xmac, ymac);
if (!all_lines.contains(ld))
all_lines.add(ld);
ld = new Line2D.Double(xmic, ymac, xmac, ymac);
if (!all_lines.contains(ld))
all_lines.add(ld);
ld = new Line2D.Double(xmic, ymic, xmic, ymac);
if (!all_lines.contains(ld))
all_lines.add(ld);
}
public void add_rectangle(MeasuredSupportAtTupleOfNodes cur_mds){
double xmic, xmac, ymic, ymac, su;
xmic = Plotting.GET_MIN(cur_mds.generative_support.support.feature(feature_index_x), x_min);
xmac = Plotting.GET_MAX(cur_mds.generative_support.support.feature(feature_index_x), x_max);
ymic = Plotting.GET_MIN(cur_mds.generative_support.support.feature(feature_index_y), y_min);
ymac = Plotting.GET_MAX(cur_mds.generative_support.support.feature(feature_index_y), y_max);
if ( (cur_mds.generative_support.local_empirical_measure.observations_indexes == null) || (cur_mds.generative_support.local_empirical_measure.observations_indexes.length == 0) )
su = 0.0;
else{
su = cur_mds.generative_support.local_empirical_measure.total_weight;
}
all_rectangles.addElement(new WeightedRectangle2D(new Rectangle2D.Double(xmic, ymic, xmac - xmic, ymac - ymic), su));
}
public void compute_and_store_x_y_frontiers(GenerativeModelBasedOnEnsembleOfTrees geot, String name_file){
// stores the splits of the geot in the (x,y) plane and => image
int i;
double xc, yc;
if ( (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_x).type)) || (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_y).type)) )
Dataset.perror("Plotting.class :: plotting non continuous / integer variables");
if (length_x < length_y)
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_x / 10000.0;
else
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_y / 10000.0;
compute_geometric_objects(geot, Plotting.LINES);
BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
Graphics2D graph = (Graphics2D) img.getGraphics();
graph.setPaint(Color.white);
graph.fill(new Rectangle2D.Double(0,0,width,height));
graph.setPaint(Color.black);
graph.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
for (i=0;i<all_lines.size();i++)
graph.draw(all_lines.elementAt(i));
Observation o;
if (EMBED_DATA){
graph.setPaint(Color.green);
for (i=0;i<geot.myDS.observations_from_file.size();i++){
o = geot.myDS.observations_from_file.elementAt(i);
if ( (!Observation.FEATURE_IS_UNKNOWN(o, feature_index_x)) && (!Observation.FEATURE_IS_UNKNOWN(o, feature_index_y)) ){
xc = Plotting.MAP_X_TO_IMAGE(Plotting.GET_FEATURE_VALUE(geot.myDS, o, feature_index_x), width, x_min, x_max);
yc = Plotting.MAP_Y_TO_IMAGE(Plotting.GET_FEATURE_VALUE(geot.myDS, o, feature_index_y), height, y_min, y_max);
graph.fill(new Ellipse2D.Double(xc-((double) (DATA_SIZE/2)), yc-((double) (DATA_SIZE/2)), (double) DATA_SIZE, (double) DATA_SIZE));
}
}
graph.setPaint(Color.black);
}
try{
ImageIO.write(img, "PNG", new File(name_file));
}catch(IOException e){
e.printStackTrace(); // report failed image writes instead of silently swallowing the error
}
}
public void compute_and_store_x_y_densities(GenerativeModelBasedOnEnsembleOfTrees geot, String name_file){
// stores the joint density in (x,y) and => image
int i, j, k, l;
double xc, yc;
double xmic, xmac, ymic, ymac;
if ( (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_x).type)) || (Feature.IS_NOMINAL(geot.myDS.features.elementAt(feature_index_y).type)) )
Dataset.perror("Plotting.class :: plotting non continuous / integer variables");
if (length_x < length_y)
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_x / 10000.0;
else
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_y / 10000.0;
compute_geometric_objects(geot, Plotting.RECTANGLES);
Vector <WeightedRectangle2D> all_splitted_rectangles = new Vector<>();
Vector <WeightedRectangle2D> all_splitted_current_rectangles, all_splitted_local_current_rectangles, next_splitted_current_rectangles;
WeightedRectangle2D duma, dumb;
for (i=0;i<all_rectangles.size();i++){
all_splitted_current_rectangles = new Vector <>();
all_splitted_current_rectangles.addElement(all_rectangles.elementAt(i));
for (j=0;j<all_rectangles.size();j++){
if (j!=i){
next_splitted_current_rectangles = new Vector <>();
for (k=0;k<all_splitted_current_rectangles.size();k++){
duma = all_splitted_current_rectangles.elementAt(k);
dumb = all_rectangles.elementAt(j);
all_splitted_local_current_rectangles = SPLIT_A_FROM_B(duma, dumb);
next_splitted_current_rectangles.addAll(all_splitted_local_current_rectangles);
}
all_splitted_current_rectangles = next_splitted_current_rectangles;
}
}
all_splitted_rectangles.addAll(all_splitted_current_rectangles);
}
i = 0;
do{
duma = all_splitted_rectangles.elementAt(i);
j = all_splitted_rectangles.size()-1;
do{
dumb = all_splitted_rectangles.elementAt(j);
if (duma.matches(dumb)){
duma.weight += dumb.weight;
all_splitted_rectangles.removeElementAt(j);
}
j--;
}while(j>i);
i++;
}while(i<all_splitted_rectangles.size()-1);
// CHECK
boolean infinite_density = false;
for (i=0;i<all_splitted_rectangles.size()-1;i++){
duma = all_splitted_rectangles.elementAt(i);
if (duma.surface() == 0.0){
infinite_density = true;
Dataset.perror(" INFINITE DENSITY ");
}
for (j=i+1;j<all_splitted_rectangles.size();j++){
dumb = all_splitted_rectangles.elementAt(j);
if (duma.matches(dumb))
Dataset.perror("MATCH ERROR " + duma + " & " + dumb);
}
}
//computing densities
double total_weight = 0.0, dens;
for (i=0;i<all_splitted_rectangles.size();i++)
total_weight += all_splitted_rectangles.elementAt(i).weight;
for (i=0;i<all_splitted_rectangles.size();i++){
duma = all_splitted_rectangles.elementAt(i);
dens = (duma.weight / total_weight) / duma.surface();
duma.density = dens;
}
Vector <WeightedRectangle2D> all_final_rectangles = new Vector<>();
for (i=0;i<all_splitted_rectangles.size();i++){
duma = all_splitted_rectangles.elementAt(i);
xmic = Plotting.MAP_X_TO_IMAGE(duma.rectangle.x, width, x_min, x_max);
xmac = Plotting.MAP_X_TO_IMAGE(duma.rectangle.x + duma.rectangle.width, width, x_min, x_max);
ymic = Plotting.MAP_Y_TO_IMAGE(duma.rectangle.y + duma.rectangle.height, height, y_min, y_max);
ymac = Plotting.MAP_Y_TO_IMAGE(duma.rectangle.y, height, y_min, y_max);
all_final_rectangles.addElement(new WeightedRectangle2D(new Rectangle2D.Double(xmic - OFFSET, ymic - OFFSET, xmac - xmic + (2*OFFSET), ymac - ymic + (2*OFFSET)), duma.weight, duma.density));
}
double max_dens = -1.0;
for (i=0;i<all_final_rectangles.size();i++){
duma = all_final_rectangles.elementAt(i);
if ( (i==0) || (duma.density > max_dens) )
max_dens = duma.density;
}
BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
Graphics2D graph = (Graphics2D) img.getGraphics();
graph.setPaint(HEATMAP_COLOR(0.0f));
graph.fill(new Rectangle2D.Double(0,0,width,height));
graph.setPaint(Color.black);
graph.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
Color fc;
float cd, enh;
for (i=0;i<all_final_rectangles.size();i++){
duma = all_final_rectangles.elementAt(i);
cd = (float) (duma.density / max_dens);
fc = HEATMAP_COLOR(cd);
graph.setPaint(fc);
graph.fill(duma.rectangle);
graph.setPaint(Color.red);
}
try{
ImageIO.write(img, "PNG", new File(name_file));
}catch(IOException e){
e.printStackTrace(); // report failed image writes instead of silently swallowing the error
}
}
public void compute_and_store_x_y_densities_dataset(GenerativeModelBasedOnEnsembleOfTrees geot, Vector <Observation> observations, String name_file){
Vector <WeightedRectangle2D> all_domain_rectangles = new Vector<>();
Vector <WeightedRectangle2D> rectangles_containing_o;
int i, su, j, k;
Rectangle2D.Double rd;
Observation o;
WeightedRectangle2D duma;
boolean unknown_x, unknown_y, in_x, in_y;
double total_weight = 0.0, dens;
double xmic, xmac, ymic, ymac;
double delta_x = length_x / ((double) GRID_SIZE), delta_y = length_y / ((double) GRID_SIZE);
if (length_x < length_y)
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_x / 10000.0;
else
WeightedRectangle2D.NUMERICAL_PRECISION_ERROR = length_y / 10000.0;
for (i=0;i<GRID_SIZE;i++)
for (j=0;j<GRID_SIZE;j++)
all_domain_rectangles.addElement(new WeightedRectangle2D(new Rectangle2D.Double((x_min + ((double) i)*delta_x), y_min + (((double) j)*delta_y), delta_x, delta_y), 0.0));
for (i=0;i<observations.size();i++){
total_weight += 1.0;
o = observations.elementAt(i);
rectangles_containing_o = new Vector<>();
unknown_x = Observation.FEATURE_IS_UNKNOWN(o, feature_index_x);
unknown_y = Observation.FEATURE_IS_UNKNOWN(o, feature_index_y);
for (j=0;j<all_domain_rectangles.size();j++){
in_x = ( (unknown_x) || (all_domain_rectangles.elementAt(j).contains_X(GET_FEATURE_VALUE(geot.myDS, o, feature_index_x))) );
in_y = ( (unknown_y) || (all_domain_rectangles.elementAt(j).contains_Y(GET_FEATURE_VALUE(geot.myDS, o, feature_index_y))) );
if ( (in_x) && (in_y) )
rectangles_containing_o.addElement(all_domain_rectangles.elementAt(j));
}
if (rectangles_containing_o.size() == 0)
System.out.print("X");
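// an observation with an unknown coordinate is spread uniformly over all grid cells compatible
// with its known coordinate, each cell receiving 1/n of its unit weight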
for (j=0;j<rectangles_containing_o.size();j++)
rectangles_containing_o.elementAt(j).weight += (1.0 / ((double) rectangles_containing_o.size()) );
}
for (i=0;i<all_domain_rectangles.size();i++){
duma = all_domain_rectangles.elementAt(i);
dens = (duma.weight / total_weight) / duma.surface();
duma.density = dens;
}
Vector <WeightedRectangle2D> all_final_rectangles = new Vector<>();
for (i=0;i<all_domain_rectangles.size();i++){
duma = all_domain_rectangles.elementAt(i);
xmic = Plotting.MAP_X_TO_IMAGE(duma.rectangle.x, width, x_min, x_max);
xmac = Plotting.MAP_X_TO_IMAGE(duma.rectangle.x + duma.rectangle.width, width, x_min, x_max);
ymic = Plotting.MAP_Y_TO_IMAGE(duma.rectangle.y + duma.rectangle.height, height, y_min, y_max);
ymac = Plotting.MAP_Y_TO_IMAGE(duma.rectangle.y, height, y_min, y_max);
all_final_rectangles.addElement(new WeightedRectangle2D(new Rectangle2D.Double(xmic - OFFSET, ymic - OFFSET, xmac - xmic + (2*OFFSET), ymac - ymic + (2*OFFSET)), duma.weight, duma.density));
}
double max_dens = -1.0;
for (i=0;i<all_final_rectangles.size();i++){
duma = all_final_rectangles.elementAt(i);
if ( (i==0) || (duma.density > max_dens) )
max_dens = duma.density;
}
BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
Graphics2D graph = (Graphics2D) img.getGraphics();
graph.setPaint(Color.white);
graph.fill(new Rectangle2D.Double(0,0,width,height));
graph.setPaint(Color.black);
graph.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
//graph.setStroke(new BasicStroke(2));
Color fc;
float cd, enh;
for (i=0;i<all_final_rectangles.size();i++){
duma = all_final_rectangles.elementAt(i);
cd = (float) (duma.density / max_dens);
fc = HEATMAP_COLOR(cd);
graph.setPaint(fc);
graph.fill(duma.rectangle);
graph.setPaint(Color.red);
}
try{
ImageIO.write(img, "PNG", new File(name_file));
}catch(IOException e){
e.printStackTrace(); // report failed image writes instead of silently swallowing the error
}
}
public static Color HEATMAP_COLOR(float f){
float [] tvals = {0.0f, 0.05f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f};
Color [] cvals = {Color.black, Color.blue, Color.cyan, Color.green, Color.yellow, Color.red, Color.white};
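// e.g. f = 0.5 falls in [0.4, 0.6): alpha = (0.6 - 0.5) / (0.6 - 0.4) = 0.5, so the returned
// colour is an equal blend of the green and yellow stops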
int i=0;
boolean found = false;
do{
if ( ( (f>=tvals[i]) && (f<tvals[i+1]) ) || (i == tvals.length-2) )
found = true;
else
i++;
}while(!found);
float [] c1 = cvals[i].getComponents(null);
float [] c2 = cvals[i+1].getComponents(null);
float alpha = (tvals[i+1] - f)/(tvals[i+1] - tvals[i]);
Color cret = new Color(alpha * c1[0] + (1.0f - alpha) * c2[0]
, alpha * c1[1] + (1.0f - alpha) * c2[1]
, alpha * c1[2] + (1.0f - alpha) * c2[2]
, alpha * c1[3] + (1.0f - alpha) * c2[3]);
return cret;
}
public static Vector<String> WHICH_VERTICES_OF_B_ARE_IN_A(WeightedRectangle2D a, WeightedRectangle2D b){
Vector <String> all = new Vector <>();
if (a.strictly_contains(b.rectangle.x, b.rectangle.y))
all.addElement(LOWER_LEFT);
if (a.strictly_contains(b.rectangle.x + b.rectangle.width, b.rectangle.y))
all.addElement(LOWER_RIGHT);
if (a.strictly_contains(b.rectangle.x + b.rectangle.width, b.rectangle.y + b.rectangle.height))
all.addElement(UPPER_RIGHT);
if (a.strictly_contains(b.rectangle.x, b.rectangle.y + b.rectangle.height))
all.addElement(UPPER_LEFT);
return all;
}
public static Vector<WeightedRectangle2D> SPLIT_A_FROM_B(WeightedRectangle2D a, WeightedRectangle2D b){
Vector<WeightedRectangle2D> ret = new Vector<>();
Vector <String> all = WHICH_VERTICES_OF_B_ARE_IN_A(a, b);
double x, y, w, h;
WeightedRectangle2D c;
if (all.size() == 4){
// b included in a: 9 new rectangles
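// layout of the 9 sub-rectangles of a, top row listed first (rows run bottom-to-top
// in data coordinates):
//   7 8 9
//   4 5 6
//   1 2 3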
// 1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 2
x = b.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.width;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 3
x = b.rectangle.x + b.rectangle.width;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 4
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 5
x = b.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.width;
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 6
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 7
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 8
x = b.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.width;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// 9
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else if (all.size() == 2){
// 6 rectangles, 4 configurations
if ( (all.contains(LOWER_LEFT)) && (all.contains(UPPER_LEFT)) ){
// A1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A2
x = b.rectangle.x;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A3
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A4
x = b.rectangle.x;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A5
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A6
x = b.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else if ( (all.contains(LOWER_LEFT)) && (all.contains(LOWER_RIGHT)) ){
// B1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B2
x = b.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.width;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B3
x = b.rectangle.x + b.rectangle.width;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B4
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B5
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.width;
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B6
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else if ( (all.contains(LOWER_RIGHT)) && (all.contains(UPPER_RIGHT)) ){
// C1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C2
x = b.rectangle.x + b.rectangle.width;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C3
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C4
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.height;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C5
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C6
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else if ( (all.contains(UPPER_LEFT)) && (all.contains(UPPER_RIGHT)) ){
// D1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D2
x = b.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.width;
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D3
x = b.rectangle.x + b.rectangle.width;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D4
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D5
x = b.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.width;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D6
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else
Dataset.perror("Plotting.class :: no such 2 configuration");
}else if (all.size() == 1){
// 4 rectangles, 4 configurations
if (all.contains(LOWER_LEFT)){
// A1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A2
x = b.rectangle.x;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A3
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// A4
x = b.rectangle.x;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
} else if (all.contains(LOWER_RIGHT)){
// B1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B2
x = b.rectangle.x + b.rectangle.width ;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B3
x = a.rectangle.x;
y = b.rectangle.y;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// B4
x = b.rectangle.x + b.rectangle.width ;
y = b.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - b.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
} else if (all.contains(UPPER_RIGHT)){
// C1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C2
x = b.rectangle.x + b.rectangle.width;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C3
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x + b.rectangle.width - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// C4
x = b.rectangle.x + b.rectangle.width;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - (b.rectangle.x + b.rectangle.width);
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
} else if (all.contains(UPPER_LEFT)){
// D1
x = a.rectangle.x;
y = a.rectangle.y;
w = b.rectangle.x - a.rectangle.x;
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D2
x = b.rectangle.x;
y = a.rectangle.y;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = b.rectangle.y + b.rectangle.height - a.rectangle.y;
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D3
x = a.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = b.rectangle.x - a.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
// D4
x = b.rectangle.x;
y = b.rectangle.y + b.rectangle.height;
w = a.rectangle.x + a.rectangle.width - b.rectangle.x;
h = a.rectangle.y + a.rectangle.height - (b.rectangle.y + b.rectangle.height);
c = new WeightedRectangle2D(new Rectangle2D.Double(x, y, w, h), 0.0);
c.weight = (c.surface() / a.surface()) * a.weight;
ret.addElement(c);
}else
Dataset.perror("Plotting.class :: no such 1 configuration");
}else if (all.size() == 0){
// no intersection
ret.addElement(a);
}else
Dataset.perror("Plotting.class :: no such all configuration");
return ret;
}
}
| google-research/google-research | generative_forests/src/Plotting.java |
475 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.script.ScriptService;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.LongSupplier;
/**
* A pipeline is a list of {@link Processor} instances grouped under a unique id.
*/
public final class Pipeline {
public static final String DESCRIPTION_KEY = "description";
public static final String PROCESSORS_KEY = "processors";
public static final String VERSION_KEY = "version";
public static final String ON_FAILURE_KEY = "on_failure";
public static final String META_KEY = "_meta";
public static final String DEPRECATED_KEY = "deprecated";
private final String id;
@Nullable
private final String description;
@Nullable
private final Integer version;
@Nullable
private final Map<String, Object> metadata;
private final CompoundProcessor compoundProcessor;
private final IngestPipelineMetric metrics;
private final LongSupplier relativeTimeProvider;
@Nullable
private final Boolean deprecated;
public Pipeline(
String id,
@Nullable String description,
@Nullable Integer version,
@Nullable Map<String, Object> metadata,
CompoundProcessor compoundProcessor
) {
this(id, description, version, metadata, compoundProcessor, null);
}
public Pipeline(
String id,
@Nullable String description,
@Nullable Integer version,
@Nullable Map<String, Object> metadata,
CompoundProcessor compoundProcessor,
@Nullable Boolean deprecated
) {
this(id, description, version, metadata, compoundProcessor, System::nanoTime, deprecated);
}
// package private for testing
Pipeline(
String id,
@Nullable String description,
@Nullable Integer version,
@Nullable Map<String, Object> metadata,
CompoundProcessor compoundProcessor,
LongSupplier relativeTimeProvider,
@Nullable Boolean deprecated
) {
this.id = id;
this.description = description;
this.metadata = metadata;
this.compoundProcessor = compoundProcessor;
this.version = version;
this.metrics = new IngestPipelineMetric();
this.relativeTimeProvider = relativeTimeProvider;
this.deprecated = deprecated;
}
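// A sketch (not taken from this file) of the config map shape that create() parses below; the
// keys mirror the constants declared above:
//   { "description": "...", "version": 2, "_meta": { ... }, "deprecated": false,
//     "processors": [ ... ], "on_failure": [ ... ] }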
public static Pipeline create(
String id,
Map<String, Object> config,
Map<String, Processor.Factory> processorFactories,
ScriptService scriptService
) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null);
Map<String, Object> metadata = ConfigurationUtils.readOptionalMap(null, null, config, META_KEY);
Boolean deprecated = ConfigurationUtils.readOptionalBooleanProperty(null, null, config, DEPRECATED_KEY);
List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, scriptService, processorFactories);
List<Map<String, Object>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = ConfigurationUtils.readProcessorConfigs(
onFailureProcessorConfigs,
scriptService,
processorFactories
);
if (config.isEmpty() == false) {
throw new ElasticsearchParseException(
"pipeline ["
+ id
+ "] doesn't support one or more provided configuration parameters "
+ Arrays.toString(config.keySet().toArray())
);
}
if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) {
throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined");
}
CompoundProcessor compoundProcessor = new CompoundProcessor(false, processors, onFailureProcessors);
return new Pipeline(id, description, version, metadata, compoundProcessor, deprecated);
}
/**
* Modifies the data of a document to be indexed based on the processors this pipeline holds
*
* If <code>null</code> is passed to the handler as the result then this document will be dropped and not indexed, otherwise
* this document will be kept and indexed.
*/
public void execute(IngestDocument ingestDocument, BiConsumer<IngestDocument, Exception> handler) {
final long startTimeInNanos = relativeTimeProvider.getAsLong();
metrics.preIngest();
compoundProcessor.execute(ingestDocument, (result, e) -> {
long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos;
metrics.postIngest(ingestTimeInNanos);
if (e != null) {
metrics.ingestFailed();
}
handler.accept(result, e);
});
}
/**
* The unique id of this pipeline
*/
public String getId() {
return id;
}
/**
* An optional description of what this pipeline is doing to the data that gets processed by this pipeline.
*/
@Nullable
public String getDescription() {
return description;
}
/**
* An optional version stored with the pipeline so that it can be used to determine if the pipeline should be updated / replaced.
*
* @return {@code null} if not supplied.
*/
@Nullable
public Integer getVersion() {
return version;
}
@Nullable
public Map<String, Object> getMetadata() {
return metadata;
}
/**
* Get the underlying {@link CompoundProcessor} containing the Pipeline's processors
*/
public CompoundProcessor getCompoundProcessor() {
return compoundProcessor;
}
/**
* Unmodifiable list containing each processor that operates on the data.
*/
public List<Processor> getProcessors() {
return compoundProcessor.getProcessors();
}
/**
* Unmodifiable list containing each on_failure processor that operates on the data in case of
* exception thrown in pipeline processors
*/
public List<Processor> getOnFailureProcessors() {
return compoundProcessor.getOnFailureProcessors();
}
/**
* Flattens the normal and on failure processors into a single list. The original order is lost.
* This can be useful for pipeline validation purposes.
*/
public List<Processor> flattenAllProcessors() {
return compoundProcessor.flattenProcessors();
}
/**
* The metrics associated with this pipeline.
*/
public IngestPipelineMetric getMetrics() {
return metrics;
}
public Boolean getDeprecated() {
return deprecated;
}
public boolean isDeprecated() {
return Boolean.TRUE.equals(deprecated);
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/ingest/Pipeline.java |
476 | /*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
/**
* Metadata about an {@link HttpHost} running Elasticsearch.
*/
public class Node {
/**
* Address that this host claims is its primary contact point.
*/
private final HttpHost host;
/**
* Addresses on which the host is listening. These are useful to have
* around because they allow you to find a host based on any address it
* is listening on.
*/
private final Set<HttpHost> boundHosts;
/**
* Name of the node as configured by the {@code node.name} attribute.
*/
private final String name;
/**
* Version of Elasticsearch that the node is running or {@code null}
* if we don't know the version.
*/
private final String version;
/**
* Roles that the Elasticsearch process on the host has or {@code null}
* if we don't know what roles the node has.
*/
private final Roles roles;
/**
* Attributes declared on the node.
*/
private final Map<String, List<String>> attributes;
/**
* Create a {@linkplain Node} with metadata. All parameters except
* {@code host} are nullable and implementations of {@link NodeSelector}
* need to decide what to do in their absence.
*/
public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles, Map<String, List<String>> attributes) {
if (host == null) {
throw new IllegalArgumentException("host cannot be null");
}
this.host = host;
this.boundHosts = boundHosts;
this.name = name;
this.version = version;
this.roles = roles;
this.attributes = attributes;
}
/**
* Create a {@linkplain Node} without any metadata.
*/
public Node(HttpHost host) {
this(host, null, null, null, null, null);
}
/**
* Contact information for the host.
*/
public HttpHost getHost() {
return host;
}
/**
* Addresses on which the host is listening. These are useful to have
* around because they allow you to find a host based on any address it
* is listening on.
*/
public Set<HttpHost> getBoundHosts() {
return boundHosts;
}
/**
* The {@code node.name} of the node.
*/
public String getName() {
return name;
}
/**
* Version of Elasticsearch that the node is running or {@code null}
* if we don't know the version.
*/
public String getVersion() {
return version;
}
/**
* Roles that the Elasticsearch process on the host has or {@code null}
* if we don't know what roles the node has.
*/
public Roles getRoles() {
return roles;
}
/**
* Attributes declared on the node.
*/
public Map<String, List<String>> getAttributes() {
return attributes;
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("[host=").append(host);
if (boundHosts != null) {
b.append(", bound=").append(boundHosts);
}
if (name != null) {
b.append(", name=").append(name);
}
if (version != null) {
b.append(", version=").append(version);
}
if (roles != null) {
b.append(", roles=").append(roles);
}
if (attributes != null) {
b.append(", attributes=").append(attributes);
}
return b.append(']').toString();
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Node other = (Node) obj;
return host.equals(other.host)
&& Objects.equals(boundHosts, other.boundHosts)
&& Objects.equals(name, other.name)
&& Objects.equals(version, other.version)
&& Objects.equals(roles, other.roles)
&& Objects.equals(attributes, other.attributes);
}
@Override
public int hashCode() {
return Objects.hash(host, boundHosts, name, version, roles, attributes);
}
/**
* Role information about an Elasticsearch process.
*/
public static final class Roles {
private final Set<String> roles;
public Roles(final Set<String> roles) {
this.roles = new TreeSet<>(roles);
}
/**
* Returns whether or not the node <strong>could</strong> be elected master.
*/
public boolean isMasterEligible() {
return roles.contains("master");
}
/**
* Returns whether or not the node stores data.
* @deprecated use {@link #hasDataRole()} or {@link #canContainData()}
*/
@Deprecated
public boolean isData() {
return roles.contains("data");
}
/**
* @return true if node has the "data" role
*/
public boolean hasDataRole() {
return roles.contains("data");
}
/**
* @return true if node has the "data_content" role
*/
public boolean hasDataContentRole() {
return roles.contains("data_content");
}
/**
* @return true if node has the "data_hot" role
*/
public boolean hasDataHotRole() {
return roles.contains("data_hot");
}
/**
* @return true if node has the "data_warm" role
*/
public boolean hasDataWarmRole() {
return roles.contains("data_warm");
}
/**
* @return true if node has the "data_cold" role
*/
public boolean hasDataColdRole() {
return roles.contains("data_cold");
}
/**
* @return true if node has the "data_frozen" role
*/
public boolean hasDataFrozenRole() {
return roles.contains("data_frozen");
}
/**
* @return true if node stores any type of data
*/
public boolean canContainData() {
return hasDataRole() || roles.stream().anyMatch(role -> role.startsWith("data_"));
}
/**
* Returns whether or not the node runs ingest pipelines.
*/
public boolean isIngest() {
return roles.contains("ingest");
}
@Override
public String toString() {
return String.join(",", roles);
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Roles other = (Roles) obj;
return roles.equals(other.roles);
}
@Override
public int hashCode() {
return roles.hashCode();
}
}
}
| elastic/elasticsearch | client/rest/src/main/java/org/elasticsearch/client/Node.java |
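A short sketch of constructing a Node with role metadata and querying it. The host, name, and version values are illustrative; only Node and Node.Roles from the file above plus Apache's HttpHost are used.

import org.apache.http.HttpHost;
import java.util.Set;
import java.util.TreeSet;

public class NodeExample {
    public static void main(String[] args) {
        Node.Roles roles = new Node.Roles(new TreeSet<>(Set.of("master", "data_hot", "ingest")));
        Node node = new Node(new HttpHost("localhost", 9200), null, "node-1", "8.13.0", roles, null);
        System.out.println(roles.isMasterEligible()); // true
        System.out.println(roles.hasDataRole());      // false: "data_hot" is not "data"
        System.out.println(roles.canContainData());   // true: "data_hot" starts with "data_"
        System.out.println(node.getHost());           // http://localhost:9200
    }
}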
477 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.hexagonal.banking;
import com.mongodb.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.UpdateOptions;
import java.util.ArrayList;
import lombok.Getter;
import org.bson.Document;
/**
* Mongo based banking adapter.
*/
public class MongoBank implements WireTransfers {
private static final String DEFAULT_DB = "lotteryDB";
private static final String DEFAULT_ACCOUNTS_COLLECTION = "accounts";
@Getter
private MongoClient mongoClient;
@Getter
private MongoDatabase database;
@Getter
private MongoCollection<Document> accountsCollection;
/**
* Constructor.
*/
public MongoBank() {
connect();
}
/**
* Constructor accepting parameters.
*/
public MongoBank(String dbName, String accountsCollectionName) {
connect(dbName, accountsCollectionName);
}
/**
* Connect to database with default parameters.
*/
public void connect() {
connect(DEFAULT_DB, DEFAULT_ACCOUNTS_COLLECTION);
}
/**
* Connect to database with given parameters.
*/
public void connect(String dbName, String accountsCollectionName) {
if (mongoClient != null) {
mongoClient.close();
}
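    // Host and port are read from the "mongo-host" and "mongo-port" system properties;
    // both must be set before connecting, otherwise the parseInt below fails.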
mongoClient = new MongoClient(System.getProperty("mongo-host"),
Integer.parseInt(System.getProperty("mongo-port")));
database = mongoClient.getDatabase(dbName);
accountsCollection = database.getCollection(accountsCollectionName);
}
@Override
public void setFunds(String bankAccount, int amount) {
var search = new Document("_id", bankAccount);
var update = new Document("_id", bankAccount).append("funds", amount);
var updateOptions = new UpdateOptions().upsert(true);
accountsCollection.updateOne(search, new Document("$set", update), updateOptions);
}
@Override
public int getFunds(String bankAccount) {
return accountsCollection
.find(new Document("_id", bankAccount))
.limit(1)
.into(new ArrayList<>())
.stream()
.findFirst()
.map(x -> x.getInteger("funds"))
.orElse(0);
}
@Override
public boolean transferFunds(int amount, String sourceAccount, String destinationAccount) {
var sourceFunds = getFunds(sourceAccount);
if (sourceFunds < amount) {
return false;
} else {
var destFunds = getFunds(destinationAccount);
setFunds(sourceAccount, sourceFunds - amount);
setFunds(destinationAccount, destFunds + amount);
return true;
}
}
}
| iluwatar/java-design-patterns | hexagonal/src/main/java/com/iluwatar/hexagonal/banking/MongoBank.java |
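A hedged usage sketch for MongoBank. It assumes a MongoDB instance reachable on localhost:27017 and sets the two required system properties first; the database and collection names are illustrative.

public class MongoBankExample {
    public static void main(String[] args) {
        System.setProperty("mongo-host", "localhost");
        System.setProperty("mongo-port", "27017");
        WireTransfers bank = new MongoBank("lotteryDB", "accounts");
        bank.setFunds("alice", 100);
        bank.setFunds("bob", 20);
        boolean ok = bank.transferFunds(30, "alice", "bob");
        // expected: true, alice=70, bob=50
        System.out.println(ok + " " + bank.getFunds("alice") + " " + bank.getFunds("bob"));
    }
}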
478 | // ASM: a very small and fast Java bytecode manipulation framework
// Copyright (c) 2000-2011 INRIA, France Telecom
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
package org.springframework.asm;
/**
* An entry of the constant pool, of the BootstrapMethods attribute, or of the (ASM specific) type
* table of a class.
*
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.4">JVMS
* 4.4</a>
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.23">JVMS
* 4.7.23</a>
* @author Eric Bruneton
*/
abstract class Symbol {
// Tag values for the constant pool entries (using the same order as in the JVMS).
/** The tag value of CONSTANT_Class_info JVMS structures. */
static final int CONSTANT_CLASS_TAG = 7;
/** The tag value of CONSTANT_Fieldref_info JVMS structures. */
static final int CONSTANT_FIELDREF_TAG = 9;
/** The tag value of CONSTANT_Methodref_info JVMS structures. */
static final int CONSTANT_METHODREF_TAG = 10;
/** The tag value of CONSTANT_InterfaceMethodref_info JVMS structures. */
static final int CONSTANT_INTERFACE_METHODREF_TAG = 11;
/** The tag value of CONSTANT_String_info JVMS structures. */
static final int CONSTANT_STRING_TAG = 8;
/** The tag value of CONSTANT_Integer_info JVMS structures. */
static final int CONSTANT_INTEGER_TAG = 3;
/** The tag value of CONSTANT_Float_info JVMS structures. */
static final int CONSTANT_FLOAT_TAG = 4;
/** The tag value of CONSTANT_Long_info JVMS structures. */
static final int CONSTANT_LONG_TAG = 5;
/** The tag value of CONSTANT_Double_info JVMS structures. */
static final int CONSTANT_DOUBLE_TAG = 6;
/** The tag value of CONSTANT_NameAndType_info JVMS structures. */
static final int CONSTANT_NAME_AND_TYPE_TAG = 12;
/** The tag value of CONSTANT_Utf8_info JVMS structures. */
static final int CONSTANT_UTF8_TAG = 1;
/** The tag value of CONSTANT_MethodHandle_info JVMS structures. */
static final int CONSTANT_METHOD_HANDLE_TAG = 15;
/** The tag value of CONSTANT_MethodType_info JVMS structures. */
static final int CONSTANT_METHOD_TYPE_TAG = 16;
/** The tag value of CONSTANT_Dynamic_info JVMS structures. */
static final int CONSTANT_DYNAMIC_TAG = 17;
/** The tag value of CONSTANT_InvokeDynamic_info JVMS structures. */
static final int CONSTANT_INVOKE_DYNAMIC_TAG = 18;
/** The tag value of CONSTANT_Module_info JVMS structures. */
static final int CONSTANT_MODULE_TAG = 19;
/** The tag value of CONSTANT_Package_info JVMS structures. */
static final int CONSTANT_PACKAGE_TAG = 20;
// Tag values for the BootstrapMethods attribute entries (ASM specific tag).
/** The tag value of the BootstrapMethods attribute entries. */
static final int BOOTSTRAP_METHOD_TAG = 64;
// Tag values for the type table entries (ASM specific tags).
/** The tag value of a normal type entry in the (ASM specific) type table of a class. */
static final int TYPE_TAG = 128;
/**
* The tag value of an uninitialized type entry in the type table of a class. This type is used
   * for the normal case where the NEW instruction is before the &lt;init&gt; constructor call (in
* bytecode offset order), i.e. when the label of the NEW instruction is resolved when the
* constructor call is visited. If the NEW instruction is after the constructor call, use the
* {@link #FORWARD_UNINITIALIZED_TYPE_TAG} tag value instead.
*/
static final int UNINITIALIZED_TYPE_TAG = 129;
/**
* The tag value of an uninitialized type entry in the type table of a class. This type is used
   * for the unusual case where the NEW instruction is after the &lt;init&gt; constructor call (in
* bytecode offset order), i.e. when the label of the NEW instruction is not resolved when the
* constructor call is visited. If the NEW instruction is before the constructor call, use the
* {@link #UNINITIALIZED_TYPE_TAG} tag value instead.
*/
static final int FORWARD_UNINITIALIZED_TYPE_TAG = 130;
/** The tag value of a merged type entry in the (ASM specific) type table of a class. */
static final int MERGED_TYPE_TAG = 131;
// Instance fields.
/**
* The index of this symbol in the constant pool, in the BootstrapMethods attribute, or in the
* (ASM specific) type table of a class (depending on the {@link #tag} value).
*/
final int index;
/**
* A tag indicating the type of this symbol. Must be one of the static tag values defined in this
* class.
*/
final int tag;
/**
* The internal name of the owner class of this symbol. Only used for {@link
* #CONSTANT_FIELDREF_TAG}, {@link #CONSTANT_METHODREF_TAG}, {@link
* #CONSTANT_INTERFACE_METHODREF_TAG}, and {@link #CONSTANT_METHOD_HANDLE_TAG} symbols.
*/
final String owner;
/**
* The name of the class field or method corresponding to this symbol. Only used for {@link
* #CONSTANT_FIELDREF_TAG}, {@link #CONSTANT_METHODREF_TAG}, {@link
* #CONSTANT_INTERFACE_METHODREF_TAG}, {@link #CONSTANT_NAME_AND_TYPE_TAG}, {@link
* #CONSTANT_METHOD_HANDLE_TAG}, {@link #CONSTANT_DYNAMIC_TAG} and {@link
* #CONSTANT_INVOKE_DYNAMIC_TAG} symbols.
*/
final String name;
/**
* The string value of this symbol. This is:
*
* <ul>
* <li>a field or method descriptor for {@link #CONSTANT_FIELDREF_TAG}, {@link
* #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG}, {@link
* #CONSTANT_NAME_AND_TYPE_TAG}, {@link #CONSTANT_METHOD_HANDLE_TAG}, {@link
* #CONSTANT_METHOD_TYPE_TAG}, {@link #CONSTANT_DYNAMIC_TAG} and {@link
* #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
* <li>an arbitrary string for {@link #CONSTANT_UTF8_TAG} and {@link #CONSTANT_STRING_TAG}
* symbols,
* <li>an internal class name for {@link #CONSTANT_CLASS_TAG}, {@link #TYPE_TAG}, {@link
* #UNINITIALIZED_TYPE_TAG} and {@link #FORWARD_UNINITIALIZED_TYPE_TAG} symbols,
* <li>{@literal null} for the other types of symbol.
* </ul>
*/
final String value;
/**
* The numeric value of this symbol. This is:
*
* <ul>
* <li>the symbol's value for {@link #CONSTANT_INTEGER_TAG},{@link #CONSTANT_FLOAT_TAG}, {@link
* #CONSTANT_LONG_TAG}, {@link #CONSTANT_DOUBLE_TAG},
* <li>the CONSTANT_MethodHandle_info reference_kind field value for {@link
* #CONSTANT_METHOD_HANDLE_TAG} symbols,
* <li>the CONSTANT_InvokeDynamic_info bootstrap_method_attr_index field value for {@link
* #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
   *     <li>the offset of a bootstrap method in the BootstrapMethods bootstrap_methods array, for
* {@link #CONSTANT_DYNAMIC_TAG} or {@link #BOOTSTRAP_METHOD_TAG} symbols,
* <li>the bytecode offset of the NEW instruction that created an {@link
* Frame#ITEM_UNINITIALIZED} type for {@link #UNINITIALIZED_TYPE_TAG} symbols,
* <li>the index of the {@link Label} (in the {@link SymbolTable#labelTable} table) of the NEW
* instruction that created an {@link Frame#ITEM_UNINITIALIZED} type for {@link
* #FORWARD_UNINITIALIZED_TYPE_TAG} symbols,
* <li>the indices (in the class' type table) of two {@link #TYPE_TAG} source types for {@link
* #MERGED_TYPE_TAG} symbols,
* <li>0 for the other types of symbol.
* </ul>
*/
final long data;
/**
* Additional information about this symbol, generally computed lazily. <i>Warning: the value of
* this field is ignored when comparing Symbol instances</i> (to avoid duplicate entries in a
* SymbolTable). Therefore, this field should only contain data that can be computed from the
* other fields of this class. It contains:
*
* <ul>
* <li>the {@link Type#getArgumentsAndReturnSizes} of the symbol's method descriptor for {@link
* #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG} and {@link
* #CONSTANT_INVOKE_DYNAMIC_TAG} symbols,
* <li>the index in the InnerClasses_attribute 'classes' array (plus one) corresponding to this
* class, for {@link #CONSTANT_CLASS_TAG} symbols,
* <li>the index (in the class' type table) of the merged type of the two source types for
* {@link #MERGED_TYPE_TAG} symbols,
* <li>0 for the other types of symbol, or if this field has not been computed yet.
* </ul>
*/
int info;
/**
* Constructs a new Symbol. This constructor can't be used directly because the Symbol class is
* abstract. Instead, use the factory methods of the {@link SymbolTable} class.
*
* @param index the symbol index in the constant pool, in the BootstrapMethods attribute, or in
* the (ASM specific) type table of a class (depending on 'tag').
* @param tag the symbol type. Must be one of the static tag values defined in this class.
   * @param owner The internal name of the symbol's owner class. May be {@literal null}.
   * @param name The name of the symbol's corresponding class field or method. May be {@literal
   *     null}.
   * @param value The string value of this symbol. May be {@literal null}.
* @param data The numeric value of this symbol.
*/
Symbol(
final int index,
final int tag,
final String owner,
final String name,
final String value,
final long data) {
this.index = index;
this.tag = tag;
this.owner = owner;
this.name = name;
this.value = value;
this.data = data;
}
/**
   * Returns the result of {@link Type#getArgumentsAndReturnSizes} on {@link #value}.
   *
   * @return the result of {@link Type#getArgumentsAndReturnSizes} on {@link #value} (memoized in
* {@link #info} for efficiency). This should only be used for {@link
* #CONSTANT_METHODREF_TAG}, {@link #CONSTANT_INTERFACE_METHODREF_TAG} and {@link
* #CONSTANT_INVOKE_DYNAMIC_TAG} symbols.
*/
int getArgumentsAndReturnSizes() {
if (info == 0) {
info = Type.getArgumentsAndReturnSizes(value);
}
return info;
}
}
| spring-projects/spring-framework | spring-core/src/main/java/org/springframework/asm/Symbol.java |
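Symbol is package-private inside org.springframework.asm, so it cannot be exercised directly. The sketch below illustrates the packed value that getArgumentsAndReturnSizes memoizes in info, using the equivalent public helper from the standalone ASM artifact (org.ow2.asm:asm) — an assumption about the classpath, not part of the Spring repackaging.

import org.objectweb.asm.Type;

public class DescriptorSizesExample {
    public static void main(String[] args) {
        // For the method descriptor (IJ)V the packed value encodes
        // argumentsSize << 2 | returnSize, where argumentsSize counts one
        // slot for the implicit 'this', one for int and two for long.
        int packed = Type.getArgumentsAndReturnSizes("(IJ)V");
        System.out.println(packed >> 2);   // 4 (argument slots)
        System.out.println(packed & 0x03); // 0 (void return)
    }
}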
480 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.commander;
import com.iluwatar.commander.employeehandle.EmployeeDatabase;
import com.iluwatar.commander.employeehandle.EmployeeHandle;
import com.iluwatar.commander.exceptions.DatabaseUnavailableException;
import com.iluwatar.commander.messagingservice.MessagingDatabase;
import com.iluwatar.commander.messagingservice.MessagingService;
import com.iluwatar.commander.paymentservice.PaymentDatabase;
import com.iluwatar.commander.paymentservice.PaymentService;
import com.iluwatar.commander.queue.QueueDatabase;
import com.iluwatar.commander.shippingservice.ShippingDatabase;
import com.iluwatar.commander.shippingservice.ShippingService;
/**
 * AppMessagingFailCases class looks at possible cases when the Messaging service is
* available/unavailable.
*/
public class AppMessagingFailCases {
private static final RetryParams retryParams = RetryParams.DEFAULT;
private static final TimeLimits timeLimits = TimeLimits.DEFAULT;
void messagingDatabaseUnavailableCasePaymentSuccess() {
//rest is successful
var ps = new PaymentService(new PaymentDatabase());
var ss = new ShippingService(new ShippingDatabase());
var ms = new MessagingService(new MessagingDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var eh = new EmployeeHandle(new EmployeeDatabase());
var qdb = new QueueDatabase();
var c = new Commander(eh, ps, ss, ms, qdb, retryParams, timeLimits);
var user = new User("Jim", "ABCD");
var order = new Order(user, "book", 10f);
c.placeOrder(order);
}
void messagingDatabaseUnavailableCasePaymentError() {
//rest is successful
var ps = new PaymentService(new PaymentDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var ss = new ShippingService(new ShippingDatabase());
var ms = new MessagingService(new MessagingDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var eh = new EmployeeHandle(new EmployeeDatabase());
var qdb = new QueueDatabase();
var c = new Commander(eh, ps, ss, ms, qdb, retryParams, timeLimits);
var user = new User("Jim", "ABCD");
var order = new Order(user, "book", 10f);
c.placeOrder(order);
}
void messagingDatabaseUnavailableCasePaymentFailure() {
//rest is successful
var ps = new PaymentService(new PaymentDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var ss = new ShippingService(new ShippingDatabase());
var ms = new MessagingService(new MessagingDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var eh = new EmployeeHandle(new EmployeeDatabase());
var qdb =
new QueueDatabase(new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException());
var c = new Commander(eh, ps, ss, ms, qdb, retryParams, timeLimits);
var user = new User("Jim", "ABCD");
var order = new Order(user, "book", 10f);
c.placeOrder(order);
}
void messagingSuccessCase() {
//done here
var ps = new PaymentService(new PaymentDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var ss = new ShippingService(new ShippingDatabase());
var ms = new MessagingService(new MessagingDatabase(), new DatabaseUnavailableException(),
new DatabaseUnavailableException());
var eh = new EmployeeHandle(new EmployeeDatabase());
var qdb = new QueueDatabase();
var c = new Commander(eh, ps, ss, ms, qdb, retryParams, timeLimits);
var user = new User("Jim", "ABCD");
var order = new Order(user, "book", 10f);
c.placeOrder(order);
}
/**
* Program entry point.
*
* @param args command line args
*/
public static void main(String[] args) {
var amfc = new AppMessagingFailCases();
amfc.messagingSuccessCase();
}
} | iluwatar/java-design-patterns | commander/src/main/java/com/iluwatar/commander/AppMessagingFailCases.java |
481 | /*
* Copyright 2002-2023 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.ui;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import org.springframework.core.Conventions;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* Implementation of {@link java.util.Map} for use when building model data for use
* with UI tools. Supports chained calls and generation of model attribute names.
*
* <p>This class serves as generic model holder for Servlet MVC but is not tied to it.
* Check out the {@link Model} interface for an interface variant.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @since 2.0
* @see Conventions#getVariableName
* @see org.springframework.web.servlet.ModelAndView
*/
@SuppressWarnings("serial")
public class ModelMap extends LinkedHashMap<String, Object> {
/**
* Construct a new, empty {@code ModelMap}.
*/
public ModelMap() {
}
/**
* Construct a new {@code ModelMap} containing the supplied attribute
* under the supplied name.
* @see #addAttribute(String, Object)
*/
public ModelMap(String attributeName, @Nullable Object attributeValue) {
addAttribute(attributeName, attributeValue);
}
/**
* Construct a new {@code ModelMap} containing the supplied attribute.
* Uses attribute name generation to generate the key for the supplied model
* object.
* @see #addAttribute(Object)
*/
public ModelMap(Object attributeValue) {
addAttribute(attributeValue);
}
/**
* Add the supplied attribute under the supplied name.
* @param attributeName the name of the model attribute (never {@code null})
* @param attributeValue the model attribute value (can be {@code null})
*/
public ModelMap addAttribute(String attributeName, @Nullable Object attributeValue) {
Assert.notNull(attributeName, "Model attribute name must not be null");
put(attributeName, attributeValue);
return this;
}
/**
* Add the supplied attribute to this {@code Map} using a
* {@link org.springframework.core.Conventions#getVariableName generated name}.
* <p><i>Note: Empty {@link Collection Collections} are not added to
* the model when using this method because we cannot correctly determine
* the true convention name. View code should check for {@code null} rather
* than for empty collections as is already done by JSTL tags.</i>
* @param attributeValue the model attribute value (never {@code null})
*/
public ModelMap addAttribute(Object attributeValue) {
Assert.notNull(attributeValue, "Model object must not be null");
if (attributeValue instanceof Collection<?> collection && collection.isEmpty()) {
return this;
}
return addAttribute(Conventions.getVariableName(attributeValue), attributeValue);
}
/**
* Copy all attributes in the supplied {@code Collection} into this
* {@code Map}, using attribute name generation for each element.
* @see #addAttribute(Object)
*/
public ModelMap addAllAttributes(@Nullable Collection<?> attributeValues) {
if (attributeValues != null) {
for (Object attributeValue : attributeValues) {
addAttribute(attributeValue);
}
}
return this;
}
/**
* Copy all attributes in the supplied {@code Map} into this {@code Map}.
* @see #addAttribute(String, Object)
*/
public ModelMap addAllAttributes(@Nullable Map<String, ?> attributes) {
if (attributes != null) {
putAll(attributes);
}
return this;
}
/**
* Copy all attributes in the supplied {@code Map} into this {@code Map},
* with existing objects of the same name taking precedence (i.e. not getting
* replaced).
*/
public ModelMap mergeAttributes(@Nullable Map<String, ?> attributes) {
if (attributes != null) {
attributes.forEach((key, value) -> {
if (!containsKey(key)) {
put(key, value);
}
});
}
return this;
}
/**
* Does this model contain an attribute of the given name?
* @param attributeName the name of the model attribute (never {@code null})
* @return whether this model contains a corresponding attribute
*/
public boolean containsAttribute(String attributeName) {
return containsKey(attributeName);
}
/**
* Return the attribute value for the given name, if any.
* @param attributeName the name of the model attribute (never {@code null})
* @return the corresponding attribute value, or {@code null} if none
* @since 5.2
*/
@Nullable
public Object getAttribute(String attributeName) {
return get(attributeName);
}
}
| spring-projects/spring-framework | spring-context/src/main/java/org/springframework/ui/ModelMap.java |
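A small sketch of ModelMap's chained calls and generated attribute names. The generated key noted in the comment ("stringList" for a non-empty List of String) follows my reading of Conventions.getVariableName and is an assumption, not verified output.

import java.util.List;
import java.util.Map;
import org.springframework.ui.ModelMap;

public class ModelMapExample {
    public static void main(String[] args) {
        ModelMap model = new ModelMap()
                .addAttribute("title", "Orders")   // explicit name
                .addAttribute(List.of("a", "b"));  // generated name, likely "stringList"
        model.addAttribute(List.of());             // empty collection: silently skipped
        model.mergeAttributes(Map.of("title", "ignored")); // existing key takes precedence
        System.out.println(model);
    }
}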
483 | /*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.CollectPreconditions.checkRemove;
import static com.google.common.collect.CompactHashing.UNSET;
import static com.google.common.collect.Hashing.smearedHash;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.AbstractSet;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* CompactHashSet is an implementation of a Set. All optional operations (adding and removing) are
* supported. The elements can be any objects.
*
 * <p>{@code contains(x)}, {@code add(x)} and {@code remove(x)} are all (expected and amortized)
* constant time operations. Expected in the hashtable sense (depends on the hash function doing a
* good job of distributing the elements to the buckets to a distribution not far from uniform), and
* amortized since some operations can trigger a hash table resize.
*
* <p>Unlike {@code java.util.HashSet}, iteration is only proportional to the actual {@code size()},
* which is optimal, and <i>not</i> the size of the internal hashtable, which could be much larger
* than {@code size()}. Furthermore, this structure only depends on a fixed number of arrays; {@code
* add(x)} operations <i>do not</i> create objects for the garbage collector to deal with, and for
* every element added, the garbage collector will have to traverse {@code 1.5} references on
* average, in the marking phase, not {@code 5.0} as in {@code java.util.HashSet}.
*
* <p>If there are no removals, then {@link #iterator iteration} order is the same as insertion
* order. Any removal invalidates any ordering guarantees.
*
* <p>This class should not be assumed to be universally superior to {@code java.util.HashSet}.
* Generally speaking, this class reduces object allocation and memory consumption at the price of
* moderately increased constant factors of CPU. Only use this class when there is a specific reason
* to prioritize memory over CPU.
*
* @author Dimitris Andreou
* @author Jon Noack
*/
@GwtIncompatible // not worth using in GWT for now
@ElementTypesAreNonnullByDefault
class CompactHashSet<E extends @Nullable Object> extends AbstractSet<E> implements Serializable {
// TODO(user): cache all field accesses in local vars
/** Creates an empty {@code CompactHashSet} instance. */
public static <E extends @Nullable Object> CompactHashSet<E> create() {
return new CompactHashSet<>();
}
/**
* Creates a <i>mutable</i> {@code CompactHashSet} instance containing the elements of the given
* collection in unspecified order.
*
* @param collection the elements that the set should contain
* @return a new {@code CompactHashSet} containing those elements (minus duplicates)
*/
public static <E extends @Nullable Object> CompactHashSet<E> create(
Collection<? extends E> collection) {
CompactHashSet<E> set = createWithExpectedSize(collection.size());
set.addAll(collection);
return set;
}
/**
* Creates a <i>mutable</i> {@code CompactHashSet} instance containing the given elements in
* unspecified order.
*
* @param elements the elements that the set should contain
* @return a new {@code CompactHashSet} containing those elements (minus duplicates)
*/
@SafeVarargs
@SuppressWarnings("nullness") // TODO: b/316358623 - Remove after checker fix.
public static <E extends @Nullable Object> CompactHashSet<E> create(E... elements) {
CompactHashSet<E> set = createWithExpectedSize(elements.length);
Collections.addAll(set, elements);
return set;
}
/**
* Creates a {@code CompactHashSet} instance, with a high enough "initial capacity" that it
* <i>should</i> hold {@code expectedSize} elements without growth.
*
* @param expectedSize the number of elements you expect to add to the returned set
* @return a new, empty {@code CompactHashSet} with enough capacity to hold {@code expectedSize}
* elements without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
public static <E extends @Nullable Object> CompactHashSet<E> createWithExpectedSize(
int expectedSize) {
return new CompactHashSet<>(expectedSize);
}
/**
* Maximum allowed false positive probability of detecting a hash flooding attack given random
* input.
*/
  @VisibleForTesting
static final double HASH_FLOODING_FPP = 0.001;
/**
* Maximum allowed length of a hash table bucket before falling back to a j.u.LinkedHashSet based
* implementation. Experimentally determined.
*/
private static final int MAX_HASH_BUCKET_LENGTH = 9;
// See CompactHashMap for a detailed description of how the following fields work. That
// description talks about `keys`, `values`, and `entries`; here the `keys` and `values` arrays
// are replaced by a single `elements` array but everything else works similarly.
/**
* The hashtable object. This can be either:
*
* <ul>
* <li>a byte[], short[], or int[], with size a power of two, created by
* CompactHashing.createTable, whose values are either
* <ul>
* <li>UNSET, meaning "null pointer"
* <li>one plus an index into the entries and elements array
* </ul>
* <li>another java.util.Set delegate implementation. In most modern JDKs, normal java.util hash
* collections intelligently fall back to a binary search tree if hash table collisions are
* detected. Rather than going to all the trouble of reimplementing this ourselves, we
* simply switch over to use the JDK implementation wholesale if probable hash flooding is
* detected, sacrificing the compactness guarantee in very rare cases in exchange for much
* more reliable worst-case behavior.
* <li>null, if no entries have yet been added to the map
* </ul>
*/
@CheckForNull private transient Object table;
/**
* Contains the logical entries, in the range of [0, size()). The high bits of each int are the
* part of the smeared hash of the element not covered by the hashtable mask, whereas the low bits
* are the "next" pointer (pointing to the next entry in the bucket chain), which will always be
* less than or equal to the hashtable mask.
*
* <pre>
* hash = aaaaaaaa
* mask = 00000fff
* next = 00000bbb
* entry = aaaaabbb
* </pre>
*
* <p>The pointers in [size(), entries.length) are all "null" (UNSET).
*/
@CheckForNull private transient int[] entries;
/**
* The elements contained in the set, in the range of [0, size()). The elements in [size(),
* elements.length) are all {@code null}.
*/
@VisibleForTesting @CheckForNull transient @Nullable Object[] elements;
/**
* Keeps track of metadata like the number of hash table bits and modifications of this data
* structure (to make it possible to throw ConcurrentModificationException in the iterator). Note
* that we choose not to make this volatile, so we do less of a "best effort" to track such
* errors, for better performance.
*/
private transient int metadata;
/** The number of elements contained in the set. */
private transient int size;
/** Constructs a new empty instance of {@code CompactHashSet}. */
CompactHashSet() {
init(CompactHashing.DEFAULT_SIZE);
}
/**
* Constructs a new instance of {@code CompactHashSet} with the specified capacity.
*
* @param expectedSize the initial capacity of this {@code CompactHashSet}.
*/
CompactHashSet(int expectedSize) {
init(expectedSize);
}
/** Pseudoconstructor for serialization support. */
void init(int expectedSize) {
Preconditions.checkArgument(expectedSize >= 0, "Expected size must be >= 0");
// Save expectedSize for use in allocArrays()
this.metadata = Ints.constrainToRange(expectedSize, 1, CompactHashing.MAX_SIZE);
}
/** Returns whether arrays need to be allocated. */
@VisibleForTesting
boolean needsAllocArrays() {
return table == null;
}
/** Handle lazy allocation of arrays. */
@CanIgnoreReturnValue
int allocArrays() {
Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");
int expectedSize = metadata;
int buckets = CompactHashing.tableSize(expectedSize);
this.table = CompactHashing.createTable(buckets);
setHashTableMask(buckets - 1);
this.entries = new int[expectedSize];
this.elements = new Object[expectedSize];
return expectedSize;
}
@SuppressWarnings("unchecked")
@VisibleForTesting
@CheckForNull
Set<E> delegateOrNull() {
if (table instanceof Set) {
return (Set<E>) table;
}
return null;
}
private Set<E> createHashFloodingResistantDelegate(int tableSize) {
return new LinkedHashSet<>(tableSize, 1.0f);
}
@VisibleForTesting
@CanIgnoreReturnValue
Set<E> convertToHashFloodingResistantImplementation() {
Set<E> newDelegate = createHashFloodingResistantDelegate(hashTableMask() + 1);
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
newDelegate.add(element(i));
}
this.table = newDelegate;
this.entries = null;
this.elements = null;
incrementModCount();
return newDelegate;
}
@VisibleForTesting
boolean isUsingHashFloodingResistance() {
return delegateOrNull() != null;
}
/** Stores the hash table mask as the number of bits needed to represent an index. */
private void setHashTableMask(int mask) {
int hashTableBits = Integer.SIZE - Integer.numberOfLeadingZeros(mask);
metadata =
CompactHashing.maskCombine(metadata, hashTableBits, CompactHashing.HASH_TABLE_BITS_MASK);
}
/** Gets the hash table mask using the stored number of hash table bits. */
private int hashTableMask() {
return (1 << (metadata & CompactHashing.HASH_TABLE_BITS_MASK)) - 1;
}
void incrementModCount() {
metadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
@CanIgnoreReturnValue
@Override
public boolean add(@ParametricNullness E object) {
if (needsAllocArrays()) {
allocArrays();
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.add(object);
}
int[] entries = requireEntries();
@Nullable Object[] elements = requireElements();
int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
int newSize = newEntryIndex + 1;
int hash = smearedHash(object);
int mask = hashTableMask();
int tableIndex = hash & mask;
int next = CompactHashing.tableGet(requireTable(), tableIndex);
if (next == UNSET) { // uninitialized bucket
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
CompactHashing.tableSet(requireTable(), tableIndex, newEntryIndex + 1);
}
} else {
int entryIndex;
int entry;
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
int bucketLength = 0;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equal(object, elements[entryIndex])) {
return false;
}
next = CompactHashing.getNext(entry, mask);
bucketLength++;
} while (next != UNSET);
if (bucketLength >= MAX_HASH_BUCKET_LENGTH) {
return convertToHashFloodingResistantImplementation().add(object);
}
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
entries[entryIndex] = CompactHashing.maskCombine(entry, newEntryIndex + 1, mask);
}
}
resizeMeMaybe(newSize);
insertEntry(newEntryIndex, object, hash, mask);
this.size = newSize;
incrementModCount();
return true;
}
/**
* Creates a fresh entry with the specified object at the specified position in the entry arrays.
*/
void insertEntry(int entryIndex, @ParametricNullness E object, int hash, int mask) {
setEntry(entryIndex, CompactHashing.maskCombine(hash, UNSET, mask));
setElement(entryIndex, object);
}
/** Resizes the entries storage if necessary. */
private void resizeMeMaybe(int newSize) {
int entriesSize = requireEntries().length;
if (newSize > entriesSize) {
// 1.5x but round up to nearest odd (this is optimal for memory consumption on Android)
int newCapacity =
Math.min(CompactHashing.MAX_SIZE, (entriesSize + Math.max(1, entriesSize >>> 1)) | 1);
if (newCapacity != entriesSize) {
resizeEntries(newCapacity);
}
}
}
/**
* Resizes the internal entries array to the specified capacity, which may be greater or less than
* the current capacity.
*/
void resizeEntries(int newCapacity) {
this.entries = Arrays.copyOf(requireEntries(), newCapacity);
this.elements = Arrays.copyOf(requireElements(), newCapacity);
}
@CanIgnoreReturnValue
private int resizeTable(int oldMask, int newCapacity, int targetHash, int targetEntryIndex) {
Object newTable = CompactHashing.createTable(newCapacity);
int newMask = newCapacity - 1;
if (targetEntryIndex != UNSET) {
// Add target first; it must be last in the chain because its entry hasn't yet been created
CompactHashing.tableSet(newTable, targetHash & newMask, targetEntryIndex + 1);
}
Object oldTable = requireTable();
int[] entries = requireEntries();
// Loop over current hashtable
for (int oldTableIndex = 0; oldTableIndex <= oldMask; oldTableIndex++) {
int oldNext = CompactHashing.tableGet(oldTable, oldTableIndex);
while (oldNext != UNSET) {
int entryIndex = oldNext - 1;
int oldEntry = entries[entryIndex];
// Rebuild hash using entry hashPrefix and tableIndex ("hashSuffix")
int hash = CompactHashing.getHashPrefix(oldEntry, oldMask) | oldTableIndex;
int newTableIndex = hash & newMask;
int newNext = CompactHashing.tableGet(newTable, newTableIndex);
CompactHashing.tableSet(newTable, newTableIndex, oldNext);
entries[entryIndex] = CompactHashing.maskCombine(hash, newNext, newMask);
oldNext = CompactHashing.getNext(oldEntry, oldMask);
}
}
this.table = newTable;
setHashTableMask(newMask);
return newMask;
}
@Override
public boolean contains(@CheckForNull Object object) {
if (needsAllocArrays()) {
return false;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.contains(object);
}
int hash = smearedHash(object);
int mask = hashTableMask();
int next = CompactHashing.tableGet(requireTable(), hash & mask);
if (next == UNSET) {
return false;
}
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
do {
int entryIndex = next - 1;
int entry = entry(entryIndex);
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equal(object, element(entryIndex))) {
return true;
}
next = CompactHashing.getNext(entry, mask);
} while (next != UNSET);
return false;
}
@CanIgnoreReturnValue
@Override
public boolean remove(@CheckForNull Object object) {
if (needsAllocArrays()) {
return false;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.remove(object);
}
int mask = hashTableMask();
int index =
CompactHashing.remove(
object,
/* value= */ null,
mask,
requireTable(),
requireEntries(),
requireElements(),
/* values= */ null);
if (index == -1) {
return false;
}
moveLastEntry(index, mask);
size--;
incrementModCount();
return true;
}
/**
* Moves the last entry in the entry array into {@code dstIndex}, and nulls out its old position.
*/
void moveLastEntry(int dstIndex, int mask) {
Object table = requireTable();
int[] entries = requireEntries();
@Nullable Object[] elements = requireElements();
int srcIndex = size() - 1;
if (dstIndex < srcIndex) {
// move last entry to deleted spot
Object object = elements[srcIndex];
elements[dstIndex] = object;
elements[srcIndex] = null;
// move the last entry to the removed spot, just like we moved the element
entries[dstIndex] = entries[srcIndex];
entries[srcIndex] = 0;
// also need to update whoever's "next" pointer was pointing to the last entry place
int tableIndex = smearedHash(object) & mask;
int next = CompactHashing.tableGet(table, tableIndex);
int srcNext = srcIndex + 1;
if (next == srcNext) {
// we need to update the root pointer
CompactHashing.tableSet(table, tableIndex, dstIndex + 1);
} else {
// we need to update a pointer in an entry
int entryIndex;
int entry;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
next = CompactHashing.getNext(entry, mask);
} while (next != srcNext);
// here, entries[entryIndex] points to the old entry location; update it
entries[entryIndex] = CompactHashing.maskCombine(entry, dstIndex + 1, mask);
}
} else {
elements[dstIndex] = null;
entries[dstIndex] = 0;
}
}
int firstEntryIndex() {
return isEmpty() ? -1 : 0;
}
int getSuccessor(int entryIndex) {
return (entryIndex + 1 < size) ? entryIndex + 1 : -1;
}
/**
* Updates the index an iterator is pointing to after a call to remove: returns the index of the
* entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
* index that *was* the next entry that would be looked at.
*/
int adjustAfterRemove(int indexBeforeRemove, @SuppressWarnings("unused") int indexRemoved) {
return indexBeforeRemove - 1;
}
@Override
public Iterator<E> iterator() {
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.iterator();
}
return new Iterator<E>() {
int expectedMetadata = metadata;
int currentIndex = firstEntryIndex();
int indexToRemove = -1;
@Override
public boolean hasNext() {
return currentIndex >= 0;
}
@Override
@ParametricNullness
public E next() {
checkForConcurrentModification();
if (!hasNext()) {
throw new NoSuchElementException();
}
indexToRemove = currentIndex;
E result = element(currentIndex);
currentIndex = getSuccessor(currentIndex);
return result;
}
@Override
public void remove() {
checkForConcurrentModification();
checkRemove(indexToRemove >= 0);
incrementExpectedModCount();
CompactHashSet.this.remove(element(indexToRemove));
currentIndex = adjustAfterRemove(currentIndex, indexToRemove);
indexToRemove = -1;
}
void incrementExpectedModCount() {
expectedMetadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
private void checkForConcurrentModification() {
if (metadata != expectedMetadata) {
throw new ConcurrentModificationException();
}
}
};
}
@Override
public Spliterator<E> spliterator() {
if (needsAllocArrays()) {
return Spliterators.spliterator(new Object[0], Spliterator.DISTINCT | Spliterator.ORDERED);
}
Set<E> delegate = delegateOrNull();
return (delegate != null)
? delegate.spliterator()
: Spliterators.spliterator(
requireElements(), 0, size, Spliterator.DISTINCT | Spliterator.ORDERED);
}
@Override
public void forEach(Consumer<? super E> action) {
checkNotNull(action);
Set<E> delegate = delegateOrNull();
if (delegate != null) {
delegate.forEach(action);
} else {
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
action.accept(element(i));
}
}
}
@Override
public int size() {
Set<E> delegate = delegateOrNull();
return (delegate != null) ? delegate.size() : size;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public @Nullable Object[] toArray() {
if (needsAllocArrays()) {
return new Object[0];
}
Set<E> delegate = delegateOrNull();
return (delegate != null) ? delegate.toArray() : Arrays.copyOf(requireElements(), size);
}
@CanIgnoreReturnValue
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
if (needsAllocArrays()) {
if (a.length > 0) {
a[0] = null;
}
return a;
}
Set<E> delegate = delegateOrNull();
return (delegate != null)
? delegate.toArray(a)
: ObjectArrays.toArrayImpl(requireElements(), 0, size, a);
}
/**
* Ensures that this {@code CompactHashSet} has the smallest representation in memory, given its
* current size.
*/
public void trimToSize() {
if (needsAllocArrays()) {
return;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
Set<E> newDelegate = createHashFloodingResistantDelegate(size());
newDelegate.addAll(delegate);
this.table = newDelegate;
return;
}
int size = this.size;
if (size < requireEntries().length) {
resizeEntries(size);
}
int minimumTableSize = CompactHashing.tableSize(size);
int mask = hashTableMask();
if (minimumTableSize < mask) { // smaller table size will always be less than current mask
resizeTable(mask, minimumTableSize, UNSET, UNSET);
}
}
@Override
public void clear() {
if (needsAllocArrays()) {
return;
}
incrementModCount();
Set<E> delegate = delegateOrNull();
if (delegate != null) {
metadata =
Ints.constrainToRange(size(), CompactHashing.DEFAULT_SIZE, CompactHashing.MAX_SIZE);
delegate.clear(); // invalidate any iterators left over!
table = null;
size = 0;
} else {
Arrays.fill(requireElements(), 0, size, null);
CompactHashing.tableClear(requireTable());
Arrays.fill(requireEntries(), 0, size, 0);
this.size = 0;
}
}
@J2ktIncompatible
private void writeObject(ObjectOutputStream stream) throws IOException {
stream.defaultWriteObject();
stream.writeInt(size());
for (E e : this) {
stream.writeObject(e);
}
}
@SuppressWarnings("unchecked")
@J2ktIncompatible
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
int elementCount = stream.readInt();
if (elementCount < 0) {
throw new InvalidObjectException("Invalid size: " + elementCount);
}
init(elementCount);
for (int i = 0; i < elementCount; i++) {
E element = (E) stream.readObject();
add(element);
}
}
/*
* For discussion of the safety of the following methods, see the comments near the end of
* CompactHashMap.
*/
private Object requireTable() {
return requireNonNull(table);
}
private int[] requireEntries() {
return requireNonNull(entries);
}
private @Nullable Object[] requireElements() {
return requireNonNull(elements);
}
@SuppressWarnings("unchecked")
private E element(int i) {
return (E) requireElements()[i];
}
private int entry(int i) {
return requireEntries()[i];
}
private void setElement(int i, E value) {
requireElements()[i] = value;
}
private void setEntry(int i, int value) {
requireEntries()[i] = value;
}
}
| google/guava | guava/src/com/google/common/collect/CompactHashSet.java |
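CompactHashSet is package-private, so the sketch below only compiles inside com.google.common.collect (for example as a test in that package); it exercises the factory methods, duplicate handling, and trimToSize.

package com.google.common.collect;

class CompactHashSetExample {
    public static void main(String[] args) {
        CompactHashSet<String> set = CompactHashSet.createWithExpectedSize(4);
        set.add("a");
        set.add("b");
        set.add("a");                           // duplicate: returns false, size unchanged
        System.out.println(set.size());         // 2
        System.out.println(set.contains("b"));  // true
        set.remove("a");
        set.trimToSize();                       // shrink backing arrays to the current size
        System.out.println(set);                // [b]
    }
}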
484 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.retry;
import java.io.Serial;
/**
* Catastrophic error indicating that we have lost connection to our database.
*
* @author George Aristy ([email protected])
*/
public final class DatabaseNotAvailableException extends BusinessException {
@Serial
private static final long serialVersionUID = -3750769625095997799L;
/**
   * Constructor.
*
* @param message the error message
*/
public DatabaseNotAvailableException(String message) {
super(message);
}
}
| rajprins/java-design-patterns | retry/src/main/java/com/iluwatar/retry/DatabaseNotAvailableException.java |
485 | // There is a bag-like data structure, supporting two operations:
// 1 x Throw an element x into the bag.
// 2 Take out an element from the bag.
// Given a sequence of operations with return values, you’re going to guess the data structure. It is
// a stack (Last-In, First-Out), a queue (First-In, First-Out), a priority-queue (Always take out larger
// elements first) or something else that you can hardly imagine!
// Input:
// There are several test cases. Each test case begins with a line containing a single integer n (1 ≤ n ≤
// 1000). Each of the next n lines is either a type-1 command, or an integer 2 followed by an integer x.
// That means after executing a type-2 command, we get an element x without error. The value of x
// is always a positive integer not larger than 100. The input is terminated by end-of-file (EOF).
// Output:
// For each test case, output one of the following:
// stack It’s definitely a stack.
// queue It’s definitely a queue.
// priority queue It’s definitely a priority queue.
// impossible It can’t be a stack, a queue or a priority queue.
// not sure It can be more than one of the three data structures mentioned
// above.
// Sample Input
// 6
// 1 1
// 1 2
// 1 3
// 2 1
// 2 2
// 2 3
// 6
// 1 1
// 1 2
// 1 3
// 2 3
// 2 2
// 2 1
// 2
// 1 1
// 2 2
// 4
// 1 2
// 1 1
// 2 1
// 2 2
// 7
// 1 2
// 1 5
// 1 1
// 1 3
// 2 5
// 1 4
// 2 4
// Sample Output
// queue
// not sure
// impossible
// stack
// priority queue
/**
* Created by kdn251 on 2/10/17.
*/
import java.util.*;
import java.io.*;
public class ICanGuessTheDataStructure {
public static void main(String[] args) throws Exception {
//initialize data structures
Stack<Integer> stack = new Stack<>();
Queue<Integer> queue = new LinkedList<>();
//initialize max priority queue
PriorityQueue<Integer> priorityQueue = new PriorityQueue<>(Collections.reverseOrder());
//initialize buffered reader
BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
String line;
//iterate through all test cases
while ((line = br.readLine()) != null) {
//initialize removals for each data structure
int stackRemovals = 0;
int queueRemovals = 0;
int priorityQueueRemovals = 0;
int totalRemovals = 0;
//get number of operations for this test case
int numberOfOperations = Integer.parseInt(line);
//clear contents of data structures
queue.clear();
priorityQueue.clear();
stack.clear();
//iterate over all operations
for (int i = 0; i < numberOfOperations; i++) {
String[] currentLineSplit = br.readLine().split(" ");
int command = Integer.parseInt(currentLineSplit[0]);
int number = Integer.parseInt(currentLineSplit[1]);
//if command is 1, push number into all data structures
if (command == 1) {
stack.push(number);
queue.add(number);
priorityQueue.add(number);
} else {
//check which data structures could have produced this removal and increment their counts
if (!stack.isEmpty() && stack.peek() == number && stackRemovals == totalRemovals) {
stackRemovals++;
stack.pop();
}
if (!queue.isEmpty() && queue.peek() == number && queueRemovals == totalRemovals) {
queueRemovals++;
queue.remove();
}
if (!priorityQueue.isEmpty() && priorityQueue.peek() == number && priorityQueueRemovals == totalRemovals) {
priorityQueueRemovals++;
priorityQueue.remove();
}
totalRemovals++;
}
}
//check all removal counts for each data structure vs. total removal count and print the appropriate data structure
if ((stackRemovals == totalRemovals && queueRemovals == totalRemovals) || (stackRemovals == totalRemovals && stackRemovals == priorityQueueRemovals) || (queueRemovals == totalRemovals && priorityQueueRemovals == totalRemovals)) {
System.out.println("not sure");
} else if (stackRemovals == totalRemovals) {
System.out.println("stack");
} else if (queueRemovals == totalRemovals) {
System.out.println("queue");
} else if (priorityQueueRemovals == totalRemovals) {
System.out.println("priority queue");
} else {
System.out.println("impossible");
}
}
}
}
| kdn251/interviews | uva/ICanGuessTheDataStructure.java |
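The solver reads test cases from stdin until EOF, so the sample input can be fed programmatically. A small harness, assuming it is compiled alongside the class above, pipes in the first sample case; only the queue reproduces the FIFO removals, so the expected output is "queue".

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class GuessHarness {
  public static void main(String[] args) throws Exception {
    // first sample test case: push 1,2,3 then remove 1,2,3 (FIFO order)
    String sample = "6\n1 1\n1 2\n1 3\n2 1\n2 2\n2 3\n";
    System.setIn(new ByteArrayInputStream(sample.getBytes(StandardCharsets.UTF_8)));
    ICanGuessTheDataStructure.main(new String[0]); // expected output: queue
  }
}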
486 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.script;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
* {@link ScriptMetadata} is used to store user-defined scripts
* as part of the {@link ClusterState} using only an id as the key.
*/
public final class ScriptMetadata implements Metadata.Custom, Writeable {
/**
* Standard logger used to warn about dropped scripts.
*/
private static final Logger logger = LogManager.getLogger(ScriptMetadata.class);
/**
* A builder used to modify the currently stored scripts data held within
* the {@link ClusterState}. Scripts can be added or deleted, then built
* to generate a new {@link Map} of scripts that will be used to update
* the current {@link ClusterState}.
*/
public static final class Builder {
private final Map<String, StoredScriptSource> scripts;
/**
* @param previous The current {@link ScriptMetadata} or {@code null} if there
* is no existing {@link ScriptMetadata}.
*/
public Builder(ScriptMetadata previous) {
this.scripts = previous == null ? new HashMap<>() : new HashMap<>(previous.scripts);
}
/**
* Add a new script to the existing stored scripts based on a user-specified id. If
* a script with the same id already exists it will be overwritten.
* @param id The user-specified id to use for the look up.
* @param source The user-specified stored script data held in {@link StoredScriptSource}.
*/
public Builder storeScript(String id, StoredScriptSource source) {
scripts.put(id, source);
return this;
}
/**
* Delete a script from the existing stored scripts based on a user-specified id.
* @param id The user-specified id to use for the look up.
*/
public Builder deleteScript(String id) {
StoredScriptSource deleted = scripts.remove(id);
if (deleted == null) {
throw new ResourceNotFoundException("stored script [" + id + "] does not exist and cannot be deleted");
}
return this;
}
/**
* @return A {@link ScriptMetadata} with the updated {@link Map} of scripts.
*/
public ScriptMetadata build() {
return new ScriptMetadata(scripts);
}
}
static final class ScriptMetadataDiff implements NamedDiff<Metadata.Custom> {
final Diff<Map<String, StoredScriptSource>> pipelines;
ScriptMetadataDiff(ScriptMetadata before, ScriptMetadata after) {
this.pipelines = DiffableUtils.diff(before.scripts, after.scripts, DiffableUtils.getStringKeySerializer());
}
ScriptMetadataDiff(StreamInput in) throws IOException {
pipelines = DiffableUtils.readJdkMapDiff(
in,
DiffableUtils.getStringKeySerializer(),
StoredScriptSource::new,
StoredScriptSource::readDiffFrom
);
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public Metadata.Custom apply(Metadata.Custom part) {
return new ScriptMetadata(pipelines.apply(((ScriptMetadata) part).scripts));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
pipelines.writeTo(out);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.MINIMUM_COMPATIBLE;
}
}
/**
* Convenience method to build and return a new
* {@link ScriptMetadata} adding the specified stored script.
*/
static ScriptMetadata putStoredScript(ScriptMetadata previous, String id, StoredScriptSource source) {
Builder builder = new Builder(previous);
builder.storeScript(id, source);
return builder.build();
}
/**
* Convenience method to build and return a new
* {@link ScriptMetadata} deleting the specified stored script.
*/
static ScriptMetadata deleteStoredScript(ScriptMetadata previous, String id) {
Builder builder = new ScriptMetadata.Builder(previous);
builder.deleteScript(id);
return builder.build();
}
/**
* The type of {@link ClusterState} data.
*/
public static final String TYPE = "stored_scripts";
/**
* This will parse XContent into {@link ScriptMetadata}.
*
* The following format will be parsed:
*
* {@code
* {
* "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>",
* "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>",
* ...
* }
* }
*/
public static ScriptMetadata fromXContent(XContentParser parser) throws IOException {
Map<String, StoredScriptSource> scripts = new HashMap<>();
String id = null;
Token token = parser.currentToken();
if (token == null) {
token = parser.nextToken();
}
if (token != Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{]");
}
token = parser.nextToken();
while (token != Token.END_OBJECT) {
switch (token) {
case FIELD_NAME -> id = parser.currentName();
case START_OBJECT -> {
if (id == null) {
throw new ParsingException(
parser.getTokenLocation(),
"unexpected token [" + token + "], expected [<id>, <code>, {]"
);
}
StoredScriptSource source = StoredScriptSource.fromXContent(parser, true);
// as of 8.0 we drop scripts/templates with an empty source
// this check should be removed for the next upgradable version after 8.0
// since there is a guarantee no more empty scripts will exist
if (source.getSource().isEmpty()) {
if (Script.DEFAULT_TEMPLATE_LANG.equals(source.getLang())) {
logger.warn("empty template [" + id + "] found and dropped");
} else {
logger.warn("empty script [" + id + "] found and dropped");
}
} else {
scripts.put(id, source);
}
id = null;
}
default -> throw new ParsingException(
parser.getTokenLocation(),
"unexpected token [" + token + "], expected [<id>, <code>, {]"
);
}
token = parser.nextToken();
}
return new ScriptMetadata(scripts);
}
public static NamedDiff<Metadata.Custom> readDiffFrom(StreamInput in) throws IOException {
return new ScriptMetadataDiff(in);
}
private final Map<String, StoredScriptSource> scripts;
/**
* Standard constructor to create metadata to store scripts.
* @param scripts The currently stored scripts. Must not be {@code null},
* use an empty {@link Map} to specify there were no
* previously stored scripts.
*/
ScriptMetadata(Map<String, StoredScriptSource> scripts) {
this.scripts = Collections.unmodifiableMap(scripts);
}
public ScriptMetadata(StreamInput in) throws IOException {
Map<String, StoredScriptSource> scripts = new HashMap<>();
StoredScriptSource source;
int size = in.readVInt();
for (int i = 0; i < size; i++) {
String id = in.readString();
source = new StoredScriptSource(in);
scripts.put(id, source);
}
this.scripts = Collections.unmodifiableMap(scripts);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(scripts, StreamOutput::writeWriteable);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.map(scripts.entrySet().iterator(), entry -> (builder, params) -> {
builder.field(entry.getKey());
return entry.getValue().toXContent(builder, params);
});
}
@Override
public Diff<Metadata.Custom> diff(Metadata.Custom before) {
return new ScriptMetadataDiff((ScriptMetadata) before, this);
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.MINIMUM_COMPATIBLE;
}
@Override
public EnumSet<Metadata.XContentContext> context() {
return Metadata.ALL_CONTEXTS;
}
/**
* Returns the map of stored scripts.
*/
Map<String, StoredScriptSource> getStoredScripts() {
return scripts;
}
/**
* Retrieves a stored script based on a user-specified id.
*/
StoredScriptSource getStoredScript(String id) {
return scripts.get(id);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ScriptMetadata that = (ScriptMetadata) o;
return scripts.equals(that.scripts);
}
@Override
public int hashCode() {
return scripts.hashCode();
}
@Override
public String toString() {
return "ScriptMetadata{" + "scripts=" + scripts + '}';
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/script/ScriptMetadata.java |
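The Builder above is how the stored-scripts map evolves between cluster states. A sketch of typical usage follows, written as if from inside the org.elasticsearch.script package (several accessors are package-private); the StoredScriptSource constructor arguments shown are an assumption about that class and should be checked against its real signature.

// Sketch: add one script to an empty metadata instance, then look it up.
ScriptMetadata previous = null; // no ScriptMetadata in the cluster state yet
StoredScriptSource source = new StoredScriptSource(
    "painless",                              // assumed ctor: (lang, source, options)
    "doc['my_field'].value * params.factor",
    java.util.Collections.emptyMap()
);
ScriptMetadata updated = new ScriptMetadata.Builder(previous)
    .storeScript("my-script", source) // add or overwrite by id
    .build();                         // immutable snapshot for the cluster state
assert updated.getStoredScript("my-script") == source;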
487 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.discovery;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.coordination.ClusterFormationFailureHelper;
import org.elasticsearch.cluster.coordination.PeersResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.ReferenceDocs;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
import static org.elasticsearch.core.Strings.format;
public abstract class PeerFinder {
private static final Logger logger = LogManager.getLogger(PeerFinder.class);
public static final String REQUEST_PEERS_ACTION_NAME = "internal:discovery/request_peers";
// the time between attempts to find all peers
public static final Setting<TimeValue> DISCOVERY_FIND_PEERS_INTERVAL_SETTING = Setting.timeSetting(
"discovery.find_peers_interval",
TimeValue.timeValueMillis(1000),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
public static final Setting<TimeValue> DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting(
"discovery.request_peers_timeout",
TimeValue.timeValueMillis(3000),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
// We do not log connection failures immediately: some failures are expected, especially if the hosts list isn't perfectly up-to-date
// or contains some unnecessary junk. However if the node cannot find a master for an extended period of time then it is helpful to
// users to describe in more detail why we cannot connect to the remote nodes. This setting defines how long we wait without discovering
// the master before we start to emit more verbose logs.
public static final Setting<TimeValue> VERBOSITY_INCREASE_TIMEOUT_SETTING = Setting.timeSetting(
"discovery.find_peers_warning_timeout",
TimeValue.timeValueMinutes(3),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
private final TimeValue findPeersInterval;
private final TimeValue requestPeersTimeout;
private final TimeValue verbosityIncreaseTimeout;
private final Object mutex = new Object();
private final TransportService transportService;
private final Executor clusterCoordinationExecutor;
private final TransportAddressConnector transportAddressConnector;
private final ConfiguredHostsResolver configuredHostsResolver;
private volatile long currentTerm;
private boolean active;
private long activatedAtMillis;
private DiscoveryNodes lastAcceptedNodes;
private final Map<TransportAddress, Peer> peersByAddress = new LinkedHashMap<>();
private Optional<DiscoveryNode> leader = Optional.empty();
private volatile List<TransportAddress> lastResolvedAddresses = emptyList();
@SuppressWarnings("this-escape")
public PeerFinder(
Settings settings,
TransportService transportService,
TransportAddressConnector transportAddressConnector,
ConfiguredHostsResolver configuredHostsResolver
) {
findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings);
requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings);
verbosityIncreaseTimeout = VERBOSITY_INCREASE_TIMEOUT_SETTING.get(settings);
this.transportService = transportService;
this.clusterCoordinationExecutor = transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION);
this.transportAddressConnector = transportAddressConnector;
this.configuredHostsResolver = configuredHostsResolver;
transportService.registerRequestHandler(
REQUEST_PEERS_ACTION_NAME,
this.clusterCoordinationExecutor,
false,
false,
PeersRequest::new,
(request, channel, task) -> channel.sendResponse(handlePeersRequest(request))
);
}
public void activate(final DiscoveryNodes lastAcceptedNodes) {
logger.trace("activating with {}", lastAcceptedNodes);
synchronized (mutex) {
assert assertInactiveWithNoUndiscoveredPeers();
active = true;
activatedAtMillis = transportService.getThreadPool().relativeTimeInMillis();
this.lastAcceptedNodes = lastAcceptedNodes;
leader = Optional.empty();
handleWakeUp();
}
onFoundPeersUpdated(); // trigger a check for a quorum already
}
public void deactivate(DiscoveryNode leader) {
final boolean hasInactivePeers;
final Collection<Releasable> connectionReferences;
synchronized (mutex) {
logger.trace("deactivating and setting leader to {}", leader);
active = false;
connectionReferences = new ArrayList<>(peersByAddress.size());
hasInactivePeers = peersByAddress.isEmpty() == false;
final var iterator = peersByAddress.values().iterator();
while (iterator.hasNext()) {
final var peer = iterator.next();
if (peer.getDiscoveryNode() == null) {
connectionReferences.add(peer.getConnectionReference());
iterator.remove();
}
}
this.leader = Optional.of(leader);
assert assertInactiveWithNoUndiscoveredPeers();
}
if (hasInactivePeers) {
onFoundPeersUpdated();
}
Releasables.close(connectionReferences);
}
public void closePeers() {
// Discovery is over, we're joining a cluster, so we can release all the connections that were being used for discovery. We haven't
// finished joining/forming the cluster yet, but if we're joining an existing master then the join will hold open the connection
// it's using and if we're becoming the master then join validation will hold open the connections to the joining peers; this set of
// peers is a quorum so that's good enough.
//
// Note however that this might still close connections to other master-eligible nodes that we discovered but which aren't currently
// involved in joining: either they're not the master we're joining or else we're becoming the master but they didn't try and join
// us yet. It's a pretty safe bet that we'll want to have connections to these nodes in the near future: either they're already in
// the cluster or else they will discover we're the master and join us straight away. In theory we could keep these discovery
// connections open for a while rather than closing them here and then reopening them again, but in practice it's so much simpler to
// forget about them for now.
//
// Note also that the NodeConnectionsService is still maintaining connections to the nodes in the last-applied cluster state, so
// this will only close connections to nodes that we didn't know about beforehand. In most cases that's because we only just started
// and haven't applied any cluster states at all yet. This won't cause any connection disruption during a typical master failover.
final Collection<Releasable> connectionReferences = new ArrayList<>(peersByAddress.size());
synchronized (mutex) {
assert active == false;
for (final var peer : peersByAddress.values()) {
connectionReferences.add(peer.getConnectionReference());
}
peersByAddress.clear();
logger.trace("closeInactivePeers: closing {}", connectionReferences);
}
Releasables.close(connectionReferences);
}
// exposed to subclasses for testing
protected final boolean holdsLock() {
return Thread.holdsLock(mutex);
}
private boolean assertInactiveWithNoUndiscoveredPeers() {
assert holdsLock() : "PeerFinder mutex not held";
assert active == false;
assert peersByAddress.values().stream().allMatch(p -> p.getDiscoveryNode() != null);
return true;
}
PeersResponse handlePeersRequest(PeersRequest peersRequest) {
final Collection<DiscoveryNode> knownPeers;
final Optional<DiscoveryNode> leader;
final long currentTerm;
synchronized (mutex) {
assert peersRequest.getSourceNode().equals(getLocalNode()) == false;
leader = this.leader;
currentTerm = this.currentTerm;
if (active) {
assert leader.isPresent() == false : leader;
if (peersRequest.getSourceNode().isMasterNode()) {
startProbe(peersRequest.getSourceNode().getAddress());
}
peersRequest.getKnownPeers().stream().map(DiscoveryNode::getAddress).forEach(this::startProbe);
knownPeers = getFoundPeersUnderLock();
} else {
assert leader.isPresent() || lastAcceptedNodes == null;
knownPeers = emptyList();
}
}
return new PeersResponse(leader, List.copyOf(knownPeers), currentTerm);
}
// exposed for checking invariant in o.e.c.c.Coordinator (public since this is a different package)
public Optional<DiscoveryNode> getLeader() {
synchronized (mutex) {
return leader;
}
}
// exposed for checking invariant in o.e.c.c.Coordinator (public since this is a different package)
public long getCurrentTerm() {
return currentTerm;
}
public void setCurrentTerm(long currentTerm) {
this.currentTerm = currentTerm;
}
private DiscoveryNode getLocalNode() {
final DiscoveryNode localNode = transportService.getLocalNode();
assert localNode != null;
return localNode;
}
/**
* Invoked on receipt of a PeersResponse from a node that believes it's an active leader, which this node should therefore try and join.
* Note that invocations of this method are not synchronised. By the time it is called we may have been deactivated.
*/
protected abstract void onActiveMasterFound(DiscoveryNode masterNode, long term);
/**
* Invoked when the set of found peers changes. Note that invocations of this method are not fully synchronised, so we only guarantee
* that the change to the set of found peers happens before this method is invoked. If there are multiple concurrent changes then there
* will be multiple concurrent invocations of this method, with no guarantee as to their order. For this reason we do not pass the
* updated set of peers as an argument to this method, leaving it to the implementation to call getFoundPeers() with appropriate
* synchronisation to avoid lost updates. Also, by the time this method is invoked we may have been deactivated.
*/
protected abstract void onFoundPeersUpdated();
public List<TransportAddress> getLastResolvedAddresses() {
return lastResolvedAddresses;
}
public Iterable<DiscoveryNode> getFoundPeers() {
synchronized (mutex) {
return getFoundPeersUnderLock();
}
}
private Collection<DiscoveryNode> getFoundPeersUnderLock() {
assert holdsLock() : "PeerFinder mutex not held";
if (active == false) {
return Set.of();
}
Set<DiscoveryNode> peers = Sets.newHashSetWithExpectedSize(peersByAddress.size());
for (Peer peer : peersByAddress.values()) {
DiscoveryNode discoveryNode = peer.getDiscoveryNode();
if (discoveryNode != null) {
peers.add(discoveryNode);
}
}
return peers;
}
/**
* @return whether any peers were removed due to disconnection
*/
private boolean handleWakeUp() {
assert holdsLock() : "PeerFinder mutex not held";
final boolean peersRemoved = peersByAddress.values().removeIf(Peer::handleWakeUp);
if (active == false) {
logger.trace("not active");
return peersRemoved;
}
logger.trace("probing master nodes from cluster state: {}", lastAcceptedNodes);
for (DiscoveryNode discoveryNode : lastAcceptedNodes.getMasterNodes().values()) {
startProbe(discoveryNode.getAddress());
}
configuredHostsResolver.resolveConfiguredHosts(providedAddresses -> {
synchronized (mutex) {
lastResolvedAddresses = providedAddresses;
logger.trace("probing resolved transport addresses {}", providedAddresses);
providedAddresses.forEach(this::startProbe);
}
});
transportService.getThreadPool().scheduleUnlessShuttingDown(findPeersInterval, clusterCoordinationExecutor, new Runnable() {
@Override
public void run() {
synchronized (mutex) {
if (handleWakeUp() == false) {
return;
}
}
onFoundPeersUpdated();
}
@Override
public String toString() {
return "PeerFinder handling wakeup";
}
});
return peersRemoved;
}
protected void startProbe(TransportAddress transportAddress) {
assert holdsLock() : "PeerFinder mutex not held";
if (active == false) {
logger.trace("startProbe({}) not running", transportAddress);
return;
}
if (transportAddress.equals(getLocalNode().getAddress())) {
logger.trace("startProbe({}) not probing local node", transportAddress);
return;
}
if (peersByAddress.containsKey(transportAddress) == false) {
final Peer peer = new Peer(transportAddress);
peersByAddress.put(transportAddress, peer);
peer.establishConnection();
}
}
public Set<DiscoveryNode> getMastersOfPeers() {
synchronized (mutex) {
return peersByAddress.values().stream().flatMap(p -> p.lastKnownMasterNode.stream()).collect(Collectors.toSet());
}
}
private class Peer {
private final TransportAddress transportAddress;
private final SetOnce<ProbeConnectionResult> probeConnectionResult = new SetOnce<>();
private volatile boolean peersRequestInFlight;
private Optional<DiscoveryNode> lastKnownMasterNode = Optional.empty();
Peer(TransportAddress transportAddress) {
this.transportAddress = transportAddress;
}
@Nullable
DiscoveryNode getDiscoveryNode() {
return Optional.ofNullable(probeConnectionResult.get()).map(ProbeConnectionResult::getDiscoveryNode).orElse(null);
}
private boolean isActive() {
return active && peersByAddress.get(transportAddress) == this;
}
boolean handleWakeUp() {
assert holdsLock() : "PeerFinder mutex not held";
if (isActive() == false) {
logger.trace("Peer#handleWakeUp inactive: {}", Peer.this);
return false;
}
final DiscoveryNode discoveryNode = getDiscoveryNode();
// may be null if connection not yet established
if (discoveryNode != null) {
if (transportService.nodeConnected(discoveryNode)) {
if (peersRequestInFlight == false) {
requestPeers();
}
} else {
logger.trace("{} no longer connected", this);
return true;
}
}
return false;
}
void establishConnection() {
assert holdsLock() : "PeerFinder mutex not held";
assert getDiscoveryNode() == null : "unexpectedly connected to " + getDiscoveryNode();
assert isActive();
final boolean verboseFailureLogging = transportService.getThreadPool().relativeTimeInMillis()
- activatedAtMillis > verbosityIncreaseTimeout.millis();
logger.trace("{} attempting connection", this);
transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener<ProbeConnectionResult>() {
@Override
public void onResponse(ProbeConnectionResult connectResult) {
assert holdsLock() == false : "PeerFinder mutex is held in error";
final DiscoveryNode remoteNode = connectResult.getDiscoveryNode();
assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible";
assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node";
boolean retainConnection = false;
try {
synchronized (mutex) {
if (isActive() == false) {
logger.trace("Peer#establishConnection inactive: {}", Peer.this);
return;
}
assert probeConnectionResult.get() == null
: "connection result unexpectedly already set to " + probeConnectionResult.get();
probeConnectionResult.set(connectResult);
requestPeers();
}
onFoundPeersUpdated();
retainConnection = true;
} finally {
if (retainConnection == false) {
Releasables.close(connectResult);
}
}
}
@Override
public void onFailure(Exception e) {
if (verboseFailureLogging) {
final String believedMasterBy;
synchronized (mutex) {
believedMasterBy = peersByAddress.values()
.stream()
.filter(p -> p.lastKnownMasterNode.map(DiscoveryNode::getAddress).equals(Optional.of(transportAddress)))
.findFirst()
.map(p -> " [current master according to " + p.getDiscoveryNode().descriptionWithoutAttributes() + "]")
.orElse("");
}
if (logger.isDebugEnabled()) {
// log message at level WARN, but since DEBUG logging is enabled we include the full stack trace
logger.warn(() -> format("%s%s discovery result", Peer.this, believedMasterBy), e);
} else {
final StringBuilder messageBuilder = new StringBuilder();
Throwable cause = e;
while (cause != null && messageBuilder.length() <= 1024) {
messageBuilder.append(": ").append(cause.getMessage());
cause = cause.getCause();
}
final String message = messageBuilder.length() < 1024
? messageBuilder.toString()
: (messageBuilder.substring(0, 1023) + "...");
logger.warn(
"{}{} discovery result{}; for summary, see logs from {}; for troubleshooting guidance, see {}",
Peer.this,
believedMasterBy,
message,
ClusterFormationFailureHelper.class.getCanonicalName(),
ReferenceDocs.DISCOVERY_TROUBLESHOOTING
);
}
} else {
logger.debug(() -> format("%s discovery result", Peer.this), e);
}
synchronized (mutex) {
assert probeConnectionResult.get() == null
: "discoveryNode unexpectedly already set to " + probeConnectionResult.get();
if (isActive()) {
peersByAddress.remove(transportAddress);
} // else this Peer has been superseded by a different instance which should be left in place
}
}
});
}
private void requestPeers() {
assert holdsLock() : "PeerFinder mutex not held";
assert peersRequestInFlight == false : "PeersRequest already in flight";
assert isActive();
final DiscoveryNode discoveryNode = getDiscoveryNode();
assert discoveryNode != null : "cannot request peers without first connecting";
if (discoveryNode.equals(getLocalNode())) {
logger.trace("{} not requesting peers from local node", this);
return;
}
logger.trace("{} requesting peers", this);
peersRequestInFlight = true;
final List<DiscoveryNode> knownNodes = List.copyOf(getFoundPeersUnderLock());
final TransportResponseHandler<PeersResponse> peersResponseHandler = new TransportResponseHandler<>() {
@Override
public PeersResponse read(StreamInput in) throws IOException {
return new PeersResponse(in);
}
@Override
public void handleResponse(PeersResponse response) {
logger.trace("{} received {}", Peer.this, response);
synchronized (mutex) {
peersRequestInFlight = false;
if (isActive() == false) {
logger.trace("Peer#requestPeers inactive: {}", Peer.this);
return;
}
lastKnownMasterNode = response.getMasterNode();
response.getMasterNode().ifPresent(node -> startProbe(node.getAddress()));
for (DiscoveryNode node : response.getKnownPeers()) {
startProbe(node.getAddress());
}
}
if (response.getMasterNode().equals(Optional.of(discoveryNode))) {
// Must not hold lock here to avoid deadlock
assert holdsLock() == false : "PeerFinder mutex is held in error";
onActiveMasterFound(discoveryNode, response.getTerm());
}
}
@Override
public void handleException(TransportException exp) {
peersRequestInFlight = false;
logger.warn(() -> format("%s peers request failed", Peer.this), exp);
}
@Override
public Executor executor() {
return clusterCoordinationExecutor;
}
};
transportService.sendRequest(
discoveryNode,
REQUEST_PEERS_ACTION_NAME,
new PeersRequest(getLocalNode(), knownNodes),
TransportRequestOptions.timeout(requestPeersTimeout),
peersResponseHandler
);
}
@Nullable
Releasable getConnectionReference() {
assert holdsLock() : "PeerFinder mutex not held";
return probeConnectionResult.get();
}
@Override
public String toString() {
return "address ["
+ transportAddress
+ "], node ["
+ Optional.ofNullable(probeConnectionResult.get())
.map(result -> result.getDiscoveryNode().descriptionWithoutAttributes())
.orElse("unknown")
+ "]"
+ (peersRequestInFlight ? " [request in flight]" : "");
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/discovery/PeerFinder.java |
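PeerFinder leaves exactly two callbacks abstract, so the smallest useful subclass only has to decide what to do when an active master is found and when the set of found peers changes. The sketch below just logs, under the assumption that the three collaborators are constructed and wired up elsewhere, as the Coordinator does in production.

// Sketch: minimal PeerFinder subclass using only the constructor and
// abstract methods shown above.
public class LoggingPeerFinder extends PeerFinder {

  public LoggingPeerFinder(
      Settings settings,
      TransportService transportService,
      TransportAddressConnector transportAddressConnector,
      ConfiguredHostsResolver configuredHostsResolver
  ) {
    super(settings, transportService, transportAddressConnector, configuredHostsResolver);
  }

  @Override
  protected void onActiveMasterFound(DiscoveryNode masterNode, long term) {
    // in production this triggers a join attempt; here we just log
    System.out.println("found active master " + masterNode + " in term " + term);
  }

  @Override
  protected void onFoundPeersUpdated() {
    // re-read getFoundPeers() ourselves, as the javadoc above recommends
    getFoundPeers().forEach(peer -> System.out.println("known peer: " + peer));
  }
}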
488 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.commander;
import com.iluwatar.commander.exceptions.DatabaseUnavailableException;
/**
* Database abstract class is extended by all databases in our example. The add and get methods are
* used by the respective service to add to database or get from database.
*
* @param <T> T is the type of object being held by database.
*/
public abstract class Database<T> {
public abstract T add(T obj) throws DatabaseUnavailableException;
public abstract T get(String id) throws DatabaseUnavailableException;
}
| smedals/java-design-patterns | commander/src/main/java/com/iluwatar/commander/Database.java |
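The contract is small enough that an in-memory implementation makes it concrete. In this sketch the id-extractor function is a hypothetical convenience so add() can derive the key from the object itself; the commander module's real Database subclasses each define their own storage and failure behaviour.

package com.iluwatar.commander;

import com.iluwatar.commander.exceptions.DatabaseUnavailableException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Sketch: HashMap-backed Database<T>; idExtractor is a hypothetical helper.
public class InMemoryDatabase<T> extends Database<T> {
  private final Map<String, T> store = new HashMap<>();
  private final Function<T, String> idExtractor;

  public InMemoryDatabase(Function<T, String> idExtractor) {
    this.idExtractor = idExtractor;
  }

  @Override
  public T add(T obj) throws DatabaseUnavailableException {
    store.put(idExtractor.apply(obj), obj);
    return obj;
  }

  @Override
  public T get(String id) throws DatabaseUnavailableException {
    return store.get(id); // null on a miss, mirroring a simple lookup
  }
}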
489 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.TriFunction;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexService.IndexCreationContext;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.cache.query.IndexQueryCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.SearchOperationListener;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryFactory;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.IndexStorePlugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiFunction;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* IndexModule represents the central extension point for index level custom implementations like:
* <ul>
* <li>{@link Similarity} - New {@link Similarity} implementations can be registered through
* {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the
* {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the
* {@link BM25Similarity}, the configuration {@code "index.similarity.my_similarity.type : "BM25"} can be used.</li>
* <li>{@link IndexStorePlugin.DirectoryFactory} - Custom {@link IndexStorePlugin.DirectoryFactory} instances can be registered
* via {@link IndexStorePlugin}</li>
* <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via
* {@link #addIndexEventListener(IndexEventListener)}</li>
* <li>Settings update listener - Custom settings update listener can be registered via
* {@link #addSettingsUpdateConsumer(Setting, Consumer)}</li>
* </ul>
*/
public final class IndexModule {
private static final Logger logger = LogManager.getLogger(IndexModule.class);
public static final Setting<Boolean> NODE_STORE_ALLOW_MMAP = Setting.boolSetting("node.store.allow_mmap", true, Property.NodeScope);
private static final FsDirectoryFactory DEFAULT_DIRECTORY_FACTORY = new FsDirectoryFactory();
private static final IndexStorePlugin.RecoveryStateFactory DEFAULT_RECOVERY_STATE_FACTORY = RecoveryState::new;
public static final Setting<String> INDEX_STORE_TYPE_SETTING = new Setting<>(
"index.store.type",
"",
Function.identity(),
Property.IndexScope,
Property.NodeScope
);
public static final Setting<String> INDEX_RECOVERY_TYPE_SETTING = new Setting<>(
"index.recovery.type",
"",
Function.identity(),
Property.IndexScope,
Property.NodeScope
);
/** On which extensions to load data into the file-system cache upon opening of files.
* This only works with the mmap directory, and even in that case is still
* best-effort only. */
public static final Setting<List<String>> INDEX_STORE_PRE_LOAD_SETTING = Setting.stringListSetting(
"index.store.preload",
Property.IndexScope,
Property.NodeScope
);
public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
// whether to use the query cache
public static final Setting<Boolean> INDEX_QUERY_CACHE_ENABLED_SETTING = Setting.boolSetting(
"index.queries.cache.enabled",
true,
Property.IndexScope
);
// for test purposes only
public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting(
"index.queries.cache.everything",
false,
Property.IndexScope
);
/**
* {@link Directory} wrappers allow to apply a function to the Lucene directory instances
* created by {@link org.elasticsearch.plugins.IndexStorePlugin.DirectoryFactory}.
*/
@FunctionalInterface
public interface DirectoryWrapper {
/**
* Wrap a given {@link Directory}
*
* @param directory the {@link Directory} to wrap
* @param shardRouting the {@link ShardRouting} associated with the {@link Directory} or {@code null} if unknown
* @return a {@link Directory}
* @throws IOException if an I/O error occurs while wrapping the directory
*/
Directory wrap(Directory directory, @Nullable ShardRouting shardRouting) throws IOException;
}
private final IndexSettings indexSettings;
private final AnalysisRegistry analysisRegistry;
private final EngineFactory engineFactory;
private final SetOnce<DirectoryWrapper> indexDirectoryWrapper = new SetOnce<>();
private final SetOnce<Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>>> indexReaderWrapper =
new SetOnce<>();
private final Set<IndexEventListener> indexEventListeners = new HashSet<>();
private final Map<String, TriFunction<Settings, IndexVersion, ScriptService, Similarity>> similarities = new HashMap<>();
private final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories;
private final SetOnce<BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> forceQueryCacheProvider = new SetOnce<>();
private final List<SearchOperationListener> searchOperationListeners = new ArrayList<>();
private final List<IndexingOperationListener> indexOperationListeners = new ArrayList<>();
private final IndexNameExpressionResolver expressionResolver;
private final AtomicBoolean frozen = new AtomicBoolean(false);
private final BooleanSupplier allowExpensiveQueries;
private final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories;
private final SetOnce<Engine.IndexCommitListener> indexCommitListener = new SetOnce<>();
private final MapperMetrics mapperMetrics;
/**
* Construct the index module for the index with the specified index settings. The index module contains extension points for plugins
* via {@link org.elasticsearch.plugins.Plugin#onIndexModule(IndexModule)}.
*
* @param indexSettings the index settings
* @param analysisRegistry the analysis registry
* @param engineFactory the engine factory
* @param directoryFactories the available store types
*/
public IndexModule(
final IndexSettings indexSettings,
final AnalysisRegistry analysisRegistry,
final EngineFactory engineFactory,
final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories,
final BooleanSupplier allowExpensiveQueries,
final IndexNameExpressionResolver expressionResolver,
final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories,
final SlowLogFieldProvider slowLogFieldProvider,
final MapperMetrics mapperMetrics
) {
this.indexSettings = indexSettings;
this.analysisRegistry = analysisRegistry;
this.engineFactory = Objects.requireNonNull(engineFactory);
this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFieldProvider));
this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFieldProvider));
this.directoryFactories = Collections.unmodifiableMap(directoryFactories);
this.allowExpensiveQueries = allowExpensiveQueries;
this.expressionResolver = expressionResolver;
this.recoveryStateFactories = recoveryStateFactories;
this.mapperMetrics = mapperMetrics;
}
/**
* Adds a Setting and its consumer for this index.
*/
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
ensureNotFrozen();
if (setting == null) {
throw new IllegalArgumentException("setting must not be null");
}
indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer);
}
/**
* Adds a Setting, its consumer and validator for this index.
*/
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
ensureNotFrozen();
if (setting == null) {
throw new IllegalArgumentException("setting must not be null");
}
indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer, validator);
}
/**
* Returns the index {@link Settings} for this index
*/
public Settings getSettings() {
return indexSettings.getSettings();
}
/**
* Returns the {@link IndexSettings} for this index
*/
public IndexSettings indexSettings() {
return indexSettings;
}
/**
* Returns the index this module is associated with
*/
public Index getIndex() {
return indexSettings.getIndex();
}
/**
* The engine factory provided during construction of this index module.
*
* @return the engine factory
*/
EngineFactory getEngineFactory() {
return engineFactory;
}
/**
* Adds an {@link IndexEventListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered; they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addIndexEventListener(IndexEventListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (indexEventListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.indexEventListeners.add(listener);
}
/**
* Adds an {@link SearchOperationListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered; they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addSearchOperationListener(SearchOperationListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (searchOperationListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.searchOperationListeners.add(listener);
}
/**
* Adds an {@link IndexingOperationListener} for this index. All listeners added here
* are maintained for the entire index lifecycle on this node. Once an index is closed or deleted these
* listeners go out of scope.
* <p>
* Note: an index might be created on a node multiple times. For instance if the last shard from an index is
* relocated to another node the internal representation will be destroyed which includes the registered listeners.
* Once the node holds at least one shard of an index all modules are reloaded and listeners are registered again.
* Listeners can't be unregistered; they will stay alive for the entire time the index is allocated on a node.
* </p>
*/
public void addIndexOperationListener(IndexingOperationListener listener) {
ensureNotFrozen();
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
}
if (indexOperationListeners.contains(listener)) {
throw new IllegalArgumentException("listener already added");
}
this.indexOperationListeners.add(listener);
}
/**
* Registers the given {@link Similarity} with the given name.
* The function takes as parameters:<ul>
* <li>settings for this similarity
* <li>version of Elasticsearch when the index was created
* <li>ScriptService, for script-based similarities
* </ul>
*
* @param name Name of the SimilarityProvider
* @param similarity SimilarityProvider to register
*/
public void addSimilarity(String name, TriFunction<Settings, IndexVersion, ScriptService, Similarity> similarity) {
ensureNotFrozen();
if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) {
throw new IllegalArgumentException("similarity for name: [" + name + " is already registered");
}
similarities.put(name, similarity);
}
/**
* Sets the factory for creating new {@link DirectoryReader} wrapper instances.
* The factory ({@link Function}) is called once the IndexService is fully constructed.
* NOTE: this method can only be called once per index. Multiple wrappers are not supported.
* <p>
* The {@link CheckedFunction} is invoked each time a {@link Engine.Searcher} is requested to do an operation,
* for example search, and must return a new directory reader wrapping the provided directory reader or if no
* wrapping was performed the provided directory reader.
* The wrapped reader can filter out documents, just as deleted documents are filtered out, but must not
* change any term or document content.
* NOTE: The index reader wrapper ({@link CheckedFunction}) has a per-request lifecycle,
* must delegate {@link IndexReader#getReaderCacheHelper()}, {@link LeafReader#getCoreCacheHelper()}
* and must be an instance of {@link FilterDirectoryReader} that eventually exposes the original reader
* via {@link FilterDirectoryReader#getDelegate()}.
* The returned reader is closed once it goes out of scope.
* </p>
*/
public void setReaderWrapper(
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> indexReaderWrapperFactory
) {
ensureNotFrozen();
this.indexReaderWrapper.set(indexReaderWrapperFactory);
}
/**
* Sets a {@link Directory} wrapping method that allows to apply a function to the Lucene directory instance
* created by {@link org.elasticsearch.plugins.IndexStorePlugin.DirectoryFactory}.
*
* @param wrapper the wrapping function
*/
public void setDirectoryWrapper(DirectoryWrapper wrapper) {
ensureNotFrozen();
this.indexDirectoryWrapper.set(Objects.requireNonNull(wrapper));
}
public void setIndexCommitListener(Engine.IndexCommitListener listener) {
ensureNotFrozen();
this.indexCommitListener.set(Objects.requireNonNull(listener));
}
IndexEventListener freeze() { // pkg private for testing
if (this.frozen.compareAndSet(false, true)) {
return new CompositeIndexEventListener(indexSettings, indexEventListeners);
} else {
throw new IllegalStateException("already frozen");
}
}
public static boolean isBuiltinType(String storeType) {
for (Type type : Type.values()) {
if (type.match(storeType)) {
return true;
}
}
return false;
}
public enum Type {
HYBRIDFS("hybridfs"),
NIOFS("niofs"),
MMAPFS("mmapfs"),
SIMPLEFS("simplefs"),
FS("fs");
private final String settingsKey;
Type(final String settingsKey) {
this.settingsKey = settingsKey;
}
private static final Map<String, Type> TYPES;
static {
final Map<String, Type> types = Maps.newMapWithExpectedSize(4);
for (final Type type : values()) {
types.put(type.settingsKey, type);
}
TYPES = Collections.unmodifiableMap(types);
}
public String getSettingsKey() {
return this.settingsKey;
}
public static Type fromSettingsKey(final String key) {
final Type type = TYPES.get(key);
if (type == null) {
throw new IllegalArgumentException("no matching store type for [" + key + "]");
}
return type;
}
/**
* Returns true iff this setting matches the type.
*/
public boolean match(String setting) {
return getSettingsKey().equals(setting);
}
}
public static Type defaultStoreType(final boolean allowMmap) {
if (allowMmap && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
return Type.HYBRIDFS;
} else {
return Type.NIOFS;
}
}
public IndexService newIndexService(
IndexCreationContext indexCreationContext,
NodeEnvironment environment,
XContentParserConfiguration parserConfiguration,
IndexService.ShardStoreDeleter shardStoreDeleter,
CircuitBreakerService circuitBreakerService,
BigArrays bigArrays,
ThreadPool threadPool,
ScriptService scriptService,
ClusterService clusterService,
Client client,
IndicesQueryCache indicesQueryCache,
MapperRegistry mapperRegistry,
IndicesFieldDataCache indicesFieldDataCache,
NamedWriteableRegistry namedWriteableRegistry,
IdFieldMapper idFieldMapper,
ValuesSourceRegistry valuesSourceRegistry,
IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener,
Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers
) throws IOException {
final IndexEventListener eventListener = freeze();
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
.get() == null ? (shard) -> null : indexReaderWrapper.get();
eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings());
final IndexStorePlugin.DirectoryFactory directoryFactory = getDirectoryFactory(indexSettings, directoryFactories);
final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory = getRecoveryStateFactory(indexSettings, recoveryStateFactories);
final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier = getSnapshotCommitSupplier(
indexSettings,
snapshotCommitSuppliers
);
QueryCache queryCache = null;
IndexAnalyzers indexAnalyzers = null;
boolean success = false;
try {
if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) {
BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = forceQueryCacheProvider.get();
if (queryCacheProvider == null) {
queryCache = new IndexQueryCache(indexSettings.getIndex(), indicesQueryCache);
} else {
queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache);
}
} else {
logger.debug("Using no query cache for [{}]", indexSettings.getIndex());
queryCache = DisabledQueryCache.INSTANCE;
}
if (IndexService.needsMapperService(indexSettings, indexCreationContext)) {
indexAnalyzers = analysisRegistry.build(indexCreationContext, indexSettings);
}
final IndexService indexService = new IndexService(
indexSettings,
indexCreationContext,
environment,
parserConfiguration,
new SimilarityService(indexSettings, scriptService, similarities),
shardStoreDeleter,
indexAnalyzers,
engineFactory,
circuitBreakerService,
bigArrays,
threadPool,
scriptService,
clusterService,
client,
queryCache,
directoryFactory,
eventListener,
readerWrapperFactory,
mapperRegistry,
indicesFieldDataCache,
searchOperationListeners,
indexOperationListeners,
namedWriteableRegistry,
idFieldMapper,
allowExpensiveQueries,
expressionResolver,
valuesSourceRegistry,
recoveryStateFactory,
indexFoldersDeletionListener,
snapshotCommitSupplier,
indexCommitListener.get(),
mapperMetrics
);
success = true;
return indexService;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(queryCache, indexAnalyzers);
}
}
}
private IndexStorePlugin.DirectoryFactory getDirectoryFactory(
final IndexSettings indexSettings,
final Map<String, IndexStorePlugin.DirectoryFactory> indexStoreFactories
) {
final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
final Type type;
final Boolean allowMmap = NODE_STORE_ALLOW_MMAP.get(indexSettings.getNodeSettings());
if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) {
type = defaultStoreType(allowMmap);
} else {
if (isBuiltinType(storeType)) {
type = Type.fromSettingsKey(storeType);
} else {
type = null;
}
}
if (allowMmap == false && (type == Type.MMAPFS || type == Type.HYBRIDFS)) {
throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled");
}
final IndexStorePlugin.DirectoryFactory factory;
if (storeType.isEmpty() || isBuiltinType(storeType)) {
factory = DEFAULT_DIRECTORY_FACTORY;
} else {
factory = indexStoreFactories.get(storeType);
if (factory == null) {
throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
}
}
final DirectoryWrapper directoryWrapper = this.indexDirectoryWrapper.get();
assert frozen.get() : "IndexModule configuration not frozen";
if (directoryWrapper != null) {
return new IndexStorePlugin.DirectoryFactory() {
@Override
public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
return newDirectory(indexSettings, shardPath, null);
}
@Override
public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath, ShardRouting shardRouting)
throws IOException {
return directoryWrapper.wrap(factory.newDirectory(indexSettings, shardPath, shardRouting), shardRouting);
}
};
}
return factory;
}
private static IndexStorePlugin.RecoveryStateFactory getRecoveryStateFactory(
final IndexSettings indexSettings,
final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories
) {
final String recoveryType = indexSettings.getValue(INDEX_RECOVERY_TYPE_SETTING);
if (recoveryType.isEmpty()) {
return DEFAULT_RECOVERY_STATE_FACTORY;
}
IndexStorePlugin.RecoveryStateFactory factory = recoveryStateFactories.get(recoveryType);
if (factory == null) {
throw new IllegalArgumentException("Unknown recovery type [" + recoveryType + "]");
}
return factory;
}
// By default we flush first so that the snapshot is as up-to-date as possible.
public static final IndexStorePlugin.SnapshotCommitSupplier DEFAULT_SNAPSHOT_COMMIT_SUPPLIER = e -> e.acquireLastIndexCommit(true);
private static IndexStorePlugin.SnapshotCommitSupplier getSnapshotCommitSupplier(
final IndexSettings indexSettings,
final Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers
) {
final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
        // we check that storeType refers to a valid store type in getDirectoryFactory() so there's no need for strictness here either.
final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier = snapshotCommitSuppliers.get(storeType);
return snapshotCommitSupplier == null ? DEFAULT_SNAPSHOT_COMMIT_SUPPLIER : snapshotCommitSupplier;
}
    /**
     * Creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing;
     * doing so will result in an exception.
     */
public MapperService newIndexMapperService(
ClusterService clusterService,
XContentParserConfiguration parserConfiguration,
MapperRegistry mapperRegistry,
ScriptService scriptService
) throws IOException {
return new MapperService(
clusterService,
indexSettings,
analysisRegistry.build(IndexCreationContext.METADATA_VERIFICATION, indexSettings),
parserConfiguration,
new SimilarityService(indexSettings, scriptService, similarities),
mapperRegistry,
() -> {
throw new UnsupportedOperationException("no index query shard context available");
},
indexSettings.getMode().idFieldMapperWithoutFieldData(),
scriptService,
mapperMetrics
);
}
/**
     * Forces a certain query cache to be used instead of the default one. If this is set
* and query caching is not disabled with {@code index.queries.cache.enabled}, then
* the given provider will be used.
* NOTE: this can only be set once
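     * <p>
     * A minimal illustrative sketch (the lambda below is a hypothetical provider, not a shipped implementation):
     * <pre>{@code
     * indexModule.forceQueryCacheProvider((indexSettings, indicesQueryCache) -> myCustomQueryCache);
     * }</pre>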
*
* @see #INDEX_QUERY_CACHE_ENABLED_SETTING
*/
public void forceQueryCacheProvider(BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider) {
ensureNotFrozen();
this.forceQueryCacheProvider.set(queryCacheProvider);
}
private void ensureNotFrozen() {
if (this.frozen.get()) {
throw new IllegalStateException("Can't modify IndexModule once the index service has been created");
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/IndexModule.java |
492 | /*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* This project is based on a modification of https://github.com/uber/h3 which is licensed under the Apache 2.0 License.
*
* Copyright 2016-2018 Uber Technologies, Inc.
*/
package org.elasticsearch.h3;
/**
* Base cell related lookup tables and access functions.
*/
final class BaseCells {
private static class BaseCellData {
// "home" face and normalized ijk coordinates on that face
final int homeFace;
final int homeI;
final int homeJ;
final int homeK;
// is this base cell a pentagon?
final boolean isPentagon;
        // if a pentagon, what are its two clockwise offset rotation adjacent faces?
        final int[] cwOffsetPent;
BaseCellData(int homeFace, int homeI, int homeJ, int homeK, boolean isPentagon, int[] cwOffsetPent) {
this.homeFace = homeFace;
this.homeI = homeI;
this.homeJ = homeJ;
this.homeK = homeK;
this.isPentagon = isPentagon;
this.cwOffsetPent = cwOffsetPent;
}
}
/**
* Resolution 0 base cell data table.
* <p>
     * For each base cell, gives the "home" face and ijk+ coordinates on that face, and
     * whether or not the base cell is a pentagon. Additionally, if the base cell
* is a pentagon, the two cw offset rotation adjacent faces are given (-1
* indicates that no cw offset rotation faces exist for this base cell).
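     * <p>
     * For example, the entry for base cell 14 below reads: home face 11, ijk coordinates (2, 0, 0),
     * a pentagon whose two cw offset rotation adjacent faces are 2 and 6.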
*/
private static final BaseCellData[] baseCellData = new BaseCellData[] {
new BaseCellData(1, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 0
new BaseCellData(2, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 1
new BaseCellData(1, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 2
new BaseCellData(2, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 3
new BaseCellData(0, 2, 0, 0, true, new int[] { -1, -1 }), // base cell 4
new BaseCellData(1, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 5
new BaseCellData(1, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 6
new BaseCellData(2, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 7
new BaseCellData(0, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 8
new BaseCellData(2, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 9
new BaseCellData(1, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 10
new BaseCellData(1, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 11
new BaseCellData(3, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 12
new BaseCellData(3, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 13
new BaseCellData(11, 2, 0, 0, true, new int[] { 2, 6 }), // base cell 14
new BaseCellData(4, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 15
new BaseCellData(0, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 16
new BaseCellData(6, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 17
new BaseCellData(0, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 18
new BaseCellData(2, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 19
new BaseCellData(7, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 20
new BaseCellData(2, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 21
new BaseCellData(0, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 22
new BaseCellData(6, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 23
new BaseCellData(10, 2, 0, 0, true, new int[] { 1, 5 }), // base cell 24
new BaseCellData(6, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 25
new BaseCellData(3, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 26
new BaseCellData(11, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 27
new BaseCellData(4, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 28
new BaseCellData(3, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 29
new BaseCellData(0, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 30
new BaseCellData(4, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 31
new BaseCellData(5, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 32
new BaseCellData(0, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 33
new BaseCellData(7, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 34
new BaseCellData(11, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 35
new BaseCellData(7, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 36
new BaseCellData(10, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 37
new BaseCellData(12, 2, 0, 0, true, new int[] { 3, 7 }), // base cell 38
new BaseCellData(6, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 39
new BaseCellData(7, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 40
new BaseCellData(4, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 41
new BaseCellData(3, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 42
new BaseCellData(3, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 43
new BaseCellData(4, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 44
new BaseCellData(6, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 45
new BaseCellData(11, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 46
new BaseCellData(8, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 47
new BaseCellData(5, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 48
new BaseCellData(14, 2, 0, 0, true, new int[] { 0, 9 }), // base cell 49
new BaseCellData(5, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 50
new BaseCellData(12, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 51
new BaseCellData(10, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 52
new BaseCellData(4, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 53
new BaseCellData(12, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 54
new BaseCellData(7, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 55
new BaseCellData(11, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 56
new BaseCellData(10, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 57
new BaseCellData(13, 2, 0, 0, true, new int[] { 4, 8 }), // base cell 58
new BaseCellData(10, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 59
new BaseCellData(11, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 60
new BaseCellData(9, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 61
new BaseCellData(8, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 62
new BaseCellData(6, 2, 0, 0, true, new int[] { 11, 15 }), // base cell 63
new BaseCellData(8, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 64
new BaseCellData(9, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 65
new BaseCellData(14, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 66
new BaseCellData(5, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 67
new BaseCellData(16, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 68
new BaseCellData(8, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 69
new BaseCellData(5, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 70
new BaseCellData(12, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 71
new BaseCellData(7, 2, 0, 0, true, new int[] { 12, 16 }), // base cell 72
new BaseCellData(12, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 73
new BaseCellData(10, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 74
new BaseCellData(9, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 75
new BaseCellData(13, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 76
new BaseCellData(16, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 77
new BaseCellData(15, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 78
new BaseCellData(15, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 79
new BaseCellData(16, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 80
new BaseCellData(14, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 81
new BaseCellData(13, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 82
new BaseCellData(5, 2, 0, 0, true, new int[] { 10, 19 }), // base cell 83
new BaseCellData(8, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 84
new BaseCellData(14, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 85
new BaseCellData(9, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 86
new BaseCellData(14, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 87
new BaseCellData(17, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 88
new BaseCellData(12, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 89
new BaseCellData(16, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 90
new BaseCellData(17, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 91
new BaseCellData(15, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 92
new BaseCellData(16, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 93
new BaseCellData(9, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 94
new BaseCellData(15, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 95
new BaseCellData(13, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 96
new BaseCellData(8, 2, 0, 0, true, new int[] { 13, 17 }), // base cell 97
new BaseCellData(13, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 98
new BaseCellData(17, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 99
new BaseCellData(19, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 100
new BaseCellData(14, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 101
new BaseCellData(19, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 102
new BaseCellData(17, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 103
new BaseCellData(13, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 104
new BaseCellData(17, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 105
new BaseCellData(16, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 106
new BaseCellData(9, 2, 0, 0, true, new int[] { 14, 18 }), // base cell 107
new BaseCellData(15, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 108
new BaseCellData(15, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 109
new BaseCellData(18, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 110
new BaseCellData(18, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 111
new BaseCellData(19, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 112
new BaseCellData(17, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 113
new BaseCellData(19, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 114
new BaseCellData(18, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 115
new BaseCellData(18, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 116
new BaseCellData(19, 2, 0, 0, true, new int[] { -1, -1 }), // base cell 117
new BaseCellData(19, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 118
new BaseCellData(18, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 119
new BaseCellData(19, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 120
new BaseCellData(18, 1, 0, 0, false, new int[] { 0, 0 }) // base cell 121
};
/**
* base cell at a given ijk and required rotations into its system
*/
private static class BaseCellRotation {
final int baseCell; // base cell number
        final int ccwRot60; // number of ccw 60 degree rotations relative to current face
BaseCellRotation(int baseCell, int ccwRot60) {
this.baseCell = baseCell;
this.ccwRot60 = ccwRot60;
}
}
    /** Resolution 0 base cell lookup table for each face.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
* face-centered ijk coordinate system, gives the base cell located at that
* coordinate and the number of 60 ccw rotations to rotate into that base
* cell's orientation.
*
* Valid lookup coordinates are from (0, 0, 0) to (2, 2, 2).
*
     * This table can be accessed using the functions {@link #getBaseCell(int, CoordIJK)}
     * and {@link #getBaseCellCCWrot60(int, CoordIJK)}.
*/
private static final BaseCellRotation[][][][] faceIjkBaseCells = new BaseCellRotation[][][][] {
{// face 0
{
// i 0
{ new BaseCellRotation(16, 0), new BaseCellRotation(18, 0), new BaseCellRotation(24, 0) }, // j 0
{ new BaseCellRotation(33, 0), new BaseCellRotation(30, 0), new BaseCellRotation(32, 3) }, // j 1
{ new BaseCellRotation(49, 1), new BaseCellRotation(48, 3), new BaseCellRotation(50, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(8, 0), new BaseCellRotation(5, 5), new BaseCellRotation(10, 5) }, // j 0
{ new BaseCellRotation(22, 0), new BaseCellRotation(16, 0), new BaseCellRotation(18, 0) }, // j 1
{ new BaseCellRotation(41, 1), new BaseCellRotation(33, 0), new BaseCellRotation(30, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 0), new BaseCellRotation(0, 5), new BaseCellRotation(2, 5) }, // j 0
{ new BaseCellRotation(15, 1), new BaseCellRotation(8, 0), new BaseCellRotation(5, 5) }, // j 1
{ new BaseCellRotation(31, 1), new BaseCellRotation(22, 0), new BaseCellRotation(16, 0) } // j 2
} },
{// face 1
{
// i 0
{ new BaseCellRotation(2, 0), new BaseCellRotation(6, 0), new BaseCellRotation(14, 0) }, // j 0
{ new BaseCellRotation(10, 0), new BaseCellRotation(11, 0), new BaseCellRotation(17, 3) }, // j 1
{ new BaseCellRotation(24, 1), new BaseCellRotation(23, 3), new BaseCellRotation(25, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(0, 0), new BaseCellRotation(1, 5), new BaseCellRotation(9, 5) }, // j 0
{ new BaseCellRotation(5, 0), new BaseCellRotation(2, 0), new BaseCellRotation(6, 0) }, // j 1
{ new BaseCellRotation(18, 1), new BaseCellRotation(10, 0), new BaseCellRotation(11, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 1), new BaseCellRotation(3, 5), new BaseCellRotation(7, 5) }, // j 0
{ new BaseCellRotation(8, 1), new BaseCellRotation(0, 0), new BaseCellRotation(1, 5) }, // j 1
{ new BaseCellRotation(16, 1), new BaseCellRotation(5, 0), new BaseCellRotation(2, 0) } // j 2
} },
{// face 2
{
// i 0
{ new BaseCellRotation(7, 0), new BaseCellRotation(21, 0), new BaseCellRotation(38, 0) }, // j 0
{ new BaseCellRotation(9, 0), new BaseCellRotation(19, 0), new BaseCellRotation(34, 3) }, // j 1
{ new BaseCellRotation(14, 1), new BaseCellRotation(20, 3), new BaseCellRotation(36, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(3, 0), new BaseCellRotation(13, 5), new BaseCellRotation(29, 5) }, // j 0
{ new BaseCellRotation(1, 0), new BaseCellRotation(7, 0), new BaseCellRotation(21, 0) }, // j 1
{ new BaseCellRotation(6, 1), new BaseCellRotation(9, 0), new BaseCellRotation(19, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 2), new BaseCellRotation(12, 5), new BaseCellRotation(26, 5) }, // j 0
{ new BaseCellRotation(0, 1), new BaseCellRotation(3, 0), new BaseCellRotation(13, 5) }, // j 1
{ new BaseCellRotation(2, 1), new BaseCellRotation(1, 0), new BaseCellRotation(7, 0) } // j 2
} },
{// face 3
{
// i 0
{ new BaseCellRotation(26, 0), new BaseCellRotation(42, 0), new BaseCellRotation(58, 0) }, // j 0
{ new BaseCellRotation(29, 0), new BaseCellRotation(43, 0), new BaseCellRotation(62, 3) }, // j 1
{ new BaseCellRotation(38, 1), new BaseCellRotation(47, 3), new BaseCellRotation(64, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(12, 0), new BaseCellRotation(28, 5), new BaseCellRotation(44, 5) }, // j 0
{ new BaseCellRotation(13, 0), new BaseCellRotation(26, 0), new BaseCellRotation(42, 0) }, // j 1
{ new BaseCellRotation(21, 1), new BaseCellRotation(29, 0), new BaseCellRotation(43, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 3), new BaseCellRotation(15, 5), new BaseCellRotation(31, 5) }, // j 0
{ new BaseCellRotation(3, 1), new BaseCellRotation(12, 0), new BaseCellRotation(28, 5) }, // j 1
{ new BaseCellRotation(7, 1), new BaseCellRotation(13, 0), new BaseCellRotation(26, 0) } // j 2
} },
{// face 4
{
// i 0
{ new BaseCellRotation(31, 0), new BaseCellRotation(41, 0), new BaseCellRotation(49, 0) }, // j 0
{ new BaseCellRotation(44, 0), new BaseCellRotation(53, 0), new BaseCellRotation(61, 3) }, // j 1
{ new BaseCellRotation(58, 1), new BaseCellRotation(65, 3), new BaseCellRotation(75, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(15, 0), new BaseCellRotation(22, 5), new BaseCellRotation(33, 5) }, // j 0
{ new BaseCellRotation(28, 0), new BaseCellRotation(31, 0), new BaseCellRotation(41, 0) }, // j 1
{ new BaseCellRotation(42, 1), new BaseCellRotation(44, 0), new BaseCellRotation(53, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 4), new BaseCellRotation(8, 5), new BaseCellRotation(16, 5) }, // j 0
{ new BaseCellRotation(12, 1), new BaseCellRotation(15, 0), new BaseCellRotation(22, 5) }, // j 1
{ new BaseCellRotation(26, 1), new BaseCellRotation(28, 0), new BaseCellRotation(31, 0) } // j 2
} },
{// face 5
{
// i 0
{ new BaseCellRotation(50, 0), new BaseCellRotation(48, 0), new BaseCellRotation(49, 3) }, // j 0
{ new BaseCellRotation(32, 0), new BaseCellRotation(30, 3), new BaseCellRotation(33, 3) }, // j 1
{ new BaseCellRotation(24, 3), new BaseCellRotation(18, 3), new BaseCellRotation(16, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(70, 0), new BaseCellRotation(67, 0), new BaseCellRotation(66, 3) }, // j 0
{ new BaseCellRotation(52, 3), new BaseCellRotation(50, 0), new BaseCellRotation(48, 0) }, // j 1
{ new BaseCellRotation(37, 3), new BaseCellRotation(32, 0), new BaseCellRotation(30, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(83, 0), new BaseCellRotation(87, 3), new BaseCellRotation(85, 3) }, // j 0
{ new BaseCellRotation(74, 3), new BaseCellRotation(70, 0), new BaseCellRotation(67, 0) }, // j 1
{ new BaseCellRotation(57, 1), new BaseCellRotation(52, 3), new BaseCellRotation(50, 0) } // j 2
} },
{// face 6
{
// i 0
{ new BaseCellRotation(25, 0), new BaseCellRotation(23, 0), new BaseCellRotation(24, 3) }, // j 0
{ new BaseCellRotation(17, 0), new BaseCellRotation(11, 3), new BaseCellRotation(10, 3) }, // j 1
{ new BaseCellRotation(14, 3), new BaseCellRotation(6, 3), new BaseCellRotation(2, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(45, 0), new BaseCellRotation(39, 0), new BaseCellRotation(37, 3) }, // j 0
{ new BaseCellRotation(35, 3), new BaseCellRotation(25, 0), new BaseCellRotation(23, 0) }, // j 1
{ new BaseCellRotation(27, 3), new BaseCellRotation(17, 0), new BaseCellRotation(11, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(63, 0), new BaseCellRotation(59, 3), new BaseCellRotation(57, 3) }, // j 0
{ new BaseCellRotation(56, 3), new BaseCellRotation(45, 0), new BaseCellRotation(39, 0) }, // j 1
{ new BaseCellRotation(46, 3), new BaseCellRotation(35, 3), new BaseCellRotation(25, 0) } // j 2
} },
{// face 7
{
// i 0
{ new BaseCellRotation(36, 0), new BaseCellRotation(20, 0), new BaseCellRotation(14, 3) }, // j 0
{ new BaseCellRotation(34, 0), new BaseCellRotation(19, 3), new BaseCellRotation(9, 3) }, // j 1
{ new BaseCellRotation(38, 3), new BaseCellRotation(21, 3), new BaseCellRotation(7, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(55, 0), new BaseCellRotation(40, 0), new BaseCellRotation(27, 3) }, // j 0
{ new BaseCellRotation(54, 3), new BaseCellRotation(36, 0), new BaseCellRotation(20, 0) }, // j 1
{ new BaseCellRotation(51, 3), new BaseCellRotation(34, 0), new BaseCellRotation(19, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(72, 0), new BaseCellRotation(60, 3), new BaseCellRotation(46, 3) }, // j 0
{ new BaseCellRotation(73, 3), new BaseCellRotation(55, 0), new BaseCellRotation(40, 0) }, // j 1
{ new BaseCellRotation(71, 3), new BaseCellRotation(54, 3), new BaseCellRotation(36, 0) } // j 2
} },
{// face 8
{
// i 0
{ new BaseCellRotation(64, 0), new BaseCellRotation(47, 0), new BaseCellRotation(38, 3) }, // j 0
{ new BaseCellRotation(62, 0), new BaseCellRotation(43, 3), new BaseCellRotation(29, 3) }, // j 1
{ new BaseCellRotation(58, 3), new BaseCellRotation(42, 3), new BaseCellRotation(26, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(84, 0), new BaseCellRotation(69, 0), new BaseCellRotation(51, 3) }, // j 0
{ new BaseCellRotation(82, 3), new BaseCellRotation(64, 0), new BaseCellRotation(47, 0) }, // j 1
{ new BaseCellRotation(76, 3), new BaseCellRotation(62, 0), new BaseCellRotation(43, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(97, 0), new BaseCellRotation(89, 3), new BaseCellRotation(71, 3) }, // j 0
{ new BaseCellRotation(98, 3), new BaseCellRotation(84, 0), new BaseCellRotation(69, 0) }, // j 1
{ new BaseCellRotation(96, 3), new BaseCellRotation(82, 3), new BaseCellRotation(64, 0) } // j 2
} },
{// face 9
{
// i 0
{ new BaseCellRotation(75, 0), new BaseCellRotation(65, 0), new BaseCellRotation(58, 3) }, // j 0
{ new BaseCellRotation(61, 0), new BaseCellRotation(53, 3), new BaseCellRotation(44, 3) }, // j 1
{ new BaseCellRotation(49, 3), new BaseCellRotation(41, 3), new BaseCellRotation(31, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(94, 0), new BaseCellRotation(86, 0), new BaseCellRotation(76, 3) }, // j 0
{ new BaseCellRotation(81, 3), new BaseCellRotation(75, 0), new BaseCellRotation(65, 0) }, // j 1
{ new BaseCellRotation(66, 3), new BaseCellRotation(61, 0), new BaseCellRotation(53, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(107, 0), new BaseCellRotation(104, 3), new BaseCellRotation(96, 3) }, // j 0
{ new BaseCellRotation(101, 3), new BaseCellRotation(94, 0), new BaseCellRotation(86, 0) }, // j 1
{ new BaseCellRotation(85, 3), new BaseCellRotation(81, 3), new BaseCellRotation(75, 0) } // j 2
} },
{// face 10
{
// i 0
{ new BaseCellRotation(57, 0), new BaseCellRotation(59, 0), new BaseCellRotation(63, 3) }, // j 0
{ new BaseCellRotation(74, 0), new BaseCellRotation(78, 3), new BaseCellRotation(79, 3) }, // j 1
{ new BaseCellRotation(83, 3), new BaseCellRotation(92, 3), new BaseCellRotation(95, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(37, 0), new BaseCellRotation(39, 3), new BaseCellRotation(45, 3) }, // j 0
{ new BaseCellRotation(52, 0), new BaseCellRotation(57, 0), new BaseCellRotation(59, 0) }, // j 1
{ new BaseCellRotation(70, 3), new BaseCellRotation(74, 0), new BaseCellRotation(78, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(24, 0), new BaseCellRotation(23, 3), new BaseCellRotation(25, 3) }, // j 0
{ new BaseCellRotation(32, 3), new BaseCellRotation(37, 0), new BaseCellRotation(39, 3) }, // j 1
{ new BaseCellRotation(50, 3), new BaseCellRotation(52, 0), new BaseCellRotation(57, 0) } // j 2
} },
{// face 11
{
// i 0
{ new BaseCellRotation(46, 0), new BaseCellRotation(60, 0), new BaseCellRotation(72, 3) }, // j 0
{ new BaseCellRotation(56, 0), new BaseCellRotation(68, 3), new BaseCellRotation(80, 3) }, // j 1
{ new BaseCellRotation(63, 3), new BaseCellRotation(77, 3), new BaseCellRotation(90, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(27, 0), new BaseCellRotation(40, 3), new BaseCellRotation(55, 3) }, // j 0
{ new BaseCellRotation(35, 0), new BaseCellRotation(46, 0), new BaseCellRotation(60, 0) }, // j 1
{ new BaseCellRotation(45, 3), new BaseCellRotation(56, 0), new BaseCellRotation(68, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(14, 0), new BaseCellRotation(20, 3), new BaseCellRotation(36, 3) }, // j 0
{ new BaseCellRotation(17, 3), new BaseCellRotation(27, 0), new BaseCellRotation(40, 3) }, // j 1
{ new BaseCellRotation(25, 3), new BaseCellRotation(35, 0), new BaseCellRotation(46, 0) } // j 2
} },
{// face 12
{
// i 0
{ new BaseCellRotation(71, 0), new BaseCellRotation(89, 0), new BaseCellRotation(97, 3) }, // j 0
{ new BaseCellRotation(73, 0), new BaseCellRotation(91, 3), new BaseCellRotation(103, 3) }, // j 1
{ new BaseCellRotation(72, 3), new BaseCellRotation(88, 3), new BaseCellRotation(105, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(51, 0), new BaseCellRotation(69, 3), new BaseCellRotation(84, 3) }, // j 0
{ new BaseCellRotation(54, 0), new BaseCellRotation(71, 0), new BaseCellRotation(89, 0) }, // j 1
{ new BaseCellRotation(55, 3), new BaseCellRotation(73, 0), new BaseCellRotation(91, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(38, 0), new BaseCellRotation(47, 3), new BaseCellRotation(64, 3) }, // j 0
{ new BaseCellRotation(34, 3), new BaseCellRotation(51, 0), new BaseCellRotation(69, 3) }, // j 1
{ new BaseCellRotation(36, 3), new BaseCellRotation(54, 0), new BaseCellRotation(71, 0) } // j 2
} },
{// face 13
{
// i 0
{ new BaseCellRotation(96, 0), new BaseCellRotation(104, 0), new BaseCellRotation(107, 3) }, // j 0
{ new BaseCellRotation(98, 0), new BaseCellRotation(110, 3), new BaseCellRotation(115, 3) }, // j 1
{ new BaseCellRotation(97, 3), new BaseCellRotation(111, 3), new BaseCellRotation(119, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(76, 0), new BaseCellRotation(86, 3), new BaseCellRotation(94, 3) }, // j 0
{ new BaseCellRotation(82, 0), new BaseCellRotation(96, 0), new BaseCellRotation(104, 0) }, // j 1
{ new BaseCellRotation(84, 3), new BaseCellRotation(98, 0), new BaseCellRotation(110, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(58, 0), new BaseCellRotation(65, 3), new BaseCellRotation(75, 3) }, // j 0
{ new BaseCellRotation(62, 3), new BaseCellRotation(76, 0), new BaseCellRotation(86, 3) }, // j 1
{ new BaseCellRotation(64, 3), new BaseCellRotation(82, 0), new BaseCellRotation(96, 0) } // j 2
} },
{// face 14
{
// i 0
{ new BaseCellRotation(85, 0), new BaseCellRotation(87, 0), new BaseCellRotation(83, 3) }, // j 0
{ new BaseCellRotation(101, 0), new BaseCellRotation(102, 3), new BaseCellRotation(100, 3) }, // j 1
{ new BaseCellRotation(107, 3), new BaseCellRotation(112, 3), new BaseCellRotation(114, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(66, 0), new BaseCellRotation(67, 3), new BaseCellRotation(70, 3) }, // j 0
{ new BaseCellRotation(81, 0), new BaseCellRotation(85, 0), new BaseCellRotation(87, 0) }, // j 1
{ new BaseCellRotation(94, 3), new BaseCellRotation(101, 0), new BaseCellRotation(102, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(49, 0), new BaseCellRotation(48, 3), new BaseCellRotation(50, 3) }, // j 0
{ new BaseCellRotation(61, 3), new BaseCellRotation(66, 0), new BaseCellRotation(67, 3) }, // j 1
{ new BaseCellRotation(75, 3), new BaseCellRotation(81, 0), new BaseCellRotation(85, 0) } // j 2
} },
{// face 15
{
// i 0
{ new BaseCellRotation(95, 0), new BaseCellRotation(92, 0), new BaseCellRotation(83, 0) }, // j 0
{ new BaseCellRotation(79, 0), new BaseCellRotation(78, 0), new BaseCellRotation(74, 3) }, // j 1
{ new BaseCellRotation(63, 1), new BaseCellRotation(59, 3), new BaseCellRotation(57, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(109, 0), new BaseCellRotation(108, 0), new BaseCellRotation(100, 5) }, // j 0
{ new BaseCellRotation(93, 1), new BaseCellRotation(95, 0), new BaseCellRotation(92, 0) }, // j 1
{ new BaseCellRotation(77, 1), new BaseCellRotation(79, 0), new BaseCellRotation(78, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 4), new BaseCellRotation(118, 5), new BaseCellRotation(114, 5) }, // j 0
{ new BaseCellRotation(106, 1), new BaseCellRotation(109, 0), new BaseCellRotation(108, 0) }, // j 1
{ new BaseCellRotation(90, 1), new BaseCellRotation(93, 1), new BaseCellRotation(95, 0) } // j 2
} },
{// face 16
{
// i 0
{ new BaseCellRotation(90, 0), new BaseCellRotation(77, 0), new BaseCellRotation(63, 0) }, // j 0
{ new BaseCellRotation(80, 0), new BaseCellRotation(68, 0), new BaseCellRotation(56, 3) }, // j 1
{ new BaseCellRotation(72, 1), new BaseCellRotation(60, 3), new BaseCellRotation(46, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(106, 0), new BaseCellRotation(93, 0), new BaseCellRotation(79, 5) }, // j 0
{ new BaseCellRotation(99, 1), new BaseCellRotation(90, 0), new BaseCellRotation(77, 0) }, // j 1
{ new BaseCellRotation(88, 1), new BaseCellRotation(80, 0), new BaseCellRotation(68, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 3), new BaseCellRotation(109, 5), new BaseCellRotation(95, 5) }, // j 0
{ new BaseCellRotation(113, 1), new BaseCellRotation(106, 0), new BaseCellRotation(93, 0) }, // j 1
{ new BaseCellRotation(105, 1), new BaseCellRotation(99, 1), new BaseCellRotation(90, 0) } // j 2
} },
{// face 17
{
// i 0
{ new BaseCellRotation(105, 0), new BaseCellRotation(88, 0), new BaseCellRotation(72, 0) }, // j 0
{ new BaseCellRotation(103, 0), new BaseCellRotation(91, 0), new BaseCellRotation(73, 3) }, // j 1
{ new BaseCellRotation(97, 1), new BaseCellRotation(89, 3), new BaseCellRotation(71, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(113, 0), new BaseCellRotation(99, 0), new BaseCellRotation(80, 5) }, // j 0
{ new BaseCellRotation(116, 1), new BaseCellRotation(105, 0), new BaseCellRotation(88, 0) }, // j 1
{ new BaseCellRotation(111, 1), new BaseCellRotation(103, 0), new BaseCellRotation(91, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 2), new BaseCellRotation(106, 5), new BaseCellRotation(90, 5) }, // j 0
{ new BaseCellRotation(121, 1), new BaseCellRotation(113, 0), new BaseCellRotation(99, 0) }, // j 1
{ new BaseCellRotation(119, 1), new BaseCellRotation(116, 1), new BaseCellRotation(105, 0) } // j 2
} },
{// face 18
{
// i 0
{ new BaseCellRotation(119, 0), new BaseCellRotation(111, 0), new BaseCellRotation(97, 0) }, // j 0
{ new BaseCellRotation(115, 0), new BaseCellRotation(110, 0), new BaseCellRotation(98, 3) }, // j 1
{ new BaseCellRotation(107, 1), new BaseCellRotation(104, 3), new BaseCellRotation(96, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(121, 0), new BaseCellRotation(116, 0), new BaseCellRotation(103, 5) }, // j 0
{ new BaseCellRotation(120, 1), new BaseCellRotation(119, 0), new BaseCellRotation(111, 0) }, // j 1
{ new BaseCellRotation(112, 1), new BaseCellRotation(115, 0), new BaseCellRotation(110, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 1), new BaseCellRotation(113, 5), new BaseCellRotation(105, 5) }, // j 0
{ new BaseCellRotation(118, 1), new BaseCellRotation(121, 0), new BaseCellRotation(116, 0) }, // j 1
{ new BaseCellRotation(114, 1), new BaseCellRotation(120, 1), new BaseCellRotation(119, 0) } // j 2
} },
{// face 19
{
// i 0
{ new BaseCellRotation(114, 0), new BaseCellRotation(112, 0), new BaseCellRotation(107, 0) }, // j 0
{ new BaseCellRotation(100, 0), new BaseCellRotation(102, 0), new BaseCellRotation(101, 3) }, // j 1
{ new BaseCellRotation(83, 1), new BaseCellRotation(87, 3), new BaseCellRotation(85, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(118, 0), new BaseCellRotation(120, 0), new BaseCellRotation(115, 5) }, // j 0
{ new BaseCellRotation(108, 1), new BaseCellRotation(114, 0), new BaseCellRotation(112, 0) }, // j 1
{ new BaseCellRotation(92, 1), new BaseCellRotation(100, 0), new BaseCellRotation(102, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 0), new BaseCellRotation(121, 5), new BaseCellRotation(119, 5) }, // j 0
{ new BaseCellRotation(109, 1), new BaseCellRotation(118, 0), new BaseCellRotation(120, 0) }, // j 1
{ new BaseCellRotation(95, 1), new BaseCellRotation(108, 1), new BaseCellRotation(114, 0) } // j 2
} } };
/**
* Return whether or not the indicated base cell is a pentagon.
*/
public static boolean isBaseCellPentagon(int baseCell) {
if (baseCell < 0 || baseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE
            // Base cells outside the valid range cannot be represented in an index
return false;
}
return baseCellData[baseCell].isPentagon;
}
    /**
     * Return the home face and ijk coordinates ({@link FaceIJK}) of the indicated base cell.
     */
public static FaceIJK getBaseFaceIJK(int baseCell) {
if (baseCell < 0 || baseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE
            // Base cells outside the valid range cannot be represented in an index
throw new IllegalArgumentException("Illegal base cell");
}
BaseCellData cellData = baseCellData[baseCell];
return new FaceIJK(cellData.homeFace, new CoordIJK(cellData.homeI, cellData.homeJ, cellData.homeK));
}
/** Find base cell given a face and a CoordIJK.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
* face-centered ijk coordinate system, return the base cell located at that
* coordinate.
*
* Valid ijk+ lookup coordinates are from (0, 0, 0) to (2, 2, 2).
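     *
     * For example, per the face 0 table above, {@code getBaseCell(0, new CoordIJK(0, 0, 0))}
     * returns base cell 16.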
*/
public static int getBaseCell(int face, CoordIJK coord) {
return faceIjkBaseCells[face][coord.i][coord.j][coord.k].baseCell;
}
/** Find base cell given a face and a CoordIJK.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
     * face-centered ijk coordinate system, return the number of ccw 60 degree rotations
     * to rotate into the coordinate system of the base cell at that coordinate.
*
* Valid ijk+ lookup coordinates are from (0, 0, 0) to (2, 2, 2).
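     *
     * For example, per the face 0 table above, {@code getBaseCellCCWrot60(0, new CoordIJK(2, 0, 1))}
     * returns 5, while {@code (0, 0, 0)} on face 0 requires no rotation.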
*/
public static int getBaseCellCCWrot60(int face, CoordIJK coord) {
return faceIjkBaseCells[face][coord.i][coord.j][coord.k].ccwRot60;
}
/** Return whether or not the tested face is a cw offset face.
*/
public static boolean baseCellIsCwOffset(int baseCell, int testFace) {
return baseCellData[baseCell].cwOffsetPent[0] == testFace || baseCellData[baseCell].cwOffsetPent[1] == testFace;
}
/** Return whether the indicated base cell is a pentagon where all
* neighbors are oriented towards it. */
public static boolean isBaseCellPolarPentagon(int baseCell) {
return baseCell == 4 || baseCell == 117;
}
}
| elastic/elasticsearch | libs/h3/src/main/java/org/elasticsearch/h3/BaseCells.java |
493 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.hexagonal.eventlog;
import com.iluwatar.hexagonal.domain.PlayerDetails;
import com.mongodb.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import lombok.Getter;
import org.bson.Document;
/**
* Mongo based event log.
*/
public class MongoEventLog implements LotteryEventLog {
private static final String DEFAULT_DB = "lotteryDB";
private static final String DEFAULT_EVENTS_COLLECTION = "events";
private static final String EMAIL = "email";
private static final String PHONE = "phone";
public static final String MESSAGE = "message";
@Getter
private MongoClient mongoClient;
@Getter
private MongoDatabase database;
@Getter
private MongoCollection<Document> eventsCollection;
private final StdOutEventLog stdOutEventLog = new StdOutEventLog();
/**
* Constructor.
*/
public MongoEventLog() {
connect();
}
/**
* Constructor accepting parameters.
*/
public MongoEventLog(String dbName, String eventsCollectionName) {
connect(dbName, eventsCollectionName);
}
/**
* Connect to database with default parameters.
*/
public void connect() {
connect(DEFAULT_DB, DEFAULT_EVENTS_COLLECTION);
}
/**
* Connect to database with given parameters.
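   * <p>
   * The MongoDB host and port are read from the {@code mongo-host} and {@code mongo-port} system
   * properties, e.g. {@code -Dmongo-host=localhost -Dmongo-port=27017} (illustrative values).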
*/
public void connect(String dbName, String eventsCollectionName) {
if (mongoClient != null) {
mongoClient.close();
}
mongoClient = new MongoClient(System.getProperty("mongo-host"),
Integer.parseInt(System.getProperty("mongo-port")));
database = mongoClient.getDatabase(dbName);
eventsCollection = database.getCollection(eventsCollectionName);
}
@Override
public void ticketSubmitted(PlayerDetails details) {
var document = new Document(EMAIL, details.email());
document.put(PHONE, details.phoneNumber());
document.put("bank", details.bankAccount());
document
.put(MESSAGE, "Lottery ticket was submitted and bank account was charged for 3 credits.");
eventsCollection.insertOne(document);
stdOutEventLog.ticketSubmitted(details);
}
@Override
public void ticketSubmitError(PlayerDetails details) {
var document = new Document(EMAIL, details.email());
document.put(PHONE, details.phoneNumber());
document.put("bank", details.bankAccount());
document.put(MESSAGE, "Lottery ticket could not be submitted because lack of funds.");
eventsCollection.insertOne(document);
stdOutEventLog.ticketSubmitError(details);
}
@Override
public void ticketDidNotWin(PlayerDetails details) {
var document = new Document(EMAIL, details.email());
document.put(PHONE, details.phoneNumber());
document.put("bank", details.bankAccount());
document.put(MESSAGE, "Lottery ticket was checked and unfortunately did not win this time.");
eventsCollection.insertOne(document);
stdOutEventLog.ticketDidNotWin(details);
}
@Override
public void ticketWon(PlayerDetails details, int prizeAmount) {
var document = new Document(EMAIL, details.email());
document.put(PHONE, details.phoneNumber());
document.put("bank", details.bankAccount());
document.put(MESSAGE, String
.format("Lottery ticket won! The bank account was deposited with %d credits.",
prizeAmount));
eventsCollection.insertOne(document);
stdOutEventLog.ticketWon(details, prizeAmount);
}
@Override
public void prizeError(PlayerDetails details, int prizeAmount) {
var document = new Document(EMAIL, details.email());
document.put(PHONE, details.phoneNumber());
document.put("bank", details.bankAccount());
document.put(MESSAGE, String
.format("Lottery ticket won! Unfortunately the bank credit transfer of %d failed.",
prizeAmount));
eventsCollection.insertOne(document);
stdOutEventLog.prizeError(details, prizeAmount);
}
}
| iluwatar/java-design-patterns | hexagonal/src/main/java/com/iluwatar/hexagonal/eventlog/MongoEventLog.java |
494 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.action.bulk;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.transport.RawIndexingDataTransportRequest;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
 * A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
 * and allows executing them in a single batch.
 *
 * Note that we only support refresh on the bulk request, not per item.
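 * <p>
 * A minimal illustrative sketch of building a request (index names and ids are made up):
 * <pre>{@code
 * BulkRequest bulkRequest = new BulkRequest();
 * bulkRequest.add(new IndexRequest("my-index").id("1").source(XContentType.JSON, "field", "value"));
 * bulkRequest.add(new DeleteRequest("my-index", "2"));
 * }</pre>
 *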
* @see org.elasticsearch.client.internal.Client#bulk(BulkRequest)
*/
public class BulkRequest extends ActionRequest
implements
CompositeIndicesRequest,
WriteRequest<BulkRequest>,
Accountable,
RawIndexingDataTransportRequest {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkRequest.class);
private static final int REQUEST_OVERHEAD = 50;
    /**
     * Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and
     * {@link WriteRequest}s to this, but Java doesn't support syntax to declare that everything in the list has both types, so we
     * declare the one that requires the fewest casts.
     */
final List<DocWriteRequest<?>> requests = new ArrayList<>();
private final Set<String> indices = new HashSet<>();
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;
private String globalPipeline;
private String globalRouting;
private String globalIndex;
private Boolean globalRequireAlias;
    private Boolean globalRequireDataStream;
private long sizeInBytes = 0;
public BulkRequest() {}
public BulkRequest(StreamInput in) throws IOException {
super(in);
waitForActiveShards = ActiveShardCount.readFrom(in);
requests.addAll(in.readCollectionAsList(i -> DocWriteRequest.readDocumentRequest(null, i)));
refreshPolicy = RefreshPolicy.readFrom(in);
timeout = in.readTimeValue();
}
public BulkRequest(@Nullable String globalIndex) {
this.globalIndex = globalIndex;
}
    /**
     * Adds a list of requests to be executed. Can be index, update or delete requests.
     */
public BulkRequest add(DocWriteRequest<?>... requests) {
for (DocWriteRequest<?> request : requests) {
add(request);
}
return this;
}
/**
* Add a request to the current BulkRequest.
*
* Note for internal callers: This method does not respect all global parameters.
* Only the global index is applied to the request objects.
     * Global parameters would be respected if the request were serialized for a REST call, as it is
* in the high level rest client.
* @param request Request to add
* @return the current bulk request
*/
public BulkRequest add(DocWriteRequest<?> request) {
if (request instanceof IndexRequest indexRequest) {
add(indexRequest);
} else if (request instanceof DeleteRequest deleteRequest) {
add(deleteRequest);
} else if (request instanceof UpdateRequest updateRequest) {
add(updateRequest);
} else {
throw new IllegalArgumentException("No support for request [" + request + "]");
}
indices.add(request.index());
return this;
}
    /**
     * Adds a list of requests to be executed. Can be index, update or delete requests.
     */
public BulkRequest add(Iterable<DocWriteRequest<?>> requests) {
for (DocWriteRequest<?> request : requests) {
add(request);
}
return this;
}
/**
     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
     * (for example, if no id is provided one will be generated, and the create flag is respected).
*/
public BulkRequest add(IndexRequest request) {
return internalAdd(request);
}
BulkRequest internalAdd(IndexRequest request) {
Objects.requireNonNull(request, "'request' must not be null");
applyGlobalMandatoryParameters(request);
requests.add(request);
// lack of source is validated in validate() method
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public BulkRequest add(UpdateRequest request) {
return internalAdd(request);
}
BulkRequest internalAdd(UpdateRequest request) {
Objects.requireNonNull(request, "'request' must not be null");
applyGlobalMandatoryParameters(request);
requests.add(request);
if (request.doc() != null) {
sizeInBytes += request.doc().source().length();
}
if (request.upsertRequest() != null) {
sizeInBytes += request.upsertRequest().source().length();
}
if (request.script() != null) {
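            // rough estimate: Java strings store 2 bytes per character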
sizeInBytes += request.script().getIdOrCode().length() * 2;
}
indices.add(request.index());
return this;
}
/**
     * Adds a {@link DeleteRequest} to the list of actions to execute.
*/
public BulkRequest add(DeleteRequest request) {
Objects.requireNonNull(request, "'request' must not be null");
applyGlobalMandatoryParameters(request);
requests.add(request);
sizeInBytes += REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
/**
* The list of requests in this bulk request.
*/
public List<DocWriteRequest<?>> requests() {
return this.requests;
}
/**
* The number of actions in the bulk request.
*/
public int numberOfActions() {
return requests.size();
}
/**
* The estimated size in bytes of the bulk request.
*/
public long estimatedSizeInBytes() {
return sizeInBytes;
}
/**
     * Adds framed data in binary format
*/
public BulkRequest add(byte[] data, int from, int length, XContentType xContentType) throws IOException {
return add(data, from, length, null, xContentType);
}
/**
     * Adds framed data in binary format
*/
public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, XContentType xContentType) throws IOException {
return add(new BytesArray(data, from, length), defaultIndex, xContentType);
}
/**
     * Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, XContentType xContentType) throws IOException {
return add(data, defaultIndex, null, null, null, null, null, null, true, xContentType, RestApiVersion.current());
}
/**
     * Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, boolean allowExplicitIndex, XContentType xContentType)
throws IOException {
return add(data, defaultIndex, null, null, null, null, null, null, allowExplicitIndex, xContentType, RestApiVersion.current());
}
public BulkRequest add(
BytesReference data,
@Nullable String defaultIndex,
@Nullable String defaultRouting,
@Nullable FetchSourceContext defaultFetchSourceContext,
@Nullable String defaultPipeline,
@Nullable Boolean defaultRequireAlias,
@Nullable Boolean defaultRequireDataStream,
@Nullable Boolean defaultListExecutedPipelines,
boolean allowExplicitIndex,
XContentType xContentType,
RestApiVersion restApiVersion
) throws IOException {
String routing = valueOrDefault(defaultRouting, globalRouting);
String pipeline = valueOrDefault(defaultPipeline, globalPipeline);
Boolean requireAlias = valueOrDefault(defaultRequireAlias, globalRequireAlias);
        Boolean requireDataStream = valueOrDefault(defaultRequireDataStream, globalRequireDataStream);
new BulkRequestParser(true, restApiVersion).parse(
data,
defaultIndex,
routing,
defaultFetchSourceContext,
pipeline,
requireAlias,
requireDataStream,
defaultListExecutedPipelines,
allowExplicitIndex,
xContentType,
(indexRequest, type) -> internalAdd(indexRequest),
this::internalAdd,
this::add
);
return this;
}
/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public BulkRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
/**
* A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public BulkRequest waitForActiveShards(final int waitForActiveShards) {
return waitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
public ActiveShardCount waitForActiveShards() {
return this.waitForActiveShards;
}
@Override
public BulkRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
this.refreshPolicy = refreshPolicy;
return this;
}
@Override
public RefreshPolicy getRefreshPolicy() {
return refreshPolicy;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
public final BulkRequest timeout(TimeValue timeout) {
this.timeout = timeout;
return this;
}
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
*
* - {@link BulkRequest#add(IndexRequest)}
* - {@link BulkRequest#add(UpdateRequest)}
* - {@link BulkRequest#add(DocWriteRequest)}
     * - {@link BulkRequest#add(DocWriteRequest[])}
* - {@link BulkRequest#add(Iterable)}
* @param globalPipeline the global default setting
* @return Bulk request with global setting set
*/
public final BulkRequest pipeline(String globalPipeline) {
this.globalPipeline = globalPipeline;
return this;
}
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
*
     * - {@link BulkRequest#add(IndexRequest)}
     * - {@link BulkRequest#add(UpdateRequest)}
     * - {@link BulkRequest#add(DocWriteRequest)}
     * - {@link BulkRequest#add(DocWriteRequest[])}
     * - {@link BulkRequest#add(Iterable)}
* @param globalRouting the global default setting
* @return Bulk request with global setting set
*/
public final BulkRequest routing(String globalRouting) {
this.globalRouting = globalRouting;
return this;
}
public TimeValue timeout() {
return timeout;
}
public String pipeline() {
return globalPipeline;
}
public String routing() {
return globalRouting;
}
public Boolean requireAlias() {
return globalRequireAlias;
}
public Boolean requireDataStream() {
        return globalRequireDataStream;
}
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
*
* - {@link BulkRequest#add(IndexRequest)}
* - {@link BulkRequest#add(UpdateRequest)}
* - {@link BulkRequest#add(DocWriteRequest)}
     * - {@link BulkRequest#add(DocWriteRequest[])}
* - {@link BulkRequest#add(Iterable)}
* @param globalRequireAlias the global default setting
* @return Bulk request with global setting set
*/
public BulkRequest requireAlias(Boolean globalRequireAlias) {
this.globalRequireAlias = globalRequireAlias;
return this;
}
    public BulkRequest requireDataStream(Boolean globalRequireDataStream) {
        this.globalRequireDataStream = globalRequireDataStream;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException);
}
for (DocWriteRequest<?> request : requests) {
// We first check if refresh has been set
if (((WriteRequest<?>) request).getRefreshPolicy() != RefreshPolicy.NONE) {
validationException = addValidationError(
"RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.",
validationException
);
}
ActionRequestValidationException ex = ((WriteRequest<?>) request).validate();
if (ex != null) {
if (validationException == null) {
validationException = new ActionRequestValidationException();
}
validationException.addValidationErrors(ex.validationErrors());
}
}
return validationException;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
waitForActiveShards.writeTo(out);
out.writeCollection(requests, DocWriteRequest::writeDocumentRequest);
refreshPolicy.writeTo(out);
out.writeTimeValue(timeout);
}
@Override
public String getDescription() {
return "requests[" + requests.size() + "], indices[" + Strings.collectionToDelimitedString(indices, ", ") + "]";
}
private void applyGlobalMandatoryParameters(DocWriteRequest<?> request) {
request.index(valueOrDefault(request.index(), globalIndex));
}
private static String valueOrDefault(String value, String globalDefault) {
if (Strings.isNullOrEmpty(value) && Strings.isNullOrEmpty(globalDefault) == false) {
return globalDefault;
}
return value;
}
private static Boolean valueOrDefault(Boolean value, Boolean globalDefault) {
if (Objects.isNull(value) && Objects.isNull(globalDefault) == false) {
return globalDefault;
}
return value;
}
@Override
public long ramBytesUsed() {
return SHALLOW_SIZE + requests.stream().mapToLong(Accountable::ramBytesUsed).sum();
}
public Set<String> getIndices() {
return Collections.unmodifiableSet(indices);
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java |
// A character in UTF-8 can be from 1 to 4 bytes long, subject to the following rules:
// For a 1-byte character, the first bit is a 0, followed by its unicode code.
// For an n-byte character, the first n bits are all ones, the (n+1)-th bit is 0, followed by n-1 bytes whose most significant 2 bits are 10.
// This is how the UTF-8 encoding would work:
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+---------------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
// Given an array of integers representing the data, return whether it is a valid utf-8 encoding.
// Note:
// The input is an array of integers. Only the least significant 8 bits of each integer are used to store the data. This means each integer represents only 1 byte of data.
// Example 1:
// data = [197, 130, 1], which represents the octet sequence: 11000101 10000010 00000001.
// Return true.
// It is a valid utf-8 encoding for a 2-byte character followed by a 1-byte character.
// Example 2:
// data = [235, 140, 4], which represents the octet sequence: 11101011 10001100 00000100.
// Return false.
// The first 3 bits are all ones and the 4th bit is 0, which means it is a 3-byte character.
// The next byte is a continuation byte which starts with 10 and that's correct.
// But the second continuation byte does not start with 10, so it is invalid.
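// As a worked illustration of the checks below: for 197 (11000101), 197 >> 5 == 0b110, so it starts
// a 2-byte character; for 130 (10000010), 130 >> 6 == 0b10, so it is a valid continuation byte.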
public class Utf8Validation {
    public boolean validUtf8(int[] data) {
        // number of continuation bytes still expected for the current character
        int count = 0;
        for (int i : data) {
            if (count == 0) {
                // leading byte: the high bits determine how many continuation bytes follow
                if ((i >> 5) == 0b110) {
                    count = 1;
                } else if ((i >> 4) == 0b1110) {
                    count = 2;
                } else if ((i >> 3) == 0b11110) {
                    count = 3;
                } else if ((i >> 7) == 0b1) {
                    // a byte starting with 1 that matches none of the patterns above is invalid here
                    return false;
                }
            } else {
                // continuation byte: must start with 10
                if ((i >> 6) != 0b10) {
                    return false;
                }
                count--;
            }
        }
        // valid only if no character is left incomplete
        return count == 0;
    }
}
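// A minimal, hypothetical usage sketch (not part of the original solution); it simply
// replays the two documented examples against validUtf8.
class Utf8ValidationDemo {
    public static void main(String[] args) {
        Utf8Validation validator = new Utf8Validation();
        System.out.println(validator.validUtf8(new int[] { 197, 130, 1 })); // true
        System.out.println(validator.validUtf8(new int[] { 235, 140, 4 })); // false
    }
}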
| kdn251/interviews | leetcode/bit-manipulation/Utf8Validation.java |
496 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.monitor.fs;
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.nio.file.FileStore;
import java.util.Iterator;
import java.util.Set;
public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragment {
public static class Path implements Writeable, ToXContentObject {
String path;
/** File system string from {@link FileStore#toString()}. The concrete subclasses of FileStore have meaningful toString methods. */
String mount; // e.g. "/app (/dev/mapper/lxc-data)", "/System/Volumes/Data (/dev/disk1s2)", "Local Disk (C:)", etc.
/** File system type from {@link FileStore#type()}. */
String type; // e.g. "xfs", "apfs", "NTFS", etc.
long total = -1;
long free = -1;
long available = -1;
ByteSizeValue lowWatermarkFreeSpace = null;
ByteSizeValue highWatermarkFreeSpace = null;
ByteSizeValue floodStageWatermarkFreeSpace = null;
ByteSizeValue frozenFloodStageWatermarkFreeSpace = null;
public Path() {}
public Path(String path, String mount, long total, long free, long available) {
this.path = path;
this.mount = mount;
this.total = total;
this.free = free;
this.available = available;
}
/**
* Read from a stream.
*/
public Path(StreamInput in) throws IOException {
path = in.readOptionalString(); // total aggregates do not have a path, mount, or type
mount = in.readOptionalString();
type = in.readOptionalString();
total = in.readLong();
free = in.readLong();
available = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(path); // total aggregates do not have a path, mount, or type
out.writeOptionalString(mount);
out.writeOptionalString(type);
out.writeLong(total);
out.writeLong(free);
out.writeLong(available);
}
public String getPath() {
return path;
}
public String getMount() {
return mount;
}
public String getType() {
return type;
}
public ByteSizeValue getTotal() {
return ByteSizeValue.ofBytes(total);
}
public ByteSizeValue getFree() {
return ByteSizeValue.ofBytes(free);
}
public ByteSizeValue getAvailable() {
return ByteSizeValue.ofBytes(available);
}
public void setEffectiveWatermarks(final DiskThresholdSettings masterThresholdSettings, boolean isDedicatedFrozenNode) {
lowWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdLowStage(new ByteSizeValue(total, ByteSizeUnit.BYTES));
highWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdHighStage(new ByteSizeValue(total, ByteSizeUnit.BYTES));
floodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFloodStage(
new ByteSizeValue(total, ByteSizeUnit.BYTES)
);
if (isDedicatedFrozenNode) {
frozenFloodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFrozenFloodStage(
new ByteSizeValue(total, ByteSizeUnit.BYTES)
);
}
}
public ByteSizeValue getLowWatermarkFreeSpace() {
return lowWatermarkFreeSpace;
}
public ByteSizeValue getHighWatermarkFreeSpace() {
return highWatermarkFreeSpace;
}
public ByteSizeValue getFloodStageWatermarkFreeSpace() {
return floodStageWatermarkFreeSpace;
}
public ByteSizeValue getFrozenFloodStageWatermarkFreeSpace() {
return frozenFloodStageWatermarkFreeSpace;
}
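        // The fields total/free/available use -1 as an "unknown" sentinel. addLong treats an
        // unknown operand as contributing nothing, and yields 0 when both operands are unknown,
        // so aggregation never propagates the sentinel into a sum.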
private static long addLong(long current, long other) {
if (current == -1 && other == -1) {
return 0;
}
if (other == -1) {
return current;
}
if (current == -1) {
return other;
}
return current + other;
}
public void add(Path path) {
total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total));
free = FsProbe.adjustForHugeFilesystems(addLong(free, path.free));
available = FsProbe.adjustForHugeFilesystems(addLong(available, path.available));
}
static final class Fields {
static final String PATH = "path";
static final String MOUNT = "mount";
static final String TYPE = "type";
static final String TOTAL = "total";
static final String TOTAL_IN_BYTES = "total_in_bytes";
static final String FREE = "free";
static final String FREE_IN_BYTES = "free_in_bytes";
static final String AVAILABLE = "available";
static final String AVAILABLE_IN_BYTES = "available_in_bytes";
static final String LOW_WATERMARK_FREE_SPACE = "low_watermark_free_space";
static final String LOW_WATERMARK_FREE_SPACE_IN_BYTES = "low_watermark_free_space_in_bytes";
static final String HIGH_WATERMARK_FREE_SPACE = "high_watermark_free_space";
static final String HIGH_WATERMARK_FREE_SPACE_IN_BYTES = "high_watermark_free_space_in_bytes";
static final String FLOOD_STAGE_FREE_SPACE = "flood_stage_free_space";
static final String FLOOD_STAGE_FREE_SPACE_IN_BYTES = "flood_stage_free_space_in_bytes";
static final String FROZEN_FLOOD_STAGE_FREE_SPACE = "frozen_flood_stage_free_space";
static final String FROZEN_FLOOD_STAGE_FREE_SPACE_IN_BYTES = "frozen_flood_stage_free_space_in_bytes";
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (path != null) {
builder.field(Fields.PATH, path);
}
if (mount != null) {
builder.field(Fields.MOUNT, mount);
}
if (type != null) {
builder.field(Fields.TYPE, type);
}
if (total != -1) {
builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
}
if (free != -1) {
builder.humanReadableField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
}
if (available != -1) {
builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, getAvailable());
}
if (lowWatermarkFreeSpace != null) {
builder.humanReadableField(
Fields.LOW_WATERMARK_FREE_SPACE_IN_BYTES,
Fields.LOW_WATERMARK_FREE_SPACE,
getLowWatermarkFreeSpace()
);
}
if (highWatermarkFreeSpace != null) {
builder.humanReadableField(
Fields.HIGH_WATERMARK_FREE_SPACE_IN_BYTES,
Fields.HIGH_WATERMARK_FREE_SPACE,
getHighWatermarkFreeSpace()
);
}
if (floodStageWatermarkFreeSpace != null) {
builder.humanReadableField(
Fields.FLOOD_STAGE_FREE_SPACE_IN_BYTES,
Fields.FLOOD_STAGE_FREE_SPACE,
getFloodStageWatermarkFreeSpace()
);
}
if (frozenFloodStageWatermarkFreeSpace != null) {
builder.humanReadableField(
Fields.FROZEN_FLOOD_STAGE_FREE_SPACE_IN_BYTES,
Fields.FROZEN_FLOOD_STAGE_FREE_SPACE,
getFrozenFloodStageWatermarkFreeSpace()
);
}
builder.endObject();
return builder;
}
}
public static class DeviceStats implements Writeable, ToXContentFragment {
final int majorDeviceNumber;
final int minorDeviceNumber;
final String deviceName;
final long currentReadsCompleted;
final long previousReadsCompleted;
final long currentSectorsRead;
final long previousSectorsRead;
final long currentWritesCompleted;
final long previousWritesCompleted;
final long currentSectorsWritten;
final long previousSectorsWritten;
final long currentIOTime;
final long previousIOTime;
public DeviceStats(
final int majorDeviceNumber,
final int minorDeviceNumber,
final String deviceName,
final long currentReadsCompleted,
final long currentSectorsRead,
final long currentWritesCompleted,
final long currentSectorsWritten,
final long currentIOTime,
final DeviceStats previousDeviceStats
) {
this(
majorDeviceNumber,
minorDeviceNumber,
deviceName,
currentReadsCompleted,
previousDeviceStats != null ? previousDeviceStats.currentReadsCompleted : -1,
currentSectorsWritten,
previousDeviceStats != null ? previousDeviceStats.currentSectorsWritten : -1,
currentSectorsRead,
previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1,
currentWritesCompleted,
previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1,
currentIOTime,
previousDeviceStats != null ? previousDeviceStats.currentIOTime : -1
);
}
private DeviceStats(
final int majorDeviceNumber,
final int minorDeviceNumber,
final String deviceName,
final long currentReadsCompleted,
final long previousReadsCompleted,
final long currentSectorsWritten,
final long previousSectorsWritten,
final long currentSectorsRead,
final long previousSectorsRead,
final long currentWritesCompleted,
final long previousWritesCompleted,
final long currentIOTime,
final long previousIOTime
) {
this.majorDeviceNumber = majorDeviceNumber;
this.minorDeviceNumber = minorDeviceNumber;
this.deviceName = deviceName;
this.currentReadsCompleted = currentReadsCompleted;
this.previousReadsCompleted = previousReadsCompleted;
this.currentWritesCompleted = currentWritesCompleted;
this.previousWritesCompleted = previousWritesCompleted;
this.currentSectorsRead = currentSectorsRead;
this.previousSectorsRead = previousSectorsRead;
this.currentSectorsWritten = currentSectorsWritten;
this.previousSectorsWritten = previousSectorsWritten;
this.currentIOTime = currentIOTime;
this.previousIOTime = previousIOTime;
}
public DeviceStats(StreamInput in) throws IOException {
majorDeviceNumber = in.readVInt();
minorDeviceNumber = in.readVInt();
deviceName = in.readString();
currentReadsCompleted = in.readLong();
previousReadsCompleted = in.readLong();
currentWritesCompleted = in.readLong();
previousWritesCompleted = in.readLong();
currentSectorsRead = in.readLong();
previousSectorsRead = in.readLong();
currentSectorsWritten = in.readLong();
previousSectorsWritten = in.readLong();
currentIOTime = in.readLong();
previousIOTime = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(majorDeviceNumber);
out.writeVInt(minorDeviceNumber);
out.writeString(deviceName);
out.writeLong(currentReadsCompleted);
out.writeLong(previousReadsCompleted);
out.writeLong(currentWritesCompleted);
out.writeLong(previousWritesCompleted);
out.writeLong(currentSectorsRead);
out.writeLong(previousSectorsRead);
out.writeLong(currentSectorsWritten);
out.writeLong(previousSectorsWritten);
out.writeLong(currentIOTime);
out.writeLong(previousIOTime);
}
public String getDeviceName() {
return deviceName;
}
public long operations() {
if (previousReadsCompleted == -1 || previousWritesCompleted == -1) return -1;
return (currentReadsCompleted - previousReadsCompleted) + (currentWritesCompleted - previousWritesCompleted);
}
public long readOperations() {
if (previousReadsCompleted == -1) return -1;
return (currentReadsCompleted - previousReadsCompleted);
}
public long writeOperations() {
if (previousWritesCompleted == -1) return -1;
return (currentWritesCompleted - previousWritesCompleted);
}
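        // Sector counts (as reported by the Linux kernel, e.g. in /proc/diskstats) are in
        // 512-byte units, so halving the sector delta converts it to kilobytes.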
public long readKilobytes() {
if (previousSectorsRead == -1) return -1;
return (currentSectorsRead - previousSectorsRead) / 2;
}
public long writeKilobytes() {
if (previousSectorsWritten == -1) return -1;
return (currentSectorsWritten - previousSectorsWritten) / 2;
}
public long ioTimeInMillis() {
if (previousIOTime == -1) return -1;
return (currentIOTime - previousIOTime);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("device_name", deviceName);
builder.field(IoStats.OPERATIONS, operations());
builder.field(IoStats.READ_OPERATIONS, readOperations());
builder.field(IoStats.WRITE_OPERATIONS, writeOperations());
builder.field(IoStats.READ_KILOBYTES, readKilobytes());
builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes());
builder.field(IoStats.IO_TIMEMS, ioTimeInMillis());
return builder;
}
}
public static class IoStats implements Writeable, ToXContentFragment {
private static final String OPERATIONS = "operations";
private static final String READ_OPERATIONS = "read_operations";
private static final String WRITE_OPERATIONS = "write_operations";
private static final String READ_KILOBYTES = "read_kilobytes";
private static final String WRITE_KILOBYTES = "write_kilobytes";
private static final String IO_TIMEMS = "io_time_in_millis";
final DeviceStats[] devicesStats;
final long totalOperations;
final long totalReadOperations;
final long totalWriteOperations;
final long totalReadKilobytes;
final long totalWriteKilobytes;
final long totalIOTimeInMillis;
public IoStats(final DeviceStats[] devicesStats) {
this.devicesStats = devicesStats;
long totalOperations = 0;
long totalReadOperations = 0;
long totalWriteOperations = 0;
long totalReadKilobytes = 0;
long totalWriteKilobytes = 0;
long totalIOTimeInMillis = 0;
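            // Devices report -1 for metrics they could not measure; skip those values so the
            // totals only aggregate metrics that are actually available.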
for (DeviceStats deviceStats : devicesStats) {
totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0;
totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0;
totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0;
totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0;
totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0;
totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? deviceStats.ioTimeInMillis() : 0;
}
this.totalOperations = totalOperations;
this.totalReadOperations = totalReadOperations;
this.totalWriteOperations = totalWriteOperations;
this.totalReadKilobytes = totalReadKilobytes;
this.totalWriteKilobytes = totalWriteKilobytes;
this.totalIOTimeInMillis = totalIOTimeInMillis;
}
public IoStats(StreamInput in) throws IOException {
final int length = in.readVInt();
final DeviceStats[] devicesStats = new DeviceStats[length];
for (int i = 0; i < length; i++) {
devicesStats[i] = new DeviceStats(in);
}
this.devicesStats = devicesStats;
this.totalOperations = in.readLong();
this.totalReadOperations = in.readLong();
this.totalWriteOperations = in.readLong();
this.totalReadKilobytes = in.readLong();
this.totalWriteKilobytes = in.readLong();
this.totalIOTimeInMillis = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(devicesStats.length);
for (int i = 0; i < devicesStats.length; i++) {
devicesStats[i].writeTo(out);
}
out.writeLong(totalOperations);
out.writeLong(totalReadOperations);
out.writeLong(totalWriteOperations);
out.writeLong(totalReadKilobytes);
out.writeLong(totalWriteKilobytes);
out.writeLong(totalIOTimeInMillis);
}
public DeviceStats[] getDevicesStats() {
return devicesStats;
}
public long getTotalOperations() {
return totalOperations;
}
public long getTotalReadOperations() {
return totalReadOperations;
}
public long getTotalWriteOperations() {
return totalWriteOperations;
}
public long getTotalReadKilobytes() {
return totalReadKilobytes;
}
public long getTotalWriteKilobytes() {
return totalWriteKilobytes;
}
public long getTotalIOTimeMillis() {
return totalIOTimeInMillis;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (devicesStats.length > 0) {
builder.startArray("devices");
for (DeviceStats deviceStats : devicesStats) {
builder.startObject();
deviceStats.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
builder.startObject("total");
builder.field(OPERATIONS, totalOperations);
builder.field(READ_OPERATIONS, totalReadOperations);
builder.field(WRITE_OPERATIONS, totalWriteOperations);
builder.field(READ_KILOBYTES, totalReadKilobytes);
builder.field(WRITE_KILOBYTES, totalWriteKilobytes);
builder.field(IO_TIMEMS, totalIOTimeInMillis);
builder.endObject();
}
return builder;
}
}
private final long timestamp;
private final Path[] paths;
private final IoStats ioStats;
private final Path total;
public FsInfo(long timestamp, IoStats ioStats, Path[] paths) {
this.timestamp = timestamp;
this.ioStats = ioStats;
this.paths = paths;
this.total = total();
}
/**
* Read from a stream.
*/
public FsInfo(StreamInput in) throws IOException {
timestamp = in.readVLong();
ioStats = in.readOptionalWriteable(IoStats::new);
paths = new Path[in.readVInt()];
for (int i = 0; i < paths.length; i++) {
paths[i] = new Path(in);
}
this.total = total();
}
public static FsInfo setEffectiveWatermarks(
@Nullable final FsInfo fsInfo,
@Nullable final DiskThresholdSettings masterThresholdSettings,
boolean isDedicatedFrozenNode
) {
if (fsInfo != null && masterThresholdSettings != null) {
for (Path path : fsInfo.paths) {
path.setEffectiveWatermarks(masterThresholdSettings, isDedicatedFrozenNode);
}
}
return fsInfo;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(timestamp);
out.writeOptionalWriteable(ioStats);
out.writeArray(paths);
}
public Path getTotal() {
return total;
}
private Path total() {
Path res = new Path();
Set<String> seenDevices = Sets.newHashSetWithExpectedSize(paths.length);
for (Path subPath : paths) {
if (subPath.mount != null) {
if (seenDevices.add(subPath.mount) == false) {
                    continue; // already added numbers for this device
}
}
res.add(subPath);
}
return res;
}
public long getTimestamp() {
return timestamp;
}
public IoStats getIoStats() {
return ioStats;
}
@Override
public Iterator<Path> iterator() {
return Iterators.forArray(paths);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.FS);
builder.field(Fields.TIMESTAMP, timestamp);
builder.field(Fields.TOTAL);
total().toXContent(builder, params);
builder.startArray(Fields.DATA);
for (Path path : paths) {
path.toXContent(builder, params);
}
builder.endArray();
if (ioStats != null) {
builder.startObject(Fields.IO_STATS);
ioStats.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
return builder;
}
static final class Fields {
static final String FS = "fs";
static final String TIMESTAMP = "timestamp";
static final String DATA = "data";
static final String TOTAL = "total";
static final String IO_STATS = "io_stats";
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java |
497 | /*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.CollectPreconditions.checkRemove;
import static com.google.common.collect.CompactHashing.UNSET;
import static com.google.common.collect.Hashing.smearedHash;
import static com.google.common.collect.NullnessCasts.uncheckedCastNullableTToT;
import static com.google.common.collect.NullnessCasts.unsafeNull;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.errorprone.annotations.concurrent.LazyInit;
import com.google.j2objc.annotations.WeakOuter;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* CompactHashMap is an implementation of a Map. All optional operations (put and remove) are
* supported. Null keys and values are supported.
*
* <p>{@code containsKey(k)}, {@code put(k, v)} and {@code remove(k)} are all (expected and
* amortized) constant time operations. Expected in the hashtable sense (depends on the hash
* function doing a good job of distributing the elements to the buckets to a distribution not far
* from uniform), and amortized since some operations can trigger a hash table resize.
*
* <p>Unlike {@code java.util.HashMap}, iteration is only proportional to the actual {@code size()},
* which is optimal, and <i>not</i> the size of the internal hashtable, which could be much larger
* than {@code size()}. Furthermore, this structure places significantly reduced load on the garbage
* collector by only using a constant number of internal objects.
*
* <p>If there are no removals, then iteration order for the {@link #entrySet}, {@link #keySet}, and
* {@link #values} views is the same as insertion order. Any removal invalidates any ordering
* guarantees.
*
* <p>This class should not be assumed to be universally superior to {@code java.util.HashMap}.
* Generally speaking, this class reduces object allocation and memory consumption at the price of
* moderately increased constant factors of CPU. Only use this class when there is a specific reason
* to prioritize memory over CPU.
*
* @author Louis Wasserman
* @author Jon Noack
*/
@GwtIncompatible // not worth using in GWT for now
@ElementTypesAreNonnullByDefault
class CompactHashMap<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMap<K, V> implements Serializable {
/*
* TODO: Make this a drop-in replacement for j.u. versions, actually drop them in, and test the
* world. Figure out what sort of space-time tradeoff we're actually going to get here with the
* *Map variants. This class is particularly hard to benchmark, because the benefit is not only in
* less allocation, but also having the GC do less work to scan the heap because of fewer
* references, which is particularly hard to quantify.
*/
/** Creates an empty {@code CompactHashMap} instance. */
public static <K extends @Nullable Object, V extends @Nullable Object>
CompactHashMap<K, V> create() {
return new CompactHashMap<>();
}
/**
* Creates a {@code CompactHashMap} instance, with a high enough "initial capacity" that it
* <i>should</i> hold {@code expectedSize} elements without growth.
*
* @param expectedSize the number of elements you expect to add to the returned set
* @return a new, empty {@code CompactHashMap} with enough capacity to hold {@code expectedSize}
* elements without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
CompactHashMap<K, V> createWithExpectedSize(int expectedSize) {
return new CompactHashMap<>(expectedSize);
}
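  // Illustrative usage (a hypothetical snippet, not part of this class):
  //
  //   CompactHashMap<String, Integer> counts = CompactHashMap.createWithExpectedSize(8);
  //   counts.put("apples", 3);
  //   counts.put("pears", 2);
  //   counts.trimToSize(); // shrink internal arrays once the map is fully populated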
private static final Object NOT_FOUND = new Object();
/**
* Maximum allowed false positive probability of detecting a hash flooding attack given random
* input.
*/
  @VisibleForTesting static final double HASH_FLOODING_FPP = 0.001;
/**
* Maximum allowed length of a hash table bucket before falling back to a j.u.LinkedHashMap-based
* implementation. Experimentally determined.
*/
private static final int MAX_HASH_BUCKET_LENGTH = 9;
// The way the `table`, `entries`, `keys`, and `values` arrays work together is as follows.
//
// The `table` array always has a size that is a power of 2. The hashcode of a key in the map
// is masked in order to correspond to the current table size. For example, if the table size
// is 128 then the mask is 127 == 0x7f, keeping the bottom 7 bits of the hash value.
// If a key hashes to 0x89abcdef the mask reduces it to 0x89abcdef & 0x7f == 0x6f. We'll call this
// the "short hash".
//
// The `keys`, `values`, and `entries` arrays always have the same size as each other. They can be
// seen as fields of an imaginary `Entry` object like this:
//
// class Entry {
// int hash;
// Entry next;
// K key;
// V value;
// }
//
// The imaginary `hash` and `next` values are combined into a single `int` value in the `entries`
// array. The top bits of this value are the remaining bits of the hash value that were not used
// in the short hash. We saw that a mask of 0x7f would keep the 7-bit value 0x6f from a full
// hashcode of 0x89abcdef. The imaginary `hash` value would then be the remaining top 25 bits,
// 0x89abcd80. To this is added (or'd) the `next` value, which is an index within `entries`
// (and therefore within `keys` and `values`) of another entry that has the same short hash
// value. In our example, it would be another entry for a key whose short hash is also 0x6f.
//
// Essentially, then, `table[h]` gives us the start of a linked list in `entries`, where every
// element of the list has the short hash value h.
//
// A wrinkle here is that the value 0 (called UNSET in the code) is used as the equivalent of a
// null pointer. If `table[h] == 0` that means there are no keys in the map whose short hash is h.
// If the `next` bits in `entries[i]` are 0 that means there are no further entries for the given
// short hash. But 0 is also a valid index in `entries`, so we add 1 to these indices before
// putting them in `table` or in `next` bits, and subtract 1 again when we need an index value.
//
// The elements of `keys`, `values`, and `entries` are added sequentially, so that elements 0 to
// `size() - 1` are used and remaining elements are not. This makes iteration straightforward.
// Removing an entry generally involves moving the last element of each array to where the removed
// entry was, and adjusting index links accordingly.
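  //
  // As a concrete illustration of the packing above, reusing the numbers from the example:
  // with a table of size 128 the mask is 0x7f, so a full hash of 0x89abcdef has short hash
  // 0x89abcdef & 0x7f == 0x6f and hash prefix 0x89abcdef & ~0x7f == 0x89abcd80. If the next
  // entry in the same bucket lives at index 5 of `entries`, the stored value is
  // 0x89abcd80 | (5 + 1) == 0x89abcd86.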
/**
* The hashtable object. This can be either:
*
* <ul>
* <li>a byte[], short[], or int[], with size a power of two, created by
* CompactHashing.createTable, whose values are either
* <ul>
* <li>UNSET, meaning "null pointer"
* <li>one plus an index into the keys, values, and entries arrays
* </ul>
* <li>another java.util.Map delegate implementation. In most modern JDKs, normal java.util hash
* collections intelligently fall back to a binary search tree if hash table collisions are
* detected. Rather than going to all the trouble of reimplementing this ourselves, we
* simply switch over to use the JDK implementation wholesale if probable hash flooding is
* detected, sacrificing the compactness guarantee in very rare cases in exchange for much
* more reliable worst-case behavior.
* <li>null, if no entries have yet been added to the map
* </ul>
*/
@CheckForNull private transient Object table;
/**
* Contains the logical entries, in the range of [0, size()). The high bits of each int are the
* part of the smeared hash of the key not covered by the hashtable mask, whereas the low bits are
* the "next" pointer (pointing to the next entry in the bucket chain), which will always be less
* than or equal to the hashtable mask.
*
* <pre>
* hash = aaaaaaaa
* mask = 00000fff
* next = 00000bbb
* entry = aaaaabbb
* </pre>
*
* <p>The pointers in [size(), entries.length) are all "null" (UNSET).
*/
@VisibleForTesting @CheckForNull transient int[] entries;
/**
* The keys of the entries in the map, in the range of [0, size()). The keys in [size(),
* keys.length) are all {@code null}.
*/
@VisibleForTesting @CheckForNull transient @Nullable Object[] keys;
/**
* The values of the entries in the map, in the range of [0, size()). The values in [size(),
* values.length) are all {@code null}.
*/
@VisibleForTesting @CheckForNull transient @Nullable Object[] values;
/**
* Keeps track of metadata like the number of hash table bits and modifications of this data
* structure (to make it possible to throw ConcurrentModificationException in the iterator). Note
* that we choose not to make this volatile, so we do less of a "best effort" to track such
* errors, for better performance.
*
* <p>For a new instance, where the arrays above have not yet been allocated, the value of {@code
* metadata} is the size that the arrays should be allocated with. Once the arrays have been
* allocated, the value of {@code metadata} combines the number of bits in the "short hash", in
* its bottom {@value CompactHashing#HASH_TABLE_BITS_MAX_BITS} bits, with a modification count in
* the remaining bits that is used to detect concurrent modification during iteration.
*/
private transient int metadata;
/** The number of elements contained in the set. */
private transient int size;
/** Constructs a new empty instance of {@code CompactHashMap}. */
CompactHashMap() {
init(CompactHashing.DEFAULT_SIZE);
}
/**
* Constructs a new instance of {@code CompactHashMap} with the specified capacity.
*
* @param expectedSize the initial capacity of this {@code CompactHashMap}.
*/
CompactHashMap(int expectedSize) {
init(expectedSize);
}
/** Pseudoconstructor for serialization support. */
void init(int expectedSize) {
Preconditions.checkArgument(expectedSize >= 0, "Expected size must be >= 0");
// Save expectedSize for use in allocArrays()
this.metadata = Ints.constrainToRange(expectedSize, 1, CompactHashing.MAX_SIZE);
}
/** Returns whether arrays need to be allocated. */
@VisibleForTesting
boolean needsAllocArrays() {
return table == null;
}
/** Handle lazy allocation of arrays. */
@CanIgnoreReturnValue
int allocArrays() {
Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");
int expectedSize = metadata;
int buckets = CompactHashing.tableSize(expectedSize);
this.table = CompactHashing.createTable(buckets);
setHashTableMask(buckets - 1);
this.entries = new int[expectedSize];
this.keys = new Object[expectedSize];
this.values = new Object[expectedSize];
return expectedSize;
}
@SuppressWarnings("unchecked")
@VisibleForTesting
@CheckForNull
Map<K, V> delegateOrNull() {
if (table instanceof Map) {
return (Map<K, V>) table;
}
return null;
}
Map<K, V> createHashFloodingResistantDelegate(int tableSize) {
return new LinkedHashMap<>(tableSize, 1.0f);
}
@VisibleForTesting
@CanIgnoreReturnValue
Map<K, V> convertToHashFloodingResistantImplementation() {
Map<K, V> newDelegate = createHashFloodingResistantDelegate(hashTableMask() + 1);
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
newDelegate.put(key(i), value(i));
}
this.table = newDelegate;
this.entries = null;
this.keys = null;
this.values = null;
incrementModCount();
return newDelegate;
}
/** Stores the hash table mask as the number of bits needed to represent an index. */
private void setHashTableMask(int mask) {
int hashTableBits = Integer.SIZE - Integer.numberOfLeadingZeros(mask);
metadata =
CompactHashing.maskCombine(metadata, hashTableBits, CompactHashing.HASH_TABLE_BITS_MASK);
}
/** Gets the hash table mask using the stored number of hash table bits. */
private int hashTableMask() {
return (1 << (metadata & CompactHashing.HASH_TABLE_BITS_MASK)) - 1;
}
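  // For example, a mask of 0x7f (table size 128) is stored as 7 bits in `metadata`;
  // hashTableMask() then reconstructs it as (1 << 7) - 1 == 0x7f.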
void incrementModCount() {
metadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
/**
* Mark an access of the specified entry. Used only in {@code CompactLinkedHashMap} for LRU
* ordering.
*/
void accessEntry(int index) {
// no-op by default
}
@CanIgnoreReturnValue
@Override
@CheckForNull
public V put(@ParametricNullness K key, @ParametricNullness V value) {
if (needsAllocArrays()) {
allocArrays();
}
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.put(key, value);
}
int[] entries = requireEntries();
@Nullable Object[] keys = requireKeys();
@Nullable Object[] values = requireValues();
int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
int newSize = newEntryIndex + 1;
int hash = smearedHash(key);
int mask = hashTableMask();
int tableIndex = hash & mask;
int next = CompactHashing.tableGet(requireTable(), tableIndex);
if (next == UNSET) { // uninitialized bucket
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
CompactHashing.tableSet(requireTable(), tableIndex, newEntryIndex + 1);
}
} else {
int entryIndex;
int entry;
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
int bucketLength = 0;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equal(key, keys[entryIndex])) {
@SuppressWarnings("unchecked") // known to be a V
V oldValue = (V) values[entryIndex];
values[entryIndex] = value;
accessEntry(entryIndex);
return oldValue;
}
next = CompactHashing.getNext(entry, mask);
bucketLength++;
} while (next != UNSET);
if (bucketLength >= MAX_HASH_BUCKET_LENGTH) {
return convertToHashFloodingResistantImplementation().put(key, value);
}
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
entries[entryIndex] = CompactHashing.maskCombine(entry, newEntryIndex + 1, mask);
}
}
resizeMeMaybe(newSize);
insertEntry(newEntryIndex, key, value, hash, mask);
this.size = newSize;
incrementModCount();
return null;
}
/**
* Creates a fresh entry with the specified object at the specified position in the entry arrays.
*/
void insertEntry(
int entryIndex, @ParametricNullness K key, @ParametricNullness V value, int hash, int mask) {
this.setEntry(entryIndex, CompactHashing.maskCombine(hash, UNSET, mask));
this.setKey(entryIndex, key);
this.setValue(entryIndex, value);
}
/** Resizes the entries storage if necessary. */
private void resizeMeMaybe(int newSize) {
int entriesSize = requireEntries().length;
if (newSize > entriesSize) {
// 1.5x but round up to nearest odd (this is optimal for memory consumption on Android)
int newCapacity =
Math.min(CompactHashing.MAX_SIZE, (entriesSize + Math.max(1, entriesSize >>> 1)) | 1);
if (newCapacity != entriesSize) {
resizeEntries(newCapacity);
}
}
}
/**
* Resizes the internal entries array to the specified capacity, which may be greater or less than
* the current capacity.
*/
void resizeEntries(int newCapacity) {
this.entries = Arrays.copyOf(requireEntries(), newCapacity);
this.keys = Arrays.copyOf(requireKeys(), newCapacity);
this.values = Arrays.copyOf(requireValues(), newCapacity);
}
@CanIgnoreReturnValue
private int resizeTable(int oldMask, int newCapacity, int targetHash, int targetEntryIndex) {
Object newTable = CompactHashing.createTable(newCapacity);
int newMask = newCapacity - 1;
if (targetEntryIndex != UNSET) {
// Add target first; it must be last in the chain because its entry hasn't yet been created
CompactHashing.tableSet(newTable, targetHash & newMask, targetEntryIndex + 1);
}
Object oldTable = requireTable();
int[] entries = requireEntries();
    // Loop over `oldTable` to construct its replacement, `newTable`. The entries do not move, so
// the `keys` and `values` arrays do not need to change. But because the "short hash" now has a
// different number of bits, we must rewrite each element of `entries` so that its contribution
// to the full hashcode reflects the change, and so that its `next` link corresponds to the new
// linked list of entries with the new short hash.
for (int oldTableIndex = 0; oldTableIndex <= oldMask; oldTableIndex++) {
int oldNext = CompactHashing.tableGet(oldTable, oldTableIndex);
// Each element of `oldTable` is the head of a (possibly empty) linked list of elements in
// `entries`. The `oldNext` loop is going to traverse that linked list.
// We need to rewrite the `next` link of each of the elements so that it is in the appropriate
// linked list starting from `newTable`. In general, each element from the old linked list
// belongs to a different linked list from `newTable`. We insert each element in turn at the
// head of its appropriate `newTable` linked list.
while (oldNext != UNSET) {
int entryIndex = oldNext - 1;
int oldEntry = entries[entryIndex];
// Rebuild the full 32-bit hash using entry hashPrefix and oldTableIndex ("hashSuffix").
int hash = CompactHashing.getHashPrefix(oldEntry, oldMask) | oldTableIndex;
int newTableIndex = hash & newMask;
int newNext = CompactHashing.tableGet(newTable, newTableIndex);
CompactHashing.tableSet(newTable, newTableIndex, oldNext);
entries[entryIndex] = CompactHashing.maskCombine(hash, newNext, newMask);
oldNext = CompactHashing.getNext(oldEntry, oldMask);
}
}
this.table = newTable;
setHashTableMask(newMask);
return newMask;
}
private int indexOf(@CheckForNull Object key) {
if (needsAllocArrays()) {
return -1;
}
int hash = smearedHash(key);
int mask = hashTableMask();
int next = CompactHashing.tableGet(requireTable(), hash & mask);
if (next == UNSET) {
return -1;
}
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
do {
int entryIndex = next - 1;
int entry = entry(entryIndex);
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equal(key, key(entryIndex))) {
return entryIndex;
}
next = CompactHashing.getNext(entry, mask);
} while (next != UNSET);
return -1;
}
@Override
public boolean containsKey(@CheckForNull Object key) {
Map<K, V> delegate = delegateOrNull();
return (delegate != null) ? delegate.containsKey(key) : indexOf(key) != -1;
}
@Override
@CheckForNull
public V get(@CheckForNull Object key) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.get(key);
}
int index = indexOf(key);
if (index == -1) {
return null;
}
accessEntry(index);
return value(index);
}
@CanIgnoreReturnValue
@SuppressWarnings("unchecked") // known to be a V
@Override
@CheckForNull
public V remove(@CheckForNull Object key) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.remove(key);
}
Object oldValue = removeHelper(key);
return (oldValue == NOT_FOUND) ? null : (V) oldValue;
}
private @Nullable Object removeHelper(@CheckForNull Object key) {
if (needsAllocArrays()) {
return NOT_FOUND;
}
int mask = hashTableMask();
int index =
CompactHashing.remove(
key,
/* value= */ null,
mask,
requireTable(),
requireEntries(),
requireKeys(),
/* values= */ null);
if (index == -1) {
return NOT_FOUND;
}
Object oldValue = value(index);
moveLastEntry(index, mask);
size--;
incrementModCount();
return oldValue;
}
/**
* Moves the last entry in the entry array into {@code dstIndex}, and nulls out its old position.
*/
void moveLastEntry(int dstIndex, int mask) {
Object table = requireTable();
int[] entries = requireEntries();
@Nullable Object[] keys = requireKeys();
@Nullable Object[] values = requireValues();
int srcIndex = size() - 1;
if (dstIndex < srcIndex) {
// move last entry to deleted spot
Object key = keys[srcIndex];
keys[dstIndex] = key;
values[dstIndex] = values[srcIndex];
keys[srcIndex] = null;
values[srcIndex] = null;
// move the last entry to the removed spot, just like we moved the element
entries[dstIndex] = entries[srcIndex];
entries[srcIndex] = 0;
// also need to update whoever's "next" pointer was pointing to the last entry place
int tableIndex = smearedHash(key) & mask;
int next = CompactHashing.tableGet(table, tableIndex);
int srcNext = srcIndex + 1;
if (next == srcNext) {
// we need to update the root pointer
CompactHashing.tableSet(table, tableIndex, dstIndex + 1);
} else {
// we need to update a pointer in an entry
int entryIndex;
int entry;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
next = CompactHashing.getNext(entry, mask);
} while (next != srcNext);
// here, entries[entryIndex] points to the old entry location; update it
entries[entryIndex] = CompactHashing.maskCombine(entry, dstIndex + 1, mask);
}
} else {
keys[dstIndex] = null;
values[dstIndex] = null;
entries[dstIndex] = 0;
}
}
int firstEntryIndex() {
return isEmpty() ? -1 : 0;
}
int getSuccessor(int entryIndex) {
return (entryIndex + 1 < size) ? entryIndex + 1 : -1;
}
/**
* Updates the index an iterator is pointing to after a call to remove: returns the index of the
* entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
* index that *was* the next entry that would be looked at.
*/
int adjustAfterRemove(int indexBeforeRemove, @SuppressWarnings("unused") int indexRemoved) {
return indexBeforeRemove - 1;
}
private abstract class Itr<T extends @Nullable Object> implements Iterator<T> {
int expectedMetadata = metadata;
int currentIndex = firstEntryIndex();
int indexToRemove = -1;
@Override
public boolean hasNext() {
return currentIndex >= 0;
}
@ParametricNullness
abstract T getOutput(int entry);
@Override
@ParametricNullness
public T next() {
checkForConcurrentModification();
if (!hasNext()) {
throw new NoSuchElementException();
}
indexToRemove = currentIndex;
T result = getOutput(currentIndex);
currentIndex = getSuccessor(currentIndex);
return result;
}
@Override
public void remove() {
checkForConcurrentModification();
checkRemove(indexToRemove >= 0);
incrementExpectedModCount();
CompactHashMap.this.remove(key(indexToRemove));
currentIndex = adjustAfterRemove(currentIndex, indexToRemove);
indexToRemove = -1;
}
void incrementExpectedModCount() {
expectedMetadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
private void checkForConcurrentModification() {
if (metadata != expectedMetadata) {
throw new ConcurrentModificationException();
}
}
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
checkNotNull(function);
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
delegate.replaceAll(function);
} else {
for (int i = 0; i < size; i++) {
setValue(i, function.apply(key(i), value(i)));
}
}
}
@LazyInit @CheckForNull private transient Set<K> keySetView;
@Override
public Set<K> keySet() {
return (keySetView == null) ? keySetView = createKeySet() : keySetView;
}
Set<K> createKeySet() {
return new KeySetView();
}
@WeakOuter
class KeySetView extends Maps.KeySet<K, V> {
KeySetView() {
super(CompactHashMap.this);
}
@Override
public @Nullable Object[] toArray() {
if (needsAllocArrays()) {
return new Object[0];
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.keySet().toArray()
: ObjectArrays.copyAsObjectArray(requireKeys(), 0, size);
}
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
if (needsAllocArrays()) {
if (a.length > 0) {
@Nullable Object[] unsoundlyCovariantArray = a;
unsoundlyCovariantArray[0] = null;
}
return a;
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.keySet().toArray(a)
: ObjectArrays.toArrayImpl(requireKeys(), 0, size, a);
}
@Override
public boolean remove(@CheckForNull Object o) {
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.keySet().remove(o)
: CompactHashMap.this.removeHelper(o) != NOT_FOUND;
}
@Override
public Iterator<K> iterator() {
return keySetIterator();
}
@Override
public Spliterator<K> spliterator() {
if (needsAllocArrays()) {
return Spliterators.spliterator(new Object[0], Spliterator.DISTINCT | Spliterator.ORDERED);
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.keySet().spliterator()
: Spliterators.spliterator(
requireKeys(), 0, size, Spliterator.DISTINCT | Spliterator.ORDERED);
}
@Override
public void forEach(Consumer<? super K> action) {
checkNotNull(action);
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
delegate.keySet().forEach(action);
} else {
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
action.accept(key(i));
}
}
}
}
Iterator<K> keySetIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.keySet().iterator();
}
return new Itr<K>() {
@Override
@ParametricNullness
K getOutput(int entry) {
return key(entry);
}
};
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
checkNotNull(action);
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
delegate.forEach(action);
} else {
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
action.accept(key(i), value(i));
}
}
}
@LazyInit @CheckForNull private transient Set<Entry<K, V>> entrySetView;
@Override
public Set<Entry<K, V>> entrySet() {
return (entrySetView == null) ? entrySetView = createEntrySet() : entrySetView;
}
Set<Entry<K, V>> createEntrySet() {
return new EntrySetView();
}
@WeakOuter
class EntrySetView extends Maps.EntrySet<K, V> {
@Override
Map<K, V> map() {
return CompactHashMap.this;
}
@Override
public Iterator<Entry<K, V>> iterator() {
return entrySetIterator();
}
@Override
public Spliterator<Entry<K, V>> spliterator() {
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.entrySet().spliterator()
: CollectSpliterators.indexed(
size, Spliterator.DISTINCT | Spliterator.ORDERED, MapEntry::new);
}
@Override
public boolean contains(@CheckForNull Object o) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.entrySet().contains(o);
} else if (o instanceof Entry) {
Entry<?, ?> entry = (Entry<?, ?>) o;
int index = indexOf(entry.getKey());
return index != -1 && Objects.equal(value(index), entry.getValue());
}
return false;
}
@Override
public boolean remove(@CheckForNull Object o) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.entrySet().remove(o);
} else if (o instanceof Entry) {
Entry<?, ?> entry = (Entry<?, ?>) o;
if (needsAllocArrays()) {
return false;
}
int mask = hashTableMask();
int index =
CompactHashing.remove(
entry.getKey(),
entry.getValue(),
mask,
requireTable(),
requireEntries(),
requireKeys(),
requireValues());
if (index == -1) {
return false;
}
moveLastEntry(index, mask);
size--;
incrementModCount();
return true;
}
return false;
}
}
Iterator<Entry<K, V>> entrySetIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.entrySet().iterator();
}
return new Itr<Entry<K, V>>() {
@Override
Entry<K, V> getOutput(int entry) {
return new MapEntry(entry);
}
};
}
final class MapEntry extends AbstractMapEntry<K, V> {
@ParametricNullness private final K key;
private int lastKnownIndex;
MapEntry(int index) {
this.key = key(index);
this.lastKnownIndex = index;
}
@Override
@ParametricNullness
public K getKey() {
return key;
}
private void updateLastKnownIndex() {
if (lastKnownIndex == -1
|| lastKnownIndex >= size()
|| !Objects.equal(key, key(lastKnownIndex))) {
lastKnownIndex = indexOf(key);
}
}
@Override
@ParametricNullness
public V getValue() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
/*
* The cast is safe because the entry is present in the map. Or, if it has been removed by a
* concurrent modification, behavior is undefined.
*/
return uncheckedCastNullableTToT(delegate.get(key));
}
updateLastKnownIndex();
/*
* If the entry has been removed from the map, we return null, even though that might not be a
* valid value. That's the best we can do, short of holding a reference to the most recently
* seen value. And while we *could* do that, we aren't required to: Map.Entry explicitly says
* that behavior is undefined when the backing map is modified through another API. (It even
* permits us to throw IllegalStateException. Maybe we should have done that, but we probably
* shouldn't change now for fear of breaking people.)
*/
return (lastKnownIndex == -1) ? unsafeNull() : value(lastKnownIndex);
}
@Override
@ParametricNullness
public V setValue(@ParametricNullness V value) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return uncheckedCastNullableTToT(delegate.put(key, value)); // See discussion in getValue().
}
updateLastKnownIndex();
if (lastKnownIndex == -1) {
put(key, value);
return unsafeNull(); // See discussion in getValue().
} else {
V old = value(lastKnownIndex);
CompactHashMap.this.setValue(lastKnownIndex, value);
return old;
}
}
}
@Override
public int size() {
Map<K, V> delegate = delegateOrNull();
return (delegate != null) ? delegate.size() : size;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public boolean containsValue(@CheckForNull Object value) {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.containsValue(value);
}
for (int i = 0; i < size; i++) {
if (Objects.equal(value, value(i))) {
return true;
}
}
return false;
}
@LazyInit @CheckForNull private transient Collection<V> valuesView;
@Override
public Collection<V> values() {
return (valuesView == null) ? valuesView = createValues() : valuesView;
}
Collection<V> createValues() {
return new ValuesView();
}
@WeakOuter
class ValuesView extends Maps.Values<K, V> {
ValuesView() {
super(CompactHashMap.this);
}
@Override
public Iterator<V> iterator() {
return valuesIterator();
}
@Override
public void forEach(Consumer<? super V> action) {
checkNotNull(action);
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
delegate.values().forEach(action);
} else {
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
action.accept(value(i));
}
}
}
@Override
public Spliterator<V> spliterator() {
if (needsAllocArrays()) {
return Spliterators.spliterator(new Object[0], Spliterator.ORDERED);
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.values().spliterator()
: Spliterators.spliterator(requireValues(), 0, size, Spliterator.ORDERED);
}
@Override
public @Nullable Object[] toArray() {
if (needsAllocArrays()) {
return new Object[0];
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.values().toArray()
: ObjectArrays.copyAsObjectArray(requireValues(), 0, size);
}
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
if (needsAllocArrays()) {
if (a.length > 0) {
@Nullable Object[] unsoundlyCovariantArray = a;
unsoundlyCovariantArray[0] = null;
}
return a;
}
Map<K, V> delegate = delegateOrNull();
return (delegate != null)
? delegate.values().toArray(a)
: ObjectArrays.toArrayImpl(requireValues(), 0, size, a);
}
}
Iterator<V> valuesIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.values().iterator();
}
return new Itr<V>() {
@Override
@ParametricNullness
V getOutput(int entry) {
return value(entry);
}
};
}
/**
* Ensures that this {@code CompactHashMap} has the smallest representation in memory, given its
* current size.
*/
public void trimToSize() {
if (needsAllocArrays()) {
return;
}
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
Map<K, V> newDelegate = createHashFloodingResistantDelegate(size());
newDelegate.putAll(delegate);
this.table = newDelegate;
return;
}
int size = this.size;
if (size < requireEntries().length) {
resizeEntries(size);
}
int minimumTableSize = CompactHashing.tableSize(size);
int mask = hashTableMask();
if (minimumTableSize < mask) { // smaller table size will always be less than current mask
resizeTable(mask, minimumTableSize, UNSET, UNSET);
}
}
@Override
public void clear() {
if (needsAllocArrays()) {
return;
}
incrementModCount();
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
metadata =
Ints.constrainToRange(size(), CompactHashing.DEFAULT_SIZE, CompactHashing.MAX_SIZE);
delegate.clear(); // invalidate any iterators left over!
table = null;
size = 0;
} else {
Arrays.fill(requireKeys(), 0, size, null);
Arrays.fill(requireValues(), 0, size, null);
CompactHashing.tableClear(requireTable());
Arrays.fill(requireEntries(), 0, size, 0);
this.size = 0;
}
}
@J2ktIncompatible
private void writeObject(ObjectOutputStream stream) throws IOException {
stream.defaultWriteObject();
stream.writeInt(size());
Iterator<Entry<K, V>> entryIterator = entrySetIterator();
while (entryIterator.hasNext()) {
Entry<K, V> e = entryIterator.next();
stream.writeObject(e.getKey());
stream.writeObject(e.getValue());
}
}
@SuppressWarnings("unchecked")
@J2ktIncompatible
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
int elementCount = stream.readInt();
if (elementCount < 0) {
throw new InvalidObjectException("Invalid size: " + elementCount);
}
init(elementCount);
for (int i = 0; i < elementCount; i++) {
K key = (K) stream.readObject();
V value = (V) stream.readObject();
put(key, value);
}
}
/*
* The following methods are safe to call as long as both of the following hold:
*
* - allocArrays() has been called. Callers can confirm this by checking needsAllocArrays().
*
* - The map has not switched to delegating to a java.util implementation to mitigate hash
* flooding. Callers can confirm this by null-checking delegateOrNull().
*
* In an ideal world, we would document why we know those things are true every time we call these
* methods. But that is a bit too painful....
*/
private Object requireTable() {
return requireNonNull(table);
}
private int[] requireEntries() {
return requireNonNull(entries);
}
private @Nullable Object[] requireKeys() {
return requireNonNull(keys);
}
private @Nullable Object[] requireValues() {
return requireNonNull(values);
}
/*
* The following methods are safe to call as long as the conditions in the *previous* comment are
* met *and* the index is less than size().
*
* (The above explains when these methods are safe from a `nullness` perspective. From an
* `unchecked` perspective, they're safe because we put only K/V elements into each array.)
*/
@SuppressWarnings("unchecked")
private K key(int i) {
return (K) requireKeys()[i];
}
@SuppressWarnings("unchecked")
private V value(int i) {
return (V) requireValues()[i];
}
private int entry(int i) {
return requireEntries()[i];
}
private void setKey(int i, K key) {
requireKeys()[i] = key;
}
private void setValue(int i, V value) {
requireValues()[i] = value;
}
private void setEntry(int i, int value) {
requireEntries()[i] = value;
}
}
| google/guava | guava/src/com/google/common/collect/CompactHashMap.java |
498 | /*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.NullnessCasts.uncheckedCastNullableTToT;
import static com.google.common.collect.NullnessCasts.unsafeNull;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.base.Objects;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.errorprone.annotations.concurrent.LazyInit;
import com.google.j2objc.annotations.RetainedWith;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Arrays;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* A {@link BiMap} backed by two hash tables. This implementation allows null keys and values. A
* {@code HashBiMap} and its inverse are both serializable.
*
* <p>This implementation guarantees insertion-based iteration order of its keys.
*
* <p>See the Guava User Guide article on <a href=
* "https://github.com/google/guava/wiki/NewCollectionTypesExplained#bimap">{@code BiMap} </a>.
*
* @author Louis Wasserman
* @author Mike Bostock
* @since 2.0
*/
@GwtCompatible
@ElementTypesAreNonnullByDefault
public final class HashBiMap<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMap<K, V> implements BiMap<K, V>, Serializable {
/** Returns a new, empty {@code HashBiMap} with the default initial capacity (16). */
public static <K extends @Nullable Object, V extends @Nullable Object> HashBiMap<K, V> create() {
return create(16);
}
/**
* Constructs a new, empty bimap with the specified expected size.
*
* @param expectedSize the expected number of entries
* @throws IllegalArgumentException if the specified expected size is negative
*/
public static <K extends @Nullable Object, V extends @Nullable Object> HashBiMap<K, V> create(
int expectedSize) {
return new HashBiMap<>(expectedSize);
}
/**
* Constructs a new bimap containing initial values from {@code map}. The bimap is created with an
* initial capacity sufficient to hold the mappings in the specified map.
*/
public static <K extends @Nullable Object, V extends @Nullable Object> HashBiMap<K, V> create(
Map<? extends K, ? extends V> map) {
HashBiMap<K, V> bimap = create(map.size());
bimap.putAll(map);
return bimap;
}
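/*
* Illustrative usage of the three factory methods above (a sketch, not part of the original
* file; variable names are invented):
*
*   HashBiMap<String, Integer> fresh = HashBiMap.create();     // default capacity 16
*   HashBiMap<String, Integer> sized = HashBiMap.create(100);  // pre-sized for 100 entries
*   HashBiMap<String, Integer> copy = HashBiMap.create(fresh); // copies existing mappings
*/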
private static final int ABSENT = -1;
private static final int ENDPOINT = -2;
/** Maps an "entry" to the key of that entry. */
transient @Nullable K[] keys;
/** Maps an "entry" to the value of that entry. */
transient @Nullable V[] values;
transient int size;
transient int modCount;
/** Maps a key bucket to the "entry" of its first element. */
private transient int[] hashTableKToV;
/** Maps a value bucket to the "entry" of its first element. */
private transient int[] hashTableVToK;
/** Maps an "entry" to the "entry" that follows it in its key bucket. */
private transient int[] nextInBucketKToV;
/** Maps an "entry" to the "entry" that follows it in its value bucket. */
private transient int[] nextInBucketVToK;
/** The "entry" of the first element in insertion order. */
private transient int firstInInsertionOrder;
/** The "entry" of the last element in insertion order. */
private transient int lastInInsertionOrder;
/** Maps an "entry" to the "entry" that precedes it in insertion order. */
private transient int[] prevInInsertionOrder;
/** Maps an "entry" to the "entry" that follows it in insertion order. */
private transient int[] nextInInsertionOrder;
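/*
* A worked sketch of the layout above (values are illustrative): after put("a", 1) and
* put("b", 2), entry 0 holds ("a", 1) and entry 1 holds ("b", 2), so
*
*   keys = ["a", "b", ...]            values = [1, 2, ...]
*   firstInInsertionOrder = 0         lastInInsertionOrder = 1
*   nextInInsertionOrder[0] = 1       prevInInsertionOrder[1] = 0
*
* while hashTableKToV maps the smeared hash bucket of "a"/"b" to entry 0/1, hashTableVToK does
* the same for 1/2, and the nextInBucket arrays chain any entries whose hashes collide.
*/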
private HashBiMap(int expectedSize) {
init(expectedSize);
}
@SuppressWarnings("unchecked")
void init(int expectedSize) {
CollectPreconditions.checkNonnegative(expectedSize, "expectedSize");
int tableSize = Hashing.closedTableSize(expectedSize, 1.0);
size = 0;
keys = (K[]) new Object[expectedSize];
values = (V[]) new Object[expectedSize];
hashTableKToV = createFilledWithAbsent(tableSize);
hashTableVToK = createFilledWithAbsent(tableSize);
nextInBucketKToV = createFilledWithAbsent(expectedSize);
nextInBucketVToK = createFilledWithAbsent(expectedSize);
firstInInsertionOrder = ENDPOINT;
lastInInsertionOrder = ENDPOINT;
prevInInsertionOrder = createFilledWithAbsent(expectedSize);
nextInInsertionOrder = createFilledWithAbsent(expectedSize);
}
/** Returns an int array of the specified size, filled with ABSENT. */
private static int[] createFilledWithAbsent(int size) {
int[] array = new int[size];
Arrays.fill(array, ABSENT);
return array;
}
/** Equivalent to {@code Arrays.copyOf(array, newSize)}, save that the new elements are ABSENT. */
private static int[] expandAndFillWithAbsent(int[] array, int newSize) {
int oldSize = array.length;
int[] result = Arrays.copyOf(array, newSize);
Arrays.fill(result, oldSize, newSize, ABSENT);
return result;
}
@Override
public int size() {
return size;
}
/**
* Ensures that all of the internal structures in the HashBiMap are ready for this many elements.
*/
private void ensureCapacity(int minCapacity) {
if (nextInBucketKToV.length < minCapacity) {
int oldCapacity = nextInBucketKToV.length;
int newCapacity = ImmutableCollection.Builder.expandedCapacity(oldCapacity, minCapacity);
keys = Arrays.copyOf(keys, newCapacity);
values = Arrays.copyOf(values, newCapacity);
nextInBucketKToV = expandAndFillWithAbsent(nextInBucketKToV, newCapacity);
nextInBucketVToK = expandAndFillWithAbsent(nextInBucketVToK, newCapacity);
prevInInsertionOrder = expandAndFillWithAbsent(prevInInsertionOrder, newCapacity);
nextInInsertionOrder = expandAndFillWithAbsent(nextInInsertionOrder, newCapacity);
}
if (hashTableKToV.length < minCapacity) {
int newTableSize = Hashing.closedTableSize(minCapacity, 1.0);
hashTableKToV = createFilledWithAbsent(newTableSize);
hashTableVToK = createFilledWithAbsent(newTableSize);
for (int entryToRehash = 0; entryToRehash < size; entryToRehash++) {
int keyHash = Hashing.smearedHash(keys[entryToRehash]);
int keyBucket = bucket(keyHash);
nextInBucketKToV[entryToRehash] = hashTableKToV[keyBucket];
hashTableKToV[keyBucket] = entryToRehash;
int valueHash = Hashing.smearedHash(values[entryToRehash]);
int valueBucket = bucket(valueHash);
nextInBucketVToK[entryToRehash] = hashTableVToK[valueBucket];
hashTableVToK[valueBucket] = entryToRehash;
}
}
}
/**
* Returns the bucket (in either the K-to-V or V-to-K tables) where elements with the specified
* hash could be found, if present, or could be inserted.
*/
private int bucket(int hash) {
return hash & (hashTableKToV.length - 1);
}
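/*
* Because Hashing.closedTableSize returns a power of two, the mask above is equivalent to
* hash % hashTableKToV.length but avoids the division. Sketch with a table of length 16:
* bucket(0x2A) == 0x2A & 0xF == 10, i.e. only the low four bits select the bucket.
*/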
/** Given a key, returns the index of the entry in the tables, or ABSENT if not found. */
int findEntryByKey(@CheckForNull Object key) {
return findEntryByKey(key, Hashing.smearedHash(key));
}
/**
* Given a key and its hash, returns the index of the entry in the tables, or ABSENT if not found.
*/
int findEntryByKey(@CheckForNull Object key, int keyHash) {
return findEntry(key, keyHash, hashTableKToV, nextInBucketKToV, keys);
}
/** Given a value, returns the index of the entry in the tables, or ABSENT if not found. */
int findEntryByValue(@CheckForNull Object value) {
return findEntryByValue(value, Hashing.smearedHash(value));
}
/**
* Given a value and its hash, returns the index of the entry in the tables, or ABSENT if not
* found.
*/
int findEntryByValue(@CheckForNull Object value, int valueHash) {
return findEntry(value, valueHash, hashTableVToK, nextInBucketVToK, values);
}
int findEntry(
@CheckForNull Object o,
int oHash,
int[] hashTable,
int[] nextInBucket,
@Nullable Object[] array) {
for (int entry = hashTable[bucket(oHash)]; entry != ABSENT; entry = nextInBucket[entry]) {
if (Objects.equal(array[entry], o)) {
return entry;
}
}
return ABSENT;
}
@Override
public boolean containsKey(@CheckForNull Object key) {
return findEntryByKey(key) != ABSENT;
}
/**
* Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or,
* equivalently, if this bimap's inverse view contains a key that is equal to {@code value}).
*
* <p>Because values in a BiMap are unique, this is a single hash lookup in the value-to-key table
* and runs in expected constant time, unlike the linear scan most {@code Map} implementations
* need for {@code containsValue}.
*
* @param value the object to search for in the values of this BiMap
* @return true if a mapping exists from a key to the specified value
*/
@Override
public boolean containsValue(@CheckForNull Object value) {
return findEntryByValue(value) != ABSENT;
}
@Override
@CheckForNull
public V get(@CheckForNull Object key) {
int entry = findEntryByKey(key);
return (entry == ABSENT) ? null : values[entry];
}
@CheckForNull
K getInverse(@CheckForNull Object value) {
int entry = findEntryByValue(value);
return (entry == ABSENT) ? null : keys[entry];
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public V put(@ParametricNullness K key, @ParametricNullness V value) {
return put(key, value, false);
}
@CheckForNull
V put(@ParametricNullness K key, @ParametricNullness V value, boolean force) {
int keyHash = Hashing.smearedHash(key);
int entryForKey = findEntryByKey(key, keyHash);
if (entryForKey != ABSENT) {
V oldValue = values[entryForKey];
if (Objects.equal(oldValue, value)) {
return value;
} else {
replaceValueInEntry(entryForKey, value, force);
return oldValue;
}
}
int valueHash = Hashing.smearedHash(value);
int valueEntry = findEntryByValue(value, valueHash);
if (force) {
if (valueEntry != ABSENT) {
removeEntryValueHashKnown(valueEntry, valueHash);
}
} else {
checkArgument(valueEntry == ABSENT, "Value already present: %s", value);
}
ensureCapacity(size + 1);
keys[size] = key;
values[size] = value;
insertIntoTableKToV(size, keyHash);
insertIntoTableVToK(size, valueHash);
setSucceeds(lastInInsertionOrder, size);
setSucceeds(size, ENDPOINT);
size++;
modCount++;
return null;
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public V forcePut(@ParametricNullness K key, @ParametricNullness V value) {
return put(key, value, true);
}
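/*
* Sketch of the put/forcePut contrast (illustrative values):
*
*   HashBiMap<String, Integer> m = HashBiMap.create();
*   m.put("a", 1);
*   m.put("b", 1);      // throws IllegalArgumentException: "Value already present: 1"
*   m.forcePut("b", 1); // succeeds; the conflicting ("a", 1) entry is removed first
*/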
@CanIgnoreReturnValue
@CheckForNull
K putInverse(@ParametricNullness V value, @ParametricNullness K key, boolean force) {
int valueHash = Hashing.smearedHash(value);
int entryForValue = findEntryByValue(value, valueHash);
if (entryForValue != ABSENT) {
K oldKey = keys[entryForValue];
if (Objects.equal(oldKey, key)) {
return key;
} else {
replaceKeyInEntry(entryForValue, key, force);
return oldKey;
}
}
int predecessor = lastInInsertionOrder;
int keyHash = Hashing.smearedHash(key);
int keyEntry = findEntryByKey(key, keyHash);
if (force) {
if (keyEntry != ABSENT) {
predecessor = prevInInsertionOrder[keyEntry];
removeEntryKeyHashKnown(keyEntry, keyHash);
}
} else {
checkArgument(keyEntry == ABSENT, "Key already present: %s", key);
}
// insertion point for new entry is after predecessor
// note predecessor must still be a valid entry: either we deleted an entry that was *not*
// predecessor, or we didn't delete anything
ensureCapacity(size + 1);
keys[size] = key;
values[size] = value;
insertIntoTableKToV(size, keyHash);
insertIntoTableVToK(size, valueHash);
int successor =
(predecessor == ENDPOINT) ? firstInInsertionOrder : nextInInsertionOrder[predecessor];
setSucceeds(predecessor, size);
setSucceeds(size, successor);
size++;
modCount++;
return null;
}
/**
* Updates the pointers of the insertion order linked list so that {@code next} follows {@code
* prev}. {@code ENDPOINT} represents either the first or last entry in the entire map (as
* appropriate).
*/
private void setSucceeds(int prev, int next) {
if (prev == ENDPOINT) {
firstInInsertionOrder = next;
} else {
nextInInsertionOrder[prev] = next;
}
if (next == ENDPOINT) {
lastInInsertionOrder = prev;
} else {
prevInInsertionOrder[next] = prev;
}
}
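/*
* Worked sketch (letters stand for entry indices): with insertion order A <-> B <-> C, unlinking
* B is setSucceeds(A, C); appending a new entry E is setSucceeds(lastInInsertionOrder, E)
* followed by setSucceeds(E, ENDPOINT), exactly as put() does above.
*/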
/**
* Updates the K-to-V hash table to include the entry at the specified index, which is assumed to
* have not yet been added.
*/
private void insertIntoTableKToV(int entry, int keyHash) {
checkArgument(entry != ABSENT);
int keyBucket = bucket(keyHash);
nextInBucketKToV[entry] = hashTableKToV[keyBucket];
hashTableKToV[keyBucket] = entry;
}
/**
* Updates the V-to-K hash table to include the entry at the specified index, which is assumed to
* have not yet been added.
*/
private void insertIntoTableVToK(int entry, int valueHash) {
checkArgument(entry != ABSENT);
int valueBucket = bucket(valueHash);
nextInBucketVToK[entry] = hashTableVToK[valueBucket];
hashTableVToK[valueBucket] = entry;
}
/**
* Updates the K-to-V hash table to remove the entry at the specified index, which is assumed to
* be present. Does not update any other data structures.
*/
private void deleteFromTableKToV(int entry, int keyHash) {
checkArgument(entry != ABSENT);
int keyBucket = bucket(keyHash);
if (hashTableKToV[keyBucket] == entry) {
hashTableKToV[keyBucket] = nextInBucketKToV[entry];
nextInBucketKToV[entry] = ABSENT;
return;
}
int prevInBucket = hashTableKToV[keyBucket];
for (int entryInBucket = nextInBucketKToV[prevInBucket];
entryInBucket != ABSENT;
entryInBucket = nextInBucketKToV[entryInBucket]) {
if (entryInBucket == entry) {
nextInBucketKToV[prevInBucket] = nextInBucketKToV[entry];
nextInBucketKToV[entry] = ABSENT;
return;
}
prevInBucket = entryInBucket;
}
throw new AssertionError("Expected to find entry with key " + keys[entry]);
}
/**
* Updates the V-to-K hash table to remove the entry at the specified index, which is assumed to
* be present. Does not update any other data structures.
*/
private void deleteFromTableVToK(int entry, int valueHash) {
checkArgument(entry != ABSENT);
int valueBucket = bucket(valueHash);
if (hashTableVToK[valueBucket] == entry) {
hashTableVToK[valueBucket] = nextInBucketVToK[entry];
nextInBucketVToK[entry] = ABSENT;
return;
}
int prevInBucket = hashTableVToK[valueBucket];
for (int entryInBucket = nextInBucketVToK[prevInBucket];
entryInBucket != ABSENT;
entryInBucket = nextInBucketVToK[entryInBucket]) {
if (entryInBucket == entry) {
nextInBucketVToK[prevInBucket] = nextInBucketVToK[entry];
nextInBucketVToK[entry] = ABSENT;
return;
}
prevInBucket = entryInBucket;
}
throw new AssertionError("Expected to find entry with value " + values[entry]);
}
/**
* Updates the specified entry to point to the new value: removes the old value from the V-to-K
* mapping and puts the new one in. The entry does not move in the insertion order of the bimap.
*/
private void replaceValueInEntry(int entry, @ParametricNullness V newValue, boolean force) {
checkArgument(entry != ABSENT);
int newValueHash = Hashing.smearedHash(newValue);
int newValueIndex = findEntryByValue(newValue, newValueHash);
if (newValueIndex != ABSENT) {
if (force) {
removeEntryValueHashKnown(newValueIndex, newValueHash);
if (entry == size) { // this entry got moved to newValueIndex
entry = newValueIndex;
}
} else {
throw new IllegalArgumentException("Value already present in map: " + newValue);
}
}
// we do *not* update insertion order, and it isn't a structural modification!
deleteFromTableVToK(entry, Hashing.smearedHash(values[entry]));
values[entry] = newValue;
insertIntoTableVToK(entry, newValueHash);
}
/**
* Updates the specified entry to point to the new value: removes the old value from the V-to-K
* mapping and puts the new one in. The entry is moved to the end of the insertion order, or to
* the position of the new key if it was previously present.
*/
private void replaceKeyInEntry(int entry, @ParametricNullness K newKey, boolean force) {
checkArgument(entry != ABSENT);
int newKeyHash = Hashing.smearedHash(newKey);
int newKeyIndex = findEntryByKey(newKey, newKeyHash);
int newPredecessor = lastInInsertionOrder;
int newSuccessor = ENDPOINT;
if (newKeyIndex != ABSENT) {
if (force) {
newPredecessor = prevInInsertionOrder[newKeyIndex];
newSuccessor = nextInInsertionOrder[newKeyIndex];
removeEntryKeyHashKnown(newKeyIndex, newKeyHash);
if (entry == size) { // this entry got moved to newKeyIndex
entry = newKeyIndex;
}
} else {
throw new IllegalArgumentException("Key already present in map: " + newKey);
}
}
if (newPredecessor == entry) {
newPredecessor = prevInInsertionOrder[entry];
} else if (newPredecessor == size) {
newPredecessor = newKeyIndex;
}
if (newSuccessor == entry) {
newSuccessor = nextInInsertionOrder[entry];
} else if (newSuccessor == size) {
newSuccessor = newKeyIndex;
}
int oldPredecessor = prevInInsertionOrder[entry];
int oldSuccessor = nextInInsertionOrder[entry];
setSucceeds(oldPredecessor, oldSuccessor); // remove from insertion order linked list
deleteFromTableKToV(entry, Hashing.smearedHash(keys[entry]));
keys[entry] = newKey;
insertIntoTableKToV(entry, Hashing.smearedHash(newKey));
// insert into insertion order linked list, usually at the end
setSucceeds(newPredecessor, entry);
setSucceeds(entry, newSuccessor);
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public V remove(@CheckForNull Object key) {
int keyHash = Hashing.smearedHash(key);
int entry = findEntryByKey(key, keyHash);
if (entry == ABSENT) {
return null;
} else {
V value = values[entry];
removeEntryKeyHashKnown(entry, keyHash);
return value;
}
}
@CheckForNull
K removeInverse(@CheckForNull Object value) {
int valueHash = Hashing.smearedHash(value);
int entry = findEntryByValue(value, valueHash);
if (entry == ABSENT) {
return null;
} else {
K key = keys[entry];
removeEntryValueHashKnown(entry, valueHash);
return key;
}
}
/** Removes the entry at the specified index, computing the key and value hashes from the stored elements. */
void removeEntry(int entry) {
removeEntryKeyHashKnown(entry, Hashing.smearedHash(keys[entry]));
}
/** Removes the entry at the specified index, given the hash of its key and value. */
private void removeEntry(int entry, int keyHash, int valueHash) {
checkArgument(entry != ABSENT);
deleteFromTableKToV(entry, keyHash);
deleteFromTableVToK(entry, valueHash);
int oldPredecessor = prevInInsertionOrder[entry];
int oldSuccessor = nextInInsertionOrder[entry];
setSucceeds(oldPredecessor, oldSuccessor);
moveEntryToIndex(size - 1, entry);
keys[size - 1] = null;
values[size - 1] = null;
size--;
modCount++;
}
/** Removes the entry at the specified index, given the hash of its key. */
void removeEntryKeyHashKnown(int entry, int keyHash) {
removeEntry(entry, keyHash, Hashing.smearedHash(values[entry]));
}
/** Removes the entry at the specified index, given the hash of its value. */
void removeEntryValueHashKnown(int entry, int valueHash) {
removeEntry(entry, Hashing.smearedHash(keys[entry]), valueHash);
}
/**
* Moves the entry previously positioned at {@code src} to {@code dest}. Assumes the entry
* previously at {@code src} has already been removed from the data structures.
*/
private void moveEntryToIndex(int src, int dest) {
if (src == dest) {
return;
}
int predecessor = prevInInsertionOrder[src];
int successor = nextInInsertionOrder[src];
setSucceeds(predecessor, dest);
setSucceeds(dest, successor);
K key = keys[src];
V value = values[src];
keys[dest] = key;
values[dest] = value;
// update pointers in hashTableKToV
int keyHash = Hashing.smearedHash(key);
int keyBucket = bucket(keyHash);
if (hashTableKToV[keyBucket] == src) {
hashTableKToV[keyBucket] = dest;
} else {
int prevInBucket = hashTableKToV[keyBucket];
for (int entryInBucket = nextInBucketKToV[prevInBucket];
/* should never reach end */ ;
entryInBucket = nextInBucketKToV[entryInBucket]) {
if (entryInBucket == src) {
nextInBucketKToV[prevInBucket] = dest;
break;
}
prevInBucket = entryInBucket;
}
}
nextInBucketKToV[dest] = nextInBucketKToV[src];
nextInBucketKToV[src] = ABSENT;
// update pointers in hashTableVToK
int valueHash = Hashing.smearedHash(value);
int valueBucket = bucket(valueHash);
if (hashTableVToK[valueBucket] == src) {
hashTableVToK[valueBucket] = dest;
} else {
int prevInBucket = hashTableVToK[valueBucket];
for (int entryInBucket = nextInBucketVToK[prevInBucket];
/* should never reach end */ ;
entryInBucket = nextInBucketVToK[entryInBucket]) {
if (entryInBucket == src) {
nextInBucketVToK[prevInBucket] = dest;
break;
}
prevInBucket = entryInBucket;
}
}
nextInBucketVToK[dest] = nextInBucketVToK[src];
nextInBucketVToK[src] = ABSENT;
}
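/*
* Sketch of why this exists: entries are kept dense in [0, size), so removeEntry deletes entry i
* by copying the last entry into slot i via moveEntryToIndex(size - 1, i), repairing the bucket
* chains and insertion-order links as above, then nulling out slot size - 1 and decrementing
* size. Removal therefore never leaves a hole in keys[] or values[].
*/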
@Override
public void clear() {
Arrays.fill(keys, 0, size, null);
Arrays.fill(values, 0, size, null);
Arrays.fill(hashTableKToV, ABSENT);
Arrays.fill(hashTableVToK, ABSENT);
Arrays.fill(nextInBucketKToV, 0, size, ABSENT);
Arrays.fill(nextInBucketVToK, 0, size, ABSENT);
Arrays.fill(prevInInsertionOrder, 0, size, ABSENT);
Arrays.fill(nextInInsertionOrder, 0, size, ABSENT);
size = 0;
firstInInsertionOrder = ENDPOINT;
lastInInsertionOrder = ENDPOINT;
modCount++;
}
/** Shared supertype of keySet, values, entrySet, and inverse.entrySet. */
abstract static class View<
K extends @Nullable Object, V extends @Nullable Object, T extends @Nullable Object>
extends AbstractSet<T> {
final HashBiMap<K, V> biMap;
View(HashBiMap<K, V> biMap) {
this.biMap = biMap;
}
@ParametricNullness
abstract T forEntry(int entry);
@Override
public Iterator<T> iterator() {
return new Iterator<T>() {
private int index = biMap.firstInInsertionOrder;
private int indexToRemove = ABSENT;
private int expectedModCount = biMap.modCount;
// Calls to setValue on inverse entries can move already-visited entries to the end.
// Make sure we don't visit those.
private int remaining = biMap.size;
private void checkForComodification() {
if (biMap.modCount != expectedModCount) {
throw new ConcurrentModificationException();
}
}
@Override
public boolean hasNext() {
checkForComodification();
return index != ENDPOINT && remaining > 0;
}
@Override
@ParametricNullness
public T next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
T result = forEntry(index);
indexToRemove = index;
index = biMap.nextInInsertionOrder[index];
remaining--;
return result;
}
@Override
public void remove() {
checkForComodification();
CollectPreconditions.checkRemove(indexToRemove != ABSENT);
biMap.removeEntry(indexToRemove);
if (index == biMap.size) {
index = indexToRemove;
}
indexToRemove = ABSENT;
expectedModCount = biMap.modCount;
}
};
}
@Override
public int size() {
return biMap.size;
}
@Override
public void clear() {
biMap.clear();
}
}
@LazyInit private transient Set<K> keySet;
@Override
public Set<K> keySet() {
Set<K> result = keySet;
return (result == null) ? keySet = new KeySet() : result;
}
final class KeySet extends View<K, V, K> {
KeySet() {
super(HashBiMap.this);
}
@Override
@ParametricNullness
K forEntry(int entry) {
// The cast is safe because we call forEntry only for indexes that contain entries.
return uncheckedCastNullableTToT(keys[entry]);
}
@Override
public boolean contains(@CheckForNull Object o) {
return HashBiMap.this.containsKey(o);
}
@Override
public boolean remove(@CheckForNull Object o) {
int oHash = Hashing.smearedHash(o);
int entry = findEntryByKey(o, oHash);
if (entry != ABSENT) {
removeEntryKeyHashKnown(entry, oHash);
return true;
} else {
return false;
}
}
}
@LazyInit private transient Set<V> valueSet;
@Override
public Set<V> values() {
Set<V> result = valueSet;
return (result == null) ? valueSet = new ValueSet() : result;
}
final class ValueSet extends View<K, V, V> {
ValueSet() {
super(HashBiMap.this);
}
@Override
@ParametricNullness
V forEntry(int entry) {
// The cast is safe because we call forEntry only for indexes that contain entries.
return uncheckedCastNullableTToT(values[entry]);
}
@Override
public boolean contains(@CheckForNull Object o) {
return HashBiMap.this.containsValue(o);
}
@Override
public boolean remove(@CheckForNull Object o) {
int oHash = Hashing.smearedHash(o);
int entry = findEntryByValue(o, oHash);
if (entry != ABSENT) {
removeEntryValueHashKnown(entry, oHash);
return true;
} else {
return false;
}
}
}
@LazyInit private transient Set<Entry<K, V>> entrySet;
@Override
public Set<Entry<K, V>> entrySet() {
Set<Entry<K, V>> result = entrySet;
return (result == null) ? entrySet = new EntrySet() : result;
}
final class EntrySet extends View<K, V, Entry<K, V>> {
EntrySet() {
super(HashBiMap.this);
}
@Override
public boolean contains(@CheckForNull Object o) {
if (o instanceof Entry) {
Entry<?, ?> e = (Entry<?, ?>) o;
Object k = e.getKey();
Object v = e.getValue();
int eIndex = findEntryByKey(k);
return eIndex != ABSENT && Objects.equal(v, values[eIndex]);
}
return false;
}
@Override
@CanIgnoreReturnValue
public boolean remove(@CheckForNull Object o) {
if (o instanceof Entry) {
Entry<?, ?> e = (Entry<?, ?>) o;
Object k = e.getKey();
Object v = e.getValue();
int kHash = Hashing.smearedHash(k);
int eIndex = findEntryByKey(k, kHash);
if (eIndex != ABSENT && Objects.equal(v, values[eIndex])) {
removeEntryKeyHashKnown(eIndex, kHash);
return true;
}
}
return false;
}
@Override
Entry<K, V> forEntry(int entry) {
return new EntryForKey(entry);
}
}
/**
* An {@code Entry} implementation that attempts to follow its key around the map -- that is, if
* the key is moved, deleted, or reinserted, it will account for that -- while not doing any extra
* work if the key has not moved. One quirk: The {@link #getValue()} method can return {@code
* null} even for a map which supposedly does not contain null elements, if the key is not present
* when {@code getValue()} is called.
*/
final class EntryForKey extends AbstractMapEntry<K, V> {
@ParametricNullness final K key;
int index;
EntryForKey(int index) {
// The cast is safe because we call forEntry only for indexes that contain entries.
this.key = uncheckedCastNullableTToT(keys[index]);
this.index = index;
}
void updateIndex() {
if (index == ABSENT || index > size || !Objects.equal(keys[index], key)) {
index = findEntryByKey(key);
}
}
@Override
@ParametricNullness
public K getKey() {
return key;
}
@Override
@ParametricNullness
public V getValue() {
updateIndex();
/*
* If the entry has been removed from the map, we return null, even though that might not be a
* valid value. That's the best we can do, short of holding a reference to the most recently
* seen value. And while we *could* do that, we aren't required to: Map.Entry explicitly says
* that behavior is undefined when the backing map is modified through another API. (It even
* permits us to throw IllegalStateException. Maybe we should have done that, but we probably
* shouldn't change now for fear of breaking people.)
*
* If the entry is still in the map, then updateIndex ensured that `index` points to the right
* element. Because that element is present, uncheckedCastNullableTToT is safe.
*/
return (index == ABSENT) ? unsafeNull() : uncheckedCastNullableTToT(values[index]);
}
@Override
@ParametricNullness
public V setValue(@ParametricNullness V value) {
updateIndex();
if (index == ABSENT) {
HashBiMap.this.put(key, value);
return unsafeNull(); // See the discussion in getValue().
}
/*
* The cast is safe because updateIndex found the entry for this key. (If it hadn't, then we
* would have returned above.) Thus, we know that it and its corresponding value are in
* position `index`.
*/
V oldValue = uncheckedCastNullableTToT(values[index]);
if (Objects.equal(oldValue, value)) {
return value;
}
replaceValueInEntry(index, value, false);
return oldValue;
}
}
@LazyInit @RetainedWith @CheckForNull private transient BiMap<V, K> inverse;
@Override
public BiMap<V, K> inverse() {
BiMap<V, K> result = inverse;
return (result == null) ? inverse = new Inverse<K, V>(this) : result;
}
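/*
* Illustrative use of the inverse view (a sketch): both directions share the same storage, so a
* write through either side is immediately visible in the other.
*
*   HashBiMap<String, Integer> m = HashBiMap.create();
*   m.put("a", 1);
*   BiMap<Integer, String> inv = m.inverse();
*   inv.get(1);          // "a"
*   inv.put(2, "b");     // now m.get("b") == 2
*   inv.inverse() == m;  // true: inverse() round-trips to the same instance
*/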
static class Inverse<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMap<V, K> implements BiMap<V, K>, Serializable {
private final HashBiMap<K, V> forward;
Inverse(HashBiMap<K, V> forward) {
this.forward = forward;
}
@Override
public int size() {
return forward.size;
}
@Override
public boolean containsKey(@CheckForNull Object key) {
return forward.containsValue(key);
}
@Override
@CheckForNull
public K get(@CheckForNull Object key) {
return forward.getInverse(key);
}
@Override
public boolean containsValue(@CheckForNull Object value) {
return forward.containsKey(value);
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public K put(@ParametricNullness V value, @ParametricNullness K key) {
return forward.putInverse(value, key, false);
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public K forcePut(@ParametricNullness V value, @ParametricNullness K key) {
return forward.putInverse(value, key, true);
}
@Override
public BiMap<K, V> inverse() {
return forward;
}
@Override
@CanIgnoreReturnValue
@CheckForNull
public K remove(@CheckForNull Object value) {
return forward.removeInverse(value);
}
@Override
public void clear() {
forward.clear();
}
@Override
public Set<V> keySet() {
return forward.values();
}
@Override
public Set<K> values() {
return forward.keySet();
}
private transient Set<Entry<V, K>> inverseEntrySet;
@Override
public Set<Entry<V, K>> entrySet() {
Set<Entry<V, K>> result = inverseEntrySet;
return (result == null) ? inverseEntrySet = new InverseEntrySet<K, V>(forward) : result;
}
@GwtIncompatible("serialization")
private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException {
in.defaultReadObject();
this.forward.inverse = this;
}
}
static class InverseEntrySet<K extends @Nullable Object, V extends @Nullable Object>
extends View<K, V, Entry<V, K>> {
InverseEntrySet(HashBiMap<K, V> biMap) {
super(biMap);
}
@Override
public boolean contains(@CheckForNull Object o) {
if (o instanceof Entry) {
Entry<?, ?> e = (Entry<?, ?>) o;
Object v = e.getKey();
Object k = e.getValue();
int eIndex = biMap.findEntryByValue(v);
return eIndex != ABSENT && Objects.equal(biMap.keys[eIndex], k);
}
return false;
}
@Override
public boolean remove(@CheckForNull Object o) {
if (o instanceof Entry) {
Entry<?, ?> e = (Entry<?, ?>) o;
Object v = e.getKey();
Object k = e.getValue();
int vHash = Hashing.smearedHash(v);
int eIndex = biMap.findEntryByValue(v, vHash);
if (eIndex != ABSENT && Objects.equal(biMap.keys[eIndex], k)) {
biMap.removeEntryValueHashKnown(eIndex, vHash);
return true;
}
}
return false;
}
@Override
Entry<V, K> forEntry(int entry) {
return new EntryForValue<K, V>(biMap, entry);
}
}
/**
* An {@code Entry} implementation that attempts to follow its value around the map -- that is, if
* the value is moved, deleted, or reinserted, it will account for that -- while not doing any
* extra work if the value has not moved.
*/
static final class EntryForValue<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMapEntry<V, K> {
final HashBiMap<K, V> biMap;
@ParametricNullness final V value;
int index;
EntryForValue(HashBiMap<K, V> biMap, int index) {
this.biMap = biMap;
// The cast is safe because we call forEntry only for indexes that contain entries.
this.value = uncheckedCastNullableTToT(biMap.values[index]);
this.index = index;
}
private void updateIndex() {
if (index == ABSENT || index > biMap.size || !Objects.equal(value, biMap.values[index])) {
index = biMap.findEntryByValue(value);
}
}
@Override
@ParametricNullness
public V getKey() {
return value;
}
@Override
@ParametricNullness
public K getValue() {
updateIndex();
// For discussion of unsafeNull() and uncheckedCastNullableTToT(), see EntryForKey.getValue().
return (index == ABSENT) ? unsafeNull() : uncheckedCastNullableTToT(biMap.keys[index]);
}
@Override
@ParametricNullness
public K setValue(@ParametricNullness K key) {
updateIndex();
if (index == ABSENT) {
biMap.putInverse(value, key, false);
return unsafeNull(); // see EntryForKey.setValue()
}
K oldKey = uncheckedCastNullableTToT(biMap.keys[index]); // see EntryForKey.setValue()
if (Objects.equal(oldKey, key)) {
return key;
}
biMap.replaceKeyInEntry(index, key, false);
return oldKey;
}
}
/**
* @serialData the number of entries, first key, first value, second key, second value, and so on.
*/
@GwtIncompatible // java.io.ObjectOutputStream
@J2ktIncompatible
private void writeObject(ObjectOutputStream stream) throws IOException {
stream.defaultWriteObject();
Serialization.writeMap(this, stream);
}
@GwtIncompatible // java.io.ObjectInputStream
@J2ktIncompatible
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
int size = Serialization.readCount(stream);
init(16); // resist hostile attempts to allocate gratuitous heap
Serialization.populateMap(this, stream, size);
}
}
| google/guava | android/guava/src/com/google/common/collect/HashBiMap.java |
499 | /*
* Copyright (C) 2015 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package retrofit2;
import static retrofit2.Utils.methodError;
import static retrofit2.Utils.parameterError;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
import kotlin.coroutines.Continuation;
import okhttp3.Headers;
import okhttp3.HttpUrl;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.RequestBody;
import retrofit2.http.Body;
import retrofit2.http.DELETE;
import retrofit2.http.Field;
import retrofit2.http.FieldMap;
import retrofit2.http.FormUrlEncoded;
import retrofit2.http.GET;
import retrofit2.http.HEAD;
import retrofit2.http.HTTP;
import retrofit2.http.Header;
import retrofit2.http.HeaderMap;
import retrofit2.http.Multipart;
import retrofit2.http.OPTIONS;
import retrofit2.http.PATCH;
import retrofit2.http.POST;
import retrofit2.http.PUT;
import retrofit2.http.Part;
import retrofit2.http.PartMap;
import retrofit2.http.Path;
import retrofit2.http.Query;
import retrofit2.http.QueryMap;
import retrofit2.http.QueryName;
import retrofit2.http.Tag;
import retrofit2.http.Url;
final class RequestFactory {
static RequestFactory parseAnnotations(Retrofit retrofit, Class<?> service, Method method) {
return new Builder(retrofit, service, method).build();
}
private final Class<?> service;
private final Method method;
private final HttpUrl baseUrl;
final String httpMethod;
private final @Nullable String relativeUrl;
private final @Nullable Headers headers;
private final @Nullable MediaType contentType;
private final boolean hasBody;
private final boolean isFormEncoded;
private final boolean isMultipart;
private final ParameterHandler<?>[] parameterHandlers;
final boolean isKotlinSuspendFunction;
RequestFactory(Builder builder) {
service = builder.service;
method = builder.method;
baseUrl = builder.retrofit.baseUrl;
httpMethod = builder.httpMethod;
relativeUrl = builder.relativeUrl;
headers = builder.headers;
contentType = builder.contentType;
hasBody = builder.hasBody;
isFormEncoded = builder.isFormEncoded;
isMultipart = builder.isMultipart;
parameterHandlers = builder.parameterHandlers;
isKotlinSuspendFunction = builder.isKotlinSuspendFunction;
}
okhttp3.Request create(@Nullable Object instance, Object[] args) throws IOException {
@SuppressWarnings("unchecked") // It is an error to invoke a method with the wrong arg types.
ParameterHandler<Object>[] handlers = (ParameterHandler<Object>[]) parameterHandlers;
int argumentCount = args.length;
if (argumentCount != handlers.length) {
throw new IllegalArgumentException(
"Argument count ("
+ argumentCount
+ ") doesn't match expected count ("
+ handlers.length
+ ")");
}
RequestBuilder requestBuilder =
new RequestBuilder(
httpMethod,
baseUrl,
relativeUrl,
headers,
contentType,
hasBody,
isFormEncoded,
isMultipart);
if (isKotlinSuspendFunction) {
// The Continuation is the last parameter and the handlers array contains null at that index.
argumentCount--;
}
List<Object> argumentList = new ArrayList<>(argumentCount);
for (int p = 0; p < argumentCount; p++) {
argumentList.add(args[p]);
handlers[p].apply(requestBuilder, args[p]);
}
return requestBuilder
.get()
.tag(Invocation.class, new Invocation(service, instance, method, argumentList))
.build();
}
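/*
* A hypothetical service method of the kind this factory parses (a sketch; the interface name
* and endpoint are invented, not part of Retrofit):
*
*   interface GitHubService {
*     @GET("repos/{owner}/{repo}/contributors")
*     Call<ResponseBody> contributors(@Path("owner") String owner, @Path("repo") String repo);
*   }
*
* parseAnnotations reflects over such a method exactly once; the resulting RequestFactory's
* create() then turns each invocation's arguments into an okhttp3.Request.
*/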
/**
* Inspects the annotations on an interface method to construct a reusable service method. This
* requires potentially-expensive reflection so it is best to build each service method only once
* and reuse it. Builders cannot be reused.
*/
static final class Builder {
// Uppercase and lowercase letters, digits, underscores, and hyphens, starting with a letter.
private static final String PARAM = "[a-zA-Z][a-zA-Z0-9_-]*";
private static final Pattern PARAM_URL_REGEX = Pattern.compile("\\{(" + PARAM + ")\\}");
private static final Pattern PARAM_NAME_REGEX = Pattern.compile(PARAM);
final Retrofit retrofit;
final Class<?> service;
final Method method;
final Annotation[] methodAnnotations;
final Annotation[][] parameterAnnotationsArray;
final Type[] parameterTypes;
boolean gotField;
boolean gotPart;
boolean gotBody;
boolean gotPath;
boolean gotQuery;
boolean gotQueryName;
boolean gotQueryMap;
boolean gotUrl;
@Nullable String httpMethod;
boolean hasBody;
boolean isFormEncoded;
boolean isMultipart;
@Nullable String relativeUrl;
@Nullable Headers headers;
@Nullable MediaType contentType;
@Nullable Set<String> relativeUrlParamNames;
@Nullable ParameterHandler<?>[] parameterHandlers;
boolean isKotlinSuspendFunction;
Builder(Retrofit retrofit, Class<?> service, Method method) {
this.retrofit = retrofit;
this.service = service;
this.method = method;
this.methodAnnotations = method.getAnnotations();
this.parameterTypes = method.getGenericParameterTypes();
this.parameterAnnotationsArray = method.getParameterAnnotations();
}
RequestFactory build() {
for (Annotation annotation : methodAnnotations) {
parseMethodAnnotation(annotation);
}
if (httpMethod == null) {
throw methodError(method, "HTTP method annotation is required (e.g., @GET, @POST, etc.).");
}
if (!hasBody) {
if (isMultipart) {
throw methodError(
method,
"Multipart can only be specified on HTTP methods with request body (e.g., @POST).");
}
if (isFormEncoded) {
throw methodError(
method,
"FormUrlEncoded can only be specified on HTTP methods with "
+ "request body (e.g., @POST).");
}
}
int parameterCount = parameterAnnotationsArray.length;
parameterHandlers = new ParameterHandler<?>[parameterCount];
for (int p = 0, lastParameter = parameterCount - 1; p < parameterCount; p++) {
parameterHandlers[p] =
parseParameter(p, parameterTypes[p], parameterAnnotationsArray[p], p == lastParameter);
}
if (relativeUrl == null && !gotUrl) {
throw methodError(method, "Missing either @%s URL or @Url parameter.", httpMethod);
}
if (!isFormEncoded && !isMultipart && !hasBody && gotBody) {
throw methodError(method, "Non-body HTTP method cannot contain @Body.");
}
if (isFormEncoded && !gotField) {
throw methodError(method, "Form-encoded method must contain at least one @Field.");
}
if (isMultipart && !gotPart) {
throw methodError(method, "Multipart method must contain at least one @Part.");
}
return new RequestFactory(this);
}
private void parseMethodAnnotation(Annotation annotation) {
if (annotation instanceof DELETE) {
parseHttpMethodAndPath("DELETE", ((DELETE) annotation).value(), false);
} else if (annotation instanceof GET) {
parseHttpMethodAndPath("GET", ((GET) annotation).value(), false);
} else if (annotation instanceof HEAD) {
parseHttpMethodAndPath("HEAD", ((HEAD) annotation).value(), false);
} else if (annotation instanceof PATCH) {
parseHttpMethodAndPath("PATCH", ((PATCH) annotation).value(), true);
} else if (annotation instanceof POST) {
parseHttpMethodAndPath("POST", ((POST) annotation).value(), true);
} else if (annotation instanceof PUT) {
parseHttpMethodAndPath("PUT", ((PUT) annotation).value(), true);
} else if (annotation instanceof OPTIONS) {
parseHttpMethodAndPath("OPTIONS", ((OPTIONS) annotation).value(), false);
} else if (annotation instanceof HTTP) {
HTTP http = (HTTP) annotation;
parseHttpMethodAndPath(http.method(), http.path(), http.hasBody());
} else if (annotation instanceof retrofit2.http.Headers) {
retrofit2.http.Headers headers = (retrofit2.http.Headers) annotation;
String[] headersToParse = headers.value();
if (headersToParse.length == 0) {
throw methodError(method, "@Headers annotation is empty.");
}
this.headers = parseHeaders(headersToParse, headers.allowUnsafeNonAsciiValues());
} else if (annotation instanceof Multipart) {
if (isFormEncoded) {
throw methodError(method, "Only one encoding annotation is allowed.");
}
isMultipart = true;
} else if (annotation instanceof FormUrlEncoded) {
if (isMultipart) {
throw methodError(method, "Only one encoding annotation is allowed.");
}
isFormEncoded = true;
}
}
private void parseHttpMethodAndPath(String httpMethod, String value, boolean hasBody) {
if (this.httpMethod != null) {
throw methodError(
method,
"Only one HTTP method is allowed. Found: %s and %s.",
this.httpMethod,
httpMethod);
}
this.httpMethod = httpMethod;
this.hasBody = hasBody;
if (value.isEmpty()) {
return;
}
// Get the relative URL path and existing query string, if present.
int question = value.indexOf('?');
if (question != -1 && question < value.length() - 1) {
// Ensure the query string does not have any named parameters.
String queryParams = value.substring(question + 1);
Matcher queryParamMatcher = PARAM_URL_REGEX.matcher(queryParams);
if (queryParamMatcher.find()) {
throw methodError(
method,
"URL query string \"%s\" must not have replace block. "
+ "For dynamic query parameters use @Query.",
queryParams);
}
}
this.relativeUrl = value;
this.relativeUrlParamNames = parsePathParameters(value);
}
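/*
* Sketch of what the check above rejects versus allows (illustrative paths):
*
*   @GET("search?q={term}")   // rejected: replace block inside the query string
*   @GET("search?sort=stars") // allowed: static query parameter
*   @GET("search")            // allowed: pass the dynamic part via @Query("q")
*/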
private Headers parseHeaders(String[] headers, boolean allowUnsafeNonAsciiValues) {
Headers.Builder builder = new Headers.Builder();
for (String header : headers) {
int colon = header.indexOf(':');
if (colon == -1 || colon == 0 || colon == header.length() - 1) {
throw methodError(
method, "@Headers value must be in the form \"Name: Value\". Found: \"%s\"", header);
}
String headerName = header.substring(0, colon);
String headerValue = header.substring(colon + 1).trim();
if ("Content-Type".equalsIgnoreCase(headerName)) {
try {
contentType = MediaType.get(headerValue);
} catch (IllegalArgumentException e) {
throw methodError(method, e, "Malformed content type: %s", headerValue);
}
} else if (allowUnsafeNonAsciiValues) {
builder.addUnsafeNonAscii(headerName, headerValue);
} else {
builder.add(headerName, headerValue);
}
}
return builder.build();
}
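/*
* Sketch of the inputs this method accepts (illustrative): each string must be "Name: Value",
* and a Content-Type header is captured in the contentType field instead of the Headers:
*
*   @Headers({"Accept: application/json", "Content-Type: application/json"})
*   // -> builder receives Accept; contentType becomes the parsed MediaType
*/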
private @Nullable ParameterHandler<?> parseParameter(
int p, Type parameterType, @Nullable Annotation[] annotations, boolean allowContinuation) {
ParameterHandler<?> result = null;
if (annotations != null) {
for (Annotation annotation : annotations) {
ParameterHandler<?> annotationAction =
parseParameterAnnotation(p, parameterType, annotations, annotation);
if (annotationAction == null) {
continue;
}
if (result != null) {
throw parameterError(
method, p, "Multiple Retrofit annotations found, only one allowed.");
}
result = annotationAction;
}
}
if (result == null) {
if (allowContinuation) {
try {
if (Utils.getRawType(parameterType) == Continuation.class) {
isKotlinSuspendFunction = true;
return null;
}
} catch (NoClassDefFoundError ignored) {
// Ignored
}
}
throw parameterError(method, p, "No Retrofit annotation found.");
}
return result;
}
@Nullable
private ParameterHandler<?> parseParameterAnnotation(
int p, Type type, Annotation[] annotations, Annotation annotation) {
if (annotation instanceof Url) {
validateResolvableType(p, type);
if (gotUrl) {
throw parameterError(method, p, "Multiple @Url method annotations found.");
}
if (gotPath) {
throw parameterError(method, p, "@Path parameters may not be used with @Url.");
}
if (gotQuery) {
throw parameterError(method, p, "A @Url parameter must not come after a @Query.");
}
if (gotQueryName) {
throw parameterError(method, p, "A @Url parameter must not come after a @QueryName.");
}
if (gotQueryMap) {
throw parameterError(method, p, "A @Url parameter must not come after a @QueryMap.");
}
if (relativeUrl != null) {
throw parameterError(method, p, "@Url cannot be used with @%s URL", httpMethod);
}
gotUrl = true;
if (type == HttpUrl.class
|| type == String.class
|| type == URI.class
|| (type instanceof Class && "android.net.Uri".equals(((Class<?>) type).getName()))) {
return new ParameterHandler.RelativeUrl(method, p);
} else {
throw parameterError(
method,
p,
"@Url must be okhttp3.HttpUrl, String, java.net.URI, or android.net.Uri type.");
}
} else if (annotation instanceof Path) {
validateResolvableType(p, type);
if (gotQuery) {
throw parameterError(method, p, "A @Path parameter must not come after a @Query.");
}
if (gotQueryName) {
throw parameterError(method, p, "A @Path parameter must not come after a @QueryName.");
}
if (gotQueryMap) {
throw parameterError(method, p, "A @Path parameter must not come after a @QueryMap.");
}
if (gotUrl) {
throw parameterError(method, p, "@Path parameters may not be used with @Url.");
}
if (relativeUrl == null) {
throw parameterError(
method, p, "@Path can only be used with relative url on @%s", httpMethod);
}
gotPath = true;
Path path = (Path) annotation;
String name = path.value();
validatePathName(p, name);
Converter<?, String> converter = retrofit.stringConverter(type, annotations);
return new ParameterHandler.Path<>(method, p, name, converter, path.encoded());
} else if (annotation instanceof Query) {
validateResolvableType(p, type);
Query query = (Query) annotation;
String name = query.value();
boolean encoded = query.encoded();
Class<?> rawParameterType = Utils.getRawType(type);
gotQuery = true;
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
Converter<?, String> converter = retrofit.stringConverter(iterableType, annotations);
return new ParameterHandler.Query<>(name, converter, encoded).iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = boxIfPrimitive(rawParameterType.getComponentType());
Converter<?, String> converter =
retrofit.stringConverter(arrayComponentType, annotations);
return new ParameterHandler.Query<>(name, converter, encoded).array();
} else {
Converter<?, String> converter = retrofit.stringConverter(type, annotations);
return new ParameterHandler.Query<>(name, converter, encoded);
}
} else if (annotation instanceof QueryName) {
validateResolvableType(p, type);
QueryName query = (QueryName) annotation;
boolean encoded = query.encoded();
Class<?> rawParameterType = Utils.getRawType(type);
gotQueryName = true;
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
Converter<?, String> converter = retrofit.stringConverter(iterableType, annotations);
return new ParameterHandler.QueryName<>(converter, encoded).iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = boxIfPrimitive(rawParameterType.getComponentType());
Converter<?, String> converter =
retrofit.stringConverter(arrayComponentType, annotations);
return new ParameterHandler.QueryName<>(converter, encoded).array();
} else {
Converter<?, String> converter = retrofit.stringConverter(type, annotations);
return new ParameterHandler.QueryName<>(converter, encoded);
}
} else if (annotation instanceof QueryMap) {
validateResolvableType(p, type);
Class<?> rawParameterType = Utils.getRawType(type);
gotQueryMap = true;
if (!Map.class.isAssignableFrom(rawParameterType)) {
throw parameterError(method, p, "@QueryMap parameter type must be Map.");
}
Type mapType = Utils.getSupertype(type, rawParameterType, Map.class);
if (!(mapType instanceof ParameterizedType)) {
throw parameterError(
method, p, "Map must include generic types (e.g., Map<String, String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) mapType;
Type keyType = Utils.getParameterUpperBound(0, parameterizedType);
if (String.class != keyType) {
throw parameterError(method, p, "@QueryMap keys must be of type String: " + keyType);
}
Type valueType = Utils.getParameterUpperBound(1, parameterizedType);
Converter<?, String> valueConverter = retrofit.stringConverter(valueType, annotations);
return new ParameterHandler.QueryMap<>(
method, p, valueConverter, ((QueryMap) annotation).encoded());
} else if (annotation instanceof Header) {
validateResolvableType(p, type);
Header header = (Header) annotation;
String name = header.value();
Class<?> rawParameterType = Utils.getRawType(type);
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
Converter<?, String> converter = retrofit.stringConverter(iterableType, annotations);
return new ParameterHandler.Header<>(name, converter, header.allowUnsafeNonAsciiValues())
.iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = boxIfPrimitive(rawParameterType.getComponentType());
Converter<?, String> converter =
retrofit.stringConverter(arrayComponentType, annotations);
return new ParameterHandler.Header<>(name, converter, header.allowUnsafeNonAsciiValues())
.array();
} else {
Converter<?, String> converter = retrofit.stringConverter(type, annotations);
return new ParameterHandler.Header<>(name, converter, header.allowUnsafeNonAsciiValues());
}
} else if (annotation instanceof HeaderMap) {
if (type == Headers.class) {
return new ParameterHandler.Headers(method, p);
}
validateResolvableType(p, type);
Class<?> rawParameterType = Utils.getRawType(type);
if (!Map.class.isAssignableFrom(rawParameterType)) {
throw parameterError(method, p, "@HeaderMap parameter type must be Map or Headers.");
}
Type mapType = Utils.getSupertype(type, rawParameterType, Map.class);
if (!(mapType instanceof ParameterizedType)) {
throw parameterError(
method, p, "Map must include generic types (e.g., Map<String, String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) mapType;
Type keyType = Utils.getParameterUpperBound(0, parameterizedType);
if (String.class != keyType) {
throw parameterError(method, p, "@HeaderMap keys must be of type String: " + keyType);
}
Type valueType = Utils.getParameterUpperBound(1, parameterizedType);
Converter<?, String> valueConverter = retrofit.stringConverter(valueType, annotations);
return new ParameterHandler.HeaderMap<>(
method, p, valueConverter, ((HeaderMap) annotation).allowUnsafeNonAsciiValues());
} else if (annotation instanceof Field) {
validateResolvableType(p, type);
if (!isFormEncoded) {
throw parameterError(method, p, "@Field parameters can only be used with form encoding.");
}
Field field = (Field) annotation;
String name = field.value();
boolean encoded = field.encoded();
gotField = true;
Class<?> rawParameterType = Utils.getRawType(type);
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
Converter<?, String> converter = retrofit.stringConverter(iterableType, annotations);
return new ParameterHandler.Field<>(name, converter, encoded).iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = boxIfPrimitive(rawParameterType.getComponentType());
Converter<?, String> converter =
retrofit.stringConverter(arrayComponentType, annotations);
return new ParameterHandler.Field<>(name, converter, encoded).array();
} else {
Converter<?, String> converter = retrofit.stringConverter(type, annotations);
return new ParameterHandler.Field<>(name, converter, encoded);
}
} else if (annotation instanceof FieldMap) {
validateResolvableType(p, type);
if (!isFormEncoded) {
throw parameterError(
method, p, "@FieldMap parameters can only be used with form encoding.");
}
Class<?> rawParameterType = Utils.getRawType(type);
if (!Map.class.isAssignableFrom(rawParameterType)) {
throw parameterError(method, p, "@FieldMap parameter type must be Map.");
}
Type mapType = Utils.getSupertype(type, rawParameterType, Map.class);
if (!(mapType instanceof ParameterizedType)) {
throw parameterError(
method, p, "Map must include generic types (e.g., Map<String, String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) mapType;
Type keyType = Utils.getParameterUpperBound(0, parameterizedType);
if (String.class != keyType) {
throw parameterError(method, p, "@FieldMap keys must be of type String: " + keyType);
}
Type valueType = Utils.getParameterUpperBound(1, parameterizedType);
Converter<?, String> valueConverter = retrofit.stringConverter(valueType, annotations);
gotField = true;
return new ParameterHandler.FieldMap<>(
method, p, valueConverter, ((FieldMap) annotation).encoded());
} else if (annotation instanceof Part) {
validateResolvableType(p, type);
if (!isMultipart) {
throw parameterError(
method, p, "@Part parameters can only be used with multipart encoding.");
}
Part part = (Part) annotation;
gotPart = true;
String partName = part.value();
Class<?> rawParameterType = Utils.getRawType(type);
if (partName.isEmpty()) {
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
if (!MultipartBody.Part.class.isAssignableFrom(Utils.getRawType(iterableType))) {
throw parameterError(
method,
p,
"@Part annotation must supply a name or use MultipartBody.Part parameter type.");
}
return ParameterHandler.RawPart.INSTANCE.iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = rawParameterType.getComponentType();
if (!MultipartBody.Part.class.isAssignableFrom(arrayComponentType)) {
throw parameterError(
method,
p,
"@Part annotation must supply a name or use MultipartBody.Part parameter type.");
}
return ParameterHandler.RawPart.INSTANCE.array();
} else if (MultipartBody.Part.class.isAssignableFrom(rawParameterType)) {
return ParameterHandler.RawPart.INSTANCE;
} else {
throw parameterError(
method,
p,
"@Part annotation must supply a name or use MultipartBody.Part parameter type.");
}
} else {
Headers headers =
Headers.of(
"Content-Disposition",
"form-data; name=\"" + partName + "\"",
"Content-Transfer-Encoding",
part.encoding());
if (Iterable.class.isAssignableFrom(rawParameterType)) {
if (!(type instanceof ParameterizedType)) {
throw parameterError(
method,
p,
rawParameterType.getSimpleName()
+ " must include generic type (e.g., "
+ rawParameterType.getSimpleName()
+ "<String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) type;
Type iterableType = Utils.getParameterUpperBound(0, parameterizedType);
if (MultipartBody.Part.class.isAssignableFrom(Utils.getRawType(iterableType))) {
throw parameterError(
method,
p,
"@Part parameters using the MultipartBody.Part must not "
+ "include a part name in the annotation.");
}
Converter<?, RequestBody> converter =
retrofit.requestBodyConverter(iterableType, annotations, methodAnnotations);
return new ParameterHandler.Part<>(method, p, headers, converter).iterable();
} else if (rawParameterType.isArray()) {
Class<?> arrayComponentType = boxIfPrimitive(rawParameterType.getComponentType());
if (MultipartBody.Part.class.isAssignableFrom(arrayComponentType)) {
throw parameterError(
method,
p,
"@Part parameters using the MultipartBody.Part must not "
+ "include a part name in the annotation.");
}
Converter<?, RequestBody> converter =
retrofit.requestBodyConverter(arrayComponentType, annotations, methodAnnotations);
return new ParameterHandler.Part<>(method, p, headers, converter).array();
} else if (MultipartBody.Part.class.isAssignableFrom(rawParameterType)) {
throw parameterError(
method,
p,
"@Part parameters using the MultipartBody.Part must not "
+ "include a part name in the annotation.");
} else {
Converter<?, RequestBody> converter =
retrofit.requestBodyConverter(type, annotations, methodAnnotations);
return new ParameterHandler.Part<>(method, p, headers, converter);
}
}
} else if (annotation instanceof PartMap) {
validateResolvableType(p, type);
if (!isMultipart) {
throw parameterError(
method, p, "@PartMap parameters can only be used with multipart encoding.");
}
gotPart = true;
Class<?> rawParameterType = Utils.getRawType(type);
if (!Map.class.isAssignableFrom(rawParameterType)) {
throw parameterError(method, p, "@PartMap parameter type must be Map.");
}
Type mapType = Utils.getSupertype(type, rawParameterType, Map.class);
if (!(mapType instanceof ParameterizedType)) {
throw parameterError(
method, p, "Map must include generic types (e.g., Map<String, String>)");
}
ParameterizedType parameterizedType = (ParameterizedType) mapType;
Type keyType = Utils.getParameterUpperBound(0, parameterizedType);
if (String.class != keyType) {
throw parameterError(method, p, "@PartMap keys must be of type String: " + keyType);
}
Type valueType = Utils.getParameterUpperBound(1, parameterizedType);
if (MultipartBody.Part.class.isAssignableFrom(Utils.getRawType(valueType))) {
throw parameterError(
method,
p,
"@PartMap values cannot be MultipartBody.Part. "
+ "Use @Part List<Part> or a different value type instead.");
}
Converter<?, RequestBody> valueConverter =
retrofit.requestBodyConverter(valueType, annotations, methodAnnotations);
PartMap partMap = (PartMap) annotation;
return new ParameterHandler.PartMap<>(method, p, valueConverter, partMap.encoding());
} else if (annotation instanceof Body) {
validateResolvableType(p, type);
if (isFormEncoded || isMultipart) {
throw parameterError(
method, p, "@Body parameters cannot be used with form or multi-part encoding.");
}
if (gotBody) {
throw parameterError(method, p, "Multiple @Body method annotations found.");
}
Converter<?, RequestBody> converter;
try {
converter = retrofit.requestBodyConverter(type, annotations, methodAnnotations);
} catch (RuntimeException e) {
// Wide exception range because factories are user code.
throw parameterError(method, e, p, "Unable to create @Body converter for %s", type);
}
gotBody = true;
return new ParameterHandler.Body<>(method, p, converter);
} else if (annotation instanceof Tag) {
validateResolvableType(p, type);
Class<?> tagType = Utils.getRawType(type);
for (int i = p - 1; i >= 0; i--) {
ParameterHandler<?> otherHandler = parameterHandlers[i];
if (otherHandler instanceof ParameterHandler.Tag
&& ((ParameterHandler.Tag) otherHandler).cls.equals(tagType)) {
throw parameterError(
method,
p,
"@Tag type "
+ tagType.getName()
+ " is duplicate of "
+ Platform.reflection.describeMethodParameter(method, i)
+ " and would always overwrite its value.");
}
}
return new ParameterHandler.Tag<>(tagType);
}
return null; // Not a Retrofit annotation.
}
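// Illustrative sketch only (service and parameter names are invented): an
// interface that exercises the annotations validated above. MultipartBody.Part
// parameters carry their own name, so @Part must stay empty for them, while
// converter-based parts must supply one.
//
//   interface FileService {
//     @Multipart
//     @POST("upload")
//     Call<ResponseBody> upload(
//         @Part("description") RequestBody description,
//         @Part List<MultipartBody.Part> files,
//         @PartMap Map<String, RequestBody> extra);
//
//     @POST("echo")
//     Call<ResponseBody> echo(@Body RequestBody body, @Tag String traceId);
//   }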
private void validateResolvableType(int p, Type type) {
if (Utils.hasUnresolvableType(type)) {
throw parameterError(
method, p, "Parameter type must not include a type variable or wildcard: %s", type);
}
}
private void validatePathName(int p, String name) {
if (!PARAM_NAME_REGEX.matcher(name).matches()) {
throw parameterError(
method,
p,
"@Path parameter name must match %s. Found: %s",
PARAM_URL_REGEX.pattern(),
name);
}
// Verify URL replacement name is actually present in the URL path.
if (!relativeUrlParamNames.contains(name)) {
throw parameterError(method, p, "URL \"%s\" does not contain \"{%s}\".", relativeUrl, name);
}
}
/**
* Gets the set of unique path parameters used in the given URI. If a parameter is used twice in
* the URI, it will only show up once in the set.
*/
static Set<String> parsePathParameters(String path) {
Matcher m = PARAM_URL_REGEX.matcher(path);
Set<String> patterns = new LinkedHashSet<>();
while (m.find()) {
patterns.add(m.group(1));
}
return patterns;
}
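// For example, parsePathParameters("/repos/{owner}/{repo}/issues/{owner}")
// yields the set ["owner", "repo"]: each placeholder name is reported once,
// in first-seen order, because names are collected into a LinkedHashSet.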
private static Class<?> boxIfPrimitive(Class<?> type) {
if (boolean.class == type) return Boolean.class;
if (byte.class == type) return Byte.class;
if (char.class == type) return Character.class;
if (double.class == type) return Double.class;
if (float.class == type) return Float.class;
if (int.class == type) return Integer.class;
if (long.class == type) return Long.class;
if (short.class == type) return Short.class;
return type;
}
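// e.g. boxIfPrimitive(int.class) == Integer.class, while reference types such
// as String.class pass through unchanged.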
}
}
| square/retrofit | retrofit/src/main/java/retrofit2/RequestFactory.java |
500 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.rest;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.io.stream.BytesStream;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.Streams;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.http.HttpHeadersValidationException;
import org.elasticsearch.http.HttpRouteStats;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.rest.RestHandler.Route;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.telemetry.tracing.Tracer;
import org.elasticsearch.transport.Transports;
import org.elasticsearch.usage.SearchUsageHolder;
import org.elasticsearch.usage.UsageService;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentType;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import static org.elasticsearch.indices.SystemIndices.EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY;
import static org.elasticsearch.indices.SystemIndices.SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY;
import static org.elasticsearch.rest.RestResponse.TEXT_CONTENT_TYPE;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;
import static org.elasticsearch.rest.RestStatus.METHOD_NOT_ALLOWED;
import static org.elasticsearch.rest.RestStatus.NOT_ACCEPTABLE;
import static org.elasticsearch.rest.RestStatus.OK;
public class RestController implements HttpServerTransport.Dispatcher {
private static final Logger logger = LogManager.getLogger(RestController.class);
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestController.class);
/**
* list of browser-safelisted media types that are not allowed in the Content-Type header

* https://fetch.spec.whatwg.org/#cors-safelisted-request-header
*/
static final Set<String> SAFELISTED_MEDIA_TYPES = Set.of("application/x-www-form-urlencoded", "multipart/form-data", "text/plain");
static final String ELASTIC_PRODUCT_HTTP_HEADER = "X-elastic-product";
static final String ELASTIC_PRODUCT_HTTP_HEADER_VALUE = "Elasticsearch";
static final Set<String> RESERVED_PATHS = Set.of("/__elb_health__", "/__elb_health__/zk", "/_health", "/_health/zk");
private static final BytesReference FAVICON_RESPONSE;
static {
try (InputStream stream = RestController.class.getResourceAsStream("/config/favicon.ico")) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
Streams.copy(stream, out);
FAVICON_RESPONSE = new BytesArray(out.toByteArray());
} catch (IOException e) {
throw new AssertionError(e);
}
}
private final PathTrie<MethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER);
private final RestInterceptor interceptor;
private final NodeClient client;
private final CircuitBreakerService circuitBreakerService;
private final UsageService usageService;
private final Tracer tracer;
// If true, the ServerlessScope annotations will be enforced
private final ServerlessApiProtections apiProtections;
public RestController(
RestInterceptor restInterceptor,
NodeClient client,
CircuitBreakerService circuitBreakerService,
UsageService usageService,
Tracer tracer
) {
this.usageService = usageService;
this.tracer = tracer;
if (restInterceptor == null) {
restInterceptor = (request, channel, targetHandler, listener) -> listener.onResponse(Boolean.TRUE);
}
this.interceptor = restInterceptor;
this.client = client;
this.circuitBreakerService = circuitBreakerService;
registerHandlerNoWrap(RestRequest.Method.GET, "/favicon.ico", RestApiVersion.current(), new RestFavIconHandler());
this.apiProtections = new ServerlessApiProtections(false);
}
public ServerlessApiProtections getApiProtections() {
return apiProtections;
}
/**
* Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request.
*
* @param method GET, POST, etc.
* @param path Path to handle (e.g. "/{index}/{type}/_bulk")
* @param version API version to handle (e.g. RestApiVersion.V_8)
* @param handler The handler to actually execute
* @param deprecationMessage The message to log and send as a header in the response
*/
protected void registerAsDeprecatedHandler(
RestRequest.Method method,
String path,
RestApiVersion version,
RestHandler handler,
String deprecationMessage
) {
registerAsDeprecatedHandler(method, path, version, handler, deprecationMessage, null);
}
/**
* Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request.
*
* @param method GET, POST, etc.
* @param path Path to handle (e.g. "/{index}/{type}/_bulk")
* @param version API version to handle (e.g. RestApiVersion.V_8)
* @param handler The handler to actually execute
* @param deprecationMessage The message to log and send as a header in the response
* @param deprecationLevel The deprecation log level to use for the deprecation warning, either WARN or CRITICAL
*/
protected void registerAsDeprecatedHandler(
RestRequest.Method method,
String path,
RestApiVersion version,
RestHandler handler,
String deprecationMessage,
@Nullable Level deprecationLevel
) {
assert (handler instanceof DeprecationRestHandler) == false;
if (version == RestApiVersion.current()) {
// e.g. it was marked as deprecated in 8.x, and we're currently running 8.x
registerHandler(
method,
path,
version,
new DeprecationRestHandler(handler, method, path, deprecationLevel, deprecationMessage, deprecationLogger, false)
);
} else if (version == RestApiVersion.minimumSupported()) {
// e.g. it was marked as deprecated in 7.x, and we're currently running 8.x
registerHandler(
method,
path,
version,
new DeprecationRestHandler(handler, method, path, deprecationLevel, deprecationMessage, deprecationLogger, true)
);
} else {
// e.g. it was marked as deprecated in 7.x, and we're currently running *9.x*
logger.debug(
"Deprecated route ["
+ method
+ " "
+ path
+ "] for handler ["
+ handler.getClass()
+ "] "
+ "with version ["
+ version
+ "], which is less than the minimum supported version ["
+ RestApiVersion.minimumSupported()
+ "]"
);
}
}
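// Sketch (hypothetical path and handler, shown only for illustration):
//
//   registerAsDeprecatedHandler(GET, "/_old_api", RestApiVersion.current(),
//       someHandler, "[GET /_old_api] is deprecated", Level.WARN);
//
// wraps someHandler in a DeprecationRestHandler so that each call logs the
// deprecation message at the given level before delegating to the handler.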
/**
* Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request, or when provided
* with {@code replacedMethod} and {@code replacedPath}. Expected usage:
* <pre><code>
* // remove deprecation in next major release
* controller.registerAsDeprecatedHandler(POST, "/_forcemerge", RestApiVersion.V_8, someHandler,
* POST, "/_optimize", RestApiVersion.V_7);
* controller.registerAsDeprecatedHandler(POST, "/{index}/_forcemerge", RestApiVersion.V_8, someHandler,
* POST, "/{index}/_optimize", RestApiVersion.V_7);
* </code></pre>
* <p>
* The registered REST handler ({@code method} with {@code path}) is a normal REST handler that is not deprecated and it is
* replacing the deprecated REST handler ({@code replacedMethod} with {@code replacedPath}) that is using the <em>same</em>
* {@code handler}.
* <p>
* Deprecated REST handlers without a direct replacement should be deprecated directly using {@link #registerAsDeprecatedHandler}
* and a specific message.
*
* @param method GET, POST, etc.
* @param path Path to handle (e.g. "/_forcemerge")
* @param version API version to handle (e.g. RestApiVersion.V_8)
* @param handler The handler to actually execute
* @param replacedMethod GET, POST, etc.
* @param replacedPath <em>Replaced</em> path to handle (e.g. "/_optimize")
* @param replacedVersion <em>Replaced</em> API version to handle (e.g. RestApiVersion.V_7)
*/
protected void registerAsReplacedHandler(
RestRequest.Method method,
String path,
RestApiVersion version,
RestHandler handler,
RestRequest.Method replacedMethod,
String replacedPath,
RestApiVersion replacedVersion
) {
// e.g. [POST /_optimize] is deprecated! Use [POST /_forcemerge] instead.
final String replacedMessage = "["
+ replacedMethod.name()
+ " "
+ replacedPath
+ "] is deprecated! Use ["
+ method.name()
+ " "
+ path
+ "] instead.";
registerHandler(method, path, version, handler);
registerAsDeprecatedHandler(replacedMethod, replacedPath, replacedVersion, handler, replacedMessage);
}
/**
* Registers a REST handler to be executed when one of the provided methods and path match the request.
*
* @param method GET, POST, etc.
* @param path Path to handle (e.g. "/{index}/{type}/_bulk")
* @param version API version to handle (e.g. RestApiVersion.V_8)
* @param handler The handler to actually execute
*/
protected void registerHandler(RestRequest.Method method, String path, RestApiVersion version, RestHandler handler) {
if (handler instanceof BaseRestHandler) {
usageService.addRestHandler((BaseRestHandler) handler);
}
registerHandlerNoWrap(method, path, version, handler);
}
private void registerHandlerNoWrap(RestRequest.Method method, String path, RestApiVersion version, RestHandler handler) {
assert RestApiVersion.minimumSupported() == version || RestApiVersion.current() == version
: "REST API compatibility is only supported for version " + RestApiVersion.minimumSupported().major;
if (RESERVED_PATHS.contains(path)) {
throw new IllegalArgumentException("path [" + path + "] is a reserved path and may not be registered");
}
// the HTTP OPTIONS method is treated internally, not by handlers, see {@code #handleNoHandlerFound}
assert method != RestRequest.Method.OPTIONS : "There should be no handlers registered for the OPTIONS HTTP method";
handlers.insertOrUpdate(
path,
new MethodHandlers(path).addMethod(method, version, handler),
(handlers, ignoredHandler) -> handlers.addMethod(method, version, handler)
);
}
public void registerHandler(final Route route, final RestHandler handler) {
if (route.isReplacement()) {
Route replaced = route.getReplacedRoute();
registerAsReplacedHandler(
route.getMethod(),
route.getPath(),
route.getRestApiVersion(),
handler,
replaced.getMethod(),
replaced.getPath(),
replaced.getRestApiVersion()
);
} else if (route.isDeprecated()) {
registerAsDeprecatedHandler(
route.getMethod(),
route.getPath(),
route.getRestApiVersion(),
handler,
route.getDeprecationMessage(),
route.getDeprecationLevel()
);
} else {
// it's just a normal route
registerHandler(route.getMethod(), route.getPath(), route.getRestApiVersion(), handler);
}
}
/**
* Registers a REST handler with the controller. The REST handler declares the {@code method}
* and {@code path} combinations.
*/
public void registerHandler(final RestHandler handler) {
handler.routes().forEach(route -> registerHandler(route, handler));
}
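// Typical usage (hypothetical route, for illustration only): a handler
// declares its routes once, and each declared route is dispatched into
// registerHandler(Route, RestHandler) above.
//
//   public List<Route> routes() {
//       return List.of(new Route(GET, "/_example"));
//   }
//   ...
//   restController.registerHandler(myHandler);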
@Override
public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
threadContext.addResponseHeader(ELASTIC_PRODUCT_HTTP_HEADER, ELASTIC_PRODUCT_HTTP_HEADER_VALUE);
try {
tryAllHandlers(request, channel, threadContext);
} catch (Exception e) {
try {
sendFailure(channel, e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error(() -> "failed to send failure response for uri [" + request.uri() + "]", inner);
}
}
}
@Override
public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
threadContext.addResponseHeader(ELASTIC_PRODUCT_HTTP_HEADER, ELASTIC_PRODUCT_HTTP_HEADER_VALUE);
try {
final Exception e;
if (cause == null) {
e = new ElasticsearchException("unknown cause");
} else if (cause instanceof Exception) {
e = (Exception) cause;
} else {
e = new ElasticsearchException(cause);
}
// unless it's an http headers validation error, we consider any exceptions encountered so far during request processing
// to be a problem of an invalid/malformed request (hence the RestStatus#BAD_REQUEST (400) HTTP response code)
if (e instanceof HttpHeadersValidationException) {
sendFailure(channel, (Exception) e.getCause());
} else {
channel.sendResponse(new RestResponse(channel, BAD_REQUEST, e));
}
} catch (final IOException e) {
if (cause != null) {
e.addSuppressed(cause);
}
logger.warn("failed to send bad request response", e);
channel.sendResponse(new RestResponse(INTERNAL_SERVER_ERROR, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
}
public boolean checkSupported(
RestRequest.Method method,
String path,
Set<String> parameters,
Set<String> capabilities,
RestApiVersion restApiVersion
) {
Iterator<MethodHandlers> allHandlers = getAllHandlers(null, path);
while (allHandlers.hasNext()) {
RestHandler handler;
MethodHandlers handlers = allHandlers.next();
if (handlers == null) {
handler = null;
} else {
handler = handlers.getHandler(method, restApiVersion);
}
if (handler != null) {
var supportedParams = handler.supportedQueryParameters();
return (supportedParams == null || supportedParams.containsAll(parameters))
&& handler.supportedCapabilities().containsAll(capabilities);
}
}
return false;
}
@Override
public Map<String, HttpRouteStats> getStats() {
final Iterator<MethodHandlers> methodHandlersIterator = handlers.allNodeValues();
final SortedMap<String, HttpRouteStats> allStats = new TreeMap<>();
while (methodHandlersIterator.hasNext()) {
final MethodHandlers mh = methodHandlersIterator.next();
final HttpRouteStats stats = mh.getStats();
if (stats.requestCount() > 0 || stats.responseCount() > 0) {
allStats.put(mh.getPath(), stats);
}
}
return Collections.unmodifiableSortedMap(allStats);
}
private void dispatchRequest(
RestRequest request,
RestChannel channel,
RestHandler handler,
MethodHandlers methodHandlers,
ThreadContext threadContext
) throws Exception {
final int contentLength = request.contentLength();
if (contentLength > 0) {
if (isContentTypeDisallowed(request) || handler.mediaTypesValid(request) == false) {
sendContentTypeErrorMessage(request.getAllHeaderValues("Content-Type"), channel);
return;
}
final XContentType xContentType = request.getXContentType();
// TODO consider refactoring to handler.supportsContentStream(xContentType). It is only used with JSON and SMILE
if (handler.supportsContentStream()
&& XContentType.JSON != xContentType.canonical()
&& XContentType.SMILE != xContentType.canonical()) {
channel.sendResponse(
RestResponse.createSimpleErrorResponse(
channel,
RestStatus.NOT_ACCEPTABLE,
"Content-Type [" + xContentType + "] does not support stream parsing. Use JSON or SMILE instead"
)
);
return;
}
}
RestChannel responseChannel = channel;
if (apiProtections.isEnabled()) {
Scope scope = handler.getServerlessScope();
if (scope == null) {
handleServerlessRequestToProtectedResource(request.uri(), request.method(), responseChannel);
return;
}
}
try {
if (handler.canTripCircuitBreaker()) {
inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, "<http_request>");
} else {
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(contentLength);
}
// if (and only if) we could reserve bytes for the request, we need to send the response over this channel as well
responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength, methodHandlers);
// TODO: Count requests double in the circuit breaker if they need copying?
if (handler.allowsUnsafeBuffers() == false) {
request.ensureSafeBuffers();
}
if (handler.allowSystemIndexAccessByDefault() == false) {
// The ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER indicates that the request is coming from an Elastic product and
// therefore we should allow a subset of external system index access.
// This header is intended for internal use only.
final String prodOriginValue = request.header(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER);
if (prodOriginValue != null) {
threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.TRUE.toString());
threadContext.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, prodOriginValue);
} else {
threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.FALSE.toString());
}
} else {
threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.TRUE.toString());
}
final var finalChannel = responseChannel;
this.interceptor.intercept(request, responseChannel, handler.getConcreteRestHandler(), new ActionListener<>() {
@Override
public void onResponse(Boolean processRequest) {
if (processRequest) {
try {
validateRequest(request, handler, client);
handler.handleRequest(request, finalChannel, client);
} catch (Exception e) {
onFailure(e);
}
}
}
@Override
public void onFailure(Exception e) {
try {
sendFailure(finalChannel, e);
} catch (IOException ex) {
logger.info("Failed to send error [{}] to HTTP client", ex.toString());
}
}
});
} catch (Exception e) {
sendFailure(responseChannel, e);
}
}
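// Accounting sketch: a request with a 1 MB body reserves 1 MB in the
// in-flight-requests breaker here (tripping it if the limit would be exceeded
// and the handler allows tripping), and ResourceHandlingHttpChannel below
// releases the same amount when the response is sent, so the total size of
// in-flight request bodies stays bounded by the breaker's limit.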
/**
* Validates that the request should be allowed. Throws an exception if the request should be rejected.
*/
@SuppressWarnings("unused")
protected void validateRequest(RestRequest request, RestHandler handler, NodeClient client) throws ElasticsearchStatusException {}
private static void sendFailure(RestChannel responseChannel, Exception e) throws IOException {
responseChannel.sendResponse(new RestResponse(responseChannel, e));
}
/**
 * In order to prevent CSRF we have to reject all media types that are on the browser safelist;
 * see https://fetch.spec.whatwg.org/#cors-safelisted-request-header
 * see https://www.elastic.co/blog/strict-content-type-checking-for-elasticsearch-rest-requests
 * @param request the request whose parsed Content-Type is checked against the safelist
 */
private static boolean isContentTypeDisallowed(RestRequest request) {
return request.getParsedContentType() != null
&& SAFELISTED_MEDIA_TYPES.contains(request.getParsedContentType().mediaTypeWithoutParameters());
}
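// Example: a request sent with "Content-Type: text/plain" is rejected, since a
// browser form can produce that media type without a CORS preflight, while
// "application/json" is not on the safelist and passes this check.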
private boolean handleNoHandlerFound(
ThreadContext threadContext,
String rawPath,
RestRequest.Method method,
String uri,
RestChannel channel
) {
// Get the map of matching handlers for a request, for the full set of HTTP methods.
final Set<RestRequest.Method> validMethodSet = getValidHandlerMethodSet(rawPath);
if (validMethodSet.contains(method) == false) {
if (method == RestRequest.Method.OPTIONS) {
startTrace(threadContext, channel);
handleOptionsRequest(channel, validMethodSet);
return true;
}
if (validMethodSet.isEmpty() == false) {
// If an alternative handler for an explicit path is registered to a
// different HTTP method than the one supplied - return a 405 Method
// Not Allowed error.
startTrace(threadContext, channel);
handleUnsupportedHttpMethod(uri, method, channel, validMethodSet, null);
return true;
}
}
return false;
}
private void startTrace(ThreadContext threadContext, RestChannel channel) {
startTrace(threadContext, channel, null);
}
private void startTrace(ThreadContext threadContext, RestChannel channel, String restPath) {
final RestRequest req = channel.request();
if (restPath == null) {
restPath = req.path();
}
String method = null;
try {
method = req.method().name();
} catch (IllegalArgumentException e) {
// Invalid methods throw an exception
}
String name;
if (method != null) {
name = method + " " + restPath;
} else {
name = restPath;
}
final Map<String, Object> attributes = Maps.newMapWithExpectedSize(req.getHeaders().size() + 3);
req.getHeaders().forEach((key, values) -> {
final String lowerKey = key.toLowerCase(Locale.ROOT).replace('-', '_');
attributes.put("http.request.headers." + lowerKey, values.size() == 1 ? values.get(0) : String.join("; ", values));
});
attributes.put("http.method", method);
attributes.put("http.url", req.uri());
switch (req.getHttpRequest().protocolVersion()) {
case HTTP_1_0 -> attributes.put("http.flavour", "1.0");
case HTTP_1_1 -> attributes.put("http.flavour", "1.1");
}
tracer.startTrace(threadContext, channel.request(), name, attributes);
}
private void traceException(RestChannel channel, Throwable e) {
this.tracer.addError(channel.request(), e);
}
private static void sendContentTypeErrorMessage(@Nullable List<String> contentTypeHeader, RestChannel channel) throws IOException {
final String errorMessage;
if (contentTypeHeader == null) {
errorMessage = "Content-Type header is missing";
} else {
errorMessage = "Content-Type header [" + Strings.collectionToCommaDelimitedString(contentTypeHeader) + "] is not supported";
}
channel.sendResponse(RestResponse.createSimpleErrorResponse(channel, NOT_ACCEPTABLE, errorMessage));
}
private void tryAllHandlers(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) throws Exception {
try {
validateErrorTrace(request, channel);
} catch (IllegalArgumentException e) {
startTrace(threadContext, channel);
channel.sendResponse(RestResponse.createSimpleErrorResponse(channel, BAD_REQUEST, e.getMessage()));
return;
}
final String rawPath = request.rawPath();
final String uri = request.uri();
final RestRequest.Method requestMethod;
RestApiVersion restApiVersion = request.getRestApiVersion();
try {
// Resolves the HTTP method and fails if the method is invalid
requestMethod = request.method();
// Loop through all possible handlers, attempting to dispatch the request
Iterator<MethodHandlers> allHandlers = getAllHandlers(request.params(), rawPath);
while (allHandlers.hasNext()) {
final RestHandler handler;
final MethodHandlers handlers = allHandlers.next();
if (handlers == null) {
handler = null;
} else {
handler = handlers.getHandler(requestMethod, restApiVersion);
}
if (handler == null) {
if (handleNoHandlerFound(threadContext, rawPath, requestMethod, uri, channel)) {
return;
}
} else {
startTrace(threadContext, channel, handlers.getPath());
dispatchRequest(request, channel, handler, handlers, threadContext);
return;
}
}
} catch (final IllegalArgumentException e) {
startTrace(threadContext, channel);
traceException(channel, e);
handleUnsupportedHttpMethod(uri, null, channel, getValidHandlerMethodSet(rawPath), e);
return;
}
// If request has not been handled, fallback to a bad request error.
startTrace(threadContext, channel);
handleBadRequest(uri, requestMethod, channel);
}
private static void validateErrorTrace(RestRequest request, RestChannel channel) {
// error_trace cannot be used when we disable detailed errors
// we consume the error_trace parameter first to ensure that it is always consumed
if (request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled() == false) {
throw new IllegalArgumentException("error traces in responses are disabled.");
}
}
Iterator<MethodHandlers> getAllHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) {
final Supplier<Map<String, String>> paramsSupplier;
if (requestParamsRef == null) {
paramsSupplier = () -> null;
} else {
// Between attempts to retrieve the matching path, we need to reset the parameters,
// otherwise parameters that aren't actually handled get parsed out of the URI.
final Map<String, String> originalParams = Map.copyOf(requestParamsRef);
paramsSupplier = () -> {
// PathTrie modifies the request, so reset the params between each iteration
requestParamsRef.clear();
requestParamsRef.putAll(originalParams);
return requestParamsRef;
};
}
// we use rawPath since we don't want to decode it while processing the path resolution
// so we can handle things like:
// my_index/my_type/http%3A%2F%2Fwww.google.com
return handlers.retrieveAll(rawPath, paramsSupplier).iterator();
}
/**
* Returns the holder for search usage statistics, to be used to track search usage when parsing
* incoming search requests from the relevant REST endpoints. This is exposed for plugins that
* expose search functionalities which need to contribute to the search usage statistics.
*/
public SearchUsageHolder getSearchUsageHolder() {
return usageService.getSearchUsageHolder();
}
/**
* Handle requests to a valid REST endpoint using an unsupported HTTP
* method. A 405 HTTP response code is returned, and the response 'Allow'
* header includes a list of valid HTTP methods for the endpoint (see
* <a href="https://tools.ietf.org/html/rfc2616#section-10.4.6">HTTP/1.1 -
* 10.4.6 - 405 Method Not Allowed</a>).
*/
private static void handleUnsupportedHttpMethod(
String uri,
@Nullable RestRequest.Method method,
final RestChannel channel,
final Set<RestRequest.Method> validMethodSet,
@Nullable final IllegalArgumentException exception
) {
try {
final StringBuilder msg = new StringBuilder();
if (exception == null) {
msg.append("Incorrect HTTP method for uri [").append(uri);
msg.append("] and method [").append(method).append("]");
} else {
msg.append(exception.getMessage());
}
if (validMethodSet.isEmpty() == false) {
msg.append(", allowed: ").append(validMethodSet);
}
RestResponse restResponse = RestResponse.createSimpleErrorResponse(channel, METHOD_NOT_ALLOWED, msg.toString());
if (validMethodSet.isEmpty() == false) {
restResponse.addHeader("Allow", Strings.collectionToDelimitedString(validMethodSet, ","));
}
channel.sendResponse(restResponse);
} catch (final IOException e) {
logger.warn("failed to send bad request response", e);
channel.sendResponse(new RestResponse(INTERNAL_SERVER_ERROR, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
}
/**
* Handle HTTP OPTIONS requests to a valid REST endpoint. A 200 HTTP
* response code is returned, and the response 'Allow' header includes a
* list of valid HTTP methods for the endpoint (see
* <a href="https://tools.ietf.org/html/rfc2616#section-9.2">HTTP/1.1 - 9.2
* - Options</a>).
*/
private static void handleOptionsRequest(RestChannel channel, Set<RestRequest.Method> validMethodSet) {
RestResponse restResponse = new RestResponse(OK, TEXT_CONTENT_TYPE, BytesArray.EMPTY);
// When we have an OPTIONS HTTP request and no valid handlers, simply send OK by default (with the Access Control Origin header
// which gets automatically added).
if (validMethodSet.isEmpty() == false) {
restResponse.addHeader("Allow", Strings.collectionToDelimitedString(validMethodSet, ","));
}
channel.sendResponse(restResponse);
}
/**
* Handle a request with no candidate handlers (return a 400 Bad Request
* error).
*/
public static void handleBadRequest(String uri, RestRequest.Method method, RestChannel channel) throws IOException {
try (XContentBuilder builder = channel.newErrorBuilder()) {
builder.startObject();
{
builder.field("error", "no handler found for uri [" + uri + "] and method [" + method + "]");
}
builder.endObject();
channel.sendResponse(new RestResponse(BAD_REQUEST, builder));
}
}
public static void handleServerlessRequestToProtectedResource(String uri, RestRequest.Method method, RestChannel channel)
throws IOException {
String msg = "uri [" + uri + "] with method [" + method + "] exists but is not available when running in serverless mode";
sendFailure(channel, new ApiNotAvailableException(msg));
}
/**
* Get the valid set of HTTP methods for a REST request.
*/
private Set<RestRequest.Method> getValidHandlerMethodSet(String rawPath) {
Set<RestRequest.Method> validMethods = EnumSet.noneOf(RestRequest.Method.class);
Iterator<MethodHandlers> allHandlers = getAllHandlers(null, rawPath);
while (allHandlers.hasNext()) {
final MethodHandlers methodHandlers = allHandlers.next();
if (methodHandlers != null) {
validMethods.addAll(methodHandlers.getValidMethods());
}
}
return validMethods;
}
private static final class ResourceHandlingHttpChannel implements RestChannel {
private final RestChannel delegate;
private final CircuitBreakerService circuitBreakerService;
private final int contentLength;
private final MethodHandlers methodHandlers;
private final long startTime;
private final AtomicBoolean closed = new AtomicBoolean();
ResourceHandlingHttpChannel(
RestChannel delegate,
CircuitBreakerService circuitBreakerService,
int contentLength,
MethodHandlers methodHandlers
) {
this.delegate = delegate;
this.circuitBreakerService = circuitBreakerService;
this.contentLength = contentLength;
this.methodHandlers = methodHandlers;
this.startTime = rawRelativeTimeInMillis();
}
@Override
public XContentBuilder newBuilder() throws IOException {
return delegate.newBuilder();
}
@Override
public XContentBuilder newErrorBuilder() throws IOException {
return delegate.newErrorBuilder();
}
@Override
public XContentBuilder newBuilder(@Nullable XContentType xContentType, boolean useFiltering) throws IOException {
return delegate.newBuilder(xContentType, useFiltering);
}
@Override
public XContentBuilder newBuilder(XContentType xContentType, XContentType responseContentType, boolean useFiltering)
throws IOException {
return delegate.newBuilder(xContentType, responseContentType, useFiltering);
}
@Override
public XContentBuilder newBuilder(
XContentType xContentType,
XContentType responseContentType,
boolean useFiltering,
OutputStream out
) throws IOException {
return delegate.newBuilder(xContentType, responseContentType, useFiltering, out);
}
@Override
public BytesStream bytesOutput() {
return delegate.bytesOutput();
}
@Override
public void releaseOutputBuffer() {
delegate.releaseOutputBuffer();
}
@Override
public RestRequest request() {
return delegate.request();
}
@Override
public boolean detailedErrorsEnabled() {
return delegate.detailedErrorsEnabled();
}
@Override
public void sendResponse(RestResponse response) {
boolean success = false;
try {
close();
methodHandlers.addRequestStats(contentLength);
methodHandlers.addResponseTime(rawRelativeTimeInMillis() - startTime);
if (response.isChunked() == false) {
methodHandlers.addResponseStats(response.content().length());
} else {
final var responseLengthRecorder = new ResponseLengthRecorder(methodHandlers);
final var headers = response.getHeaders();
response = RestResponse.chunked(
response.status(),
new EncodedLengthTrackingChunkedRestResponseBody(response.chunkedContent(), responseLengthRecorder),
Releasables.wrap(responseLengthRecorder, response)
);
for (final var header : headers.entrySet()) {
for (final var value : header.getValue()) {
response.addHeader(header.getKey(), value);
}
}
}
delegate.sendResponse(response);
success = true;
} finally {
if (success == false) {
releaseOutputBuffer();
}
}
}
private static long rawRelativeTimeInMillis() {
return TimeValue.nsecToMSec(System.nanoTime());
}
private void close() {
// attempt to close once atomically
if (closed.compareAndSet(false, true) == false) {
throw new IllegalStateException("Channel is already closed");
}
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(-contentLength);
}
}
private static class ResponseLengthRecorder extends AtomicReference<MethodHandlers> implements Releasable {
private long responseLength;
private ResponseLengthRecorder(MethodHandlers methodHandlers) {
super(methodHandlers);
}
@Override
public void close() {
// closed just before sending the last chunk, and also when the whole RestResponse is closed since the client might abort the
// connection before we send the last chunk, in which case we won't have recorded the response in the
// stats yet; thus we need run-once semantics here:
final var methodHandlers = getAndSet(null);
if (methodHandlers != null) {
// if we started sending chunks then we're closed on the transport worker, no need for sync
assert responseLength == 0L || Transports.assertTransportThread();
methodHandlers.addResponseStats(responseLength);
}
}
void addChunkLength(long chunkLength) {
assert chunkLength >= 0L : chunkLength;
assert Transports.assertTransportThread(); // always called on the transport worker, no need for sync
assert get() != null : "already closed";
responseLength += chunkLength;
}
}
private static class EncodedLengthTrackingChunkedRestResponseBody implements ChunkedRestResponseBody {
private final ChunkedRestResponseBody delegate;
private final ResponseLengthRecorder responseLengthRecorder;
private EncodedLengthTrackingChunkedRestResponseBody(
ChunkedRestResponseBody delegate,
ResponseLengthRecorder responseLengthRecorder
) {
this.delegate = delegate;
this.responseLengthRecorder = responseLengthRecorder;
}
@Override
public boolean isDone() {
return delegate.isDone();
}
@Override
public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> recycler) throws IOException {
final ReleasableBytesReference bytesReference = delegate.encodeChunk(sizeHint, recycler);
responseLengthRecorder.addChunkLength(bytesReference.length());
if (isDone()) {
responseLengthRecorder.close();
}
return bytesReference;
}
@Override
public String getResponseContentTypeString() {
return delegate.getResponseContentTypeString();
}
}
private static CircuitBreaker inFlightRequestsBreaker(CircuitBreakerService circuitBreakerService) {
// We always obtain a fresh breaker to reflect changes to the breaker configuration.
return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
}
@ServerlessScope(Scope.PUBLIC)
private static final class RestFavIconHandler implements RestHandler {
@Override
public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
channel.sendResponse(new RestResponse(RestStatus.OK, "image/x-icon", FAVICON_RESPONSE));
}
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/rest/RestController.java |
501 | /**
* The company Assistance for Critical Moments (ACM) is helping other companies to overcome the
* current economic crisis. As experts in computing machinery, their job is to calculate the cost/benefit
* balance of the other companies. They receive two numbers, indicating the total amount of benefits and
* costs, and they have to compute the final balance.
* You have to solve the complex business problem of computing balances. You are given two positive
* integer numbers, corresponding to the benefits and the costs. You have to obtain the total balance,
* i.e., the difference between benefits and costs.
* Input
* The first line of the input contains an integer indicating the number of test cases.
* For each test case, there is a line with two positive integer numbers, A and B, corresponding to the
* benefits and the costs, respectively. Both numbers are between 0 and a googol (10^100) to the power of
* a hundred.
* Output
* For each test case, the output should consist of a line indicating the balance: A − B.
* Sample Input
* 4
* 10 3
* 4 9
* 0 8
* 5 2
* Sample Output
* 7
* -5
* -8
* 3
*/
//https://uva.onlinejudge.org/index.php?option=onlinejudge&Itemid=99999999&page=show_problem&category=&problem=2443
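// Inputs can be as large as a googol to the power of a hundred, i.e.
// (10^100)^100 = 10^10000, which vastly exceeds the range of long, so
// BigInteger is required for the subtraction.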
import java.math.BigInteger;
import java.util.Scanner;
public class WhoSaidCrisis {
public static void main(String[] args) {
Scanner input = new Scanner(System.in);
int numberOfTestCases = input.nextInt();
while (numberOfTestCases != 0) {
BigInteger first = input.nextBigInteger();
BigInteger second = input.nextBigInteger();
System.out.println(first.subtract(second));
numberOfTestCases--;
}
}
}
| kdn251/interviews | uva/WhoSaidCrisis.java |
503 | /*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.base;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndex;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import java.util.Arrays;
import java.util.BitSet;
/**
* Determines a true or false value for any Java {@code char} value, just as {@link Predicate} does
* for any {@link Object}. Also offers basic text processing methods based on this function.
* Implementations are strongly encouraged to be side-effect-free and immutable.
*
* <p>Throughout the documentation of this class, the phrase "matching character" is used to mean
* "any {@code char} value {@code c} for which {@code this.matches(c)} returns {@code true}".
*
* <p><b>Warning:</b> This class deals only with {@code char} values, that is, <a
* href="http://www.unicode.org/glossary/#BMP_character">BMP characters</a>. It does not understand
* <a href="http://www.unicode.org/glossary/#supplementary_code_point">supplementary Unicode code
* points</a> in the range {@code 0x10000} to {@code 0x10FFFF} which includes the majority of
* assigned characters, including important CJK characters and emoji.
*
* <p>Supplementary characters are <a
* href="https://docs.oracle.com/javase/8/docs/api/java/lang/Character.html#supplementary">encoded
* into a {@code String} using surrogate pairs</a>, and a {@code CharMatcher} treats these just as
* two separate characters. {@link #countIn} counts each supplementary character as 2 {@code char}s.
*
* <p>For up-to-date Unicode character properties (digit, letter, etc.) and support for
* supplementary code points, use ICU4J UCharacter and UnicodeSet (freeze() after building). For
* basic text processing based on UnicodeSet use the ICU4J UnicodeSetSpanner.
*
* <p>Example usages:
*
* <pre>
* String trimmed = {@link #whitespace() whitespace()}.{@link #trimFrom trimFrom}(userInput);
* if ({@link #ascii() ascii()}.{@link #matchesAllOf matchesAllOf}(s)) { ... }</pre>
*
* <p>See the Guava User Guide article on <a
* href="https://github.com/google/guava/wiki/StringsExplained#charmatcher">{@code CharMatcher}
* </a>.
*
* @author Kevin Bourrillion
* @since 1.0
*/
@GwtCompatible(emulated = true)
@ElementTypesAreNonnullByDefault
public abstract class CharMatcher implements Predicate<Character> {
/*
* N777777777NO
* N7777777777777N
* M777777777777777N
* $N877777777D77777M
* N M77777777ONND777M
* MN777777777NN D777
* N7ZN777777777NN ~M7778
* N777777777777MMNN88777N
* N777777777777MNZZZ7777O
* DZN7777O77777777777777
* N7OONND7777777D77777N
* 8$M++++?N???$77777$
* M7++++N+M77777777N
* N77O777777777777$ M
* DNNM$$$$777777N D
* N$N:=N$777N7777M NZ
* 77Z::::N777777777 ODZZZ
* 77N::::::N77777777M NNZZZ$
* $777:::::::77777777MN ZM8ZZZZZ
* 777M::::::Z7777777Z77 N++ZZZZNN
* 7777M:::::M7777777$777M $++IZZZZM
* M777$:::::N777777$M7777M +++++ZZZDN
* NN$::::::7777$$M777777N N+++ZZZZNZ
* N::::::N:7$O:77777777 N++++ZZZZN
* M::::::::::::N77777777+ +?+++++ZZZM
* 8::::::::::::D77777777M O+++++ZZ
* ::::::::::::M777777777N O+?D
* M:::::::::::M77777777778 77=
* D=::::::::::N7777777777N 777
* INN===::::::=77777777777N I777N
* ?777N========N7777777777787M N7777
* 77777$D======N77777777777N777N? N777777
* I77777$$$N7===M$$77777777$77777777$MMZ77777777N
* $$$$$$$$$$$NIZN$$$$$$$$$M$$7777777777777777ON
* M$$$$$$$$M M$$$$$$$$N=N$$$$7777777$$$ND
* O77Z$$$$$$$ M$$$$$$$$MNI==$DNNNNM=~N
* 7 :N MNN$$$$M$ $$$777$8 8D8I
* NMM.:7O 777777778
* 7777777MN
* M NO .7:
* M : M
* 8
*/
// Constant matcher factory methods
/**
* Matches any character.
*
* @since 19.0 (since 1.0 as constant {@code ANY})
*/
public static CharMatcher any() {
return Any.INSTANCE;
}
/**
* Matches no characters.
*
* @since 19.0 (since 1.0 as constant {@code NONE})
*/
public static CharMatcher none() {
return None.INSTANCE;
}
/**
* Determines whether a character is whitespace according to the latest Unicode standard, as
* illustrated <a
* href="http://unicode.org/cldr/utility/list-unicodeset.jsp?a=%5Cp%7Bwhitespace%7D">here</a>.
* This is not the same definition used by other Java APIs. (See a <a
* href="https://goo.gl/Y6SLWx">comparison of several definitions of "whitespace"</a>.)
*
* <p>All Unicode White_Space characters are on the BMP and thus supported by this API.
*
* <p><b>Note:</b> as the Unicode definition evolves, we will modify this matcher to keep it up to
* date.
*
* @since 19.0 (since 1.0 as constant {@code WHITESPACE})
*/
public static CharMatcher whitespace() {
return Whitespace.INSTANCE;
}
/**
* Determines whether a character is a breaking whitespace (that is, a whitespace which can be
* interpreted as a break between words for formatting purposes). See {@link #whitespace()} for a
* discussion of that term.
*
* @since 19.0 (since 2.0 as constant {@code BREAKING_WHITESPACE})
*/
public static CharMatcher breakingWhitespace() {
return BreakingWhitespace.INSTANCE;
}
/**
* Determines whether a character is ASCII, meaning that its code point is less than 128.
*
* @since 19.0 (since 1.0 as constant {@code ASCII})
*/
public static CharMatcher ascii() {
return Ascii.INSTANCE;
}
/**
* Determines whether a character is a BMP digit according to <a
* href="http://unicode.org/cldr/utility/list-unicodeset.jsp?a=%5Cp%7Bdigit%7D">Unicode</a>. If
* you only care to match ASCII digits, you can use {@code inRange('0', '9')}.
*
* @deprecated Many digits are supplementary characters; see the class documentation.
* @since 19.0 (since 1.0 as constant {@code DIGIT})
*/
@Deprecated
public static CharMatcher digit() {
return Digit.INSTANCE;
}
/**
* Determines whether a character is a BMP digit according to {@linkplain Character#isDigit(char)
* Java's definition}. If you only care to match ASCII digits, you can use {@code inRange('0',
* '9')}.
*
* @deprecated Many digits are supplementary characters; see the class documentation.
* @since 19.0 (since 1.0 as constant {@code JAVA_DIGIT})
*/
@Deprecated
public static CharMatcher javaDigit() {
return JavaDigit.INSTANCE;
}
/**
* Determines whether a character is a BMP letter according to {@linkplain
* Character#isLetter(char) Java's definition}. If you only care to match letters of the Latin
* alphabet, you can use {@code inRange('a', 'z').or(inRange('A', 'Z'))}.
*
* @deprecated Most letters are supplementary characters; see the class documentation.
* @since 19.0 (since 1.0 as constant {@code JAVA_LETTER})
*/
@Deprecated
public static CharMatcher javaLetter() {
return JavaLetter.INSTANCE;
}
/**
* Determines whether a character is a BMP letter or digit according to {@linkplain
* Character#isLetterOrDigit(char) Java's definition}.
*
* @deprecated Most letters and digits are supplementary characters; see the class documentation.
* @since 19.0 (since 1.0 as constant {@code JAVA_LETTER_OR_DIGIT}).
*/
@Deprecated
public static CharMatcher javaLetterOrDigit() {
return JavaLetterOrDigit.INSTANCE;
}
/**
* Determines whether a BMP character is upper case according to {@linkplain
* Character#isUpperCase(char) Java's definition}.
*
* @deprecated Some uppercase characters are supplementary characters; see the class
* documentation.
* @since 19.0 (since 1.0 as constant {@code JAVA_UPPER_CASE})
*/
@Deprecated
public static CharMatcher javaUpperCase() {
return JavaUpperCase.INSTANCE;
}
/**
* Determines whether a BMP character is lower case according to {@linkplain
* Character#isLowerCase(char) Java's definition}.
*
* @deprecated Some lowercase characters are supplementary characters; see the class
* documentation.
* @since 19.0 (since 1.0 as constant {@code JAVA_LOWER_CASE})
*/
@Deprecated
public static CharMatcher javaLowerCase() {
return JavaLowerCase.INSTANCE;
}
/**
* Determines whether a character is an ISO control character as specified by {@link
* Character#isISOControl(char)}.
*
* <p>All ISO control codes are on the BMP and thus supported by this API.
*
* @since 19.0 (since 1.0 as constant {@code JAVA_ISO_CONTROL})
*/
public static CharMatcher javaIsoControl() {
return JavaIsoControl.INSTANCE;
}
/**
* Determines whether a character is invisible; that is, if its Unicode category is any of
* SPACE_SEPARATOR, LINE_SEPARATOR, PARAGRAPH_SEPARATOR, CONTROL, FORMAT, SURROGATE, and
* PRIVATE_USE according to ICU4J.
*
* <p>See also the Unicode Default_Ignorable_Code_Point property (available via ICU).
*
* @deprecated Most invisible characters are supplementary characters; see the class
* documentation.
* @since 19.0 (since 1.0 as constant {@code INVISIBLE})
*/
@Deprecated
public static CharMatcher invisible() {
return Invisible.INSTANCE;
}
/**
* Determines whether a character is single-width (not double-width). When in doubt, this matcher
* errs on the side of returning {@code false} (that is, it tends to assume a character is
* double-width).
*
* <p><b>Note:</b> as the reference file evolves, we will modify this matcher to keep it up to
* date.
*
* <p>See also <a href="http://www.unicode.org/reports/tr11/">UAX #11 East Asian Width</a>.
*
* @deprecated Many such characters are supplementary characters; see the class documentation.
* @since 19.0 (since 1.0 as constant {@code SINGLE_WIDTH})
*/
@Deprecated
public static CharMatcher singleWidth() {
return SingleWidth.INSTANCE;
}
// Static factories
/** Returns a {@code char} matcher that matches only one specified BMP character. */
public static CharMatcher is(final char match) {
return new Is(match);
}
/**
* Returns a {@code char} matcher that matches any character except the BMP character specified.
*
* <p>To negate another {@code CharMatcher}, use {@link #negate()}.
*/
public static CharMatcher isNot(final char match) {
return new IsNot(match);
}
/**
* Returns a {@code char} matcher that matches any BMP character present in the given character
* sequence. Returns a bogus matcher if the sequence contains supplementary characters.
*/
public static CharMatcher anyOf(final CharSequence sequence) {
switch (sequence.length()) {
case 0:
return none();
case 1:
return is(sequence.charAt(0));
case 2:
return isEither(sequence.charAt(0), sequence.charAt(1));
default:
// TODO(lowasser): is it potentially worth just going ahead and building a precomputed
// matcher?
return new AnyOf(sequence);
}
}
/**
* Returns a {@code char} matcher that matches any BMP character not present in the given
* character sequence. Returns a bogus matcher if the sequence contains supplementary characters.
*/
public static CharMatcher noneOf(CharSequence sequence) {
return anyOf(sequence).negate();
}
/**
* Returns a {@code char} matcher that matches any character in a given BMP range (both endpoints
* are inclusive). For example, to match any lowercase letter of the English alphabet, use {@code
* CharMatcher.inRange('a', 'z')}.
*
* @throws IllegalArgumentException if {@code endInclusive < startInclusive}
*/
public static CharMatcher inRange(final char startInclusive, final char endInclusive) {
return new InRange(startInclusive, endInclusive);
}
/**
* Returns a matcher with identical behavior to the given {@link Character}-based predicate, but
* which operates on primitive {@code char} instances instead.
*/
public static CharMatcher forPredicate(final Predicate<? super Character> predicate) {
return predicate instanceof CharMatcher ? (CharMatcher) predicate : new ForPredicate(predicate);
}
// Constructors
/**
* Constructor for use by subclasses. When subclassing, you may want to override {@code
* toString()} to provide a useful description.
*/
protected CharMatcher() {}
// Abstract methods
/** Determines a true or false value for the given character. */
public abstract boolean matches(char c);
// Non-static factories
/** Returns a matcher that matches any character not matched by this matcher. */
// @Override under Java 8 but not under Java 7
@Override
public CharMatcher negate() {
return new Negated(this);
}
/**
* Returns a matcher that matches any character matched by both this matcher and {@code other}.
*/
public CharMatcher and(CharMatcher other) {
return new And(this, other);
}
/**
* Returns a matcher that matches any character matched by either this matcher or {@code other}.
*/
public CharMatcher or(CharMatcher other) {
return new Or(this, other);
}
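// Composition example (illustrative): a matcher for hexadecimal digits can be
// built from three ranges.
//
//   CharMatcher hex = CharMatcher.inRange('0', '9')
//       .or(CharMatcher.inRange('a', 'f'))
//       .or(CharMatcher.inRange('A', 'F'));
//   hex.matchesAllOf("DeadBeef");  // true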
/**
* Returns a {@code char} matcher functionally equivalent to this one, but which may be faster to
* query than the original; your mileage may vary. Precomputation takes time and is likely to be
* worthwhile only if the precomputed matcher is queried many thousands of times.
*
* <p>This method has no effect (returns {@code this}) when called in GWT: it's unclear whether a
* precomputed matcher is faster, but it certainly consumes more memory, which doesn't seem like a
* worthwhile tradeoff in a browser.
*/
public CharMatcher precomputed() {
return Platform.precomputeCharMatcher(this);
}
private static final int DISTINCT_CHARS = Character.MAX_VALUE - Character.MIN_VALUE + 1;
/**
* This is the actual implementation of {@link #precomputed}, but we bounce calls through a method
* on {@link Platform} so that we can have different behavior in GWT.
*
* <p>This implementation tries to be smart in a number of ways. It recognizes cases where the
* negation is cheaper to precompute than the matcher itself; it tries to build small hash tables
* for matchers that only match a few characters, and so on. In the worst-case scenario, it
* constructs an eight-kilobyte bit array and queries that. In many situations this produces a
* matcher which is faster to query than the original.
*/
@GwtIncompatible // SmallCharMatcher
CharMatcher precomputedInternal() {
final BitSet table = new BitSet();
setBits(table);
int totalCharacters = table.cardinality();
if (totalCharacters * 2 <= DISTINCT_CHARS) {
return precomputedPositive(totalCharacters, table, toString());
} else {
// TODO(lowasser): is it worth it to worry about the last character of large matchers?
table.flip(Character.MIN_VALUE, Character.MAX_VALUE + 1);
int negatedCharacters = DISTINCT_CHARS - totalCharacters;
String suffix = ".negate()";
final String description = toString();
String negatedDescription =
description.endsWith(suffix)
? description.substring(0, description.length() - suffix.length())
: description + suffix;
return new NegatedFastMatcher(
precomputedPositive(negatedCharacters, table, negatedDescription)) {
@Override
public String toString() {
return description;
}
};
}
}
/**
* Helper method for {@link #precomputedInternal} that doesn't test if the negation is cheaper.
*/
@GwtIncompatible // SmallCharMatcher
private static CharMatcher precomputedPositive(
int totalCharacters, BitSet table, String description) {
switch (totalCharacters) {
case 0:
return none();
case 1:
return is((char) table.nextSetBit(0));
case 2:
char c1 = (char) table.nextSetBit(0);
char c2 = (char) table.nextSetBit(c1 + 1);
return isEither(c1, c2);
default:
return isSmall(totalCharacters, table.length())
? SmallCharMatcher.from(table, description)
: new BitSetMatcher(table, description);
}
}
@GwtIncompatible // SmallCharMatcher
private static boolean isSmall(int totalCharacters, int tableLength) {
return totalCharacters <= SmallCharMatcher.MAX_SIZE
&& tableLength > (totalCharacters * 4 * Character.SIZE);
// err on the side of BitSetMatcher
}
/** Sets bits in {@code table} matched by this matcher. */
@GwtIncompatible // used only from other GwtIncompatible code
void setBits(BitSet table) {
for (int c = Character.MAX_VALUE; c >= Character.MIN_VALUE; c--) {
if (matches((char) c)) {
table.set(c);
}
}
}
// Text processing routines
/**
* Returns {@code true} if a character sequence contains at least one matching BMP character.
* Equivalent to {@code !matchesNoneOf(sequence)}.
*
* <p>The default implementation iterates over the sequence, invoking {@link #matches} for each
* character, until this returns {@code true} or the end is reached.
*
* @param sequence the character sequence to examine, possibly empty
* @return {@code true} if this matcher matches at least one character in the sequence
* @since 8.0
*/
public boolean matchesAnyOf(CharSequence sequence) {
return !matchesNoneOf(sequence);
}
/**
* Returns {@code true} if a character sequence contains only matching BMP characters.
*
* <p>The default implementation iterates over the sequence, invoking {@link #matches} for each
* character, until this returns {@code false} or the end is reached.
*
* @param sequence the character sequence to examine, possibly empty
* @return {@code true} if this matcher matches every character in the sequence, including when
* the sequence is empty
*/
public boolean matchesAllOf(CharSequence sequence) {
for (int i = sequence.length() - 1; i >= 0; i--) {
if (!matches(sequence.charAt(i))) {
return false;
}
}
return true;
}
/**
* Returns {@code true} if a character sequence contains no matching BMP characters. Equivalent to
* {@code !matchesAnyOf(sequence)}.
*
* <p>The default implementation iterates over the sequence, invoking {@link #matches} for each
* character, until this returns {@code true} or the end is reached.
*
* @param sequence the character sequence to examine, possibly empty
* @return {@code true} if this matcher matches no characters in the sequence, including when the
* sequence is empty
*/
public boolean matchesNoneOf(CharSequence sequence) {
return indexIn(sequence) == -1;
}
/**
* Returns the index of the first matching BMP character in a character sequence, or {@code -1} if
* no matching character is present.
*
* <p>The default implementation iterates over the sequence in forward order calling {@link
* #matches} for each character.
*
* @param sequence the character sequence to examine from the beginning
* @return an index, or {@code -1} if no character matches
*/
public int indexIn(CharSequence sequence) {
return indexIn(sequence, 0);
}
/**
* Returns the index of the first matching BMP character in a character sequence, starting from a
* given position, or {@code -1} if no character matches after that position.
*
* <p>The default implementation iterates over the sequence in forward order, beginning at {@code
* start}, calling {@link #matches} for each character.
*
* @param sequence the character sequence to examine
* @param start the first index to examine; must be nonnegative and no greater than {@code
* sequence.length()}
* @return the index of the first matching character, guaranteed to be no less than {@code start},
* or {@code -1} if no character matches
* @throws IndexOutOfBoundsException if start is negative or greater than {@code
* sequence.length()}
*/
public int indexIn(CharSequence sequence, int start) {
int length = sequence.length();
checkPositionIndex(start, length);
for (int i = start; i < length; i++) {
if (matches(sequence.charAt(i))) {
return i;
}
}
return -1;
}
/**
* Returns the index of the last matching BMP character in a character sequence, or {@code -1} if
* no matching character is present.
*
* <p>The default implementation iterates over the sequence in reverse order calling {@link
* #matches} for each character.
*
* @param sequence the character sequence to examine from the end
* @return an index, or {@code -1} if no character matches
*/
public int lastIndexIn(CharSequence sequence) {
for (int i = sequence.length() - 1; i >= 0; i--) {
if (matches(sequence.charAt(i))) {
return i;
}
}
return -1;
}
/**
* Returns the number of matching {@code char}s found in a character sequence.
*
   * <p>Counts 2 per supplementary character, since such a character is encoded as a surrogate
   * pair of two {@code char}s; for example, {@link #whitespace}{@code ().}{@link #negate}{@code ()}
   * matches both surrogates of every supplementary character.
*/
public int countIn(CharSequence sequence) {
int count = 0;
for (int i = 0; i < sequence.length(); i++) {
if (matches(sequence.charAt(i))) {
count++;
}
}
return count;
}
/**
* Returns a string containing all non-matching characters of a character sequence, in order. For
* example:
*
* <pre>{@code
* CharMatcher.is('a').removeFrom("bazaar")
* }</pre>
*
* ... returns {@code "bzr"}.
*/
public String removeFrom(CharSequence sequence) {
String string = sequence.toString();
int pos = indexIn(string);
if (pos == -1) {
return string;
}
char[] chars = string.toCharArray();
int spread = 1;
// This unusual loop comes from extensive benchmarking
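    // 'spread' counts the matching chars removed so far; each kept char is
    // copied 'spread' positions to the left, compacting the array in place.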
OUT:
while (true) {
pos++;
while (true) {
if (pos == chars.length) {
break OUT;
}
if (matches(chars[pos])) {
break;
}
chars[pos - spread] = chars[pos];
pos++;
}
spread++;
}
return new String(chars, 0, pos - spread);
}
/**
* Returns a string containing all matching BMP characters of a character sequence, in order. For
* example:
*
* <pre>{@code
* CharMatcher.is('a').retainFrom("bazaar")
* }</pre>
*
* ... returns {@code "aaa"}.
*/
public String retainFrom(CharSequence sequence) {
return negate().removeFrom(sequence);
}
/**
* Returns a string copy of the input character sequence, with each matching BMP character
* replaced by a given replacement character. For example:
*
* <pre>{@code
* CharMatcher.is('a').replaceFrom("radar", 'o')
* }</pre>
*
* ... returns {@code "rodor"}.
*
* <p>The default implementation uses {@link #indexIn(CharSequence)} to find the first matching
* character, then iterates the remainder of the sequence calling {@link #matches(char)} for each
* character.
*
* @param sequence the character sequence to replace matching characters in
* @param replacement the character to append to the result string in place of each matching
* character in {@code sequence}
* @return the new string
*/
public String replaceFrom(CharSequence sequence, char replacement) {
String string = sequence.toString();
int pos = indexIn(string);
if (pos == -1) {
return string;
}
char[] chars = string.toCharArray();
chars[pos] = replacement;
for (int i = pos + 1; i < chars.length; i++) {
if (matches(chars[i])) {
chars[i] = replacement;
}
}
return new String(chars);
}
/**
* Returns a string copy of the input character sequence, with each matching BMP character
* replaced by a given replacement sequence. For example:
*
* <pre>{@code
* CharMatcher.is('a').replaceFrom("yaha", "oo")
* }</pre>
*
* ... returns {@code "yoohoo"}.
*
* <p><b>Note:</b> If the replacement is a fixed string with only one character, you are better
* off calling {@link #replaceFrom(CharSequence, char)} directly.
*
* @param sequence the character sequence to replace matching characters in
* @param replacement the characters to append to the result string in place of each matching
* character in {@code sequence}
* @return the new string
*/
public String replaceFrom(CharSequence sequence, CharSequence replacement) {
int replacementLen = replacement.length();
if (replacementLen == 0) {
return removeFrom(sequence);
}
if (replacementLen == 1) {
return replaceFrom(sequence, replacement.charAt(0));
}
String string = sequence.toString();
int pos = indexIn(string);
if (pos == -1) {
return string;
}
int len = string.length();
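    // Initial capacity is a heuristic: room for the original length plus
    // roughly 50% growth from the multi-char replacements.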
StringBuilder buf = new StringBuilder((len * 3 / 2) + 16);
int oldpos = 0;
do {
buf.append(string, oldpos, pos);
buf.append(replacement);
oldpos = pos + 1;
pos = indexIn(string, oldpos);
} while (pos != -1);
buf.append(string, oldpos, len);
return buf.toString();
}
/**
* Returns a substring of the input character sequence that omits all matching BMP characters from
* the beginning and from the end of the string. For example:
*
* <pre>{@code
* CharMatcher.anyOf("ab").trimFrom("abacatbab")
* }</pre>
*
* ... returns {@code "cat"}.
*
* <p>Note that:
*
* <pre>{@code
* CharMatcher.inRange('\0', ' ').trimFrom(str)
* }</pre>
*
* ... is equivalent to {@link String#trim()}.
*/
public String trimFrom(CharSequence sequence) {
int len = sequence.length();
int first;
int last;
for (first = 0; first < len; first++) {
if (!matches(sequence.charAt(first))) {
break;
}
}
for (last = len - 1; last > first; last--) {
if (!matches(sequence.charAt(last))) {
break;
}
}
return sequence.subSequence(first, last + 1).toString();
}
/**
* Returns a substring of the input character sequence that omits all matching BMP characters from
* the beginning of the string. For example:
*
* <pre>{@code
* CharMatcher.anyOf("ab").trimLeadingFrom("abacatbab")
* }</pre>
*
* ... returns {@code "catbab"}.
*/
public String trimLeadingFrom(CharSequence sequence) {
int len = sequence.length();
for (int first = 0; first < len; first++) {
if (!matches(sequence.charAt(first))) {
return sequence.subSequence(first, len).toString();
}
}
return "";
}
/**
* Returns a substring of the input character sequence that omits all matching BMP characters from
* the end of the string. For example:
*
* <pre>{@code
* CharMatcher.anyOf("ab").trimTrailingFrom("abacatbab")
* }</pre>
*
* ... returns {@code "abacat"}.
*/
public String trimTrailingFrom(CharSequence sequence) {
int len = sequence.length();
for (int last = len - 1; last >= 0; last--) {
if (!matches(sequence.charAt(last))) {
return sequence.subSequence(0, last + 1).toString();
}
}
return "";
}
/**
* Returns a string copy of the input character sequence, with each group of consecutive matching
* BMP characters replaced by a single replacement character. For example:
*
* <pre>{@code
* CharMatcher.anyOf("eko").collapseFrom("bookkeeper", '-')
* }</pre>
*
* ... returns {@code "b-p-r"}.
*
* <p>The default implementation uses {@link #indexIn(CharSequence)} to find the first matching
* character, then iterates the remainder of the sequence calling {@link #matches(char)} for each
* character.
*
* @param sequence the character sequence to replace matching groups of characters in
* @param replacement the character to append to the result string in place of each group of
* matching characters in {@code sequence}
* @return the new string
*/
public String collapseFrom(CharSequence sequence, char replacement) {
// This implementation avoids unnecessary allocation.
int len = sequence.length();
for (int i = 0; i < len; i++) {
char c = sequence.charAt(i);
if (matches(c)) {
if (c == replacement && (i == len - 1 || !matches(sequence.charAt(i + 1)))) {
// a no-op replacement
i++;
} else {
StringBuilder builder = new StringBuilder(len).append(sequence, 0, i).append(replacement);
return finishCollapseFrom(sequence, i + 1, len, replacement, builder, true);
}
}
}
// no replacement needed
return sequence.toString();
}
/**
* Collapses groups of matching characters exactly as {@link #collapseFrom} does, except that
* groups of matching BMP characters at the start or end of the sequence are removed without
* replacement.
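   *
   * <p>For example, {@code CharMatcher.anyOf("ab").trimAndCollapseFrom("abacatbab", '-')} returns
   * {@code "c-t"}.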
*/
public String trimAndCollapseFrom(CharSequence sequence, char replacement) {
// This implementation avoids unnecessary allocation.
int len = sequence.length();
int first = 0;
int last = len - 1;
while (first < len && matches(sequence.charAt(first))) {
first++;
}
while (last > first && matches(sequence.charAt(last))) {
last--;
}
return (first == 0 && last == len - 1)
? collapseFrom(sequence, replacement)
: finishCollapseFrom(
sequence, first, last + 1, replacement, new StringBuilder(last + 1 - first), false);
}
private String finishCollapseFrom(
CharSequence sequence,
int start,
int end,
char replacement,
StringBuilder builder,
boolean inMatchingGroup) {
for (int i = start; i < end; i++) {
char c = sequence.charAt(i);
if (matches(c)) {
if (!inMatchingGroup) {
builder.append(replacement);
inMatchingGroup = true;
}
} else {
builder.append(c);
inMatchingGroup = false;
}
}
return builder.toString();
}
/**
* @deprecated Provided only to satisfy the {@link Predicate} interface; use {@link #matches}
* instead.
*/
@Deprecated
@Override
public boolean apply(Character character) {
return matches(character);
}
/**
* Returns a string representation of this {@code CharMatcher}, such as {@code
* CharMatcher.or(WHITESPACE, JAVA_DIGIT)}.
*/
@Override
public String toString() {
return super.toString();
}
/**
* Returns the Java Unicode escape sequence for the given {@code char}, in the form "\u12AB" where
* "12AB" is the four hexadecimal digits representing the 16-bit code unit.
*/
private static String showCharacter(char c) {
String hex = "0123456789ABCDEF";
char[] tmp = {'\\', 'u', '\0', '\0', '\0', '\0'};
for (int i = 0; i < 4; i++) {
tmp[5 - i] = hex.charAt(c & 0xF);
c = (char) (c >> 4);
}
return String.copyValueOf(tmp);
}
// Fast matchers
/** A matcher for which precomputation will not yield any significant benefit. */
abstract static class FastMatcher extends CharMatcher {
@Override
public final CharMatcher precomputed() {
return this;
}
@Override
public CharMatcher negate() {
return new NegatedFastMatcher(this);
}
}
/** {@link FastMatcher} which overrides {@code toString()} with a custom name. */
abstract static class NamedFastMatcher extends FastMatcher {
private final String description;
NamedFastMatcher(String description) {
this.description = checkNotNull(description);
}
@Override
public final String toString() {
return description;
}
}
/** Negation of a {@link FastMatcher}. */
private static class NegatedFastMatcher extends Negated {
NegatedFastMatcher(CharMatcher original) {
super(original);
}
@Override
public final CharMatcher precomputed() {
return this;
}
}
/** Fast matcher using a {@link BitSet} table of matching characters. */
@GwtIncompatible // used only from other GwtIncompatible code
private static final class BitSetMatcher extends NamedFastMatcher {
private final BitSet table;
private BitSetMatcher(BitSet table, String description) {
super(description);
if (table.length() + Long.SIZE < table.size()) {
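        // Cloning trims the BitSet's backing array down to its logical length,
        // reclaiming space when the table was sized for the full char range.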
table = (BitSet) table.clone();
// If only we could actually call BitSet.trimToSize() ourselves...
}
this.table = table;
}
@Override
public boolean matches(char c) {
return table.get(c);
}
@Override
void setBits(BitSet bitSet) {
bitSet.or(table);
}
}
// Static constant implementation classes
/** Implementation of {@link #any()}. */
private static final class Any extends NamedFastMatcher {
static final CharMatcher INSTANCE = new Any();
private Any() {
super("CharMatcher.any()");
}
@Override
public boolean matches(char c) {
return true;
}
@Override
public int indexIn(CharSequence sequence) {
return (sequence.length() == 0) ? -1 : 0;
}
@Override
public int indexIn(CharSequence sequence, int start) {
int length = sequence.length();
checkPositionIndex(start, length);
return (start == length) ? -1 : start;
}
@Override
public int lastIndexIn(CharSequence sequence) {
return sequence.length() - 1;
}
@Override
public boolean matchesAllOf(CharSequence sequence) {
checkNotNull(sequence);
return true;
}
@Override
public boolean matchesNoneOf(CharSequence sequence) {
return sequence.length() == 0;
}
@Override
public String removeFrom(CharSequence sequence) {
checkNotNull(sequence);
return "";
}
@Override
public String replaceFrom(CharSequence sequence, char replacement) {
char[] array = new char[sequence.length()];
Arrays.fill(array, replacement);
return new String(array);
}
@Override
public String replaceFrom(CharSequence sequence, CharSequence replacement) {
StringBuilder result = new StringBuilder(sequence.length() * replacement.length());
for (int i = 0; i < sequence.length(); i++) {
result.append(replacement);
}
return result.toString();
}
@Override
public String collapseFrom(CharSequence sequence, char replacement) {
return (sequence.length() == 0) ? "" : String.valueOf(replacement);
}
@Override
public String trimFrom(CharSequence sequence) {
checkNotNull(sequence);
return "";
}
@Override
public int countIn(CharSequence sequence) {
return sequence.length();
}
@Override
public CharMatcher and(CharMatcher other) {
return checkNotNull(other);
}
@Override
public CharMatcher or(CharMatcher other) {
checkNotNull(other);
return this;
}
@Override
public CharMatcher negate() {
return none();
}
}
/** Implementation of {@link #none()}. */
private static final class None extends NamedFastMatcher {
static final CharMatcher INSTANCE = new None();
private None() {
super("CharMatcher.none()");
}
@Override
public boolean matches(char c) {
return false;
}
@Override
public int indexIn(CharSequence sequence) {
checkNotNull(sequence);
return -1;
}
@Override
public int indexIn(CharSequence sequence, int start) {
int length = sequence.length();
checkPositionIndex(start, length);
return -1;
}
@Override
public int lastIndexIn(CharSequence sequence) {
checkNotNull(sequence);
return -1;
}
@Override
public boolean matchesAllOf(CharSequence sequence) {
return sequence.length() == 0;
}
@Override
public boolean matchesNoneOf(CharSequence sequence) {
checkNotNull(sequence);
return true;
}
@Override
public String removeFrom(CharSequence sequence) {
return sequence.toString();
}
@Override
public String replaceFrom(CharSequence sequence, char replacement) {
return sequence.toString();
}
@Override
public String replaceFrom(CharSequence sequence, CharSequence replacement) {
checkNotNull(replacement);
return sequence.toString();
}
@Override
public String collapseFrom(CharSequence sequence, char replacement) {
return sequence.toString();
}
@Override
public String trimFrom(CharSequence sequence) {
return sequence.toString();
}
@Override
public String trimLeadingFrom(CharSequence sequence) {
return sequence.toString();
}
@Override
public String trimTrailingFrom(CharSequence sequence) {
return sequence.toString();
}
@Override
public int countIn(CharSequence sequence) {
checkNotNull(sequence);
return 0;
}
@Override
public CharMatcher and(CharMatcher other) {
checkNotNull(other);
return this;
}
@Override
public CharMatcher or(CharMatcher other) {
return checkNotNull(other);
}
@Override
public CharMatcher negate() {
return any();
}
}
/** Implementation of {@link #whitespace()}. */
@VisibleForTesting
static final class Whitespace extends NamedFastMatcher {
// TABLE is a precomputed hashset of whitespace characters. MULTIPLIER serves as a hash function
// whose key property is that it maps 25 characters into the 32-slot table without collision.
// Basically this is an opportunistic fast implementation as opposed to "good code". For most
// other use-cases, the reduction in readability isn't worth it.
static final String TABLE =
"\u2002\u3000\r\u0085\u200A\u2005\u2000\u3000"
+ "\u2029\u000B\u3000\u2008\u2003\u205F\u3000\u1680"
+ "\u0009\u0020\u2006\u2001\u202F\u00A0\u000C\u2009"
+ "\u3000\u2004\u3000\u3000\u2028\n\u2007\u3000";
static final int MULTIPLIER = 1682554634;
static final int SHIFT = Integer.numberOfLeadingZeros(TABLE.length() - 1);
static final CharMatcher INSTANCE = new Whitespace();
Whitespace() {
super("CharMatcher.whitespace()");
}
@Override
public boolean matches(char c) {
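      // Perfect-hash probe: the multiply-and-shift maps each whitespace char to
      // a unique slot in TABLE, so matching is one lookup plus one comparison.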
return TABLE.charAt((MULTIPLIER * c) >>> SHIFT) == c;
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
for (int i = 0; i < TABLE.length(); i++) {
table.set(TABLE.charAt(i));
}
}
}
/** Implementation of {@link #breakingWhitespace()}. */
private static final class BreakingWhitespace extends CharMatcher {
static final CharMatcher INSTANCE = new BreakingWhitespace();
@Override
public boolean matches(char c) {
switch (c) {
case '\t':
case '\n':
case '\013':
case '\f':
case '\r':
case ' ':
case '\u0085':
case '\u1680':
case '\u2028':
case '\u2029':
case '\u205f':
case '\u3000':
return true;
case '\u2007':
return false;
default:
return c >= '\u2000' && c <= '\u200a';
}
}
@Override
public String toString() {
return "CharMatcher.breakingWhitespace()";
}
}
/** Implementation of {@link #ascii()}. */
private static final class Ascii extends NamedFastMatcher {
static final CharMatcher INSTANCE = new Ascii();
Ascii() {
super("CharMatcher.ascii()");
}
@Override
public boolean matches(char c) {
return c <= '\u007f';
}
}
/** Implementation that matches characters that fall within multiple ranges. */
private static class RangesMatcher extends CharMatcher {
private final String description;
private final char[] rangeStarts;
private final char[] rangeEnds;
RangesMatcher(String description, char[] rangeStarts, char[] rangeEnds) {
this.description = description;
this.rangeStarts = rangeStarts;
this.rangeEnds = rangeEnds;
checkArgument(rangeStarts.length == rangeEnds.length);
for (int i = 0; i < rangeStarts.length; i++) {
checkArgument(rangeStarts[i] <= rangeEnds[i]);
if (i + 1 < rangeStarts.length) {
checkArgument(rangeEnds[i] < rangeStarts[i + 1]);
}
}
}
@Override
public boolean matches(char c) {
int index = Arrays.binarySearch(rangeStarts, c);
if (index >= 0) {
return true;
} else {
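        // binarySearch returned (-insertionPoint - 1), so ~index - 1 is the
        // index of the last range whose start is below c.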
index = ~index - 1;
return index >= 0 && c <= rangeEnds[index];
}
}
@Override
public String toString() {
return description;
}
}
/** Implementation of {@link #digit()}. */
private static final class Digit extends RangesMatcher {
// Plug the following UnicodeSet pattern into
// https://unicode.org/cldr/utility/list-unicodeset.jsp
// [[:Nd:]&[:nv=0:]&[\u0000-\uFFFF]]
// and get the zeroes from there.
// Must be in ascending order.
private static final String ZEROES =
"0\u0660\u06f0\u07c0\u0966\u09e6\u0a66\u0ae6\u0b66\u0be6\u0c66\u0ce6\u0d66\u0de6"
+ "\u0e50\u0ed0\u0f20\u1040\u1090\u17e0\u1810\u1946\u19d0\u1a80\u1a90\u1b50\u1bb0"
+ "\u1c40\u1c50\ua620\ua8d0\ua900\ua9d0\ua9f0\uaa50\uabf0\uff10";
private static char[] zeroes() {
return ZEROES.toCharArray();
}
private static char[] nines() {
char[] nines = new char[ZEROES.length()];
for (int i = 0; i < ZEROES.length(); i++) {
nines[i] = (char) (ZEROES.charAt(i) + 9);
}
return nines;
}
static final CharMatcher INSTANCE = new Digit();
private Digit() {
super("CharMatcher.digit()", zeroes(), nines());
}
}
/** Implementation of {@link #javaDigit()}. */
private static final class JavaDigit extends CharMatcher {
static final CharMatcher INSTANCE = new JavaDigit();
@Override
public boolean matches(char c) {
return Character.isDigit(c);
}
@Override
public String toString() {
return "CharMatcher.javaDigit()";
}
}
/** Implementation of {@link #javaLetter()}. */
private static final class JavaLetter extends CharMatcher {
static final CharMatcher INSTANCE = new JavaLetter();
@Override
public boolean matches(char c) {
return Character.isLetter(c);
}
@Override
public String toString() {
return "CharMatcher.javaLetter()";
}
}
/** Implementation of {@link #javaLetterOrDigit()}. */
private static final class JavaLetterOrDigit extends CharMatcher {
static final CharMatcher INSTANCE = new JavaLetterOrDigit();
@Override
public boolean matches(char c) {
return Character.isLetterOrDigit(c);
}
@Override
public String toString() {
return "CharMatcher.javaLetterOrDigit()";
}
}
/** Implementation of {@link #javaUpperCase()}. */
private static final class JavaUpperCase extends CharMatcher {
static final CharMatcher INSTANCE = new JavaUpperCase();
@Override
public boolean matches(char c) {
return Character.isUpperCase(c);
}
@Override
public String toString() {
return "CharMatcher.javaUpperCase()";
}
}
/** Implementation of {@link #javaLowerCase()}. */
private static final class JavaLowerCase extends CharMatcher {
static final CharMatcher INSTANCE = new JavaLowerCase();
@Override
public boolean matches(char c) {
return Character.isLowerCase(c);
}
@Override
public String toString() {
return "CharMatcher.javaLowerCase()";
}
}
/** Implementation of {@link #javaIsoControl()}. */
private static final class JavaIsoControl extends NamedFastMatcher {
static final CharMatcher INSTANCE = new JavaIsoControl();
private JavaIsoControl() {
super("CharMatcher.javaIsoControl()");
}
@Override
public boolean matches(char c) {
return c <= '\u001f' || (c >= '\u007f' && c <= '\u009f');
}
}
/** Implementation of {@link #invisible()}. */
private static final class Invisible extends RangesMatcher {
// Plug the following UnicodeSet pattern into
// https://unicode.org/cldr/utility/list-unicodeset.jsp
// [[[:Zs:][:Zl:][:Zp:][:Cc:][:Cf:][:Cs:][:Co:]]&[\u0000-\uFFFF]]
// with the "Abbreviate" option, and get the ranges from there.
private static final String RANGE_STARTS =
"\u0000\u007f\u00ad\u0600\u061c\u06dd\u070f\u0890\u08e2\u1680\u180e\u2000\u2028\u205f\u2066"
+ "\u3000\ud800\ufeff\ufff9";
private static final String RANGE_ENDS = // inclusive ends
"\u0020\u00a0\u00ad\u0605\u061c\u06dd\u070f\u0891\u08e2\u1680\u180e\u200f\u202f\u2064\u206f"
+ "\u3000\uf8ff\ufeff\ufffb";
static final CharMatcher INSTANCE = new Invisible();
private Invisible() {
super("CharMatcher.invisible()", RANGE_STARTS.toCharArray(), RANGE_ENDS.toCharArray());
}
}
/** Implementation of {@link #singleWidth()}. */
private static final class SingleWidth extends RangesMatcher {
static final CharMatcher INSTANCE = new SingleWidth();
private SingleWidth() {
super(
"CharMatcher.singleWidth()",
"\u0000\u05be\u05d0\u05f3\u0600\u0750\u0e00\u1e00\u2100\ufb50\ufe70\uff61".toCharArray(),
"\u04f9\u05be\u05ea\u05f4\u06ff\u077f\u0e7f\u20af\u213a\ufdff\ufeff\uffdc".toCharArray());
}
}
// Non-static factory implementation classes
/** Implementation of {@link #negate()}. */
private static class Negated extends CharMatcher {
final CharMatcher original;
Negated(CharMatcher original) {
this.original = checkNotNull(original);
}
@Override
public boolean matches(char c) {
return !original.matches(c);
}
@Override
public boolean matchesAllOf(CharSequence sequence) {
return original.matchesNoneOf(sequence);
}
@Override
public boolean matchesNoneOf(CharSequence sequence) {
return original.matchesAllOf(sequence);
}
@Override
public int countIn(CharSequence sequence) {
return sequence.length() - original.countIn(sequence);
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
BitSet tmp = new BitSet();
original.setBits(tmp);
tmp.flip(Character.MIN_VALUE, Character.MAX_VALUE + 1);
table.or(tmp);
}
@Override
public CharMatcher negate() {
return original;
}
@Override
public String toString() {
return original + ".negate()";
}
}
/** Implementation of {@link #and(CharMatcher)}. */
private static final class And extends CharMatcher {
final CharMatcher first;
final CharMatcher second;
And(CharMatcher a, CharMatcher b) {
first = checkNotNull(a);
second = checkNotNull(b);
}
@Override
public boolean matches(char c) {
return first.matches(c) && second.matches(c);
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
BitSet tmp1 = new BitSet();
first.setBits(tmp1);
BitSet tmp2 = new BitSet();
second.setBits(tmp2);
tmp1.and(tmp2);
table.or(tmp1);
}
@Override
public String toString() {
return "CharMatcher.and(" + first + ", " + second + ")";
}
}
/** Implementation of {@link #or(CharMatcher)}. */
private static final class Or extends CharMatcher {
final CharMatcher first;
final CharMatcher second;
Or(CharMatcher a, CharMatcher b) {
first = checkNotNull(a);
second = checkNotNull(b);
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
first.setBits(table);
second.setBits(table);
}
@Override
public boolean matches(char c) {
return first.matches(c) || second.matches(c);
}
@Override
public String toString() {
return "CharMatcher.or(" + first + ", " + second + ")";
}
}
// Static factory implementations
/** Implementation of {@link #is(char)}. */
private static final class Is extends FastMatcher {
private final char match;
Is(char match) {
this.match = match;
}
@Override
public boolean matches(char c) {
return c == match;
}
@Override
public String replaceFrom(CharSequence sequence, char replacement) {
return sequence.toString().replace(match, replacement);
}
@Override
public CharMatcher and(CharMatcher other) {
return other.matches(match) ? this : none();
}
@Override
public CharMatcher or(CharMatcher other) {
return other.matches(match) ? other : super.or(other);
}
@Override
public CharMatcher negate() {
return isNot(match);
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
table.set(match);
}
@Override
public String toString() {
return "CharMatcher.is('" + showCharacter(match) + "')";
}
}
/** Implementation of {@link #isNot(char)}. */
private static final class IsNot extends FastMatcher {
private final char match;
IsNot(char match) {
this.match = match;
}
@Override
public boolean matches(char c) {
return c != match;
}
@Override
public CharMatcher and(CharMatcher other) {
return other.matches(match) ? super.and(other) : other;
}
@Override
public CharMatcher or(CharMatcher other) {
return other.matches(match) ? any() : this;
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
table.set(0, match);
table.set(match + 1, Character.MAX_VALUE + 1);
}
@Override
public CharMatcher negate() {
return is(match);
}
@Override
public String toString() {
return "CharMatcher.isNot('" + showCharacter(match) + "')";
}
}
private static CharMatcher.IsEither isEither(char c1, char c2) {
return new CharMatcher.IsEither(c1, c2);
}
/** Implementation of {@link #anyOf(CharSequence)} for exactly two characters. */
private static final class IsEither extends FastMatcher {
private final char match1;
private final char match2;
IsEither(char match1, char match2) {
this.match1 = match1;
this.match2 = match2;
}
@Override
public boolean matches(char c) {
return c == match1 || c == match2;
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
table.set(match1);
table.set(match2);
}
@Override
public String toString() {
return "CharMatcher.anyOf(\"" + showCharacter(match1) + showCharacter(match2) + "\")";
}
}
/** Implementation of {@link #anyOf(CharSequence)} for three or more characters. */
private static final class AnyOf extends CharMatcher {
private final char[] chars;
public AnyOf(CharSequence chars) {
this.chars = chars.toString().toCharArray();
Arrays.sort(this.chars);
}
@Override
public boolean matches(char c) {
return Arrays.binarySearch(chars, c) >= 0;
}
@Override
@GwtIncompatible // used only from other GwtIncompatible code
void setBits(BitSet table) {
for (char c : chars) {
table.set(c);
}
}
@Override
public String toString() {
StringBuilder description = new StringBuilder("CharMatcher.anyOf(\"");
for (char c : chars) {
description.append(showCharacter(c));
}
description.append("\")");
return description.toString();
}
}
/** Implementation of {@link #inRange(char, char)}. */
private static final class InRange extends FastMatcher {
private final char startInclusive;
private final char endInclusive;
InRange(char startInclusive, char endInclusive) {
checkArgument(endInclusive >= startInclusive);
this.startInclusive = startInclusive;
this.endInclusive = endInclusive;
}
@Override
public boolean matches(char c) {
return startInclusive <= c && c <= endInclusive;
}
@GwtIncompatible // used only from other GwtIncompatible code
@Override
void setBits(BitSet table) {
table.set(startInclusive, endInclusive + 1);
}
@Override
public String toString() {
return "CharMatcher.inRange('"
+ showCharacter(startInclusive)
+ "', '"
+ showCharacter(endInclusive)
+ "')";
}
}
/** Implementation of {@link #forPredicate(Predicate)}. */
private static final class ForPredicate extends CharMatcher {
private final Predicate<? super Character> predicate;
ForPredicate(Predicate<? super Character> predicate) {
this.predicate = checkNotNull(predicate);
}
@Override
public boolean matches(char c) {
return predicate.apply(c);
}
@SuppressWarnings("deprecation") // intentional; deprecation is for callers primarily
@Override
public boolean apply(Character character) {
return predicate.apply(checkNotNull(character));
}
@Override
public String toString() {
return "CharMatcher.forPredicate(" + predicate + ")";
}
}
}
| google/guava | guava/src/com/google/common/base/CharMatcher.java |
508 | import java.util.HashMap;

class Solution {
public boolean isValidSudoku(char[][] board) {
HashMap[] row = new HashMap[9];
HashMap[] column = new HashMap[9];
HashMap[] box = new HashMap[9];
for (int i = 0; i < 9; i++) {
row[i] = new HashMap(9);
column[i] = new HashMap(9);
box[i] = new HashMap(9);
}
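        // Track which digits have been seen in each row, column, and 3x3 box;
        // any repeat makes the board invalid.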
for (int i = 0; i < 9; i++) {
for (int j = 0; j < 9; j++) {
if (board[i][j] == '.') {
continue;
}
                int boxIndex = i / 3 * 3 + j / 3;
                if ((boolean) row[i].getOrDefault(board[i][j], false)) {
                    return false;
                }
                if ((boolean) column[j].getOrDefault(board[i][j], false)) {
                    return false;
                }
                if ((boolean) box[boxIndex].getOrDefault(board[i][j], false)) {
                    return false;
                }
                // Mark this digit as seen in its row, column, and box.
                row[i].put(board[i][j], true);
                column[j].put(board[i][j], true);
                box[boxIndex].put(board[i][j], true);
}
}
return true;
}
} | MisterBooo/LeetCodeAnimation | 0036-valid-sudoku/Code/1.java |
509 | import java.io.*;
import java.lang.Integer.*;
import java.util.*;
import java.util.stream.*;
import java.lang.StringBuilder;
import java.util.concurrent.CountDownLatch;
//////////////////////////////// Solve Sudoku Puzzles ////////////////////////////////
//////////////////////////////// @author Peter Norvig ////////////////////////////////
/** There are two representations of puzzles that we will use:
** 1. A gridstring is 81 chars, with characters '0' or '.' for blank and '1' to '9' for digits.
** 2. A puzzle grid is an int[81] with a digit d (1-9) represented by the integer (1 << (d - 1));
** that is, a bit pattern that has a single 1 bit representing the digit.
** A blank is represented by the OR of all the digits 1-9, meaning that any digit is possible.
** While solving the puzzle, some of these digits are eliminated, leaving fewer possibilities.
** The puzzle is solved when every square has only a single possibility.
**
** Search for a solution with `search`:
** - Fill an empty square with a guessed digit and do constraint propagation.
** - If the guess is consistent, search deeper; if not, try a different guess for the square.
** - If all guesses fail, back up to the previous level.
** - In selecting an empty square, we pick one that has the minimum number of possible digits.
** - To be able to back up, we need to keep the grid from the previous recursive level.
** But we only need to keep one grid for each level, so to save garbage collection,
** we pre-allocate one grid per level (there are 81 levels) in a `gridpool`.
** Do constraint propagation with `arcConsistent`, `dualConsistent`, and `nakedPairs`.
**/
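/* An illustrative example of the bit representation (not used by the code):
 * the digit 5 is (1 << 4) == 0b000010000; a blank square is 0b111111111; and a
 * square whose possibilities have been narrowed to {2, 7} is 0b001000010. */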
public class Sudoku {
//////////////////////////////// main; command line options //////////////////////////////
static final String usage = String.join("\n",
"usage: java Sudoku -(no)[fghnprstuv] | -[RT]<number> | <filename> ...",
"E.g., -v turns verify flag on, -nov turns it off. -R and -T require a number. The options:\n",
" -f(ile) Print summary stats for each file (default on)",
" -g(rid) Print each puzzle grid and solution grid (default off)",
" -h(elp) Print this usage message",
" -n(aked) Run naked pairs (default on)",
" -p(uzzle) Print summary stats for each puzzle (default off)",
" -r(everse) Solve the reverse of each puzzle as well as each puzzle itself (default off)",
" -s(earch) Run search (default on, but some puzzles can be solved with CSP methods alone)",
" -t(hread) Print summary stats for each thread (default off)",
" -u(nitTest)Run a suite of unit tests (default off)",
" -v(erify) Verify each solution is valid (default on)",
" -T<number> Concurrently run <number> threads (default 26)",
" -R<number> Repeat each puzzle <number> times (default 1)",
" <filename> Solve all puzzles in filename, which has one puzzle per line");
boolean printFileStats = true; // -f
boolean printGrid = false; // -g
boolean runNakedPairs = true; // -n
boolean printPuzzleStats = false; // -p
boolean reversePuzzle = false; // -r
boolean runSearch = true; // -s
boolean printThreadStats = false; // -t
boolean verifySolution = true; // -v
int nThreads = 26; // -T
int repeat = 1; // -R
int backtracks = 0; // count total backtracks
/** Parse command line args and solve puzzles in files. **/
public static void main(String[] args) throws IOException {
Sudoku s = new Sudoku();
for (String arg: args) {
if (!arg.startsWith("-")) {
s.solveFile(arg);
} else {
boolean value = !arg.startsWith("-no");
switch(arg.charAt(value ? 1 : 3)) {
case 'f': s.printFileStats = value; break;
case 'g': s.printGrid = value; break;
case 'h': System.out.println(usage); break;
case 'n': s.runNakedPairs = value; break;
case 'p': s.printPuzzleStats = value; break;
case 'r': s.reversePuzzle = value; break;
case 's': s.runSearch = value; break;
case 't': s.printThreadStats = value; break;
case 'u': s.runUnitTests(); break;
case 'v': s.verifySolution = value; break;
case 'T': s.nThreads = Integer.parseInt(arg.substring(2)); break;
case 'R': s.repeat = Integer.parseInt(arg.substring(2)); break;
default: System.out.println("Unrecognized option: " + arg + "\n" + usage);
}
}
}
}
//////////////////////////////// Handling Lists of Puzzles ////////////////////////////////
/** Solve all the puzzles in a file. Report timing statistics. **/
void solveFile(String filename) throws IOException {
List<int[]> grids = readFile(filename);
long startFileTime = System.nanoTime();
switch(nThreads) {
case 1: solveList(grids); break;
default: solveListThreaded(grids, nThreads); break;
}
if (printFileStats) printStats(grids.size() * repeat, startFileTime, filename);
}
/** Solve a list of puzzles in a single thread.
** repeat -R<number> times; print each puzzle's stats if -p; print grid if -g; verify if -v. **/
void solveList(List<int[]> grids) {
int[] puzzle = new int[N * N]; // Used to save a copy of the original grid
int[][] gridpool = new int[N * N][N * N]; // Reuse grids during the search
for (int g=0; g<grids.size(); ++g) {
int grid[] = grids.get(g);
System.arraycopy(grid, 0, puzzle, 0, grid.length);
for (int i = 0; i < repeat; ++i) {
long startTime = printPuzzleStats ? System.nanoTime() : 0;
int[] solution = initialize(grid); // All the real work is
if (runSearch) solution = search(solution, gridpool, 0); // on these 2 lines.
if (printPuzzleStats) {
printStats(1, startTime, "Puzzle " + (g + 1));
}
if (i == 0 && (printGrid || (verifySolution && !verify(solution, puzzle)))) {
printGrids("Puzzle " + (g + 1), grid, solution);
}
}
}
}
/** Break a list of puzzles into nThreads sublists and solve each sublist in a separate thread. **/
void solveListThreaded(List<int[]> grids, int nThreads) {
try {
final long startTime = System.nanoTime();
int nGrids = grids.size();
final CountDownLatch latch = new CountDownLatch(nThreads);
int size = nGrids / nThreads;
for (int c = 0; c < nThreads; ++c) {
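        // The final thread also takes the remainder when nGrids is not evenly divisible.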
int end = c == nThreads - 1 ? nGrids : (c + 1) * size;
final List<int[]> sublist = grids.subList(c * size, end);
new Thread() {
public void run() {
solveList(sublist);
latch.countDown();
if (printThreadStats) {
printStats(repeat * sublist.size(), startTime, "Thread");
}
}
}.start();
}
latch.await(); // Wait for all threads to finish
} catch (InterruptedException e) {
System.err.println("And you may ask yourself, 'Well, how did I get here?'");
}
}
//////////////////////////////// Utility functions ////////////////////////////////
/** Return an array of all squares in the intersection of these rows and cols **/
int[] cross(int[] rows, int[] cols) {
int[] result = new int[rows.length * cols.length];
int i = 0;
for (int r: rows) { for (int c: cols) { result[i++] = N * r + c; } }
return result;
}
/** Return true iff item is an element of array, or of array[0:end]. **/
boolean member(int item, int[] array) { return member(item, array, array.length); }
boolean member(int item, int[] array, int end) {
for (int i = 0; i<end; ++i) {
if (array[i] == item) { return true; }
}
return false;
}
//////////////////////////////// Constants ////////////////////////////////
final int N = 9; // Number of cells on a side of grid.
final int[] DIGITS = {1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7, 1<<8};
final int ALL_DIGITS = Integer.parseInt("111111111", 2);
final int[] ROWS = IntStream.range(0, N).toArray();
final int[] COLS = ROWS;
final int[] SQUARES = IntStream.range(0, N * N).toArray();
final int[][] BLOCKS = {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}};
final int[][] ALL_UNITS = new int[3 * N][];
final int[][][] UNITS = new int[N * N][3][N];
final int[][] PEERS = new int[N * N][20];
final int[] NUM_DIGITS = new int[ALL_DIGITS + 1];
final int[] HIGHEST_DIGIT = new int[ALL_DIGITS + 1];
{
// Initialize ALL_UNITS to be an array of the 27 units: rows, columns, and blocks
int i = 0;
for (int r: ROWS) {ALL_UNITS[i++] = cross(new int[] {r}, COLS); }
for (int c: COLS) {ALL_UNITS[i++] = cross(ROWS, new int[] {c}); }
for (int[] rb: BLOCKS) {for (int[] cb: BLOCKS) {ALL_UNITS[i++] = cross(rb, cb); } }
// Initialize each UNITS[s] to be an array of the 3 units for square s.
for (int s: SQUARES) {
i = 0;
for (int[] u: ALL_UNITS) {
if (member(s, u)) UNITS[s][i++] = u;
}
}
// Initialize each PEERS[s] to be an array of the 20 squares that are peers of square s.
for (int s: SQUARES) {
i = 0;
for (int[] u: UNITS[s]) {
for (int s2: u) {
if (s2 != s && !member(s2, PEERS[s], i)) {
PEERS[s][i++] = s2;
}
}
}
}
// Initialize NUM_DIGITS[val] to be the number of 1 bits in the bitset val
// and HIGHEST_DIGIT[val] to the highest bit set in the bitset val
for (int val = 0; val <= ALL_DIGITS; val++) {
NUM_DIGITS[val] = Integer.bitCount(val);
HIGHEST_DIGIT[val] = Integer.highestOneBit(val);
}
}
//////////////////////////////// Search algorithm ////////////////////////////////
/** Search for a solution to grid. If there is an unfilled square, select one
** and try--that is, search recursively--every possible digit for the square. **/
int[] search(int[] grid, int[][] gridpool, int level) {
if (grid == null) {
return null;
}
int s = select_square(grid);
if (s == -1) {
return grid; // No squares to select means we are done!
}
for (int d: DIGITS) {
// For each possible digit d that could fill square s, try it
if ((d & grid[s]) > 0) {
// Copy grid's contents into gridpool[level], and use that at the next level
System.arraycopy(grid, 0, gridpool[level], 0, grid.length);
int[] result = search(fill(gridpool[level], s, d), gridpool, level + 1);
if (result != null) {
return result;
}
backtracks += 1;
}
}
return null;
}
/** Verify that grid is a solution to the puzzle. **/
boolean verify(int[] grid, int[] puzzle) {
if (grid == null) { return false; }
// Check that all squares have a single digit, and
// no filled square in the puzzle was changed in the solution.
for (int s: SQUARES) {
if ((NUM_DIGITS[grid[s]] != 1) || (NUM_DIGITS[puzzle[s]] == 1 && grid[s] != puzzle[s])) {
return false;
}
}
// Check that each unit is a permutation of digits
for (int[] u: ALL_UNITS) {
int unit_digits = 0; // All the digits in a unit.
for (int s : u) {unit_digits |= grid[s]; }
if (unit_digits != ALL_DIGITS) {
return false;
}
}
return true;
}
/** Choose an unfilled square with the minimum number of possible values.
** If all squares are filled, return -1 (which means the puzzle is complete). **/
int select_square(int[] grid) {
int square = -1;
int min = N + 1;
for (int s: SQUARES) {
int c = NUM_DIGITS[grid[s]];
if (c == 2) {
return s; // Can't get fewer than 2 possible digits
} else if (c > 1 && c < min) {
square = s;
min = c;
}
}
return square;
}
/** fill grid[s] = d. If this leads to contradiction, return null. **/
int[] fill(int[] grid, int s, int d) {
if ((grid == null) || ((grid[s] & d) == 0)) { return null; } // d not possible for grid[s]
grid[s] = d;
for (int p: PEERS[s]) {
if (!eliminate(grid, p, d)) { // If we can't eliminate d from all peers of s, then fail
return null;
}
}
return grid;
}
/** Eliminate digit d as a possibility for grid[s].
** Run the 3 constraint propagation routines.
** If constraint propagation detects a contradiction, return false. **/
boolean eliminate(int[] grid, int s, int d) {
if ((grid[s] & d) == 0) { return true; } // d already eliminated from grid[s]
grid[s] -= d;
return arc_consistent(grid, s) && dual_consistent(grid, s, d) && naked_pairs(grid, s);
}
//////////////////////////////// Constraint Propagation ////////////////////////////////
/** Check if square s is consistent: that is, it has multiple possible values, or it has
** one possible value which we can consistently fill. **/
boolean arc_consistent(int[] grid, int s) {
int count = NUM_DIGITS[grid[s]];
return count >= 2 || (count == 1 && (fill(grid, s, grid[s]) != null));
}
/** After we eliminate d from possibilities for grid[s], check each unit of s
** and make sure there is some position in the unit where d can go.
** If there is only one possible place for d, fill it with d. **/
boolean dual_consistent(int[] grid, int s, int d) {
for (int[] u: UNITS[s]) {
int dPlaces = 0; // The number of possible places for d within unit u
int dplace = -1; // Try to find a place in the unit where d can go
for (int s2: u) {
if ((grid[s2] & d) > 0) { // s2 is a possible place for d
dPlaces++;
if (dPlaces > 1) break;
dplace = s2;
}
}
if (dPlaces == 0 || (dPlaces == 1 && (fill(grid, dplace, d) == null))) {
return false;
}
}
return true;
}
/** Look for two squares in a unit with the same two possible values, and no other values.
** For example, if s and s2 both have the possible values 8|9, then we know that 8 and 9
** must go in those two squares. We don't know which is which, but we can eliminate
** 8 and 9 from any other square s3 that is in the unit. **/
boolean naked_pairs(int[] grid, int s) {
if (!runNakedPairs) { return true; }
int val = grid[s];
if (NUM_DIGITS[val] != 2) { return true; } // Doesn't apply
for (int s2: PEERS[s]) {
if (grid[s2] == val) {
// s and s2 are a naked pair; find what unit(s) they share
for (int[] u: UNITS[s]) {
if (member(s2, u)) {
for (int s3: u) { // s3 can't have either of the values in val (e.g. 8|9)
if (s3 != s && s3 != s2) {
int d = HIGHEST_DIGIT[val];
int d2 = val - d;
if (!eliminate(grid, s3, d) || !eliminate(grid, s3, d2)) {
return false;
}
}
}
}
}
}
}
return true;
}
//////////////////////////////// Input ////////////////////////////////
/** The method `readFile` reads one puzzle per file line and returns a List of puzzle grids. **/
List<int[]> readFile(String filename) throws IOException {
BufferedReader in = new BufferedReader(new FileReader(filename));
List<int[]> grids = new ArrayList<int[]>(1000);
String gridstring;
while ((gridstring = in.readLine()) != null) {
grids.add(parseGrid(gridstring));
if (reversePuzzle) {
grids.add(parseGrid(new StringBuilder(gridstring).reverse().toString()));
}
}
return grids;
}
/** Parse a gridstring into a puzzle grid: an int[] with values DIGITS[0-9] or ALL_DIGITS. **/
int[] parseGrid(String gridstring) {
int[] grid = new int[N * N];
int s = 0;
for (int i = 0; i<gridstring.length(); ++i) {
char c = gridstring.charAt(i);
if ('1' <= c && c <= '9') {
grid[s++] = DIGITS[c - '1']; // A single-bit set to represent a digit
} else if (c == '0' || c == '.') {
grid[s++] = ALL_DIGITS; // Any digit is possible
}
}
assert s == N * N;
return grid;
}
/** Initialize a grid from a puzzle.
** First initialize every square in the new grid to ALL_DIGITS, meaning any value is possible.
** Then, call `fill` on the puzzle's filled squares to initiate constraint propagation. **/
int[] initialize(int[] puzzle) {
int[] grid = new int[N * N]; Arrays.fill(grid, ALL_DIGITS);
for (int s: SQUARES) { if (puzzle[s] != ALL_DIGITS) { fill(grid, s, puzzle[s]); } }
return grid;
}
//////////////////////////////// Output and Tests ////////////////////////////////
boolean headerPrinted = false;
/** Print stats on puzzles solved, average time, frequency, threads used, and name. **/
void printStats(int nGrids, long startTime, String name) {
double usecs = (System.nanoTime() - startTime) / 1000.;
String line = String.format("%7d %6.1f %7.3f %7d %10.1f %s",
nGrids, usecs / nGrids, 1000 * nGrids / usecs, nThreads, backtracks * 1. / nGrids, name);
synchronized (this) { // So that printing from different threads doesn't get garbled
if (!headerPrinted) {
System.out.println("Puzzles μsec KHz Threads Backtracks Name\n"
+ "======= ====== ======= ======= ========== ====");
headerPrinted = true;
}
System.out.println(line);
backtracks = 0;
}
}
/** Print the original puzzle grid and the solution grid. **/
void printGrids(String name, int[] puzzle, int[] solution) {
String bar = "------+-------+------";
String gap = " "; // Space between the puzzle grid and solution grid
if (solution == null) solution = new int[N * N];
synchronized (this) { // So that printing from different threads doesn't get garbled
System.out.format("\n%-22s%s%s\n", name + ":", gap,
(verify(solution, puzzle) ? "Solution:" : "FAILED:"));
for (int r = 0; r < N; ++r) {
System.out.println(rowString(puzzle, r) + gap + rowString(solution, r));
if (r == 2 || r == 5) System.out.println(bar + gap + " " + bar);
}
}
}
/** Return a String representing a row of this puzzle. **/
String rowString(int[] grid, int r) {
String row = "";
for (int s = r * 9; s < (r + 1) * 9; ++s) {
row += (char) ((NUM_DIGITS[grid[s]] == 9) ? '.' : (NUM_DIGITS[grid[s]] != 1) ? '?' :
('1' + Integer.numberOfTrailingZeros(grid[s])));
row += (s % 9 == 2 || s % 9 == 5 ? " | " : " ");
}
return row;
}
/** Unit Tests. Just getting started with these. **/
void runUnitTests() {
assert N == 9;
assert SQUARES.length == 81;
for (int s: SQUARES) {
assert UNITS[s].length == 3;
assert PEERS[s].length == 20;
}
assert Arrays.equals(PEERS[19],
new int[] {18, 20, 21, 22, 23, 24, 25, 26, 1, 10, 28, 37, 46, 55, 64, 73, 0, 2, 9, 11});
assert Arrays.deepToString(UNITS[19]).equals(
"[[18, 19, 20, 21, 22, 23, 24, 25, 26], [1, 10, 19, 28, 37, 46, 55, 64, 73], [0, 1, 2, 9, 10, 11, 18, 19, 20]]");
System.out.println("Unit tests pass.");
}
} | norvig/pytudes | ipynb/Sudoku.java |
510 | package com.genymobile.scrcpy;
import android.util.Log;
import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.PrintStream;
/**
* Log both to Android logger (so that logs are visible in "adb logcat") and standard output/error (so that they are visible in the terminal
* directly).
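 *
 * <p>Typical use (illustrative): call {@code Ln.initLogLevel(Ln.Level.DEBUG)} once at startup,
 * then log with {@code Ln.d("message")} and the other level-specific methods.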
*/
public final class Ln {
private static final String TAG = "scrcpy";
private static final String PREFIX = "[server] ";
private static final PrintStream CONSOLE_OUT = new PrintStream(new FileOutputStream(FileDescriptor.out));
private static final PrintStream CONSOLE_ERR = new PrintStream(new FileOutputStream(FileDescriptor.err));
enum Level {
VERBOSE, DEBUG, INFO, WARN, ERROR
}
private static Level threshold = Level.INFO;
private Ln() {
// not instantiable
}
public static void disableSystemStreams() {
PrintStream nullStream = new PrintStream(new NullOutputStream());
System.setOut(nullStream);
System.setErr(nullStream);
}
/**
* Initialize the log level.
* <p>
* Must be called before starting any new thread.
*
* @param level the log level
*/
public static void initLogLevel(Level level) {
threshold = level;
}
public static boolean isEnabled(Level level) {
return level.ordinal() >= threshold.ordinal();
}
public static void v(String message) {
if (isEnabled(Level.VERBOSE)) {
Log.v(TAG, message);
CONSOLE_OUT.print(PREFIX + "VERBOSE: " + message + '\n');
}
}
public static void d(String message) {
if (isEnabled(Level.DEBUG)) {
Log.d(TAG, message);
CONSOLE_OUT.print(PREFIX + "DEBUG: " + message + '\n');
}
}
public static void i(String message) {
if (isEnabled(Level.INFO)) {
Log.i(TAG, message);
CONSOLE_OUT.print(PREFIX + "INFO: " + message + '\n');
}
}
public static void w(String message, Throwable throwable) {
if (isEnabled(Level.WARN)) {
Log.w(TAG, message, throwable);
CONSOLE_ERR.print(PREFIX + "WARN: " + message + '\n');
if (throwable != null) {
throwable.printStackTrace(CONSOLE_ERR);
}
}
}
public static void w(String message) {
w(message, null);
}
public static void e(String message, Throwable throwable) {
if (isEnabled(Level.ERROR)) {
Log.e(TAG, message, throwable);
CONSOLE_ERR.print(PREFIX + "ERROR: " + message + '\n');
if (throwable != null) {
throwable.printStackTrace(CONSOLE_ERR);
}
}
}
public static void e(String message) {
e(message, null);
}
static class NullOutputStream extends OutputStream {
@Override
public void write(byte[] b) {
// ignore
}
@Override
public void write(byte[] b, int off, int len) {
// ignore
}
@Override
public void write(int b) {
// ignore
}
}
}
| Genymobile/scrcpy | server/src/main/java/com/genymobile/scrcpy/Ln.java |
511 | // See README.md for information and build instructions.
import com.example.tutorial.protos.AddressBook;
import com.example.tutorial.protos.Person;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintStream;
class ListPeople {
  // Iterates through all people in the AddressBook and prints info about them.
static void Print(AddressBook addressBook) {
for (Person person: addressBook.getPeopleList()) {
System.out.println("Person ID: " + person.getId());
System.out.println(" Name: " + person.getName());
if (!person.getEmail().isEmpty()) {
System.out.println(" E-mail address: " + person.getEmail());
}
for (Person.PhoneNumber phoneNumber : person.getPhonesList()) {
switch (phoneNumber.getType()) {
case MOBILE:
System.out.print(" Mobile phone #: ");
break;
case HOME:
System.out.print(" Home phone #: ");
break;
case WORK:
System.out.print(" Work phone #: ");
break;
default:
System.out.println(" Unknown phone #: ");
break;
}
System.out.println(phoneNumber.getNumber());
}
}
}
// Main function: Reads the entire address book from a file and prints all
// the information inside.
public static void main(String[] args) throws Exception {
if (args.length != 1) {
System.err.println("Usage: ListPeople ADDRESS_BOOK_FILE");
System.exit(-1);
}
// Read the existing address book.
AddressBook addressBook =
AddressBook.parseFrom(new FileInputStream(args[0]));
Print(addressBook);
}
}
| protocolbuffers/protobuf | examples/ListPeople.java |
519 | import java.util.HashMap;

class ValidAnagram {
public boolean isAnagram(String s, String t) {
HashMap<Character, Integer> map = new HashMap<Character, Integer>();
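        // Count occurrences of each character in s.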
for(char c: s.toCharArray()) {
if(map.containsKey(c)) {
map.put(c, map.get(c) + 1);
}
else {
map.put(c, 1);
}
}
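        // Decrement counts for t; a character absent from the map means t
        // contains a character that s does not.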
for(char c: t.toCharArray()) {
if(map.containsKey(c)) {
map.put(c, map.get(c) - 1);
}
else {
return false;
}
}
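        // All counts must return to zero for the strings to be anagrams.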
for(char c: map.keySet()) {
if(map.get(c) != 0) {
return false;
}
}
return true;
}
}
| kdn251/interviews | leetcode/hash-table/ValidAnagram.java |
520 | // ASM: a very small and fast Java bytecode manipulation framework
// Copyright (c) 2000-2011 INRIA, France Telecom
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
package org.springframework.asm;
/**
* The input and output stack map frames of a basic block.
*
* <p>Stack map frames are computed in two steps:
*
* <ul>
* <li>During the visit of each instruction in MethodWriter, the state of the frame at the end of
* the current basic block is updated by simulating the action of the instruction on the
* previous state of this so called "output frame".
* <li>After all instructions have been visited, a fix point algorithm is used in MethodWriter to
* compute the "input frame" of each basic block (i.e. the stack map frame at the beginning of
* the basic block). See {@link MethodWriter#computeAllFrames}.
* </ul>
*
* <p>Output stack map frames are computed relatively to the input frame of the basic block, which
* is not yet known when output frames are computed. It is therefore necessary to be able to
* represent abstract types such as "the type at position x in the input frame locals" or "the type
* at position x from the top of the input frame stack" or even "the type at position x in the input
* frame, with y more (or less) array dimensions". This explains the rather complicated type format
* used in this class, explained below.
*
* <p>The local variables and the operand stack of input and output frames contain values called
* "abstract types" hereafter. An abstract type is represented with 4 fields named DIM, KIND, FLAGS
* and VALUE, packed in a single int value for better performance and memory efficiency:
*
* <pre>
* =====================================
* |...DIM|KIND|.F|...............VALUE|
* =====================================
* </pre>
*
* <ul>
* <li>the DIM field, stored in the 6 most significant bits, is a signed number of array
* dimensions (from -32 to 31, included). It can be retrieved with {@link #DIM_MASK} and a
* right shift of {@link #DIM_SHIFT}.
* <li>the KIND field, stored in 4 bits, indicates the kind of VALUE used. These 4 bits can be
* retrieved with {@link #KIND_MASK} and, without any shift, must be equal to {@link
* #CONSTANT_KIND}, {@link #REFERENCE_KIND}, {@link #UNINITIALIZED_KIND}, {@link
* #FORWARD_UNINITIALIZED_KIND},{@link #LOCAL_KIND} or {@link #STACK_KIND}.
* <li>the FLAGS field, stored in 2 bits, contains up to 2 boolean flags. Currently only one flag
* is defined, namely {@link #TOP_IF_LONG_OR_DOUBLE_FLAG}.
* <li>the VALUE field, stored in the remaining 20 bits, contains either
* <ul>
* <li>one of the constants {@link #ITEM_TOP}, {@link #ITEM_ASM_BOOLEAN}, {@link
* #ITEM_ASM_BYTE}, {@link #ITEM_ASM_CHAR} or {@link #ITEM_ASM_SHORT}, {@link
* #ITEM_INTEGER}, {@link #ITEM_FLOAT}, {@link #ITEM_LONG}, {@link #ITEM_DOUBLE}, {@link
* #ITEM_NULL} or {@link #ITEM_UNINITIALIZED_THIS}, if KIND is equal to {@link
* #CONSTANT_KIND}.
* <li>the index of a {@link Symbol#TYPE_TAG} {@link Symbol} in the type table of a {@link
* SymbolTable}, if KIND is equal to {@link #REFERENCE_KIND}.
* <li>the index of an {@link Symbol#UNINITIALIZED_TYPE_TAG} {@link Symbol} in the type
* table of a {@link SymbolTable}, if KIND is equal to {@link #UNINITIALIZED_KIND}.
* <li>the index of a {@link Symbol#FORWARD_UNINITIALIZED_TYPE_TAG} {@link Symbol} in the
* type table of a {@link SymbolTable}, if KIND is equal to {@link
* #FORWARD_UNINITIALIZED_KIND}.
* <li>the index of a local variable in the input stack frame, if KIND is equal to {@link
* #LOCAL_KIND}.
 *         <li>a position relative to the top of the stack of the input stack frame, if KIND is
 *             equal to {@link #STACK_KIND}.
* </ul>
* </ul>
*
* <p>Output frames can contain abstract types of any kind and with a positive or negative array
* dimension (and even unassigned types, represented by 0 - which does not correspond to any valid
* abstract type value). Input frames can only contain CONSTANT_KIND, REFERENCE_KIND,
 * UNINITIALIZED_KIND or FORWARD_UNINITIALIZED_KIND abstract types of positive or zero
* array dimension. In all cases the type table contains only internal type names (array type
* descriptors are forbidden - array dimensions must be represented through the DIM field).
*
* <p>The LONG and DOUBLE types are always represented by using two slots (LONG + TOP or DOUBLE +
* TOP), for local variables as well as in the operand stack. This is necessary to be able to
* simulate DUPx_y instructions, whose effect would be dependent on the concrete types represented
* by the abstract types in the stack (which are not always known).
*
* @author Eric Bruneton
*/
class Frame {
// Constants used in the StackMapTable attribute.
// See https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.4.
static final int SAME_FRAME = 0;
static final int SAME_LOCALS_1_STACK_ITEM_FRAME = 64;
static final int RESERVED = 128;
static final int SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED = 247;
static final int CHOP_FRAME = 248;
static final int SAME_FRAME_EXTENDED = 251;
static final int APPEND_FRAME = 252;
static final int FULL_FRAME = 255;
static final int ITEM_TOP = 0;
static final int ITEM_INTEGER = 1;
static final int ITEM_FLOAT = 2;
static final int ITEM_DOUBLE = 3;
static final int ITEM_LONG = 4;
static final int ITEM_NULL = 5;
static final int ITEM_UNINITIALIZED_THIS = 6;
static final int ITEM_OBJECT = 7;
static final int ITEM_UNINITIALIZED = 8;
// Additional, ASM specific constants used in abstract types below.
private static final int ITEM_ASM_BOOLEAN = 9;
private static final int ITEM_ASM_BYTE = 10;
private static final int ITEM_ASM_CHAR = 11;
private static final int ITEM_ASM_SHORT = 12;
// The size and offset in bits of each field of an abstract type.
private static final int DIM_SIZE = 6;
private static final int KIND_SIZE = 4;
private static final int FLAGS_SIZE = 2;
private static final int VALUE_SIZE = 32 - DIM_SIZE - KIND_SIZE - FLAGS_SIZE;
private static final int DIM_SHIFT = KIND_SIZE + FLAGS_SIZE + VALUE_SIZE;
private static final int KIND_SHIFT = FLAGS_SIZE + VALUE_SIZE;
private static final int FLAGS_SHIFT = VALUE_SIZE;
// Bitmasks to get each field of an abstract type.
private static final int DIM_MASK = ((1 << DIM_SIZE) - 1) << DIM_SHIFT;
private static final int KIND_MASK = ((1 << KIND_SIZE) - 1) << KIND_SHIFT;
private static final int VALUE_MASK = (1 << VALUE_SIZE) - 1;
// Constants to manipulate the DIM field of an abstract type.
/** The constant to be added to an abstract type to get one with one more array dimension. */
private static final int ARRAY_OF = +1 << DIM_SHIFT;
/** The constant to be added to an abstract type to get one with one less array dimension. */
private static final int ELEMENT_OF = -1 << DIM_SHIFT;
// Possible values for the KIND field of an abstract type.
private static final int CONSTANT_KIND = 1 << KIND_SHIFT;
private static final int REFERENCE_KIND = 2 << KIND_SHIFT;
private static final int UNINITIALIZED_KIND = 3 << KIND_SHIFT;
private static final int FORWARD_UNINITIALIZED_KIND = 4 << KIND_SHIFT;
private static final int LOCAL_KIND = 5 << KIND_SHIFT;
private static final int STACK_KIND = 6 << KIND_SHIFT;
// Possible flags for the FLAGS field of an abstract type.
/**
* A flag used for LOCAL_KIND and STACK_KIND abstract types, indicating that if the resolved,
* concrete type is LONG or DOUBLE, TOP should be used instead (because the value has been
* partially overridden with an xSTORE instruction).
*/
private static final int TOP_IF_LONG_OR_DOUBLE_FLAG = 1 << FLAGS_SHIFT;
// Useful predefined abstract types (all the possible CONSTANT_KIND types).
private static final int TOP = CONSTANT_KIND | ITEM_TOP;
private static final int BOOLEAN = CONSTANT_KIND | ITEM_ASM_BOOLEAN;
private static final int BYTE = CONSTANT_KIND | ITEM_ASM_BYTE;
private static final int CHAR = CONSTANT_KIND | ITEM_ASM_CHAR;
private static final int SHORT = CONSTANT_KIND | ITEM_ASM_SHORT;
private static final int INTEGER = CONSTANT_KIND | ITEM_INTEGER;
private static final int FLOAT = CONSTANT_KIND | ITEM_FLOAT;
private static final int LONG = CONSTANT_KIND | ITEM_LONG;
private static final int DOUBLE = CONSTANT_KIND | ITEM_DOUBLE;
private static final int NULL = CONSTANT_KIND | ITEM_NULL;
private static final int UNINITIALIZED_THIS = CONSTANT_KIND | ITEM_UNINITIALIZED_THIS;
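  // Editor's note (illustrative sketch, not part of the original ASM source): given the bit
  // layout documented above, a one-dimensional array of a reference type whose index in the
  // SymbolTable type table is 42 would be packed and unpacked as follows:
  //   int abstractType = (1 << DIM_SHIFT) | REFERENCE_KIND | 42;
  //   int dim = (abstractType & DIM_MASK) >> DIM_SHIFT; // 1 array dimension
  //   int kind = abstractType & KIND_MASK;              // REFERENCE_KIND
  //   int value = abstractType & VALUE_MASK;            // 42, the type table index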
// -----------------------------------------------------------------------------------------------
// Instance fields
// -----------------------------------------------------------------------------------------------
/** The basic block to which these input and output stack map frames correspond. */
Label owner;
/** The input stack map frame locals. This is an array of abstract types. */
private int[] inputLocals;
/** The input stack map frame stack. This is an array of abstract types. */
private int[] inputStack;
/** The output stack map frame locals. This is an array of abstract types. */
private int[] outputLocals;
/** The output stack map frame stack. This is an array of abstract types. */
private int[] outputStack;
/**
   * The start of the output stack, relative to the input stack. This offset is always negative or
   * zero. A zero offset means that the output stack must be appended to the input stack. A -n
* offset means that the first n output stack elements must replace the top n input stack
* elements, and that the other elements must be appended to the input stack.
*/
private short outputStackStart;
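  // Editor's note (illustrative): if a basic block pops two values from a still empty output
  // stack and then pushes one, outputStackStart becomes -2 and outputStackTop becomes 1: the
  // single output element replaces the deeper of the two consumed input stack elements.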
/** The index of the top stack element in {@link #outputStack}. */
private short outputStackTop;
/** The number of types that are initialized in the basic block. See {@link #initializations}. */
private int initializationCount;
/**
* The abstract types that are initialized in the basic block. A constructor invocation on an
* UNINITIALIZED, FORWARD_UNINITIALIZED or UNINITIALIZED_THIS abstract type must replace <i>every
* occurrence</i> of this type in the local variables and in the operand stack. This cannot be
* done during the first step of the algorithm since, during this step, the local variables and
* the operand stack types are still abstract. It is therefore necessary to store the abstract
* types of the constructors which are invoked in the basic block, in order to do this replacement
* during the second step of the algorithm, where the frames are fully computed. Note that this
* array can contain abstract types that are relative to the input locals or to the input stack.
*/
private int[] initializations;
// -----------------------------------------------------------------------------------------------
// Constructor
// -----------------------------------------------------------------------------------------------
/**
* Constructs a new Frame.
*
* @param owner the basic block to which these input and output stack map frames correspond.
*/
Frame(final Label owner) {
this.owner = owner;
}
/**
* Sets this frame to the value of the given frame.
*
* <p>WARNING: after this method is called the two frames share the same data structures. It is
* recommended to discard the given frame to avoid unexpected side effects.
*
* @param frame The new frame value.
*/
final void copyFrom(final Frame frame) {
inputLocals = frame.inputLocals;
inputStack = frame.inputStack;
outputStackStart = 0;
outputLocals = frame.outputLocals;
outputStack = frame.outputStack;
outputStackTop = frame.outputStackTop;
initializationCount = frame.initializationCount;
initializations = frame.initializations;
}
// -----------------------------------------------------------------------------------------------
// Static methods to get abstract types from other type formats
// -----------------------------------------------------------------------------------------------
/**
* Returns the abstract type corresponding to the given public API frame element type.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param type a frame element type described using the same format as in {@link
* MethodVisitor#visitFrame}, i.e. either {@link Opcodes#TOP}, {@link Opcodes#INTEGER}, {@link
* Opcodes#FLOAT}, {@link Opcodes#LONG}, {@link Opcodes#DOUBLE}, {@link Opcodes#NULL}, or
* {@link Opcodes#UNINITIALIZED_THIS}, or the internal name of a class, or a Label designating
* a NEW instruction (for uninitialized types).
* @return the abstract type corresponding to the given frame element type.
*/
static int getAbstractTypeFromApiFormat(final SymbolTable symbolTable, final Object type) {
if (type instanceof Integer) {
return CONSTANT_KIND | ((Integer) type).intValue();
} else if (type instanceof String) {
String descriptor = Type.getObjectType((String) type).getDescriptor();
return getAbstractTypeFromDescriptor(symbolTable, descriptor, 0);
} else {
Label label = (Label) type;
if ((label.flags & Label.FLAG_RESOLVED) != 0) {
return UNINITIALIZED_KIND | symbolTable.addUninitializedType("", label.bytecodeOffset);
} else {
return FORWARD_UNINITIALIZED_KIND | symbolTable.addForwardUninitializedType("", label);
}
}
}
/**
* Returns the abstract type corresponding to the internal name of a class.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param internalName the internal name of a class. This must <i>not</i> be an array type
* descriptor.
* @return the abstract type value corresponding to the given internal name.
*/
static int getAbstractTypeFromInternalName(
final SymbolTable symbolTable, final String internalName) {
return REFERENCE_KIND | symbolTable.addType(internalName);
}
/**
* Returns the abstract type corresponding to the given type descriptor.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param buffer a string ending with a type descriptor.
* @param offset the start offset of the type descriptor in buffer.
* @return the abstract type corresponding to the given type descriptor.
*/
private static int getAbstractTypeFromDescriptor(
final SymbolTable symbolTable, final String buffer, final int offset) {
String internalName;
switch (buffer.charAt(offset)) {
case 'V':
return 0;
case 'Z':
case 'C':
case 'B':
case 'S':
case 'I':
return INTEGER;
case 'F':
return FLOAT;
case 'J':
return LONG;
case 'D':
return DOUBLE;
case 'L':
internalName = buffer.substring(offset + 1, buffer.length() - 1);
return REFERENCE_KIND | symbolTable.addType(internalName);
case '[':
int elementDescriptorOffset = offset + 1;
while (buffer.charAt(elementDescriptorOffset) == '[') {
++elementDescriptorOffset;
}
int typeValue;
switch (buffer.charAt(elementDescriptorOffset)) {
case 'Z':
typeValue = BOOLEAN;
break;
case 'C':
typeValue = CHAR;
break;
case 'B':
typeValue = BYTE;
break;
case 'S':
typeValue = SHORT;
break;
case 'I':
typeValue = INTEGER;
break;
case 'F':
typeValue = FLOAT;
break;
case 'J':
typeValue = LONG;
break;
case 'D':
typeValue = DOUBLE;
break;
case 'L':
internalName = buffer.substring(elementDescriptorOffset + 1, buffer.length() - 1);
typeValue = REFERENCE_KIND | symbolTable.addType(internalName);
break;
default:
throw new IllegalArgumentException(
"Invalid descriptor fragment: " + buffer.substring(elementDescriptorOffset));
}
return ((elementDescriptorOffset - offset) << DIM_SHIFT) | typeValue;
default:
throw new IllegalArgumentException("Invalid descriptor: " + buffer.substring(offset));
}
}
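  // Editor's note (illustrative examples, derived from the method above):
  //   getAbstractTypeFromDescriptor(table, "I", 0)                  -> INTEGER
  //   getAbstractTypeFromDescriptor(table, "Ljava/lang/String;", 0) -> REFERENCE_KIND | <index of java/lang/String>
  //   getAbstractTypeFromDescriptor(table, "[[D", 0)                -> (2 << DIM_SHIFT) | DOUBLE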
// -----------------------------------------------------------------------------------------------
// Methods related to the input frame
// -----------------------------------------------------------------------------------------------
/**
* Sets the input frame from the given method description. This method is used to initialize the
* first frame of a method, which is implicit (i.e. not stored explicitly in the StackMapTable
* attribute).
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param access the method's access flags.
* @param descriptor the method descriptor.
* @param maxLocals the maximum number of local variables of the method.
*/
final void setInputFrameFromDescriptor(
final SymbolTable symbolTable,
final int access,
final String descriptor,
final int maxLocals) {
inputLocals = new int[maxLocals];
inputStack = new int[0];
int inputLocalIndex = 0;
if ((access & Opcodes.ACC_STATIC) == 0) {
if ((access & Constants.ACC_CONSTRUCTOR) == 0) {
inputLocals[inputLocalIndex++] =
REFERENCE_KIND | symbolTable.addType(symbolTable.getClassName());
} else {
inputLocals[inputLocalIndex++] = UNINITIALIZED_THIS;
}
}
for (Type argumentType : Type.getArgumentTypes(descriptor)) {
int abstractType =
getAbstractTypeFromDescriptor(symbolTable, argumentType.getDescriptor(), 0);
inputLocals[inputLocalIndex++] = abstractType;
if (abstractType == LONG || abstractType == DOUBLE) {
inputLocals[inputLocalIndex++] = TOP;
}
}
while (inputLocalIndex < maxLocals) {
inputLocals[inputLocalIndex++] = TOP;
}
}
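  // Editor's note (illustrative): for a non-static, non-constructor method with descriptor
  // "(IJ)V" declared in class C, with maxLocals = 5, the implicit input frame computed above
  // is locals = [ C, INTEGER, LONG, TOP, TOP ] and an empty input stack.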
/**
* Sets the input frame from the given public API frame description.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param numLocal the number of local variables.
* @param local the local variable types, described using the same format as in {@link
* MethodVisitor#visitFrame}.
* @param numStack the number of operand stack elements.
* @param stack the operand stack types, described using the same format as in {@link
* MethodVisitor#visitFrame}.
*/
final void setInputFrameFromApiFormat(
final SymbolTable symbolTable,
final int numLocal,
final Object[] local,
final int numStack,
final Object[] stack) {
int inputLocalIndex = 0;
for (int i = 0; i < numLocal; ++i) {
inputLocals[inputLocalIndex++] = getAbstractTypeFromApiFormat(symbolTable, local[i]);
if (local[i] == Opcodes.LONG || local[i] == Opcodes.DOUBLE) {
inputLocals[inputLocalIndex++] = TOP;
}
}
while (inputLocalIndex < inputLocals.length) {
inputLocals[inputLocalIndex++] = TOP;
}
int numStackTop = 0;
for (int i = 0; i < numStack; ++i) {
if (stack[i] == Opcodes.LONG || stack[i] == Opcodes.DOUBLE) {
++numStackTop;
}
}
inputStack = new int[numStack + numStackTop];
int inputStackIndex = 0;
for (int i = 0; i < numStack; ++i) {
inputStack[inputStackIndex++] = getAbstractTypeFromApiFormat(symbolTable, stack[i]);
if (stack[i] == Opcodes.LONG || stack[i] == Opcodes.DOUBLE) {
inputStack[inputStackIndex++] = TOP;
}
}
outputStackTop = 0;
initializationCount = 0;
}
final int getInputStackSize() {
return inputStack.length;
}
// -----------------------------------------------------------------------------------------------
// Methods related to the output frame
// -----------------------------------------------------------------------------------------------
/**
* Returns the abstract type stored at the given local variable index in the output frame.
*
* @param localIndex the index of the local variable whose value must be returned.
* @return the abstract type stored at the given local variable index in the output frame.
*/
private int getLocal(final int localIndex) {
if (outputLocals == null || localIndex >= outputLocals.length) {
// If this local has never been assigned in this basic block, it is still equal to its value
// in the input frame.
return LOCAL_KIND | localIndex;
} else {
int abstractType = outputLocals[localIndex];
if (abstractType == 0) {
        // This local has never been assigned in this basic block, so it is still equal to its
// value in the input frame.
abstractType = outputLocals[localIndex] = LOCAL_KIND | localIndex;
}
return abstractType;
}
}
/**
* Replaces the abstract type stored at the given local variable index in the output frame.
*
* @param localIndex the index of the output frame local variable that must be set.
* @param abstractType the value that must be set.
*/
private void setLocal(final int localIndex, final int abstractType) {
// Create and/or resize the output local variables array if necessary.
if (outputLocals == null) {
outputLocals = new int[10];
}
int outputLocalsLength = outputLocals.length;
if (localIndex >= outputLocalsLength) {
int[] newOutputLocals = new int[Math.max(localIndex + 1, 2 * outputLocalsLength)];
System.arraycopy(outputLocals, 0, newOutputLocals, 0, outputLocalsLength);
outputLocals = newOutputLocals;
}
// Set the local variable.
outputLocals[localIndex] = abstractType;
}
/**
* Pushes the given abstract type on the output frame stack.
*
* @param abstractType an abstract type.
*/
private void push(final int abstractType) {
// Create and/or resize the output stack array if necessary.
if (outputStack == null) {
outputStack = new int[10];
}
int outputStackLength = outputStack.length;
if (outputStackTop >= outputStackLength) {
int[] newOutputStack = new int[Math.max(outputStackTop + 1, 2 * outputStackLength)];
System.arraycopy(outputStack, 0, newOutputStack, 0, outputStackLength);
outputStack = newOutputStack;
}
// Pushes the abstract type on the output stack.
outputStack[outputStackTop++] = abstractType;
// Updates the maximum size reached by the output stack, if needed (note that this size is
// relative to the input stack size, which is not known yet).
short outputStackSize = (short) (outputStackStart + outputStackTop);
if (outputStackSize > owner.outputStackMax) {
owner.outputStackMax = outputStackSize;
}
}
/**
* Pushes the abstract type corresponding to the given descriptor on the output frame stack.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param descriptor a type or method descriptor (in which case its return type is pushed).
*/
private void push(final SymbolTable symbolTable, final String descriptor) {
int typeDescriptorOffset =
descriptor.charAt(0) == '(' ? Type.getReturnTypeOffset(descriptor) : 0;
int abstractType = getAbstractTypeFromDescriptor(symbolTable, descriptor, typeDescriptorOffset);
if (abstractType != 0) {
push(abstractType);
if (abstractType == LONG || abstractType == DOUBLE) {
push(TOP);
}
}
}
/**
* Pops an abstract type from the output frame stack and returns its value.
*
* @return the abstract type that has been popped from the output frame stack.
*/
private int pop() {
if (outputStackTop > 0) {
return outputStack[--outputStackTop];
} else {
// If the output frame stack is empty, pop from the input stack.
return STACK_KIND | -(--outputStackStart);
}
}
/**
* Pops the given number of abstract types from the output frame stack.
*
* @param elements the number of abstract types that must be popped.
*/
private void pop(final int elements) {
if (outputStackTop >= elements) {
outputStackTop -= elements;
} else {
// If the number of elements to be popped is greater than the number of elements in the output
// stack, clear it, and pop the remaining elements from the input stack.
outputStackStart -= elements - outputStackTop;
outputStackTop = 0;
}
}
/**
* Pops as many abstract types from the output frame stack as described by the given descriptor.
*
* @param descriptor a type or method descriptor (in which case its argument types are popped).
*/
private void pop(final String descriptor) {
char firstDescriptorChar = descriptor.charAt(0);
if (firstDescriptorChar == '(') {
pop((Type.getArgumentsAndReturnSizes(descriptor) >> 2) - 1);
} else if (firstDescriptorChar == 'J' || firstDescriptorChar == 'D') {
pop(2);
} else {
pop(1);
}
}
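  // Editor's note (illustrative): pop("(IJ)V") pops 3 abstract types, because
  // getArgumentsAndReturnSizes counts 1 (implicit 'this') + 1 (int) + 2 (long) = 4 argument
  // slots and the code above subtracts the implicit 'this' slot.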
// -----------------------------------------------------------------------------------------------
// Methods to handle uninitialized types
// -----------------------------------------------------------------------------------------------
/**
* Adds an abstract type to the list of types on which a constructor is invoked in the basic
* block.
*
   * @param abstractType an abstract type on which a constructor is invoked.
*/
private void addInitializedType(final int abstractType) {
// Create and/or resize the initializations array if necessary.
if (initializations == null) {
initializations = new int[2];
}
int initializationsLength = initializations.length;
if (initializationCount >= initializationsLength) {
int[] newInitializations =
new int[Math.max(initializationCount + 1, 2 * initializationsLength)];
System.arraycopy(initializations, 0, newInitializations, 0, initializationsLength);
initializations = newInitializations;
}
// Store the abstract type.
initializations[initializationCount++] = abstractType;
}
/**
* Returns the "initialized" abstract type corresponding to the given abstract type.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param abstractType an abstract type.
* @return the REFERENCE_KIND abstract type corresponding to abstractType if it is
* UNINITIALIZED_THIS or an UNINITIALIZED_KIND or FORWARD_UNINITIALIZED_KIND abstract type for
* one of the types on which a constructor is invoked in the basic block. Otherwise returns
* abstractType.
*/
private int getInitializedType(final SymbolTable symbolTable, final int abstractType) {
if (abstractType == UNINITIALIZED_THIS
|| (abstractType & (DIM_MASK | KIND_MASK)) == UNINITIALIZED_KIND
|| (abstractType & (DIM_MASK | KIND_MASK)) == FORWARD_UNINITIALIZED_KIND) {
for (int i = 0; i < initializationCount; ++i) {
int initializedType = initializations[i];
int dim = initializedType & DIM_MASK;
int kind = initializedType & KIND_MASK;
int value = initializedType & VALUE_MASK;
if (kind == LOCAL_KIND) {
initializedType = dim + inputLocals[value];
} else if (kind == STACK_KIND) {
initializedType = dim + inputStack[inputStack.length - value];
}
if (abstractType == initializedType) {
if (abstractType == UNINITIALIZED_THIS) {
return REFERENCE_KIND | symbolTable.addType(symbolTable.getClassName());
} else {
return REFERENCE_KIND
| symbolTable.addType(symbolTable.getType(abstractType & VALUE_MASK).value);
}
}
}
}
return abstractType;
}
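  // Editor's note (illustrative): after "NEW Foo; DUP; INVOKESPECIAL Foo.<init>" in a basic
  // block, the UNINITIALIZED abstract type recorded for the NEW offset is in 'initializations',
  // so this method maps every remaining occurrence of that type (e.g. the DUPed copy still on
  // the stack) to the initialized REFERENCE_KIND type of Foo.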
// -----------------------------------------------------------------------------------------------
// Main method, to simulate the execution of each instruction on the output frame
// -----------------------------------------------------------------------------------------------
/**
* Simulates the action of the given instruction on the output stack frame.
*
* @param opcode the opcode of the instruction.
* @param arg the numeric operand of the instruction, if any.
* @param argSymbol the Symbol operand of the instruction, if any.
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
*/
void execute(
final int opcode, final int arg, final Symbol argSymbol, final SymbolTable symbolTable) {
// Abstract types popped from the stack or read from local variables.
int abstractType1;
int abstractType2;
int abstractType3;
int abstractType4;
switch (opcode) {
case Opcodes.NOP:
case Opcodes.INEG:
case Opcodes.LNEG:
case Opcodes.FNEG:
case Opcodes.DNEG:
case Opcodes.I2B:
case Opcodes.I2C:
case Opcodes.I2S:
case Opcodes.GOTO:
case Opcodes.RETURN:
break;
case Opcodes.ACONST_NULL:
push(NULL);
break;
case Opcodes.ICONST_M1:
case Opcodes.ICONST_0:
case Opcodes.ICONST_1:
case Opcodes.ICONST_2:
case Opcodes.ICONST_3:
case Opcodes.ICONST_4:
case Opcodes.ICONST_5:
case Opcodes.BIPUSH:
case Opcodes.SIPUSH:
case Opcodes.ILOAD:
push(INTEGER);
break;
case Opcodes.LCONST_0:
case Opcodes.LCONST_1:
case Opcodes.LLOAD:
push(LONG);
push(TOP);
break;
case Opcodes.FCONST_0:
case Opcodes.FCONST_1:
case Opcodes.FCONST_2:
case Opcodes.FLOAD:
push(FLOAT);
break;
case Opcodes.DCONST_0:
case Opcodes.DCONST_1:
case Opcodes.DLOAD:
push(DOUBLE);
push(TOP);
break;
case Opcodes.LDC:
switch (argSymbol.tag) {
case Symbol.CONSTANT_INTEGER_TAG:
push(INTEGER);
break;
case Symbol.CONSTANT_LONG_TAG:
push(LONG);
push(TOP);
break;
case Symbol.CONSTANT_FLOAT_TAG:
push(FLOAT);
break;
case Symbol.CONSTANT_DOUBLE_TAG:
push(DOUBLE);
push(TOP);
break;
case Symbol.CONSTANT_CLASS_TAG:
push(REFERENCE_KIND | symbolTable.addType("java/lang/Class"));
break;
case Symbol.CONSTANT_STRING_TAG:
push(REFERENCE_KIND | symbolTable.addType("java/lang/String"));
break;
case Symbol.CONSTANT_METHOD_TYPE_TAG:
push(REFERENCE_KIND | symbolTable.addType("java/lang/invoke/MethodType"));
break;
case Symbol.CONSTANT_METHOD_HANDLE_TAG:
push(REFERENCE_KIND | symbolTable.addType("java/lang/invoke/MethodHandle"));
break;
case Symbol.CONSTANT_DYNAMIC_TAG:
push(symbolTable, argSymbol.value);
break;
default:
throw new AssertionError();
}
break;
case Opcodes.ALOAD:
push(getLocal(arg));
break;
case Opcodes.LALOAD:
case Opcodes.D2L:
pop(2);
push(LONG);
push(TOP);
break;
case Opcodes.DALOAD:
case Opcodes.L2D:
pop(2);
push(DOUBLE);
push(TOP);
break;
case Opcodes.AALOAD:
pop(1);
abstractType1 = pop();
push(abstractType1 == NULL ? abstractType1 : ELEMENT_OF + abstractType1);
break;
case Opcodes.ISTORE:
case Opcodes.FSTORE:
case Opcodes.ASTORE:
abstractType1 = pop();
setLocal(arg, abstractType1);
if (arg > 0) {
int previousLocalType = getLocal(arg - 1);
if (previousLocalType == LONG || previousLocalType == DOUBLE) {
setLocal(arg - 1, TOP);
} else if ((previousLocalType & KIND_MASK) == LOCAL_KIND
|| (previousLocalType & KIND_MASK) == STACK_KIND) {
// The type of the previous local variable is not known yet, but if it later appears
// to be LONG or DOUBLE, we should then use TOP instead.
setLocal(arg - 1, previousLocalType | TOP_IF_LONG_OR_DOUBLE_FLAG);
}
}
break;
case Opcodes.LSTORE:
case Opcodes.DSTORE:
pop(1);
abstractType1 = pop();
setLocal(arg, abstractType1);
setLocal(arg + 1, TOP);
if (arg > 0) {
int previousLocalType = getLocal(arg - 1);
if (previousLocalType == LONG || previousLocalType == DOUBLE) {
setLocal(arg - 1, TOP);
} else if ((previousLocalType & KIND_MASK) == LOCAL_KIND
|| (previousLocalType & KIND_MASK) == STACK_KIND) {
// The type of the previous local variable is not known yet, but if it later appears
// to be LONG or DOUBLE, we should then use TOP instead.
setLocal(arg - 1, previousLocalType | TOP_IF_LONG_OR_DOUBLE_FLAG);
}
}
break;
case Opcodes.IASTORE:
case Opcodes.BASTORE:
case Opcodes.CASTORE:
case Opcodes.SASTORE:
case Opcodes.FASTORE:
case Opcodes.AASTORE:
pop(3);
break;
case Opcodes.LASTORE:
case Opcodes.DASTORE:
pop(4);
break;
case Opcodes.POP:
case Opcodes.IFEQ:
case Opcodes.IFNE:
case Opcodes.IFLT:
case Opcodes.IFGE:
case Opcodes.IFGT:
case Opcodes.IFLE:
case Opcodes.IRETURN:
case Opcodes.FRETURN:
case Opcodes.ARETURN:
case Opcodes.TABLESWITCH:
case Opcodes.LOOKUPSWITCH:
case Opcodes.ATHROW:
case Opcodes.MONITORENTER:
case Opcodes.MONITOREXIT:
case Opcodes.IFNULL:
case Opcodes.IFNONNULL:
pop(1);
break;
case Opcodes.POP2:
case Opcodes.IF_ICMPEQ:
case Opcodes.IF_ICMPNE:
case Opcodes.IF_ICMPLT:
case Opcodes.IF_ICMPGE:
case Opcodes.IF_ICMPGT:
case Opcodes.IF_ICMPLE:
case Opcodes.IF_ACMPEQ:
case Opcodes.IF_ACMPNE:
case Opcodes.LRETURN:
case Opcodes.DRETURN:
pop(2);
break;
case Opcodes.DUP:
abstractType1 = pop();
push(abstractType1);
push(abstractType1);
break;
case Opcodes.DUP_X1:
abstractType1 = pop();
abstractType2 = pop();
push(abstractType1);
push(abstractType2);
push(abstractType1);
break;
case Opcodes.DUP_X2:
abstractType1 = pop();
abstractType2 = pop();
abstractType3 = pop();
push(abstractType1);
push(abstractType3);
push(abstractType2);
push(abstractType1);
break;
case Opcodes.DUP2:
abstractType1 = pop();
abstractType2 = pop();
push(abstractType2);
push(abstractType1);
push(abstractType2);
push(abstractType1);
break;
case Opcodes.DUP2_X1:
abstractType1 = pop();
abstractType2 = pop();
abstractType3 = pop();
push(abstractType2);
push(abstractType1);
push(abstractType3);
push(abstractType2);
push(abstractType1);
break;
case Opcodes.DUP2_X2:
abstractType1 = pop();
abstractType2 = pop();
abstractType3 = pop();
abstractType4 = pop();
push(abstractType2);
push(abstractType1);
push(abstractType4);
push(abstractType3);
push(abstractType2);
push(abstractType1);
break;
case Opcodes.SWAP:
abstractType1 = pop();
abstractType2 = pop();
push(abstractType1);
push(abstractType2);
break;
case Opcodes.IALOAD:
case Opcodes.BALOAD:
case Opcodes.CALOAD:
case Opcodes.SALOAD:
case Opcodes.IADD:
case Opcodes.ISUB:
case Opcodes.IMUL:
case Opcodes.IDIV:
case Opcodes.IREM:
case Opcodes.IAND:
case Opcodes.IOR:
case Opcodes.IXOR:
case Opcodes.ISHL:
case Opcodes.ISHR:
case Opcodes.IUSHR:
case Opcodes.L2I:
case Opcodes.D2I:
case Opcodes.FCMPL:
case Opcodes.FCMPG:
pop(2);
push(INTEGER);
break;
case Opcodes.LADD:
case Opcodes.LSUB:
case Opcodes.LMUL:
case Opcodes.LDIV:
case Opcodes.LREM:
case Opcodes.LAND:
case Opcodes.LOR:
case Opcodes.LXOR:
pop(4);
push(LONG);
push(TOP);
break;
case Opcodes.FALOAD:
case Opcodes.FADD:
case Opcodes.FSUB:
case Opcodes.FMUL:
case Opcodes.FDIV:
case Opcodes.FREM:
case Opcodes.L2F:
case Opcodes.D2F:
pop(2);
push(FLOAT);
break;
case Opcodes.DADD:
case Opcodes.DSUB:
case Opcodes.DMUL:
case Opcodes.DDIV:
case Opcodes.DREM:
pop(4);
push(DOUBLE);
push(TOP);
break;
case Opcodes.LSHL:
case Opcodes.LSHR:
case Opcodes.LUSHR:
pop(3);
push(LONG);
push(TOP);
break;
case Opcodes.IINC:
setLocal(arg, INTEGER);
break;
case Opcodes.I2L:
case Opcodes.F2L:
pop(1);
push(LONG);
push(TOP);
break;
case Opcodes.I2F:
pop(1);
push(FLOAT);
break;
case Opcodes.I2D:
case Opcodes.F2D:
pop(1);
push(DOUBLE);
push(TOP);
break;
case Opcodes.F2I:
case Opcodes.ARRAYLENGTH:
case Opcodes.INSTANCEOF:
pop(1);
push(INTEGER);
break;
case Opcodes.LCMP:
case Opcodes.DCMPL:
case Opcodes.DCMPG:
pop(4);
push(INTEGER);
break;
case Opcodes.JSR:
case Opcodes.RET:
throw new IllegalArgumentException("JSR/RET are not supported with computeFrames option");
case Opcodes.GETSTATIC:
push(symbolTable, argSymbol.value);
break;
case Opcodes.PUTSTATIC:
pop(argSymbol.value);
break;
case Opcodes.GETFIELD:
pop(1);
push(symbolTable, argSymbol.value);
break;
case Opcodes.PUTFIELD:
pop(argSymbol.value);
pop();
break;
case Opcodes.INVOKEVIRTUAL:
case Opcodes.INVOKESPECIAL:
case Opcodes.INVOKESTATIC:
case Opcodes.INVOKEINTERFACE:
pop(argSymbol.value);
if (opcode != Opcodes.INVOKESTATIC) {
abstractType1 = pop();
if (opcode == Opcodes.INVOKESPECIAL && argSymbol.name.charAt(0) == '<') {
addInitializedType(abstractType1);
}
}
push(symbolTable, argSymbol.value);
break;
case Opcodes.INVOKEDYNAMIC:
pop(argSymbol.value);
push(symbolTable, argSymbol.value);
break;
case Opcodes.NEW:
push(UNINITIALIZED_KIND | symbolTable.addUninitializedType(argSymbol.value, arg));
break;
case Opcodes.NEWARRAY:
pop();
switch (arg) {
case Opcodes.T_BOOLEAN:
push(ARRAY_OF | BOOLEAN);
break;
case Opcodes.T_CHAR:
push(ARRAY_OF | CHAR);
break;
case Opcodes.T_BYTE:
push(ARRAY_OF | BYTE);
break;
case Opcodes.T_SHORT:
push(ARRAY_OF | SHORT);
break;
case Opcodes.T_INT:
push(ARRAY_OF | INTEGER);
break;
case Opcodes.T_FLOAT:
push(ARRAY_OF | FLOAT);
break;
case Opcodes.T_DOUBLE:
push(ARRAY_OF | DOUBLE);
break;
case Opcodes.T_LONG:
push(ARRAY_OF | LONG);
break;
default:
throw new IllegalArgumentException();
}
break;
case Opcodes.ANEWARRAY:
String arrayElementType = argSymbol.value;
pop();
if (arrayElementType.charAt(0) == '[') {
push(symbolTable, '[' + arrayElementType);
} else {
push(ARRAY_OF | REFERENCE_KIND | symbolTable.addType(arrayElementType));
}
break;
case Opcodes.CHECKCAST:
String castType = argSymbol.value;
pop();
if (castType.charAt(0) == '[') {
push(symbolTable, castType);
} else {
push(REFERENCE_KIND | symbolTable.addType(castType));
}
break;
case Opcodes.MULTIANEWARRAY:
pop(arg);
push(symbolTable, argSymbol.value);
break;
default:
throw new IllegalArgumentException();
}
}
// -----------------------------------------------------------------------------------------------
// Frame merging methods, used in the second step of the stack map frame computation algorithm
// -----------------------------------------------------------------------------------------------
/**
* Computes the concrete output type corresponding to a given abstract output type.
*
* @param abstractOutputType an abstract output type.
* @param numStack the size of the input stack, used to resolve abstract output types of
* STACK_KIND kind.
* @return the concrete output type corresponding to 'abstractOutputType'.
*/
private int getConcreteOutputType(final int abstractOutputType, final int numStack) {
int dim = abstractOutputType & DIM_MASK;
int kind = abstractOutputType & KIND_MASK;
if (kind == LOCAL_KIND) {
// By definition, a LOCAL_KIND type designates the concrete type of a local variable at
// the beginning of the basic block corresponding to this frame (which is known when
// this method is called, but was not when the abstract type was computed).
int concreteOutputType = dim + inputLocals[abstractOutputType & VALUE_MASK];
if ((abstractOutputType & TOP_IF_LONG_OR_DOUBLE_FLAG) != 0
&& (concreteOutputType == LONG || concreteOutputType == DOUBLE)) {
concreteOutputType = TOP;
}
return concreteOutputType;
} else if (kind == STACK_KIND) {
      // By definition, a STACK_KIND type designates the concrete type of a stack element at
      // the beginning of the basic block corresponding to this frame (which is known when
// this method is called, but was not when the abstract type was computed).
int concreteOutputType = dim + inputStack[numStack - (abstractOutputType & VALUE_MASK)];
if ((abstractOutputType & TOP_IF_LONG_OR_DOUBLE_FLAG) != 0
&& (concreteOutputType == LONG || concreteOutputType == DOUBLE)) {
concreteOutputType = TOP;
}
return concreteOutputType;
} else {
return abstractOutputType;
}
}
/**
* Merges the input frame of the given {@link Frame} with the input and output frames of this
* {@link Frame}. Returns {@literal true} if the given frame has been changed by this operation
* (the input and output frames of this {@link Frame} are never changed).
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param dstFrame the {@link Frame} whose input frame must be updated. This should be the frame
* of a successor, in the control flow graph, of the basic block corresponding to this frame.
   * @param catchTypeIndex if 'dstFrame' corresponds to an exception handler basic block, the type
   *     table index of the caught exception type, otherwise 0.
   * @return {@literal true} if the input frame of 'dstFrame' has been changed by this operation.
*/
final boolean merge(
final SymbolTable symbolTable, final Frame dstFrame, final int catchTypeIndex) {
boolean frameChanged = false;
// Compute the concrete types of the local variables at the end of the basic block corresponding
// to this frame, by resolving its abstract output types, and merge these concrete types with
// those of the local variables in the input frame of dstFrame.
int numLocal = inputLocals.length;
int numStack = inputStack.length;
if (dstFrame.inputLocals == null) {
dstFrame.inputLocals = new int[numLocal];
frameChanged = true;
}
for (int i = 0; i < numLocal; ++i) {
int concreteOutputType;
if (outputLocals != null && i < outputLocals.length) {
int abstractOutputType = outputLocals[i];
if (abstractOutputType == 0) {
// If the local variable has never been assigned in this basic block, it is equal to its
// value at the beginning of the block.
concreteOutputType = inputLocals[i];
} else {
concreteOutputType = getConcreteOutputType(abstractOutputType, numStack);
}
} else {
// If the local variable has never been assigned in this basic block, it is equal to its
// value at the beginning of the block.
concreteOutputType = inputLocals[i];
}
// concreteOutputType might be an uninitialized type from the input locals or from the input
// stack. However, if a constructor has been called for this class type in the basic block,
// then this type is no longer uninitialized at the end of basic block.
if (initializations != null) {
concreteOutputType = getInitializedType(symbolTable, concreteOutputType);
}
frameChanged |= merge(symbolTable, concreteOutputType, dstFrame.inputLocals, i);
}
// If dstFrame is an exception handler block, it can be reached from any instruction of the
// basic block corresponding to this frame, in particular from the first one. Therefore, the
// input locals of dstFrame should be compatible (i.e. merged) with the input locals of this
// frame (and the input stack of dstFrame should be compatible, i.e. merged, with a one
// element stack containing the caught exception type).
if (catchTypeIndex > 0) {
for (int i = 0; i < numLocal; ++i) {
frameChanged |= merge(symbolTable, inputLocals[i], dstFrame.inputLocals, i);
}
if (dstFrame.inputStack == null) {
dstFrame.inputStack = new int[1];
frameChanged = true;
}
frameChanged |= merge(symbolTable, catchTypeIndex, dstFrame.inputStack, 0);
return frameChanged;
}
// Compute the concrete types of the stack operands at the end of the basic block corresponding
// to this frame, by resolving its abstract output types, and merge these concrete types with
// those of the stack operands in the input frame of dstFrame.
int numInputStack = inputStack.length + outputStackStart;
if (dstFrame.inputStack == null) {
dstFrame.inputStack = new int[numInputStack + outputStackTop];
frameChanged = true;
}
// First, do this for the stack operands that have not been popped in the basic block
// corresponding to this frame, and which are therefore equal to their value in the input
// frame (except for uninitialized types, which may have been initialized).
for (int i = 0; i < numInputStack; ++i) {
int concreteOutputType = inputStack[i];
if (initializations != null) {
concreteOutputType = getInitializedType(symbolTable, concreteOutputType);
}
frameChanged |= merge(symbolTable, concreteOutputType, dstFrame.inputStack, i);
}
    // Then, do this for the stack operands that have been pushed in the basic block (this code is the
// same as the one above for local variables).
for (int i = 0; i < outputStackTop; ++i) {
int abstractOutputType = outputStack[i];
int concreteOutputType = getConcreteOutputType(abstractOutputType, numStack);
if (initializations != null) {
concreteOutputType = getInitializedType(symbolTable, concreteOutputType);
}
frameChanged |=
merge(symbolTable, concreteOutputType, dstFrame.inputStack, numInputStack + i);
}
return frameChanged;
}
/**
* Merges the type at the given index in the given abstract type array with the given type.
* Returns {@literal true} if the type array has been modified by this operation.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param sourceType the abstract type with which the abstract type array element must be merged.
* This type should be of {@link #CONSTANT_KIND}, {@link #REFERENCE_KIND}, {@link
* #UNINITIALIZED_KIND} or {@link #FORWARD_UNINITIALIZED_KIND} kind, with positive or
   *     zero array dimensions.
* @param dstTypes an array of abstract types. These types should be of {@link #CONSTANT_KIND},
* {@link #REFERENCE_KIND}, {@link #UNINITIALIZED_KIND} or {@link #FORWARD_UNINITIALIZED_KIND}
   *     kind, with positive or zero array dimensions.
* @param dstIndex the index of the type that must be merged in dstTypes.
* @return {@literal true} if the type array has been modified by this operation.
*/
private static boolean merge(
final SymbolTable symbolTable,
final int sourceType,
final int[] dstTypes,
final int dstIndex) {
int dstType = dstTypes[dstIndex];
if (dstType == sourceType) {
// If the types are equal, merge(sourceType, dstType) = dstType, so there is no change.
return false;
}
int srcType = sourceType;
if ((sourceType & ~DIM_MASK) == NULL) {
if (dstType == NULL) {
return false;
}
srcType = NULL;
}
if (dstType == 0) {
// If dstTypes[dstIndex] has never been assigned, merge(srcType, dstType) = srcType.
dstTypes[dstIndex] = srcType;
return true;
}
int mergedType;
if ((dstType & DIM_MASK) != 0 || (dstType & KIND_MASK) == REFERENCE_KIND) {
// If dstType is a reference type of any array dimension.
if (srcType == NULL) {
// If srcType is the NULL type, merge(srcType, dstType) = dstType, so there is no change.
return false;
} else if ((srcType & (DIM_MASK | KIND_MASK)) == (dstType & (DIM_MASK | KIND_MASK))) {
// If srcType has the same array dimension and the same kind as dstType.
if ((dstType & KIND_MASK) == REFERENCE_KIND) {
// If srcType and dstType are reference types with the same array dimension,
// merge(srcType, dstType) = dim(srcType) | common super class of srcType and dstType.
mergedType =
(srcType & DIM_MASK)
| REFERENCE_KIND
| symbolTable.addMergedType(srcType & VALUE_MASK, dstType & VALUE_MASK);
} else {
// If srcType and dstType are array types of equal dimension but different element types,
// merge(srcType, dstType) = dim(srcType) - 1 | java/lang/Object.
int mergedDim = ELEMENT_OF + (srcType & DIM_MASK);
mergedType = mergedDim | REFERENCE_KIND | symbolTable.addType("java/lang/Object");
}
} else if ((srcType & DIM_MASK) != 0 || (srcType & KIND_MASK) == REFERENCE_KIND) {
// If srcType is any other reference or array type,
      // merge(srcType, dstType) = min(srcDim, dstDim) | java/lang/Object
// where srcDim is the array dimension of srcType, minus 1 if srcType is an array type
// with a non reference element type (and similarly for dstDim).
int srcDim = srcType & DIM_MASK;
if (srcDim != 0 && (srcType & KIND_MASK) != REFERENCE_KIND) {
srcDim = ELEMENT_OF + srcDim;
}
int dstDim = dstType & DIM_MASK;
if (dstDim != 0 && (dstType & KIND_MASK) != REFERENCE_KIND) {
dstDim = ELEMENT_OF + dstDim;
}
mergedType =
Math.min(srcDim, dstDim) | REFERENCE_KIND | symbolTable.addType("java/lang/Object");
} else {
// If srcType is any other type, merge(srcType, dstType) = TOP.
mergedType = TOP;
}
} else if (dstType == NULL) {
      // If dstType is the NULL type, merge(srcType, dstType) = srcType, or TOP if srcType is
      // not an array type or a reference type.
mergedType =
(srcType & DIM_MASK) != 0 || (srcType & KIND_MASK) == REFERENCE_KIND ? srcType : TOP;
} else {
// If dstType is any other type, merge(srcType, dstType) = TOP whatever srcType.
mergedType = TOP;
}
if (mergedType != dstType) {
dstTypes[dstIndex] = mergedType;
return true;
}
return false;
}
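  // Editor's note (illustrative, derived from the rules above): merging two REFERENCE_KIND
  // types of equal dimension yields their common super class (e.g. java/lang/Integer merged
  // with java/lang/Long gives java/lang/Number), while merging arrays of equal dimension but
  // different primitive element types (e.g. [I with [J) yields plain java/lang/Object.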
// -----------------------------------------------------------------------------------------------
// Frame output methods, to generate StackMapFrame attributes
// -----------------------------------------------------------------------------------------------
/**
* Makes the given {@link MethodWriter} visit the input frame of this {@link Frame}. The visit is
* done with the {@link MethodWriter#visitFrameStart}, {@link MethodWriter#visitAbstractType} and
* {@link MethodWriter#visitFrameEnd} methods.
*
* @param methodWriter the {@link MethodWriter} that should visit the input frame of this {@link
* Frame}.
*/
final void accept(final MethodWriter methodWriter) {
// Compute the number of locals, ignoring TOP types that are just after a LONG or a DOUBLE, and
// all trailing TOP types.
int[] localTypes = inputLocals;
int numLocal = 0;
int numTrailingTop = 0;
int i = 0;
while (i < localTypes.length) {
int localType = localTypes[i];
i += (localType == LONG || localType == DOUBLE) ? 2 : 1;
if (localType == TOP) {
numTrailingTop++;
} else {
numLocal += numTrailingTop + 1;
numTrailingTop = 0;
}
}
// Compute the stack size, ignoring TOP types that are just after a LONG or a DOUBLE.
int[] stackTypes = inputStack;
int numStack = 0;
i = 0;
while (i < stackTypes.length) {
int stackType = stackTypes[i];
i += (stackType == LONG || stackType == DOUBLE) ? 2 : 1;
numStack++;
}
// Visit the frame and its content.
int frameIndex = methodWriter.visitFrameStart(owner.bytecodeOffset, numLocal, numStack);
i = 0;
while (numLocal-- > 0) {
int localType = localTypes[i];
i += (localType == LONG || localType == DOUBLE) ? 2 : 1;
methodWriter.visitAbstractType(frameIndex++, localType);
}
i = 0;
while (numStack-- > 0) {
int stackType = stackTypes[i];
i += (stackType == LONG || stackType == DOUBLE) ? 2 : 1;
methodWriter.visitAbstractType(frameIndex++, stackType);
}
methodWriter.visitFrameEnd();
}
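  // Editor's note (illustrative): with input locals [ C, LONG, TOP, TOP, TOP ], the first loop
  // above counts numLocal = 2: the TOP paired with the LONG is skipped by the "i += 2" step,
  // and the two trailing TOPs are dropped because no non-TOP type follows them.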
/**
* Put the given abstract type in the given ByteVector, using the JVMS verification_type_info
* format used in StackMapTable attributes.
*
* @param symbolTable the type table to use to lookup and store type {@link Symbol}.
* @param abstractType an abstract type, restricted to {@link Frame#CONSTANT_KIND}, {@link
* Frame#REFERENCE_KIND}, {@link Frame#UNINITIALIZED_KIND} or {@link
* Frame#FORWARD_UNINITIALIZED_KIND} types.
* @param output where the abstract type must be put.
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.4">JVMS
* 4.7.4</a>
*/
static void putAbstractType(
final SymbolTable symbolTable, final int abstractType, final ByteVector output) {
int arrayDimensions = (abstractType & Frame.DIM_MASK) >> DIM_SHIFT;
if (arrayDimensions == 0) {
int typeValue = abstractType & VALUE_MASK;
switch (abstractType & KIND_MASK) {
case CONSTANT_KIND:
output.putByte(typeValue);
break;
case REFERENCE_KIND:
output
.putByte(ITEM_OBJECT)
.putShort(symbolTable.addConstantClass(symbolTable.getType(typeValue).value).index);
break;
case UNINITIALIZED_KIND:
output.putByte(ITEM_UNINITIALIZED).putShort((int) symbolTable.getType(typeValue).data);
break;
case FORWARD_UNINITIALIZED_KIND:
output.putByte(ITEM_UNINITIALIZED);
symbolTable.getForwardUninitializedLabel(typeValue).put(output);
break;
default:
throw new AssertionError();
}
} else {
// Case of an array type, we need to build its descriptor first.
StringBuilder typeDescriptor = new StringBuilder(32); // SPRING PATCH: larger initial size
while (arrayDimensions-- > 0) {
typeDescriptor.append('[');
}
if ((abstractType & KIND_MASK) == REFERENCE_KIND) {
typeDescriptor
.append('L')
.append(symbolTable.getType(abstractType & VALUE_MASK).value)
.append(';');
} else {
switch (abstractType & VALUE_MASK) {
case Frame.ITEM_ASM_BOOLEAN:
typeDescriptor.append('Z');
break;
case Frame.ITEM_ASM_BYTE:
typeDescriptor.append('B');
break;
case Frame.ITEM_ASM_CHAR:
typeDescriptor.append('C');
break;
case Frame.ITEM_ASM_SHORT:
typeDescriptor.append('S');
break;
case Frame.ITEM_INTEGER:
typeDescriptor.append('I');
break;
case Frame.ITEM_FLOAT:
typeDescriptor.append('F');
break;
case Frame.ITEM_LONG:
typeDescriptor.append('J');
break;
case Frame.ITEM_DOUBLE:
typeDescriptor.append('D');
break;
default:
throw new AssertionError();
}
}
output
.putByte(ITEM_OBJECT)
.putShort(symbolTable.addConstantClass(typeDescriptor.toString()).index);
}
}
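  // Editor's note (illustrative): an abstract type with DIM = 1 and VALUE = ITEM_INTEGER (an
  // int[] array) takes the array branch above, builds the descriptor "[I", and is written as
  // ITEM_OBJECT followed by the constant pool index of the class "[I".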
}
| spring-projects/spring-framework | spring-core/src/main/java/org/springframework/asm/Frame.java |
521 | import com.twilio.sdk.TwilioRestClient;
import com.twilio.sdk.TwilioRestException;
import com.twilio.sdk.resource.factory.MessageFactory;
import com.twilio.sdk.resource.instance.Message;
import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
// Prerequisites: the Apache HttpComponents and Twilio Java helper libraries.
public class SmackMyBitch {
public static final String ACCOUNT_SID = System.getenv("TWILIO_ACCOUNT_SID");
public static final String AUTH_TOKEN = System.getenv("TWILIO_AUTH_TOKEN");
public static final String YOUR_NUMBER = "1231231231";
public static final String HER_NUMBER = "3213213213";
public static void main(String[] args) throws TwilioRestException {
TwilioRestClient client = new TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN);
String[] randomMessages = {
"Working hard",
"Gotta ship this feature",
"Someone fucked the system again",
};
int randomIndex = new Random().nextInt(randomMessages.length);
    String finalMessage = randomMessages[randomIndex];
List<NameValuePair> params = new ArrayList<NameValuePair>();
params.add(new BasicNameValuePair("Body", "Late at work. " + finalMessage));
params.add(new BasicNameValuePair("From", YOUR_NUMBER));
params.add(new BasicNameValuePair("To", HER_NUMBER));
MessageFactory messageFactory = client.getAccount().getMessageFactory();
Message message = messageFactory.create(params);
System.out.println(message.getSid());
}
} | NARKOZ/hacker-scripts | java/SmackMyBitch.java |
530 | package com.thealgorithms.others;
final class Sudoku {
private Sudoku() {
}
public static boolean isSafe(int[][] board, int row, int col, int num) {
        // Check the row for a clash with 'num' (row-clash).
for (int d = 0; d < board.length; d++) {
            // If the number we are trying to place is already
            // present in this row, placement is unsafe.
if (board[row][d] == num) {
return false;
}
}
        // Check the column for a clash with 'num' (column-clash).
for (int r = 0; r < board.length; r++) {
            // If the number we are trying to place is already
            // present in this column, placement is unsafe.
if (board[r][col] == num) {
return false;
}
}
        // Check the corresponding sub-box for a clash with 'num' (box-clash).
int sqrt = (int) Math.sqrt(board.length);
int boxRowStart = row - row % sqrt;
int boxColStart = col - col % sqrt;
for (int r = boxRowStart; r < boxRowStart + sqrt; r++) {
for (int d = boxColStart; d < boxColStart + sqrt; d++) {
if (board[r][d] == num) {
return false;
}
}
}
// if there is no clash, it's safe
return true;
}
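    // Editor's note (illustrative): on a 9x9 board, sqrt = 3, so for row = 4 and col = 7 the
    // sub-box scanned above starts at boxRowStart = 3 and boxColStart = 6, i.e. the
    // middle-right 3x3 box.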
public static boolean solveSudoku(int[][] board, int n) {
int row = -1;
int col = -1;
boolean isEmpty = true;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (board[i][j] == 0) {
row = i;
col = j;
                    // There is still at least one empty
                    // cell left to fill in the Sudoku.
isEmpty = false;
break;
}
}
if (!isEmpty) {
break;
}
}
// No empty space left
if (isEmpty) {
return true;
}
        // Otherwise, try each candidate value for this cell, backtracking on failure.
for (int num = 1; num <= n; num++) {
if (isSafe(board, row, col, num)) {
board[row][col] = num;
if (solveSudoku(board, n)) {
// print(board, n);
return true;
} else {
                    // undo the assignment and backtrack
board[row][col] = 0;
}
}
}
return false;
}
public static void print(int[][] board, int N) {
// We got the answer, just print it
for (int r = 0; r < N; r++) {
for (int d = 0; d < N; d++) {
System.out.print(board[r][d]);
System.out.print(" ");
}
System.out.print("\n");
if ((r + 1) % (int) Math.sqrt(N) == 0) {
System.out.print("");
}
}
}
// Driver Code
public static void main(String[] args) {
int[][] board = new int[][] {
{3, 0, 6, 5, 0, 8, 4, 0, 0},
{5, 2, 0, 0, 0, 0, 0, 0, 0},
{0, 8, 7, 0, 0, 0, 0, 3, 1},
{0, 0, 3, 0, 1, 0, 0, 8, 0},
{9, 0, 0, 8, 6, 3, 0, 0, 5},
{0, 5, 0, 0, 9, 0, 6, 0, 0},
{1, 3, 0, 0, 0, 0, 2, 5, 0},
{0, 0, 0, 0, 0, 0, 0, 7, 4},
{0, 0, 5, 2, 0, 6, 3, 0, 0},
};
int N = board.length;
if (solveSudoku(board, N)) {
// print solution
print(board, N);
} else {
System.out.println("No solution");
}
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/others/Sudoku.java |
533 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.gameloop;
import lombok.Getter;
import lombok.Setter;
/**
* Bullet object class.
*/
public class Bullet {
@Getter
@Setter
private float position;
public Bullet() {
position = 0.0f;
}
}
| iluwatar/java-design-patterns | game-loop/src/main/java/com/iluwatar/gameloop/Bullet.java |
537 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.monostate;
/**
* The Request record. A {@link Server} can handle an instance of a Request.
*/
public record Request(String value) {}
| iluwatar/java-design-patterns | monostate/src/main/java/com/iluwatar/monostate/Request.java |
541 | package com.thealgorithms.others;
/**
 * Dijkstra's algorithm is a graph search algorithm that solves the
* single-source shortest path problem for a graph with nonnegative edge path
* costs, producing a shortest path tree.
*
* <p>
* NOTE: The inputs to Dijkstra's algorithm are a directed and weighted graph
* consisting of 2 or more nodes, generally represented by an adjacency matrix
* or list, and a start node.
*
* <p>
* Original source of code:
* https://rosettacode.org/wiki/Dijkstra%27s_algorithm#Java Also most of the
* comments are from RosettaCode.
*/
import java.util.HashMap;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeSet;
public final class Dijkstra {
private Dijkstra() {
}
private static final Graph.Edge[] GRAPH = {
// Distance from node "a" to node "b" is 7.
        // In the current graph there is no way to move the other way (e.g., from "b" to "a");
        // a new edge would be needed for that.
new Graph.Edge("a", "b", 7),
new Graph.Edge("a", "c", 9),
new Graph.Edge("a", "f", 14),
new Graph.Edge("b", "c", 10),
new Graph.Edge("b", "d", 15),
new Graph.Edge("c", "d", 11),
new Graph.Edge("c", "f", 2),
new Graph.Edge("d", "e", 6),
new Graph.Edge("e", "f", 9),
};
private static final String START = "a";
private static final String END = "e";
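    // Editor's note: with the directed edges above, running main prints the shortest path
    // "a -> c(9) -> d(20) -> e(26)".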
/**
* main function Will run the code with "GRAPH" that was defined above.
*/
public static void main(String[] args) {
Graph g = new Graph(GRAPH);
g.dijkstra(START);
g.printPath(END);
// g.printAllPaths();
}
}
class Graph {
// mapping of vertex names to Vertex objects, built from a set of Edges
private final Map<String, Vertex> graph;
/**
* One edge of the graph (only used by Graph constructor)
*/
public static class Edge {
public final String v1, v2;
public final int dist;
Edge(String v1, String v2, int dist) {
this.v1 = v1;
this.v2 = v2;
this.dist = dist;
}
}
/**
* One vertex of the graph, complete with mappings to neighbouring vertices
*/
public static class Vertex implements Comparable<Vertex> {
public final String name;
// MAX_VALUE assumed to be infinity
public int dist = Integer.MAX_VALUE;
public Vertex previous = null;
public final Map<Vertex, Integer> neighbours = new HashMap<>();
Vertex(String name) {
this.name = name;
}
private void printPath() {
if (this == this.previous) {
System.out.printf("%s", this.name);
} else if (this.previous == null) {
System.out.printf("%s(unreached)", this.name);
} else {
this.previous.printPath();
System.out.printf(" -> %s(%d)", this.name, this.dist);
}
}
public int compareTo(Vertex other) {
if (dist == other.dist) {
return name.compareTo(other.name);
}
return Integer.compare(dist, other.dist);
}
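        // Editor's note: ties on distance are broken by name so that compareTo never returns 0
        // for two distinct vertices; a TreeSet would otherwise treat equal-distance vertices as
        // duplicates and silently drop one of them.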
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
Vertex vertex = (Vertex) object;
if (dist != vertex.dist) {
return false;
}
if (name != null ? !name.equals(vertex.name) : vertex.name != null) {
return false;
}
if (previous != null ? !previous.equals(vertex.previous) : vertex.previous != null) {
return false;
}
return neighbours != null ? neighbours.equals(vertex.neighbours) : vertex.neighbours == null;
}
@Override
public int hashCode() {
int result = 17; // fixed seed; super.hashCode() is identity-based and would break equals/hashCode consistency
result = 31 * result + (name != null ? name.hashCode() : 0);
result = 31 * result + dist;
result = 31 * result + (previous != null ? previous.hashCode() : 0);
result = 31 * result + (neighbours != null ? neighbours.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "(" + name + ", " + dist + ")";
}
}
/**
* Builds a graph from a set of edges
*/
Graph(Edge[] edges) {
graph = new HashMap<>(edges.length);
// one pass to find all vertices
for (Edge e : edges) {
if (!graph.containsKey(e.v1)) {
graph.put(e.v1, new Vertex(e.v1));
}
if (!graph.containsKey(e.v2)) {
graph.put(e.v2, new Vertex(e.v2));
}
}
// another pass to set neighbouring vertices
for (Edge e : edges) {
graph.get(e.v1).neighbours.put(graph.get(e.v2), e.dist);
// graph.get(e.v2).neighbours.put(graph.get(e.v1), e.dist); // also do this for an
// undirected graph
}
}
/**
* Runs dijkstra using a specified source vertex
*/
public void dijkstra(String startName) {
if (!graph.containsKey(startName)) {
System.err.printf("Graph doesn't contain start vertex \"%s\"%n", startName);
return;
}
final Vertex source = graph.get(startName);
NavigableSet<Vertex> q = new TreeSet<>();
// set-up vertices
for (Vertex v : graph.values()) {
v.previous = v == source ? source : null;
v.dist = v == source ? 0 : Integer.MAX_VALUE;
q.add(v);
}
dijkstra(q);
}
/**
* Implementation of Dijkstra's algorithm using a {@link TreeSet} as the priority queue.
*/
private void dijkstra(final NavigableSet<Vertex> q) {
Vertex u, v;
while (!q.isEmpty()) {
// vertex with shortest distance (first iteration will return source)
u = q.pollFirst();
if (u.dist == Integer.MAX_VALUE) {
break; // we can ignore u (and any other remaining vertices) since they are
// unreachable
}
// look at distances to each neighbour
for (Map.Entry<Vertex, Integer> a : u.neighbours.entrySet()) {
v = a.getKey(); // the neighbour in this iteration
final int alternateDist = u.dist + a.getValue();
if (alternateDist < v.dist) { // shorter path to neighbour found
q.remove(v);
v.dist = alternateDist;
v.previous = u;
q.add(v);
}
}
}
}
/**
* Prints a path from the source to the specified vertex
*/
public void printPath(String endName) {
if (!graph.containsKey(endName)) {
System.err.printf("Graph doesn't contain end vertex \"%s\"%n", endName);
return;
}
graph.get(endName).printPath();
System.out.println();
}
/**
* Prints the path from the source to every vertex (output order is not
* guaranteed)
*/
public void printAllPaths() {
for (Vertex v : graph.values()) {
v.printPath();
System.out.println();
}
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/others/Dijkstra.java |
542 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.flux.action;
import lombok.Getter;
/**
* ContentAction is a concrete action.
*/
public class ContentAction extends Action {
@Getter
private final Content content;
public ContentAction(Content content) {
super(ActionType.CONTENT_CHANGED);
this.content = content;
}
}
| iluwatar/java-design-patterns | flux/src/main/java/com/iluwatar/flux/action/ContentAction.java |
544 | package com.thealgorithms.others;
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
/**
* The Mandelbrot set is the set of complex numbers "c" for which the series
* "z_(n+1) = z_n * z_n + c" does not diverge, i.e. remains bounded. Thus, a
* complex number "c" is a member of the Mandelbrot set if, when starting with
* "z_0 = 0" and applying the iteration repeatedly, the absolute value of "z_n"
* remains bounded for all "n > 0". Complex numbers can be written as "a + b*i":
* "a" is the real component, usually drawn on the x-axis, and "b*i" is the
* imaginary component, usually drawn on the y-axis. Most visualizations of the
* Mandelbrot set use a color-coding to indicate after how many steps in the
* series the numbers outside the set cross the divergence threshold. Images of
* the Mandelbrot set exhibit an elaborate and infinitely complicated boundary
* that reveals progressively ever-finer recursive detail at increasing
* magnifications, making the boundary of the Mandelbrot set a fractal curve.
* (description adapted from https://en.wikipedia.org/wiki/Mandelbrot_set ) (see
* also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set
* )
*/
public final class Mandelbrot {
private Mandelbrot() {
}
public static void main(String[] args) {
// Test black and white
BufferedImage blackAndWhiteImage = getImage(800, 600, -0.6, 0, 3.2, 50, false);
// Pixel outside the Mandelbrot set should be white.
assert blackAndWhiteImage.getRGB(0, 0) == new Color(255, 255, 255).getRGB();
// Pixel inside the Mandelbrot set should be black.
assert blackAndWhiteImage.getRGB(400, 300) == new Color(0, 0, 0).getRGB();
// Test color-coding
BufferedImage coloredImage = getImage(800, 600, -0.6, 0, 3.2, 50, true);
// Pixel distant to the Mandelbrot set should be red.
assert coloredImage.getRGB(0, 0) == new Color(255, 0, 0).getRGB();
// Pixel inside the Mandelbrot set should be black.
assert coloredImage.getRGB(400, 300) == new Color(0, 0, 0).getRGB();
// Save image
try {
ImageIO.write(coloredImage, "png", new File("Mandelbrot.png"));
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Method to generate the image of the Mandelbrot set. Two types of
* coordinates are used: image-coordinates that refer to the pixels and
* figure-coordinates that refer to the complex numbers inside and outside
* the Mandelbrot set. The figure-coordinates in the arguments of this
* method determine which section of the Mandelbrot set is viewed. The main
* area of the Mandelbrot set is roughly between "-1.5 < x < 0.5" and "-1 <
* y < 1" in the figure-coordinates.
*
* @param imageWidth The width of the rendered image.
* @param imageHeight The height of the rendered image.
* @param figureCenterX The x-coordinate of the center of the figure.
* @param figureCenterY The y-coordinate of the center of the figure.
* @param figureWidth The width of the figure.
* @param maxStep Maximum number of steps to check for divergent behavior.
* @param useDistanceColorCoding Render in color or black and white.
* @return The image of the rendered Mandelbrot set.
*/
public static BufferedImage getImage(int imageWidth, int imageHeight, double figureCenterX, double figureCenterY, double figureWidth, int maxStep, boolean useDistanceColorCoding) {
if (imageWidth <= 0) {
throw new IllegalArgumentException("imageWidth should be greater than zero");
}
if (imageHeight <= 0) {
throw new IllegalArgumentException("imageHeight should be greater than zero");
}
if (maxStep <= 0) {
throw new IllegalArgumentException("maxStep should be greater than zero");
}
BufferedImage image = new BufferedImage(imageWidth, imageHeight, BufferedImage.TYPE_INT_RGB);
double figureHeight = figureWidth / imageWidth * imageHeight;
// loop through the image-coordinates
for (int imageX = 0; imageX < imageWidth; imageX++) {
for (int imageY = 0; imageY < imageHeight; imageY++) {
// determine the figure-coordinates based on the image-coordinates
double figureX = figureCenterX + ((double) imageX / imageWidth - 0.5) * figureWidth;
double figureY = figureCenterY + ((double) imageY / imageHeight - 0.5) * figureHeight;
double distance = getDistance(figureX, figureY, maxStep);
// color the corresponding pixel based on the selected coloring-function
image.setRGB(imageX, imageY, useDistanceColorCoding ? colorCodedColorMap(distance).getRGB() : blackAndWhiteColorMap(distance).getRGB());
}
}
return image;
}
/**
* Black and white color-coding that ignores the relative distance. The
* Mandelbrot set is black, everything else is white.
*
* @param distance Distance until divergence threshold
* @return The color corresponding to the distance.
*/
private static Color blackAndWhiteColorMap(double distance) {
return distance >= 1 ? new Color(0, 0, 0) : new Color(255, 255, 255);
}
/**
* Color-coding taking the relative distance into account. The Mandelbrot
* set is black.
*
* @param distance Distance until divergence threshold.
* @return The color corresponding to the distance.
*/
private static Color colorCodedColorMap(double distance) {
if (distance >= 1) {
return new Color(0, 0, 0);
} else {
// simplified transformation of HSV to RGB
// distance determines hue
double hue = 360 * distance;
double saturation = 1;
double val = 255;
int hi = (int) (Math.floor(hue / 60)) % 6;
double f = hue / 60 - Math.floor(hue / 60);
int v = (int) val;
int p = 0;
int q = (int) (val * (1 - f * saturation));
int t = (int) (val * (1 - (1 - f) * saturation));
switch (hi) {
case 0:
return new Color(v, t, p);
case 1:
return new Color(q, v, p);
case 2:
return new Color(p, v, t);
case 3:
return new Color(p, q, v);
case 4:
return new Color(t, p, v);
default:
return new Color(v, p, q);
}
}
}
/**
* Return the relative distance (ratio of steps taken to maxStep) after
* which the complex number constituted by this x-y-pair diverges. Members
* of the Mandelbrot set do not diverge so their distance is 1.
*
* @param figureX The x-coordinate within the figure.
* @param figureY The y-coordinate within the figure.
* @param maxStep Maximum number of steps to check for divergent behavior.
* @return The relative distance as the ratio of steps taken to maxStep.
*/
private static double getDistance(double figureX, double figureY, int maxStep) {
double a = figureX;
double b = figureY;
int currentStep = 0;
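// Iterate z := z^2 + c in real/imaginary parts: with z = a + b*i and c = figureX + figureY*i,
// z^2 + c = (a*a - b*b + figureX) + (2*a*b + figureY)*i.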
for (int step = 0; step < maxStep; step++) {
currentStep = step;
double aNew = a * a - b * b + figureX;
b = 2 * a * b + figureY;
a = aNew;
// divergence happens for every complex number with an absolute value
// greater than 2, i.e. a squared magnitude greater than 4 (= divergence threshold)
if (a * a + b * b > 4) {
break;
}
}
return (double) currentStep / (maxStep - 1);
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/others/Mandelbrot.java |
546 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.VersionId;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Assertions;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
public class Version implements VersionId<Version>, ToXContentFragment {
/*
* The logic for ID is: XXYYZZAA, where XX is the major version, YY is the minor version, ZZ is the revision, and AA is the
* alpha/beta/rc indicator. AA values below 25 are alpha builds (since 5.0), values from 25 to 49 are beta builds, and values
* from 50 to 98 are RC builds, with 99 indicating a release. The (internal) format of the id is there so we can easily do
* after/before checks on the id.
*
* IMPORTANT: Unreleased vs. Released Versions
*
* All listed versions MUST be released versions, except the last major, the last minor and the last revision. ONLY those are
* required as unreleased versions.
*
* Example: assume the last release is 7.3.0
* The unreleased last major is the next major release, e.g. _8_.0.0
* The unreleased last minor is the current major with an upped minor: 7._4_.0
* The unreleased revision is the very same release with an upped revision: 7.3._1_
*/
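// Example: V_8_5_2 below uses id 8_05_02_99, i.e. major 8, minor 05, revision 02, release indicator 99.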
public static final int V_EMPTY_ID = 0;
public static final Version V_EMPTY = new Version(V_EMPTY_ID);
public static final Version V_7_0_0 = new Version(7_00_00_99);
public static final Version V_7_0_1 = new Version(7_00_01_99);
public static final Version V_7_1_0 = new Version(7_01_00_99);
public static final Version V_7_1_1 = new Version(7_01_01_99);
public static final Version V_7_2_0 = new Version(7_02_00_99);
public static final Version V_7_2_1 = new Version(7_02_01_99);
public static final Version V_7_3_0 = new Version(7_03_00_99);
public static final Version V_7_3_1 = new Version(7_03_01_99);
public static final Version V_7_3_2 = new Version(7_03_02_99);
public static final Version V_7_4_0 = new Version(7_04_00_99);
public static final Version V_7_4_1 = new Version(7_04_01_99);
public static final Version V_7_4_2 = new Version(7_04_02_99);
public static final Version V_7_5_0 = new Version(7_05_00_99);
public static final Version V_7_5_1 = new Version(7_05_01_99);
public static final Version V_7_5_2 = new Version(7_05_02_99);
public static final Version V_7_6_0 = new Version(7_06_00_99);
public static final Version V_7_6_1 = new Version(7_06_01_99);
public static final Version V_7_6_2 = new Version(7_06_02_99);
public static final Version V_7_7_0 = new Version(7_07_00_99);
public static final Version V_7_7_1 = new Version(7_07_01_99);
public static final Version V_7_8_0 = new Version(7_08_00_99);
public static final Version V_7_8_1 = new Version(7_08_01_99);
public static final Version V_7_9_0 = new Version(7_09_00_99);
public static final Version V_7_9_1 = new Version(7_09_01_99);
public static final Version V_7_9_2 = new Version(7_09_02_99);
public static final Version V_7_9_3 = new Version(7_09_03_99);
public static final Version V_7_10_0 = new Version(7_10_00_99);
public static final Version V_7_10_1 = new Version(7_10_01_99);
public static final Version V_7_10_2 = new Version(7_10_02_99);
public static final Version V_7_11_0 = new Version(7_11_00_99);
public static final Version V_7_11_1 = new Version(7_11_01_99);
public static final Version V_7_11_2 = new Version(7_11_02_99);
public static final Version V_7_12_0 = new Version(7_12_00_99);
public static final Version V_7_12_1 = new Version(7_12_01_99);
public static final Version V_7_13_0 = new Version(7_13_00_99);
public static final Version V_7_13_1 = new Version(7_13_01_99);
public static final Version V_7_13_2 = new Version(7_13_02_99);
public static final Version V_7_13_3 = new Version(7_13_03_99);
public static final Version V_7_13_4 = new Version(7_13_04_99);
public static final Version V_7_14_0 = new Version(7_14_00_99);
public static final Version V_7_14_1 = new Version(7_14_01_99);
public static final Version V_7_14_2 = new Version(7_14_02_99);
public static final Version V_7_15_0 = new Version(7_15_00_99);
public static final Version V_7_15_1 = new Version(7_15_01_99);
public static final Version V_7_15_2 = new Version(7_15_02_99);
public static final Version V_7_16_0 = new Version(7_16_00_99);
public static final Version V_7_16_1 = new Version(7_16_01_99);
public static final Version V_7_16_2 = new Version(7_16_02_99);
public static final Version V_7_16_3 = new Version(7_16_03_99);
public static final Version V_7_17_0 = new Version(7_17_00_99);
public static final Version V_7_17_1 = new Version(7_17_01_99);
public static final Version V_7_17_2 = new Version(7_17_02_99);
public static final Version V_7_17_3 = new Version(7_17_03_99);
public static final Version V_7_17_4 = new Version(7_17_04_99);
public static final Version V_7_17_5 = new Version(7_17_05_99);
public static final Version V_7_17_6 = new Version(7_17_06_99);
public static final Version V_7_17_7 = new Version(7_17_07_99);
public static final Version V_7_17_8 = new Version(7_17_08_99);
public static final Version V_7_17_9 = new Version(7_17_09_99);
public static final Version V_7_17_10 = new Version(7_17_10_99);
public static final Version V_7_17_11 = new Version(7_17_11_99);
public static final Version V_7_17_12 = new Version(7_17_12_99);
public static final Version V_7_17_13 = new Version(7_17_13_99);
public static final Version V_7_17_14 = new Version(7_17_14_99);
public static final Version V_7_17_15 = new Version(7_17_15_99);
public static final Version V_7_17_16 = new Version(7_17_16_99);
public static final Version V_7_17_17 = new Version(7_17_17_99);
public static final Version V_7_17_18 = new Version(7_17_18_99);
public static final Version V_7_17_19 = new Version(7_17_19_99);
public static final Version V_7_17_20 = new Version(7_17_20_99);
public static final Version V_7_17_21 = new Version(7_17_21_99);
public static final Version V_7_17_22 = new Version(7_17_22_99);
public static final Version V_8_0_0 = new Version(8_00_00_99);
public static final Version V_8_0_1 = new Version(8_00_01_99);
public static final Version V_8_1_0 = new Version(8_01_00_99);
public static final Version V_8_1_1 = new Version(8_01_01_99);
public static final Version V_8_1_2 = new Version(8_01_02_99);
public static final Version V_8_1_3 = new Version(8_01_03_99);
public static final Version V_8_2_0 = new Version(8_02_00_99);
public static final Version V_8_2_1 = new Version(8_02_01_99);
public static final Version V_8_2_2 = new Version(8_02_02_99);
public static final Version V_8_2_3 = new Version(8_02_03_99);
public static final Version V_8_3_0 = new Version(8_03_00_99);
public static final Version V_8_3_1 = new Version(8_03_01_99);
public static final Version V_8_3_2 = new Version(8_03_02_99);
public static final Version V_8_3_3 = new Version(8_03_03_99);
public static final Version V_8_4_0 = new Version(8_04_00_99);
public static final Version V_8_4_1 = new Version(8_04_01_99);
public static final Version V_8_4_2 = new Version(8_04_02_99);
public static final Version V_8_4_3 = new Version(8_04_03_99);
public static final Version V_8_5_0 = new Version(8_05_00_99);
public static final Version V_8_5_1 = new Version(8_05_01_99);
public static final Version V_8_5_2 = new Version(8_05_02_99);
public static final Version V_8_5_3 = new Version(8_05_03_99);
public static final Version V_8_6_0 = new Version(8_06_00_99);
public static final Version V_8_6_1 = new Version(8_06_01_99);
public static final Version V_8_6_2 = new Version(8_06_02_99);
public static final Version V_8_7_0 = new Version(8_07_00_99);
public static final Version V_8_7_1 = new Version(8_07_01_99);
public static final Version V_8_8_0 = new Version(8_08_00_99);
public static final Version V_8_8_1 = new Version(8_08_01_99);
public static final Version V_8_8_2 = new Version(8_08_02_99);
public static final Version V_8_9_0 = new Version(8_09_00_99);
public static final Version V_8_9_1 = new Version(8_09_01_99);
public static final Version V_8_9_2 = new Version(8_09_02_99);
public static final Version V_8_10_0 = new Version(8_10_00_99);
public static final Version V_8_10_1 = new Version(8_10_01_99);
public static final Version V_8_10_2 = new Version(8_10_02_99);
public static final Version V_8_10_3 = new Version(8_10_03_99);
public static final Version V_8_10_4 = new Version(8_10_04_99);
public static final Version V_8_11_0 = new Version(8_11_00_99);
public static final Version V_8_11_1 = new Version(8_11_01_99);
public static final Version V_8_11_2 = new Version(8_11_02_99);
public static final Version V_8_11_3 = new Version(8_11_03_99);
public static final Version V_8_11_4 = new Version(8_11_04_99);
public static final Version V_8_12_0 = new Version(8_12_00_99);
public static final Version V_8_12_1 = new Version(8_12_01_99);
public static final Version V_8_12_2 = new Version(8_12_02_99);
public static final Version V_8_13_0 = new Version(8_13_00_99);
public static final Version V_8_13_1 = new Version(8_13_01_99);
public static final Version V_8_13_2 = new Version(8_13_02_99);
public static final Version V_8_13_3 = new Version(8_13_03_99);
public static final Version V_8_13_4 = new Version(8_13_04_99);
public static final Version V_8_13_5 = new Version(8_13_05_99);
public static final Version V_8_14_0 = new Version(8_14_00_99);
public static final Version V_8_15_0 = new Version(8_15_00_99);
public static final Version CURRENT = V_8_15_0;
private static final NavigableMap<Integer, Version> VERSION_IDS;
private static final Map<String, Version> VERSION_STRINGS;
static {
final NavigableMap<Integer, Version> builder = new TreeMap<>();
final Map<String, Version> builderByString = new HashMap<>();
for (final Field declaredField : Version.class.getFields()) {
if (declaredField.getType().equals(Version.class)) {
final String fieldName = declaredField.getName();
if (fieldName.equals("CURRENT") || fieldName.equals("V_EMPTY")) {
continue;
}
assert fieldName.matches("V_\\d+_\\d+_\\d+") : "expected Version field [" + fieldName + "] to match V_\\d+_\\d+_\\d+";
try {
final Version version = (Version) declaredField.get(null);
if (Assertions.ENABLED) {
final String[] fields = fieldName.split("_");
final int major = Integer.valueOf(fields[1]) * 1000000;
final int minor = Integer.valueOf(fields[2]) * 10000;
final int revision = Integer.valueOf(fields[3]) * 100;
final int expectedId = major + minor + revision + 99;
assert version.id == expectedId
: "expected version [" + fieldName + "] to have id [" + expectedId + "] but was [" + version.id + "]";
}
final Version maybePrevious = builder.put(version.id, version);
builderByString.put(version.toString(), version);
assert maybePrevious == null
: "expected [" + version.id + "] to be uniquely mapped but saw [" + maybePrevious + "] and [" + version + "]";
} catch (final IllegalAccessException e) {
assert false : "Version field [" + fieldName + "] should be public";
}
}
}
assert RestApiVersion.current().major == CURRENT.major && RestApiVersion.previous().major == CURRENT.major - 1
: "RestApiVersion must be upgraded "
+ "to reflect major from Version.CURRENT ["
+ CURRENT.major
+ "]"
+ " but is still set to ["
+ RestApiVersion.current().major
+ "]";
builder.put(V_EMPTY_ID, V_EMPTY);
builderByString.put(V_EMPTY.toString(), V_EMPTY);
VERSION_IDS = Collections.unmodifiableNavigableMap(builder);
VERSION_STRINGS = Map.copyOf(builderByString);
}
public static Version readVersion(StreamInput in) throws IOException {
return fromId(in.readVInt());
}
public static Version fromId(int id) {
final Version known = VERSION_IDS.get(id);
if (known != null) {
return known;
}
return fromIdSlow(id);
}
private static Version fromIdSlow(int id) {
return new Version(id);
}
public static void writeVersion(Version version, StreamOutput out) throws IOException {
out.writeVInt(version.id);
}
/**
* Returns the minimum version of {@code version1} and {@code version2}
*/
public static Version min(Version version1, Version version2) {
return version1.id < version2.id ? version1 : version2;
}
/**
* Returns the maximum version of {@code version1} and {@code version2}
*/
public static Version max(Version version1, Version version2) {
return version1.id > version2.id ? version1 : version2;
}
/**
* Returns the version given its string representation, current version if the argument is null or empty
*/
public static Version fromString(String version) {
if (Strings.hasLength(version) == false) {
return Version.CURRENT;
}
final Version cached = VERSION_STRINGS.get(version);
if (cached != null) {
return cached;
}
return fromStringSlow(version);
}
private static Version fromStringSlow(String version) {
final boolean snapshot; // this is some BWC for 2.x and before indices
if (snapshot = version.endsWith("-SNAPSHOT")) {
version = version.substring(0, version.length() - 9);
}
String[] parts = version.split("[.-]");
if (parts.length != 3) {
throw new IllegalArgumentException(
"the version needs to contain major, minor, and revision, and optionally the build: " + version
);
}
try {
final int rawMajor = Integer.parseInt(parts[0]);
if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
}
if (rawMajor >= 7 && parts.length == 4) { // we don't support qualifier as part of the version anymore
throw new IllegalArgumentException("illegal version format - qualifiers are only supported until version 6.x");
}
if (parts[1].length() > 2) {
throw new IllegalArgumentException(
"illegal minor version format - only one or two digit numbers are supported but found " + parts[1]
);
}
if (parts[2].length() > 2) {
throw new IllegalArgumentException(
"illegal revision version format - only one or two digit numbers are supported but found " + parts[2]
);
}
// we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
final int major = rawMajor * 1000000;
final int minor = Integer.parseInt(parts[1]) * 10000;
final int revision = Integer.parseInt(parts[2]) * 100;
// TODO: 99 is leftover from alpha/beta/rc, it should be removed
return fromId(major + minor + revision + 99);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("unable to parse version " + version, e);
}
}
public final int id;
public final byte major;
public final byte minor;
public final byte revision;
public final byte build;
private final String toString;
private final int previousMajorId;
Version(int id) {
this.id = id;
this.major = (byte) ((id / 1000000) % 100);
this.minor = (byte) ((id / 10000) % 100);
this.revision = (byte) ((id / 100) % 100);
this.build = (byte) (id % 100);
this.toString = major + "." + minor + "." + revision;
this.previousMajorId = major > 0 ? (major - 1) * 1000000 + 99 : major;
}
@Override
public int id() {
return id;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.value(toString());
}
/*
* We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it
* is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions
* lazily once.
*/
private static class DeclaredVersionsHolder {
static final List<Version> DECLARED_VERSIONS = List.copyOf(getDeclaredVersions(Version.class));
}
// lazy initialized because we don't yet have the declared versions ready when instantiating the cached Version
// instances
private Version minCompatVersion;
/**
* Returns the minimum compatible version based on the current
* version. I.e. a node needs to have at least the returned version in order
* to communicate with a node running the current version. In most cases the
* returned version is the first release of the last minor series of the previous
* major, unless the current version is a beta or RC release, in which case the
* version itself is returned.
*/
public Version minimumCompatibilityVersion() {
Version res = minCompatVersion;
if (res == null) {
res = computeMinCompatVersion();
minCompatVersion = res;
}
return res;
}
private Version computeMinCompatVersion() {
if (major == 6) {
// force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore
return Version.fromId(5060099);
} else if (major == 7) {
// force the minimum compatibility for version 7 to 6.8 since we don't reference version 6 anymore
return Version.fromId(6080099);
} else if (major >= 8) {
// all major versions from 8 onwards are compatible with last minor series of the previous major
Version bwcVersion = null;
for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) {
final Version candidateVersion = DeclaredVersionsHolder.DECLARED_VERSIONS.get(i);
if (candidateVersion.major == major - 1 && after(candidateVersion)) {
if (bwcVersion != null && candidateVersion.minor < bwcVersion.minor) {
break;
}
bwcVersion = candidateVersion;
}
}
return bwcVersion == null ? this : bwcVersion;
}
return Version.min(this, fromId(major * 1000000 + 0 * 10000 + 99));
}
/**
* Returns <code>true</code> iff both version are compatible. Otherwise <code>false</code>
*/
public boolean isCompatible(Version version) {
boolean compatible = onOrAfter(version.minimumCompatibilityVersion()) && version.onOrAfter(minimumCompatibilityVersion());
assert compatible == false || Math.max(major, version.major) - Math.min(major, version.major) <= 1;
return compatible;
}
/**
* Returns a first major version previous to the version stored in this object.
* I.e 8.1.0 will return 7.0.0
*/
public Version previousMajor() {
return Version.fromId(previousMajorId);
}
@SuppressForbidden(reason = "System.out.*")
public static void main(String[] args) {
final String versionOutput = String.format(
Locale.ROOT,
"Version: %s, Build: %s/%s/%s, JVM: %s",
Build.current().qualifiedVersion(),
Build.current().type().displayName(),
Build.current().hash(),
Build.current().date(),
JvmInfo.jvmInfo().version()
);
System.out.println(versionOutput);
}
@Override
public String toString() {
return toString;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Version version = (Version) o;
if (id != version.id) {
return false;
}
return true;
}
@Override
public int hashCode() {
return id;
}
/**
* Extracts a sorted list of declared version constants from a class.
* The argument would normally be Version.class but is exposed for
* testing with other classes-containing-version-constants.
*/
public static List<Version> getDeclaredVersions(final Class<?> versionClass) {
final Field[] fields = versionClass.getFields();
final List<Version> versions = new ArrayList<>(fields.length);
for (final Field field : fields) {
final int mod = field.getModifiers();
// only public static final fields qualify as declared version constants
if (false == (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod))) {
continue;
}
if (field.getType() != Version.class) {
continue;
}
switch (field.getName()) {
case "CURRENT":
case "V_EMPTY":
continue;
}
assert field.getName().matches("V(_\\d+){3}?") : field.getName();
try {
versions.add(((Version) field.get(null)));
} catch (final IllegalAccessException e) {
throw new RuntimeException(e);
}
}
Collections.sort(versions);
return versions;
}
}
| mhl-b/elasticsearch | server/src/main/java/org/elasticsearch/Version.java |
547 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.leaderfollowers;
import lombok.Getter;
import lombok.Setter;
/**
* A unit of work to be processed by the Workers.
*/
public class Task {
@Getter
private final int time;
@Getter
@Setter
private boolean finished;
public Task(int time) {
this.time = time;
}
}
| iluwatar/java-design-patterns | leader-followers/src/main/java/com/iluwatar/leaderfollowers/Task.java |
550 | package com.thealgorithms.maths;
/**
* Class for linear convolution of two discrete signals
*
* @author Ioannis Karavitsis
* @version 1.0
*/
public final class Convolution {
private Convolution() {
}
/**
* Discrete linear convolution function. Both input signals and the output
* signal must start from 0. If you have a signal that has values before 0
* then shift it to start from 0.
*
* @param A The first discrete signal
* @param B The second discrete signal
* @return The convolved signal
*/
public static double[] convolution(double[] A, double[] B) {
double[] convolved = new double[A.length + B.length - 1];
/*
The discrete convolution of two signals A and B is defined as:

    C[i] = Σ_{k=0}^{A.length-1} (A[k] * B[i-k])

The indices satisfy 0 <= k <= A.length - 1, 0 <= i <= A.length + B.length - 2 and
0 <= i-k <= B.length - 1. From the last inequality we get i - B.length + 1 <= k <= i,
which yields the loop bounds below.
*/
for (int i = 0; i < convolved.length; i++) {
convolved[i] = 0;
int k = Math.max(i - B.length + 1, 0);
while (k < i + 1 && k < A.length) {
convolved[i] += A[k] * B[i - k];
k++;
}
}
return convolved;
}
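/**
* Minimal usage sketch (illustrative driver, not part of the original class):
* convolving {1, 2, 3} with {0, 1, 0.5} yields {0, 1, 2.5, 4, 1.5}.
*/
public static void main(String[] args) {
double[] c = convolution(new double[] {1, 2, 3}, new double[] {0, 1, 0.5});
System.out.println(java.util.Arrays.toString(c)); // [0.0, 1.0, 2.5, 4.0, 1.5]
}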
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/maths/Convolution.java |
554 | import java.io.*;
import java.util.*;
/**************************************************************************************************************************************
* Class Domain
*****/
public class Domain implements Debuggable {
public MemoryMonitor myMemoryMonitor;
Dataset myDS;
Domain(String nameD, String nameP, double eta, boolean check_labels) {
if (!check_labels) System.out.println("Warning :: no checking of labels in stratified sample");
myMemoryMonitor = new MemoryMonitor();
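// Build the dataset for the given domain/problem names, then load its
// features and examples and derive a stratified sample from them.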
myDS = new Dataset(nameD, nameP, this, eta);
myDS.load_features();
myDS.load_examples();
myDS.generate_stratified_sample_with_check(check_labels);
}
public String memString() {
return myMemoryMonitor.memString;
}
}
| google-research/google-research | tempered_boosting/Domain.java |
557 | package com.thealgorithms.sorts;
/**
* The idea of swap-sort is to count, for each element of an array A(1...n),
* the number m of values in A that are smaller than it, and then swap the
* element with the element in A(m+1). This ensures that the exchanged element
* is already in its correct, i.e. final, position. The disadvantage of this
* algorithm is that each value may occur only once; with duplicates it does
* not terminate.
*/
public class SwapSort implements SortAlgorithm {
@Override
public <T extends Comparable<T>> T[] sort(T[] array) {
int length = array.length;
int index = 0;
while (index < length - 1) {
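// Example: in {3, 7, 45, 1, 33, 5, 2, 9} the value 3 has two smaller values
// (1 and 2), so it is swapped into index 2, its final position in the sorted array.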
int amountSmallerElements = this.getSmallerElementCount(array, index);
if (amountSmallerElements > 0 && index != amountSmallerElements) {
T element = array[index];
array[index] = array[amountSmallerElements];
array[amountSmallerElements] = element;
} else {
index++;
}
}
return array;
}
private <T extends Comparable<T>> int getSmallerElementCount(T[] array, int index) {
int counter = 0;
for (int i = 0; i < array.length; i++) {
if (SortUtils.less(array[i], array[index])) {
counter++;
}
}
return counter;
}
public static void main(String[] args) {
// ==== Int =======
Integer[] a = {3, 7, 45, 1, 33, 5, 2, 9};
System.out.print("unsorted: ");
SortUtils.print(a);
System.out.println();
new SwapSort().sort(a);
System.out.print("sorted: ");
SortUtils.print(a);
System.out.println();
// ==== String =======
String[] b = {
"banana",
"berry",
"orange",
"grape",
"peach",
"cherry",
"apple",
"pineapple",
};
System.out.print("unsorted: ");
SortUtils.print(b);
System.out.println();
new SwapSort().sort(b);
System.out.print("sorted: ");
SortUtils.print(b);
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/sorts/SwapSort.java |
558 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.visitor;
/**
* Visitor interface. Declares one visit operation per concrete unit type;
* each unit accepts a visitor and dispatches to the overload matching its type.
*/
public interface UnitVisitor {
void visit(Soldier soldier);
void visit(Sergeant sergeant);
void visit(Commander commander);
}
| smedals/java-design-patterns | visitor/src/main/java/com/iluwatar/visitor/UnitVisitor.java |
559 | package com.thealgorithms.maths;
/**
* This class provides a method to compute the rank of a matrix.
* In linear algebra, the rank of a matrix is the maximum number of linearly independent rows or columns in the matrix.
* For example, consider the following 3x3 matrix:
* 1 2 3
* 2 4 6
* 3 6 9
* Despite having 3 rows and 3 columns, this matrix only has a rank of 1 because all rows (and columns) are multiples of each other.
* It's a fundamental concept that gives key insights into the structure of the matrix.
* It's important to note that the rank is not only defined for square matrices but for any m x n matrix.
*
* @author Anup Omkar
*/
public final class MatrixRank {
private MatrixRank() {
}
private static final double EPSILON = 1e-10;
/**
* @brief Computes the rank of the input matrix
*
* @param matrix The input matrix
* @return The rank of the input matrix
*/
public static int computeRank(double[][] matrix) {
validateInputMatrix(matrix);
int numRows = matrix.length;
int numColumns = matrix[0].length;
int rank = 0;
boolean[] rowMarked = new boolean[numRows];
double[][] matrixCopy = deepCopy(matrix);
for (int colIndex = 0; colIndex < numColumns; ++colIndex) {
int pivotRow = findPivotRow(matrixCopy, rowMarked, colIndex);
if (pivotRow != numRows) {
++rank;
rowMarked[pivotRow] = true;
normalizePivotRow(matrixCopy, pivotRow, colIndex);
eliminateRows(matrixCopy, pivotRow, colIndex);
}
}
return rank;
}
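// Usage sketch: for the matrix from the class javadoc every row is a multiple of
// the first, so only one pivot is found and
// computeRank(new double[][] {{1, 2, 3}, {2, 4, 6}, {3, 6, 9}}) returns 1.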
private static boolean isZero(double value) {
return Math.abs(value) < EPSILON;
}
private static double[][] deepCopy(double[][] matrix) {
int numRows = matrix.length;
int numColumns = matrix[0].length;
double[][] matrixCopy = new double[numRows][numColumns];
for (int rowIndex = 0; rowIndex < numRows; ++rowIndex) {
System.arraycopy(matrix[rowIndex], 0, matrixCopy[rowIndex], 0, numColumns);
}
return matrixCopy;
}
private static void validateInputMatrix(double[][] matrix) {
if (matrix == null) {
throw new IllegalArgumentException("The input matrix cannot be null");
}
if (matrix.length == 0) {
throw new IllegalArgumentException("The input matrix cannot be empty");
}
if (!hasValidRows(matrix)) {
throw new IllegalArgumentException("The input matrix cannot have null or empty rows");
}
if (isJaggedMatrix(matrix)) {
throw new IllegalArgumentException("The input matrix cannot be jagged");
}
}
private static boolean hasValidRows(double[][] matrix) {
for (double[] row : matrix) {
if (row == null || row.length == 0) {
return false;
}
}
return true;
}
/**
* @brief Checks if the input matrix is a jagged matrix.
* Jagged matrix is a matrix where the number of columns in each row is not the same.
*
* @param matrix The input matrix
* @return True if the input matrix is a jagged matrix, false otherwise
*/
private static boolean isJaggedMatrix(double[][] matrix) {
int numColumns = matrix[0].length;
for (double[] row : matrix) {
if (row.length != numColumns) {
return true;
}
}
return false;
}
/**
* @brief The pivot row is the row in the matrix that is used to eliminate other rows and reduce the matrix to its row echelon form.
* The pivot row is selected as the first row (from top to bottom) where the value in the current column (the pivot column) is not zero.
* This row is then used to "eliminate" other rows, by subtracting multiples of the pivot row from them, so that all other entries in the pivot column become zero.
* This process is repeated for each column, each time selecting a new pivot row, until the matrix is in row echelon form.
* The number of pivot rows (rows with a leading entry, or pivot) then gives the rank of the matrix.
*
* @param matrix The input matrix
* @param rowMarked An array indicating which rows have been marked
* @param colIndex The column index
* @return The pivot row index, or the number of rows if no suitable pivot row was found
*/
private static int findPivotRow(double[][] matrix, boolean[] rowMarked, int colIndex) {
int numRows = matrix.length;
for (int pivotRow = 0; pivotRow < numRows; ++pivotRow) {
if (!rowMarked[pivotRow] && !isZero(matrix[pivotRow][colIndex])) {
return pivotRow;
}
}
return numRows;
}
/**
* @brief Divides all values to the right of the pivot by the pivot value.
* Conceptually this makes the pivot itself 1, which simplifies further calculations.
* The pivot entry is left untouched because later elimination steps only read the
* columns to its right.
*
* @param matrix The input matrix
* @param pivotRow The pivot row index
* @param colIndex The column index
*/
private static void normalizePivotRow(double[][] matrix, int pivotRow, int colIndex) {
int numColumns = matrix[0].length;
for (int nextCol = colIndex + 1; nextCol < numColumns; ++nextCol) {
matrix[pivotRow][nextCol] /= matrix[pivotRow][colIndex];
}
}
/**
* @brief This method subtracts multiples of the pivot row from all other rows,
* so that all values in the given column of other rows will be zero.
* This is a key step in reducing the matrix to row echelon form.
*
* @param matrix The input matrix
* @param pivotRow The pivot row index
* @param colIndex The column index
*/
private static void eliminateRows(double[][] matrix, int pivotRow, int colIndex) {
int numRows = matrix.length;
int numColumns = matrix[0].length;
for (int otherRow = 0; otherRow < numRows; ++otherRow) {
if (otherRow != pivotRow && !isZero(matrix[otherRow][colIndex])) {
for (int col2 = colIndex + 1; col2 < numColumns; ++col2) {
matrix[otherRow][col2] -= matrix[pivotRow][col2] * matrix[otherRow][colIndex];
}
}
}
}
}
| JackHuynh0610/Java-Algo- | src/main/java/com/thealgorithms/maths/MatrixRank.java |
562 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.visitor;
import lombok.extern.slf4j.Slf4j;
/**
* SoldierVisitor.
*/
@Slf4j
public class SoldierVisitor implements UnitVisitor {
/**
* Soldier Visitor method.
* @param soldier Soldier to be visited
*/
@Override
public void visit(Soldier soldier) {
LOGGER.info("Greetings {}", soldier);
}
/**
* Sergeant Visitor method.
* @param sergeant Sergeant to be visited
*/
@Override
public void visit(Sergeant sergeant) {
// Do nothing
}
/**
* Commander Visitor method.
* @param commander Commander to be visited
*/
@Override
public void visit(Commander commander) {
// Do nothing
}
}
| smedals/java-design-patterns | visitor/src/main/java/com/iluwatar/visitor/SoldierVisitor.java |
563 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.caching;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
/**
* Data structure/implementation of the application's cache. The data structure
* consists of a hash table attached to a doubly linked list. The linked list
* helps in capturing and maintaining the LRU data in the cache. When data is
* queried (from the cache), added (to the cache), or updated, it is
* moved to the front of the list to mark it as the most-recently-used
* entry. The least-recently-used data is always at the end of the list.
*/
@Slf4j
public class LruCache {
/**
* Static class Node.
*/
static class Node {
/**
* user id.
*/
private final String userId;
/**
* User Account.
*/
private UserAccount userAccount;
/**
* previous.
*/
private Node previous;
/**
* next.
*/
private Node next;
/**
* Node definition.
*
* @param id String
* @param account {@link UserAccount}
*/
Node(final String id, final UserAccount account) {
this.userId = id;
this.userAccount = account;
}
}
/**
* Capacity of Cache.
*/
private int capacity;
/**
* Cache {@link HashMap}.
*/
private Map<String, Node> cache = new HashMap<>();
/**
* Head.
*/
private Node head;
/**
* End.
*/
private Node end;
/**
* Constructor.
*
* @param cap Integer.
*/
public LruCache(final int cap) {
this.capacity = cap;
}
/**
* Get user account.
*
* @param userId String
* @return {@link UserAccount}
*/
public UserAccount get(final String userId) {
if (cache.containsKey(userId)) {
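// Cache hit: unlink the node and re-insert it at the head to mark it as most recently used.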
var node = cache.get(userId);
remove(node);
setHead(node);
return node.userAccount;
}
return null;
}
/**
* Remove node from linked list.
*
* @param node {@link Node}
*/
public void remove(final Node node) {
if (node.previous != null) {
node.previous.next = node.next;
} else {
head = node.next;
}
if (node.next != null) {
node.next.previous = node.previous;
} else {
end = node.previous;
}
}
/**
* Move node to the front of the list.
*
* @param node {@link Node}
*/
public void setHead(final Node node) {
node.next = head;
node.previous = null;
if (head != null) {
head.previous = node;
}
head = node;
if (end == null) {
end = head;
}
}
/**
* Set user account.
*
* @param userAccount {@link UserAccount}
* @param userId {@link String}
*/
public void set(final String userId, final UserAccount userAccount) {
if (cache.containsKey(userId)) {
var old = cache.get(userId);
old.userAccount = userAccount;
remove(old);
setHead(old);
} else {
var newNode = new Node(userId, userAccount);
if (cache.size() >= capacity) {
LOGGER.info("# Cache is FULL! Removing {} from cache...", end.userId);
cache.remove(end.userId); // remove LRU data from cache.
remove(end);
setHead(newNode);
} else {
setHead(newNode);
}
cache.put(userId, newNode);
}
}
/**
* Check if Cache contains the userId.
*
* @param userId {@link String}
* @return boolean
*/
public boolean contains(final String userId) {
return cache.containsKey(userId);
}
/**
* Invalidate cache for user.
*
* @param userId {@link String}
*/
public void invalidate(final String userId) {
var toBeRemoved = cache.remove(userId);
if (toBeRemoved != null) {
LOGGER.info("# {} has been updated! "
+ "Removing older version from cache...", userId);
remove(toBeRemoved);
}
}
/**
* Check if the cache is full.
* @return boolean
*/
public boolean isFull() {
return cache.size() >= capacity;
}
/**
* Get LRU data.
*
* @return {@link UserAccount}
*/
public UserAccount getLruData() {
return end.userAccount;
}
/**
* Clear cache.
*/
public void clear() {
head = null;
end = null;
cache.clear();
}
/**
* Returns cache data in list form.
*
* @return {@link List}
*/
public List<UserAccount> getCacheDataInListForm() {
var listOfCacheData = new ArrayList<UserAccount>();
var temp = head;
while (temp != null) {
listOfCacheData.add(temp.userAccount);
temp = temp.next;
}
return listOfCacheData;
}
/**
* Set cache capacity.
*
* @param newCapacity int
*/
public void setCapacity(final int newCapacity) {
if (capacity > newCapacity) {
// Behavior can be modified to accommodate a decrease in cache size;
// for now, we just clear the cache.
clear();
} else {
this.capacity = newCapacity;
}
}
}
| smedals/java-design-patterns | caching/src/main/java/com/iluwatar/caching/LruCache.java |
565 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.visitor;
import lombok.extern.slf4j.Slf4j;
/**
* SergeantVisitor.
*/
@Slf4j
public class SergeantVisitor implements UnitVisitor {
/**
* Soldier Visitor method.
* @param soldier Soldier to be visited
*/
@Override
public void visit(Soldier soldier) {
// Do nothing
}
/**
* Sergeant Visitor method.
* @param sergeant Sergeant to be visited
*/
@Override
public void visit(Sergeant sergeant) {
LOGGER.info("Hello {}", sergeant);
}
/**
* Commander Visitor method.
* @param commander Commander to be visited
*/
@Override
public void visit(Commander commander) {
// Do nothing
}
}
| smedals/java-design-patterns | visitor/src/main/java/com/iluwatar/visitor/SergeantVisitor.java |
567 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar;
import java.time.LocalDate;
import java.time.Period;
import lombok.extern.slf4j.Slf4j;
/**
* Class which handles actual internal logic and validation for worker registration.
* Part of the domain layer which collects information and sends it back to the presentation.
*/
@Slf4j
public class RegisterWorker extends ServerCommand {
static final int LEGAL_AGE = 18;
protected RegisterWorker(RegisterWorkerDto worker) {
super(worker);
}
/**
* Validates the data provided and adds it to the database in the backend.
*/
public void run() {
validate();
if (!super.getNotification().hasErrors()) {
LOGGER.info("Register worker in backend system");
}
}
/**
* Validates our data. Checks for any errors and if found, adds to notification.
*/
private void validate() {
var ourData = ((RegisterWorkerDto) this.data);
// Check that each piece of submitted data is present.
fail(isNullOrBlank(ourData.getName()), RegisterWorkerDto.MISSING_NAME);
fail(isNullOrBlank(ourData.getOccupation()), RegisterWorkerDto.MISSING_OCCUPATION);
if (isNullOrBlank(ourData.getDateOfBirth())) {
// Date of birth is missing; report it exactly once.
fail(true, RegisterWorkerDto.MISSING_DOB);
} else {
// Validate age (must be at least 18).
Period age = Period.between(ourData.getDateOfBirth(), LocalDate.now());
fail(age.getYears() < LEGAL_AGE, RegisterWorkerDto.DOB_TOO_SOON);
}
}
/**
* Validates for null/empty value.
*
* @param obj any object
* @return boolean
*/
protected boolean isNullOrBlank(Object obj) {
if (obj == null) {
return true;
}
if (obj instanceof String) {
return ((String) obj).trim().isEmpty();
}
return false;
}
/**
* If a condition is met, adds the error to our notification.
*
* @param condition condition to check for.
* @param error error to add if condition met.
*/
protected void fail(boolean condition, NotificationError error) {
if (condition) {
super.getNotification().addError(error);
}
}
}
| rajprins/java-design-patterns | notification/src/main/java/com/iluwatar/RegisterWorker.java |
570 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.promise;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A really simplified implementation of future that allows completing it successfully with a value
* or exceptionally with an exception.
*/
class PromiseSupport<T> implements Future<T> {
private static final Logger LOGGER = LoggerFactory.getLogger(PromiseSupport.class);
private static final int RUNNING = 1;
private static final int FAILED = 2;
private static final int COMPLETED = 3;
private final Object lock;
private volatile int state = RUNNING;
private T value;
private Exception exception;
PromiseSupport() {
this.lock = new Object();
}
void fulfill(T value) {
this.value = value;
this.state = COMPLETED;
synchronized (lock) {
lock.notifyAll();
}
}
void fulfillExceptionally(Exception exception) {
this.exception = exception;
this.state = FAILED;
synchronized (lock) {
lock.notifyAll();
}
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean isDone() {
return state > RUNNING;
}
@Override
public T get() throws InterruptedException, ExecutionException {
synchronized (lock) {
while (state == RUNNING) {
lock.wait();
}
}
if (state == COMPLETED) {
return value;
}
throw new ExecutionException(exception);
}
// Note: simplified timeout handling - each wakeup restarts the full timeout, and
// TimeoutException is never thrown, so Future's timed-get contract is only approximated.
@Override
public T get(long timeout, TimeUnit unit) throws ExecutionException {
synchronized (lock) {
while (state == RUNNING) {
try {
lock.wait(unit.toMillis(timeout));
} catch (InterruptedException e) {
LOGGER.warn("Interrupted!", e);
Thread.currentThread().interrupt();
}
}
}
if (state == COMPLETED) {
return value;
}
throw new ExecutionException(exception);
}
}
| smedals/java-design-patterns | promise/src/main/java/com/iluwatar/promise/PromiseSupport.java |
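A minimal usage sketch for the future above (same package, since the class and fulfill() are package-private; the producer thread and the value 42 are illustrative):
var promise = new PromiseSupport<Integer>();
new Thread(() -> promise.fulfill(42)).start(); // producer completes the future
try {
Integer value = promise.get(); // blocks until fulfill() or fulfillExceptionally() runs
} catch (InterruptedException | ExecutionException e) {
// an ExecutionException wraps the exception passed to fulfillExceptionally()
}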
571 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.databus;
import java.util.HashSet;
import java.util.Set;
/**
* The Data-Bus implementation.
*
* <p>This implementation uses a Singleton.</p>
*
* @author Paul Campbell ([email protected])
*/
public class DataBus {
private static final DataBus INSTANCE = new DataBus();
private final Set<Member> listeners = new HashSet<>();
public static DataBus getInstance() {
return INSTANCE;
}
/**
* Register a member with the data-bus to start receiving events.
*
* @param member The member to register
*/
public void subscribe(final Member member) {
this.listeners.add(member);
}
/**
* Deregister a member to stop receiving events.
*
* @param member The member to deregister
*/
public void unsubscribe(final Member member) {
this.listeners.remove(member);
}
/**
* Publish an event to all members.
*
* @param event The event
*/
public void publish(final DataType event) {
event.setDataBus(this);
listeners.forEach(listener -> listener.accept(event));
}
}
| smedals/java-design-patterns | data-bus/src/main/java/com/iluwatar/databus/DataBus.java |
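A minimal usage sketch, assuming Member exposes a single accept(DataType) method as the publish() call above implies (the lambda only works if Member is a functional interface; someEvent stands for any DataType subclass instance):
DataBus bus = DataBus.getInstance();
Member logger = event -> System.out.println("received " + event);
bus.subscribe(logger);
bus.publish(someEvent); // each subscribed member's accept() runs synchronously, in this thread
bus.unsubscribe(logger);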
572 | package com.thealgorithms.misc;
import java.util.Scanner;
/*
 * A matrix is sparse if many of its coefficients are zero (in general, if 2/3 of the matrix
 * elements are 0, it is considered sparse). The interest in sparsity arises because its
 * exploitation can lead to enormous computational savings, and because many large matrix
 * problems that occur in practice are sparse.
 *
 * @author Ojasva Jain
 */
final class Sparsity {
private Sparsity() {
}
/*
 * @return sparsity of the matrix,
 *
 * where sparsity = number of zeroes / total elements in the matrix
 */
static double sparsity(double[][] mat) {
int zero = 0;
int total = 0;
// Traverse the matrix, counting zero entries and total entries; counting the total
// per row handles single-row and ragged matrices (the original divided by
// mat.length * mat[1].length, which indexes row 1 and fails for a 1-row matrix)
for (int i = 0; i < mat.length; i++) {
for (int j = 0; j < mat[i].length; j++) {
if (mat[i][j] == 0) {
zero++;
}
total++;
}
}
return ((double) zero / total);
}
// Driver method
public static void main(String[] args) {
Scanner in = new Scanner(System.in);
System.out.println("Enter number of rows in matrix: ");
int n = in.nextInt();
System.out.println("Enter number of Columns in matrix: ");
int m = in.nextInt();
System.out.println("Enter Matrix elements: ");
double[][] mat = new double[n][m];
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
mat[i][j] = in.nextDouble();
}
}
System.out.println("Sparsity of matrix is: " + sparsity(mat));
in.close();
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/misc/Sparsity.java |
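A quick sanity check, callable from the same package (sparsity() is package-private): a 2x3 matrix with four zeroes should report 4/6.
double[][] mat = {
{0, 0, 1},
{0, 2, 0}
};
System.out.println(Sparsity.sparsity(mat)); // 0.6666666666666666 (4 zeroes out of 6 elements)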
574 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.masterworker;
import com.iluwatar.masterworker.system.ArrayTransposeMasterWorker;
import com.iluwatar.masterworker.system.MasterWorker;
import com.iluwatar.masterworker.system.systemmaster.ArrayTransposeMaster;
import com.iluwatar.masterworker.system.systemmaster.Master;
import com.iluwatar.masterworker.system.systemworkers.ArrayTransposeWorker;
import com.iluwatar.masterworker.system.systemworkers.Worker;
import lombok.extern.slf4j.Slf4j;
/**
 * <p>The <b><em>Master-Worker</em></b> pattern is used when the problem at hand can be solved by
 * dividing it into multiple parts that go through the same computation and may need to be
 * aggregated to get the final result. Parallel processing is performed by a system consisting of a
 * master and some number of workers, where the master divides the work among the workers, gets the
 * results back from them, and assimilates them into the final result. The only
 * communication is between the master and the workers - none of the workers communicate among one
 * another, and the user only communicates with the master to get the required job done.</p>
* <p>In our example, we have generic abstract classes {@link MasterWorker}, {@link Master} and
* {@link Worker} which have to be extended by the classes which will perform the specific job at
* hand (in this case finding transpose of matrix, done by {@link ArrayTransposeMasterWorker},
* {@link ArrayTransposeMaster} and {@link ArrayTransposeWorker}). The Master class divides the work
* into parts to be given to the workers, collects the results from the workers and aggregates it
* when all workers have responded before returning the solution. The Worker class extends the
* Thread class to enable parallel processing, and does the work once the data has been received
* from the Master. The MasterWorker contains a reference to the Master class, gets the input from
* the App and passes it on to the Master. These 3 classes define the system which computes the
* result. We also have 2 abstract classes {@link Input} and {@link Result}, which contain the input
* data and result data respectively. The Input class also has an abstract method divideData which
* defines how the data is to be divided into segments. These classes are extended by {@link
* ArrayInput} and {@link ArrayResult}.</p>
*/
@Slf4j
public class App {
/**
* Program entry point.
*
* @param args command line args
*/
public static void main(String[] args) {
var mw = new ArrayTransposeMasterWorker();
var rows = 10;
var columns = 20;
var inputMatrix = ArrayUtilityMethods.createRandomIntMatrix(rows, columns);
var input = new ArrayInput(inputMatrix);
var result = (ArrayResult) mw.getResult(input);
if (result != null) {
ArrayUtilityMethods.printMatrix(inputMatrix);
ArrayUtilityMethods.printMatrix(result.data);
} else {
LOGGER.info("Please enter non-zero input");
}
}
}
| iluwatar/java-design-patterns | master-worker/src/main/java/com/iluwatar/masterworker/App.java |
575 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.registry;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* CustomerRegistry class used to store/access {@link Customer} objects.
*/
public final class CustomerRegistry {
private static final CustomerRegistry instance = new CustomerRegistry();
public static CustomerRegistry getInstance() {
return instance;
}
private final Map<String, Customer> customerMap;
private CustomerRegistry() {
customerMap = new ConcurrentHashMap<>();
}
public Customer addCustomer(Customer customer) {
// Map.put semantics: returns the customer previously registered under this id, or null
return customerMap.put(customer.id(), customer);
}
public Customer getCustomer(String id) {
return customerMap.get(id);
}
}
| ati-nordnet/java-design-patterns | registry/src/main/java/com/iluwatar/registry/CustomerRegistry.java |
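An illustrative usage of the registry (the Customer constructor shape is assumed; the code above only relies on an id() accessor):
CustomerRegistry registry = CustomerRegistry.getInstance();
registry.addCustomer(new Customer("001", "John")); // returns null: nothing was registered under "001"
Customer found = registry.getCustomer("001"); // same instance, visible from any thread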
577 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.visitor;
import lombok.extern.slf4j.Slf4j;
/**
* CommanderVisitor.
*/
@Slf4j
public class CommanderVisitor implements UnitVisitor {
/**
* Soldier Visitor method.
* @param soldier Soldier to be visited
*/
@Override
public void visit(Soldier soldier) {
// Do nothing
}
/**
* Sergeant Visitor method.
* @param sergeant Sergeant to be visited
*/
@Override
public void visit(Sergeant sergeant) {
// Do nothing
}
/**
* Commander Visitor method.
* @param commander Commander to be visited
*/
@Override
public void visit(Commander commander) {
LOGGER.info("Good to see you {}", commander);
}
}
| smedals/java-design-patterns | visitor/src/main/java/com/iluwatar/visitor/CommanderVisitor.java |
580 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.commander.queue;
import com.iluwatar.commander.exceptions.IsEmptyException;
/**
* Queue data structure implementation.
*
* @param <T> is the type of object the queue will hold.
*/
public class Queue<T> {
private Node<T> front;
private Node<T> rear;
private int size;
static class Node<V> {
V value;
Node<V> next;
Node(V obj, Node<V> b) {
value = obj;
next = b;
}
}
boolean isEmpty() {
return size == 0;
}
void enqueue(T obj) {
if (front == null) {
front = new Node<>(obj, null);
rear = front;
} else {
var temp = new Node<>(obj, null);
rear.next = temp;
rear = temp;
}
size++;
}
T dequeue() throws IsEmptyException {
if (isEmpty()) {
throw new IsEmptyException();
} else {
var temp = front;
front = front.next;
size = size - 1;
return temp.value;
}
}
T peek() throws IsEmptyException {
if (isEmpty()) {
throw new IsEmptyException();
} else {
return front.value;
}
}
}
| smedals/java-design-patterns | commander/src/main/java/com/iluwatar/commander/queue/Queue.java |
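A same-package usage sketch of the queue above (the methods are package-private, and dequeue/peek declare IsEmptyException, so callers handle or propagate it):
Queue<Integer> q = new Queue<>();
q.enqueue(1);
q.enqueue(2);
q.peek(); // 1 - the front element, left in place
q.dequeue(); // 1 - removes the front; throws IsEmptyException once the queue is empty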
584 | package com.thealgorithms.sorts;
public final class DNFSort {
private DNFSort() {
}
// Sort the input array in a single pass; the array is assumed to have values in {0, 1, 2}.
// Dutch National Flag invariant: a[0..low-1] == 0, a[low..mid-1] == 1,
// a[mid..high] is unexamined, and a[high+1..] == 2.
static void sort012(int[] a, int arr_size) {
int low = 0;
int high = arr_size - 1;
int mid = 0, temp;
while (mid <= high) {
switch (a[mid]) {
case 0: {
temp = a[low];
a[low] = a[mid];
a[mid] = temp;
low++;
mid++;
break;
}
case 1:
mid++;
break;
case 2: {
temp = a[mid];
a[mid] = a[high];
a[high] = temp;
high--;
break;
}
}
}
}
/* Utility function to print array arr[] */
static void printArray(int[] arr, int arr_size) {
for (int i = 0; i < arr_size; i++) {
System.out.print(arr[i] + " ");
}
System.out.println();
}
/*Driver function to check for above functions*/
public static void main(String[] args) {
int[] arr = {0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1};
int arr_size = arr.length;
sort012(arr, arr_size);
System.out.println("Array after seggregation ");
printArray(arr, arr_size);
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/sorts/DNFSort.java |
587 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.commander;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
/**
* Retry pattern.
*
* @param <T> is the type of object passed into HandleErrorIssue as a parameter.
*/
public class Retry<T> {
/**
* Operation interface defining the method to be retried.
*/
public interface Operation {
void operation(List<Exception> list) throws Exception;
}
/**
* HandleErrorIssue defines how to handle errors.
*
* @param <T> is the type of object to be passed into the method as parameter.
*/
public interface HandleErrorIssue<T> {
void handleIssue(T obj, Exception e);
}
private static final SecureRandom RANDOM = new SecureRandom();
private final Operation op;
private final HandleErrorIssue<T> handleError;
private final int maxAttempts;
private final long maxDelay;
private final AtomicInteger attempts;
private final Predicate<Exception> test;
private final List<Exception> errors;
Retry(Operation op, HandleErrorIssue<T> handleError, int maxAttempts,
long maxDelay, Predicate<Exception>... ignoreTests) {
this.op = op;
this.handleError = handleError;
this.maxAttempts = maxAttempts;
this.maxDelay = maxDelay;
this.attempts = new AtomicInteger();
this.test = Arrays.stream(ignoreTests).reduce(Predicate::or).orElse(e -> false);
this.errors = new ArrayList<>();
}
/**
* Performs the operation, retrying until it succeeds, the maximum number of
* attempts is reached, or a non-ignorable exception occurs.
*
* @param list is the exception list
* @param obj is the parameter to be passed into the handleIssue method
*/
public void perform(List<Exception> list, T obj) {
do {
try {
op.operation(list);
return;
} catch (Exception e) {
this.errors.add(e);
if (this.attempts.incrementAndGet() >= this.maxAttempts || !this.test.test(e)) {
this.handleError.handleIssue(obj, e);
return; //return here... don't go further
}
try {
long testDelay =
(long) Math.pow(2, this.attempts.intValue()) * 1000 + RANDOM.nextInt(1000);
long delay = Math.min(testDelay, this.maxDelay);
Thread.sleep(delay);
} catch (InterruptedException f) {
//ignore
}
}
} while (true);
}
}
| rajprins/java-design-patterns | commander/src/main/java/com/iluwatar/commander/Retry.java |
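The sleep computed in perform() is exponential backoff with random jitter: attempt k waits min(2^k * 1000 + U[0, 1000), maxDelay) milliseconds. A standalone sketch of the schedule, using the same formula (10_000 stands in for maxDelay):
java.util.Random random = new java.util.Random();
for (int attempt = 1; attempt <= 3; attempt++) {
long base = (long) Math.pow(2, attempt) * 1000; // 2000, 4000, 8000 ms
long delay = Math.min(base + random.nextInt(1000), 10_000); // jitter, capped at maxDelay
System.out.println("attempt " + attempt + " sleeps ~" + delay + " ms");
}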
590 | package com.thealgorithms.maths;
import java.util.Scanner;
public final class PrimeCheck {
private PrimeCheck() {
}
public static void main(String[] args) {
Scanner scanner = new Scanner(System.in);
System.out.print("Enter a number: ");
int n = scanner.nextInt();
if (isPrime(n)) {
System.out.println("algo1 verify that " + n + " is a prime number");
} else {
System.out.println("algo1 verify that " + n + " is not a prime number");
}
if (fermatPrimeChecking(n, 20)) {
System.out.println("algo2 verify that " + n + " is a prime number");
} else {
System.out.println("algo2 verify that " + n + " is not a prime number");
}
scanner.close();
}
/**
* Checks if a number is prime or not
*
* @param n the number
* @return {@code true} if {@code n} is prime
*/
public static boolean isPrime(int n) {
if (n == 2) {
return true;
}
if (n < 2 || n % 2 == 0) {
return false;
}
for (int i = 3, limit = (int) Math.sqrt(n); i <= limit; i += 2) {
if (n % i == 0) {
return false;
}
}
return true;
}
/**
 * Probabilistic Fermat primality test: picks random bases a and checks that
 * a^(n-1) ≡ 1 (mod n). Composites can occasionally pass (notably Carmichael numbers).
 *
 * @param n the number
 * @param iteration how many random bases to try
 * @return {@code true} if {@code n} passes every round
 */
public static boolean fermatPrimeChecking(int n, int iteration) {
long a;
int up = n - 2, down = 2;
for (int i = 0; i < iteration; i++) {
a = (long) Math.floor(Math.random() * (up - down + 1) + down);
if (modPow(a, n - 1, n) != 1) {
return false;
}
}
return true;
}
/**
 * Computes (a^b) mod c by repeated multiplication, i.e. in O(b) steps; adequate for
 * small exponents, while square-and-multiply would bring this down to O(log b).
 *
 * @param a base
 * @param b exponent
 * @param c modulus
 * @return (a^b) mod c
 */
private static long modPow(long a, long b, long c) {
long res = 1;
for (int i = 0; i < b; i++) {
res *= a;
res %= c;
}
return res % c;
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/maths/PrimeCheck.java |
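Since the linear-loop modPow above dominates the Fermat test's cost, square-and-multiply is the standard replacement. A sketch, not part of the original class (safe here because the modulus fits in an int, so the long multiplications cannot overflow):
static long fastModPow(long a, long b, long c) {
long result = 1;
a %= c;
while (b > 0) {
if ((b & 1) == 1) {
result = result * a % c; // odd exponent: fold the current base into the result
}
a = a * a % c; // square the base
b >>= 1; // halve the exponent
}
return result % c; // (a^b) mod c in O(log b) multiplications
}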
591 | /**
* Author : Suraj Kumar Modi
* https://github.com/skmodi649
*/
/**
* You are given a number n. You need to find the digital root of n.
* DigitalRoot of a number is the recursive sum of its digits until we get a single digit number.
*
* Test Case 1:
* Input:
* n = 1
* Output: 1
* Explanation: Digital root of 1 is 1
*
* Test Case 2:
* Input:
* n = 99999
* Output: 9
* Explanation: Sum of digits of 99999 is 45
* which is not a single digit number, hence
* sum of digit of 45 is 9 which is a single
* digit number.
*/
/**
* Algorithm :
* Step 1 : Define a method digitalRoot(int n)
* Step 2 : Define another method single(int n)
* Step 3 : digitalRoot(int n) method takes output of single(int n) as input
* if(single(int n) <= 9)
* return single(n)
* else
* return digitalRoot(single(n))
* Step 4 : single(int n) calculates the sum of digits of number n recursively
* if(n<=9)
* return n;
* else
* return (n%10) + single(n/10)
* Step 5 : In main method simply take n as input and then call digitalRoot(int n) function and
* print the result
*/
package com.thealgorithms.maths;
final class DigitalRoot {
private DigitalRoot() {
}
public static int digitalRoot(int n) {
int digitSum = single(n); // compute the digit sum once instead of twice
if (digitSum <= 9) { // already a single digit, so we are done
return digitSum;
} else {
return digitalRoot(digitSum);
}
}
// Recursively finds the sum of the digits of a number: n % 10 extracts the last
// digit, and n / 10 is the number left after removing it
public static int single(int n) {
if (n <= 9) { // n is a single digit, so return it
return n;
} else {
return (n % 10) + single(n / 10);
}
}
}
/**
 * Time Complexity: O(Number of Digits) per recursion level (the digit sum is computed
 * once per level, and the digit count collapses after the first pass).
 * Auxiliary Space Complexity: O(Number of Digits) for the recursion stack.
 * Constraints: 1 <= n <= 10^7
*/
| TheAlgorithms/Java | src/main/java/com/thealgorithms/maths/DigitalRoot.java |
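Worth noting (a standard identity, not from the original file): because any number is congruent to its digit sum modulo 9, the digital root of a positive n is 1 + (n - 1) % 9, giving a constant-time alternative:
static int digitalRootFast(int n) {
return n == 0 ? 0 : 1 + (n - 1) % 9; // digitalRootFast(99999) == 9, matching the recursive version
}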
592 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.doublebuffer;
/**
* Pixel enum. Each pixel can be white (not drawn) or black (drawn).
*/
public enum Pixel {
WHITE,
BLACK
}
| iluwatar/java-design-patterns | double-buffer/src/main/java/com/iluwatar/doublebuffer/Pixel.java |
593 | /*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.twin;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
 * This class is a UI thread for drawing the {@link BallItem}, and provides methods to
 * suspend and resume it. It holds a reference to the {@link BallItem} to delegate the draw task.
*/
@Slf4j
public class BallThread extends Thread {
@Setter
private BallItem twin;
private volatile boolean isSuspended;
private volatile boolean isRunning = true;
/**
* Run the thread.
*/
public void run() {
while (isRunning) {
if (!isSuspended) {
twin.draw();
twin.move();
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
public void suspendMe() {
isSuspended = true;
LOGGER.info("Begin to suspend BallThread");
}
public void resumeMe() {
isSuspended = false;
LOGGER.info("Begin to resume BallThread");
}
public void stopMe() {
this.isRunning = false;
this.isSuspended = true;
}
}
| smedals/java-design-patterns | twin/src/main/java/com/iluwatar/twin/BallThread.java |
594 | package com.thealgorithms.maths;
/**
* @see <a href="https://en.wikipedia.org/wiki/Combination">Combination</a>
*/
public final class Combinations {
private Combinations() {
}
/**
* Calculate of factorial
*
* @param n the number
* @return factorial of given number
*/
public static long factorial(int n) {
if (n < 0) {
throw new IllegalArgumentException("number is negative");
}
return n == 0 || n == 1 ? 1 : n * factorial(n - 1);
}
/**
* Calculate combinations
*
* @param n first number
* @param k second number
* @return combinations of given {@code n} and {@code k}
*/
public static long combinations(int n, int k) {
return factorial(n) / (factorial(k) * factorial(n - k));
}
/**
 * The method above can overflow when factorial(n) exceeds the range of long, even
 * if nCk itself fits in a long, so it can return incorrect results. This optimized
 * version instead builds the result iteratively from the identity
 * nC(k+1) = (n - k) * nCk / (k + 1), starting from nC0 = 1, which keeps the
 * intermediate values small.
 *
 * @param n total number of items
 * @param k number of items to choose
 * @return nCk
 */
public static long combinationsOptimized(int n, int k) {
if (n < 0 || k < 0) {
throw new IllegalArgumentException("n or k can't be negative");
}
if (n < k) {
throw new IllegalArgumentException("n can't be smaller than k");
}
// nC0 is always 1
long solution = 1;
for (int i = 0; i < k; i++) {
solution = (n - i) * solution / (i + 1);
}
return solution;
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/maths/Combinations.java |
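A concrete illustration of the overflow described above: 21! no longer fits in a long (20! does), so the factorial-based method breaks while the iterative one stays exact:
System.out.println(Combinations.combinations(21, 2)); // wrong value: 21! overflowed long
System.out.println(Combinations.combinationsOptimized(21, 2)); // 210, the correct value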
595 | import java.io.*;
import java.util.*;
/**************************************************************************************************************************************
* Class Utils
*****/
class Utils implements Debuggable {
int domain_id;
public static Random R = new Random();
public static String NOW;
public static Vector ALL_NON_TRIVIAL_SUBSETS(Vector v) {
Vector ret = new Vector();
Vector copycat = new Vector();
int i;
for (i = 0; i < v.size(); i++) copycat.addElement(new String((String) v.elementAt(i)));
MOVE_IN(ret, copycat);
// MOVE_IN enumerates all 2^n subsets, generating the empty set first and the full
// set last; drop both to keep only the non-trivial subsets
ret.removeElementAt(0);
ret.removeElementAt(ret.size() - 1);
return ret;
}
public static void MOVE_IN(Vector grow, Vector shrink) {
if ((shrink == null) || (shrink.size() == 0))
Dataset.perror("Utils.class :: MOVE_IN impossible because empty list of values");
if (grow == null) Dataset.perror("Utils.class :: MOVE_IN impossible because empty grow list");
String s = (String) shrink.elementAt(0);
Vector v, vv;
if (grow.size() == 0) {
v = new Vector();
grow.addElement(v);
}
int i, sinit = grow.size(), j;
for (i = 0; i < sinit; i++) {
vv = (Vector) grow.elementAt(i);
v = new Vector();
if (vv.size() > 0)
for (j = 0; j < vv.size(); j++) v.addElement(new String((String) vv.elementAt(j)));
v.addElement(new String(s));
grow.addElement(v);
}
shrink.removeElementAt(0);
if (shrink.size() > 0) MOVE_IN(grow, shrink);
}
public static void INIT() {
Calendar cal = Calendar.getInstance();
R = new Random();
NOW =
Algorithm.MONTHS[cal.get(Calendar.MONTH)]
+ "_"
+ cal.get(Calendar.DAY_OF_MONTH)
+ "th__"
+ cal.get(Calendar.HOUR_OF_DAY)
+ "h_"
+ cal.get(Calendar.MINUTE)
+ "m_"
+ cal.get(Calendar.SECOND)
+ "s";
}
public static double COMPUTE_P(double wp, double wn) {
double val;
if (wp + wn > 0.0) val = (wp / (wp + wn));
else val = 0.5;
if (val != 0.5) return val;
double vv = RANDOM_P_NOT_HALF();
if (vv < 0.5) val -= EPS2;
else val += EPS2;
return val;
}
public static double RANDOM_P_NOT_HALF() {
double vv;
do {
vv = R.nextDouble();
} while (vv == 0.5);
return vv;
}
}
| google-research/google-research | tempered_boosting/Utils.java |
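Tracing MOVE_IN on the two-element list {"a", "b"} makes the subset order concrete: grow fills as [[], [a], [b], [a, b]], with the empty set first and the full set last - exactly the two positions ALL_NON_TRIVIAL_SUBSETS strips to keep only the non-trivial subsets.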
597 | package com.thealgorithms.others;
/**
 * Implementation of the Knuth–Morris–Pratt algorithm.
 * Usage: see the main function for an example.
*/
public final class KMP {
private KMP() {
}
// a working example
public static void main(String[] args) {
final String haystack = "AAAAABAAABA"; // This is the full string
final String needle = "AAAA"; // This is the substring that we want to find
KMPmatcher(haystack, needle);
}
// find the starting index in string haystack[] that matches the search word P[]
public static void KMPmatcher(final String haystack, final String needle) {
final int m = haystack.length();
final int n = needle.length();
final int[] pi = computePrefixFunction(needle);
int q = 0;
for (int i = 0; i < m; i++) {
while (q > 0 && haystack.charAt(i) != needle.charAt(q)) {
q = pi[q - 1];
}
if (haystack.charAt(i) == needle.charAt(q)) {
q++;
}
if (q == n) {
System.out.println("Pattern starts: " + (i + 1 - n));
q = pi[q - 1];
}
}
}
// return the prefix function
private static int[] computePrefixFunction(final String P) {
final int n = P.length();
final int[] pi = new int[n];
pi[0] = 0;
int q = 0;
for (int i = 1; i < n; i++) {
while (q > 0 && P.charAt(q) != P.charAt(i)) {
q = pi[q - 1];
}
if (P.charAt(q) == P.charAt(i)) {
q++;
}
pi[i] = q;
}
return pi;
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/others/KMP.java |
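To make the prefix function concrete: for the needle "AAAA", computePrefixFunction returns pi = [0, 1, 2, 3] (each prefix has a proper border one character shorter than itself), so the example from main reports both overlapping matches:
KMP.KMPmatcher("AAAAABAAABA", "AAAA");
// Pattern starts: 0
// Pattern starts: 1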
601 | /**
* Theorem: If you drop the last digit d of an integer n (n ≥ 10), subtract 5d from the
* remaining integer, then the difference is a multiple of 17 if and only if n is a multiple of 17.
* For example, 34 is a multiple of 17, because 3-20=-17 is a multiple of 17; 201 is not a multiple of
* 17, because 20-5=15 is not a multiple of 17.
* Given a positive integer n, your task is to determine whether it is a multiple of 17.
* Input
 * There will be at most 10 test cases, each containing a single line with an integer n (1 ≤ n ≤ 10^100).
* The input terminates with n = 0, which should not be processed.
* Output
* For each case, print 1 if the corresponding integer is a multiple of 17, print 0 otherwise.
* Sample Input
* 34
* 201
* 2098765413
* 1717171717171717171717171717171717171717171717171718
* 0
* Sample Output
* 1
* 0
* 1
* 0
*/
//https://uva.onlinejudge.org/index.php?option=onlinejudge&Itemid=99999999&page=show_problem&category=&problem=3001
import java.math.BigInteger;
import java.util.Scanner;
public class MultipleOfSeventeen {
private static final BigInteger BIGINTEGER_FIVE = new BigInteger("5");
private static final BigInteger BIGINTEGER_SEVENTEEN = new BigInteger("17");
private static final BigInteger BIGINTEGER_ZERO = new BigInteger("0");
public static void main(String[] args) {
Scanner input = new Scanner(System.in);
while (input.hasNext()) {
BigInteger number = input.nextBigInteger();
if (number.equals(BIGINTEGER_ZERO)) {
break;
}
BigInteger lastDigit = number.mod(BigInteger.TEN);
number = number.divide(BigInteger.TEN);
BigInteger product5D = lastDigit.multiply(BIGINTEGER_FIVE);
BigInteger difference = number.subtract(product5D);
if (difference.mod(BIGINTEGER_SEVENTEEN).equals(BIGINTEGER_ZERO)) {
System.out.println("1");
} else {
System.out.println("0");
}
}
}
}
| kdn251/interviews | uva/MultipleOfSeventeen.java |
603 | package com.genymobile.scrcpy;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.os.Looper;
import android.os.SystemClock;
import android.view.Surface;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
public class SurfaceEncoder implements AsyncProcessor {
private static final int DEFAULT_I_FRAME_INTERVAL = 10; // seconds
private static final int REPEAT_FRAME_DELAY_US = 100_000; // repeat after 100ms
private static final String KEY_MAX_FPS_TO_ENCODER = "max-fps-to-encoder";
// Keep the values in descending order
private static final int[] MAX_SIZE_FALLBACK = {2560, 1920, 1600, 1280, 1024, 800};
private static final int MAX_CONSECUTIVE_ERRORS = 3;
private final SurfaceCapture capture;
private final Streamer streamer;
private final String encoderName;
private final List<CodecOption> codecOptions;
private final int videoBitRate;
private final int maxFps;
private final boolean downsizeOnError;
private boolean firstFrameSent;
private int consecutiveErrors;
private Thread thread;
private final AtomicBoolean stopped = new AtomicBoolean();
public SurfaceEncoder(SurfaceCapture capture, Streamer streamer, int videoBitRate, int maxFps, List<CodecOption> codecOptions, String encoderName,
boolean downsizeOnError) {
this.capture = capture;
this.streamer = streamer;
this.videoBitRate = videoBitRate;
this.maxFps = maxFps;
this.codecOptions = codecOptions;
this.encoderName = encoderName;
this.downsizeOnError = downsizeOnError;
}
private void streamScreen() throws IOException, ConfigurationException {
Codec codec = streamer.getCodec();
MediaCodec mediaCodec = createMediaCodec(codec, encoderName);
MediaFormat format = createFormat(codec.getMimeType(), videoBitRate, maxFps, codecOptions);
capture.init();
try {
streamer.writeVideoHeader(capture.getSize());
boolean alive;
do {
Size size = capture.getSize();
format.setInteger(MediaFormat.KEY_WIDTH, size.getWidth());
format.setInteger(MediaFormat.KEY_HEIGHT, size.getHeight());
Surface surface = null;
try {
mediaCodec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
surface = mediaCodec.createInputSurface();
capture.start(surface);
mediaCodec.start();
alive = encode(mediaCodec, streamer);
// do not call stop() on exception, it would trigger an IllegalStateException
mediaCodec.stop();
} catch (IllegalStateException | IllegalArgumentException e) {
Ln.e("Encoding error: " + e.getClass().getName() + ": " + e.getMessage());
if (!prepareRetry(size)) {
throw e;
}
Ln.i("Retrying...");
alive = true;
} finally {
mediaCodec.reset();
if (surface != null) {
surface.release();
}
}
} while (alive);
} finally {
mediaCodec.release();
capture.release();
}
}
private boolean prepareRetry(Size currentSize) {
if (firstFrameSent) {
++consecutiveErrors;
if (consecutiveErrors >= MAX_CONSECUTIVE_ERRORS) {
// Definitively fail
return false;
}
// Wait a bit to increase the probability that retrying will fix the problem
SystemClock.sleep(50);
return true;
}
if (!downsizeOnError) {
// Must fail immediately
return false;
}
// Downsizing on error is only enabled if an encoding failure occurs before the first frame (downsizing later could be surprising)
int newMaxSize = chooseMaxSizeFallback(currentSize);
if (newMaxSize == 0) {
// Must definitively fail
return false;
}
boolean accepted = capture.setMaxSize(newMaxSize);
if (!accepted) {
return false;
}
// Retry with a smaller size
Ln.i("Retrying with -m" + newMaxSize + "...");
return true;
}
private static int chooseMaxSizeFallback(Size failedSize) {
int currentMaxSize = Math.max(failedSize.getWidth(), failedSize.getHeight());
for (int value : MAX_SIZE_FALLBACK) {
if (value < currentMaxSize) {
// We found a smaller value to reduce the video size
return value;
}
}
// No fallback, fail definitively
return 0;
}
private boolean encode(MediaCodec codec, Streamer streamer) throws IOException {
boolean eof = false;
boolean alive = true;
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
while (!capture.consumeReset() && !eof) {
if (stopped.get()) {
alive = false;
break;
}
int outputBufferId = codec.dequeueOutputBuffer(bufferInfo, -1);
try {
if (capture.consumeReset()) {
// must restart encoding with new size
break;
}
eof = (bufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0;
if (outputBufferId >= 0) {
ByteBuffer codecBuffer = codec.getOutputBuffer(outputBufferId);
boolean isConfig = (bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0;
if (!isConfig) {
// If this is not a config packet, then it contains a frame
firstFrameSent = true;
consecutiveErrors = 0;
}
streamer.writePacket(codecBuffer, bufferInfo);
}
} finally {
if (outputBufferId >= 0) {
codec.releaseOutputBuffer(outputBufferId, false);
}
}
}
if (capture.isClosed()) {
// The capture might have been closed internally (for example if the camera is disconnected)
alive = false;
}
return !eof && alive;
}
private static MediaCodec createMediaCodec(Codec codec, String encoderName) throws IOException, ConfigurationException {
if (encoderName != null) {
Ln.d("Creating encoder by name: '" + encoderName + "'");
try {
return MediaCodec.createByCodecName(encoderName);
} catch (IllegalArgumentException e) {
Ln.e("Video encoder '" + encoderName + "' for " + codec.getName() + " not found\n" + LogUtils.buildVideoEncoderListMessage());
throw new ConfigurationException("Unknown encoder: " + encoderName);
} catch (IOException e) {
Ln.e("Could not create video encoder '" + encoderName + "' for " + codec.getName() + "\n" + LogUtils.buildVideoEncoderListMessage());
throw e;
}
}
try {
MediaCodec mediaCodec = MediaCodec.createEncoderByType(codec.getMimeType());
Ln.d("Using video encoder: '" + mediaCodec.getName() + "'");
return mediaCodec;
} catch (IOException | IllegalArgumentException e) {
Ln.e("Could not create default video encoder for " + codec.getName() + "\n" + LogUtils.buildVideoEncoderListMessage());
throw e;
}
}
private static MediaFormat createFormat(String videoMimeType, int bitRate, int maxFps, List<CodecOption> codecOptions) {
MediaFormat format = new MediaFormat();
format.setString(MediaFormat.KEY_MIME, videoMimeType);
format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
// must be present to configure the encoder, but does not impact the actual frame rate, which is variable
format.setInteger(MediaFormat.KEY_FRAME_RATE, 60);
format.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, DEFAULT_I_FRAME_INTERVAL);
// display the very first frame, and recover from bad quality when no new frames
format.setLong(MediaFormat.KEY_REPEAT_PREVIOUS_FRAME_AFTER, REPEAT_FRAME_DELAY_US); // µs
if (maxFps > 0) {
// The key existed privately before Android 10:
// <https://android.googlesource.com/platform/frameworks/base/+/625f0aad9f7a259b6881006ad8710adce57d1384%5E%21/>
// <https://github.com/Genymobile/scrcpy/issues/488#issuecomment-567321437>
format.setFloat(KEY_MAX_FPS_TO_ENCODER, maxFps);
}
if (codecOptions != null) {
for (CodecOption option : codecOptions) {
String key = option.getKey();
Object value = option.getValue();
CodecUtils.setCodecOption(format, key, value);
Ln.d("Video codec option set: " + key + " (" + value.getClass().getSimpleName() + ") = " + value);
}
}
return format;
}
@Override
public void start(TerminationListener listener) {
thread = new Thread(() -> {
// Some devices (Meizu) deadlock if the video encoding thread has no Looper
// <https://github.com/Genymobile/scrcpy/issues/4143>
Looper.prepare();
try {
streamScreen();
} catch (ConfigurationException e) {
// Do not print stack trace, a user-friendly error-message has already been logged
} catch (IOException e) {
// Broken pipe is expected on close, because the socket is closed by the client
if (!IO.isBrokenPipe(e)) {
Ln.e("Video encoding error", e);
}
} finally {
Ln.d("Screen streaming stopped");
listener.onTerminated(true);
}
}, "video");
thread.start();
}
@Override
public void stop() {
if (thread != null) {
stopped.set(true);
}
}
@Override
public void join() throws InterruptedException {
if (thread != null) {
thread.join();
}
}
}
| Genymobile/scrcpy | server/src/main/java/com/genymobile/scrcpy/SurfaceEncoder.java |
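For a concrete sense of the downsizing ladder: chooseMaxSizeFallback returns the first MAX_SIZE_FALLBACK entry strictly below max(width, height), so a capture that fails at 1920x1080 before the first frame retries with a max size of 1600, then 1280, 1024, and 800, and returns 0 (definitive failure) once nothing smaller remains.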
604 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.common;
/**
* Indicates a class that represents a version id of some kind
*/
public interface VersionId<T extends VersionId<T>> extends Comparable<T> {
/**
* The version id this object represents
*/
int id();
default boolean after(T version) {
return version.id() < id();
}
default boolean onOrAfter(T version) {
return version.id() <= id();
}
default boolean before(T version) {
return version.id() > id();
}
default boolean onOrBefore(T version) {
return version.id() >= id();
}
default boolean between(T lowerInclusive, T upperExclusive) {
if (upperExclusive.onOrBefore(lowerInclusive)) throw new IllegalArgumentException();
return onOrAfter(lowerInclusive) && before(upperExclusive);
}
@Override
default int compareTo(T o) {
return Integer.compare(id(), o.id());
}
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/common/VersionId.java |
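A quick illustration of the comparison helpers (the record below is a hypothetical implementor, not part of Elasticsearch; a record component named id satisfies the interface's id() method):
record MyVersion(int id) implements VersionId<MyVersion> {}

new MyVersion(2).after(new MyVersion(1)); // true
new MyVersion(2).between(new MyVersion(1), new MyVersion(3)); // true: 1 <= 2 < 3
new MyVersion(3).between(new MyVersion(1), new MyVersion(3)); // false: the upper bound is exclusive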
605 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.support;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Color {
private final int red;
private final int green;
private final int blue;
private double alpha;
private static final Converter[] CONVERTERS = {
new RgbConverter(),
new RgbPctConverter(),
new RgbaConverter(),
new RgbaPctConverter(),
new HexConverter(),
new Hex3Converter(),
new HslConverter(),
new HslaConverter(),
new NamedColorConverter(),
};
/*
* Guesses what format the input color is in.
*/
public static Color fromString(String value) {
for (Converter converter : CONVERTERS) {
Color color = converter.getColor(value);
if (color != null) {
return color;
}
}
throw new IllegalArgumentException(
String.format("Did not know how to convert %s into color", value));
}
public Color(int red, int green, int blue, double alpha) {
this.red = red;
this.green = green;
this.blue = blue;
this.alpha = alpha;
}
public void setOpacity(double alpha) {
this.alpha = alpha;
}
public String asRgb() {
return String.format("rgb(%d, %d, %d)", red, green, blue);
}
public String asRgba() {
String alphaString;
if (alpha == 1) {
alphaString = "1";
} else if (alpha == 0) {
alphaString = "0";
} else {
alphaString = Double.toString(alpha);
}
return String.format("rgba(%d, %d, %d, %s)", red, green, blue, alphaString);
}
public String asHex() {
return String.format("#%02x%02x%02x", red, green, blue);
}
/**
* @return a java.awt.Color class instance
*/
public java.awt.Color getColor() {
return new java.awt.Color(red, green, blue, (int) (alpha * 255));
}
@Override
public String toString() {
return "Color: " + asRgba();
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (!(other instanceof Color)) {
return false;
}
return asRgba().equals(((Color) other).asRgba());
}
@Override
public int hashCode() {
int result;
long temp;
result = red;
result = 31 * result + green;
result = 31 * result + blue;
temp = alpha != +0.0d ? Double.doubleToLongBits(alpha) : 0L;
result = 31 * result + Long.hashCode(temp);
return result;
}
private abstract static class Converter {
public Color getColor(String value) {
Matcher matcher = getPattern().matcher(value);
if (matcher.find()) {
double a = 1.0;
if (matcher.groupCount() == 4) {
a = Double.parseDouble(matcher.group(4));
}
return createColor(matcher, a);
}
return null;
}
protected Color createColor(Matcher matcher, double a) {
return new Color(
fromMatchGroup(matcher, 1), fromMatchGroup(matcher, 2), fromMatchGroup(matcher, 3), a);
}
protected short fromMatchGroup(Matcher matcher, int index) {
return Short.parseShort(matcher.group(index), 10);
}
protected abstract Pattern getPattern();
}
private static class RgbConverter extends Converter {
private static final Pattern RGB_PATTERN =
Pattern.compile(
"^\\s*rgb\\(\\s*"
+ "(\\d{1,3})\\s*,\\s*"
+ "(\\d{1,3})\\s*,\\s*"
+ "(\\d{1,3})\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return RGB_PATTERN;
}
}
private static class RgbPctConverter extends Converter {
private static final Pattern RGBPCT_PATTERN =
Pattern.compile(
"^\\s*rgb\\(\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*,\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*,\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return RGBPCT_PATTERN;
}
@Override
protected short fromMatchGroup(Matcher matcher, int index) {
double n = Double.parseDouble(matcher.group(index)) / 100 * 255;
return (short) n;
}
}
private static class RgbaConverter extends RgbConverter {
private static final Pattern RGBA_PATTERN =
Pattern.compile(
"^\\s*rgba\\(\\s*"
+ "(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*,\\s*"
+ "(\\d{1,3})\\s*,\\s*(0|1|0\\.\\d+)\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return RGBA_PATTERN;
}
}
private static class RgbaPctConverter extends RgbPctConverter {
private static final Pattern RGBAPCT_PATTERN =
Pattern.compile(
"^\\s*rgba\\(\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*,\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*,\\s*"
+ "(\\d{1,3}|\\d{1,2}\\.\\d+)%\\s*,\\s*"
+ "(0|1|0\\.\\d+)\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return RGBAPCT_PATTERN;
}
}
private static class HexConverter extends Converter {
private static final Pattern HEX_PATTERN =
Pattern.compile("#(\\p{XDigit}{2})(\\p{XDigit}{2})(\\p{XDigit}{2})");
@Override
protected Pattern getPattern() {
return HEX_PATTERN;
}
@Override
protected short fromMatchGroup(Matcher matcher, int index) {
return Short.parseShort(matcher.group(index), 16);
}
}
private static class Hex3Converter extends Converter {
private static final Pattern HEX3_PATTERN =
Pattern.compile("#(\\p{XDigit}{1})(\\p{XDigit}{1})(\\p{XDigit}{1})");
@Override
protected Pattern getPattern() {
return HEX3_PATTERN;
}
@Override
protected short fromMatchGroup(Matcher matcher, int index) {
return Short.parseShort(matcher.group(index) + matcher.group(index), 16);
}
}
private static class HslConverter extends Converter {
private static final Pattern HSL_PATTERN =
Pattern.compile(
"^\\s*hsl\\(\\s*"
+ "(\\d{1,3})\\s*,\\s*"
+ "(\\d{1,3})\\%\\s*,\\s*"
+ "(\\d{1,3})\\%\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return HSL_PATTERN;
}
@Override
protected Color createColor(Matcher matcher, double a) {
double h = Double.parseDouble(matcher.group(1)) / 360;
double s = Double.parseDouble(matcher.group(2)) / 100;
double l = Double.parseDouble(matcher.group(3)) / 100;
double r, g, b;
if (s == 0) {
r = l;
g = r;
b = r;
} else {
double luminocity2 = (l < 0.5) ? l * (1 + s) : l + s - l * s;
double luminocity1 = 2 * l - luminocity2;
r = hueToRgb(luminocity1, luminocity2, h + 1.0 / 3.0);
g = hueToRgb(luminocity1, luminocity2, h);
b = hueToRgb(luminocity1, luminocity2, h - 1.0 / 3.0);
}
return new Color(
(short) Math.round(r * 255), (short) Math.round(g * 255), (short) Math.round(b * 255), a);
}
private double hueToRgb(double luminocity1, double luminocity2, double hue) {
if (hue < 0.0) hue += 1;
if (hue > 1.0) hue -= 1;
if (hue < 1.0 / 6.0) return (luminocity1 + (luminocity2 - luminocity1) * 6.0 * hue);
if (hue < 1.0 / 2.0) return luminocity2;
if (hue < 2.0 / 3.0)
return (luminocity1 + (luminocity2 - luminocity1) * ((2.0 / 3.0) - hue) * 6.0);
return luminocity1;
}
}
private static class HslaConverter extends HslConverter {
private static final Pattern HSLA_PATTERN =
Pattern.compile(
"^\\s*hsla\\(\\s*"
+ "(\\d{1,3})\\s*,\\s*"
+ "(\\d{1,3})\\%\\s*,\\s*"
+ "(\\d{1,3})\\%\\s*,\\s*"
+ "(0|1|0\\.\\d+)\\s*\\)\\s*$");
@Override
protected Pattern getPattern() {
return HSLA_PATTERN;
}
}
private static class NamedColorConverter extends Converter {
@Override
public Color getColor(String value) {
return Colors.valueOf(value.toUpperCase()).getColorValue();
}
@Override
public Pattern getPattern() {
throw new UnsupportedOperationException("getPattern is unsupported");
}
}
}
| SeleniumHQ/selenium | java/src/org/openqa/selenium/support/Color.java |
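A short usage sketch of the converters above, based only on the code shown:
Color c = Color.fromString("#ff8000"); // handled by HexConverter
System.out.println(c.asRgb()); // rgb(255, 128, 0)
System.out.println(c.asRgba()); // rgba(255, 128, 0, 1) - alpha defaults to 1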
608 | package com.thealgorithms.backtracking;
import java.util.ArrayList;
import java.util.List;
/**
 * Problem statement: Given an N x N chess board, return all arrangements in
 * which N queens can be placed on the board such that no two queens attack each
 * other. Ex. N = 6. Solution: there are 4 possible ways. Arrangement: 1 ".Q....",
* "...Q..", ".....Q", "Q.....", "..Q...", "....Q."
*
* Arrangement: 2 "..Q...", ".....Q", ".Q....", "....Q.", "Q.....", "...Q.."
*
* Arrangement: 3 "...Q..", "Q.....", "....Q.", ".Q....", ".....Q", "..Q..."
*
* Arrangement: 4 "....Q.", "..Q...", "Q.....", ".....Q", "...Q..", ".Q...."
*
* Solution: Brute Force approach:
*
 * Generate all possible arrangements to place N queens on the N*N board. Check each
 * board to see whether the queens are placed safely. If so, include the arrangement
 * in the solution set. Otherwise, ignore it.
*
 * Optimized solution: this can be solved with backtracking in the steps below.
 *
 * Start with the first column and place a queen on the first row. Try placing a
 * queen in a row of the second column. If placing the second queen in the second
 * column attacks any of the previous queens, change the row in the second column;
 * otherwise move to the next column and try to place the next queen. If there is
 * no row where a queen can be placed without attacking previous queens, go back
 * to the previous column and change the row of the previous queen. Keep doing this
 * until the last queen is placed safely. If there is no such way, return an empty
 * list as the solution.
*/
public final class NQueens {
private NQueens() {
}
public static void main(String[] args) {
placeQueens(1);
placeQueens(2);
placeQueens(3);
placeQueens(4);
placeQueens(5);
placeQueens(6);
}
public static void placeQueens(final int queens) {
List<List<String>> arrangements = new ArrayList<List<String>>();
getSolution(queens, arrangements, new int[queens], 0);
if (arrangements.isEmpty()) {
System.out.println("There is no way to place " + queens + " queens on board of size " + queens + "x" + queens);
} else {
System.out.println("Arrangement for placing " + queens + " queens");
}
for (List<String> arrangement : arrangements) {
arrangement.forEach(System.out::println);
System.out.println();
}
}
/**
* This is backtracking function which tries to place queen recursively
*
* @param boardSize: size of chess board
* @param solutions: this holds all possible arrangements
* @param columns: columns[i] = rowId where queen is placed in ith column.
* @param columnIndex: This is the column in which queen is being placed
*/
private static void getSolution(int boardSize, List<List<String>> solutions, int[] columns, int columnIndex) {
if (columnIndex == boardSize) {
// this means that all queens have been placed
List<String> sol = new ArrayList<String>();
for (int i = 0; i < boardSize; i++) {
StringBuilder sb = new StringBuilder();
for (int j = 0; j < boardSize; j++) {
sb.append(j == columns[i] ? "Q" : ".");
}
sol.add(sb.toString());
}
solutions.add(sol);
return;
}
// This loop tries to place queen in a row one by one
for (int rowIndex = 0; rowIndex < boardSize; rowIndex++) {
columns[columnIndex] = rowIndex;
if (isPlacedCorrectly(columns, rowIndex, columnIndex)) {
// If queen is placed successfully at rowIndex in column=columnIndex then try
// placing queen in next column
getSolution(boardSize, solutions, columns, columnIndex + 1);
}
}
}
/**
 * This function checks whether a queen can be placed safely at row = rowIndex
 * in column = columnIndex.
 *
 * @param columns: columns[i] = row in which the queen of the ith column is placed
 * @param rowIndex: row in which the queen has to be placed
 * @param columnIndex: column in which the queen is being placed
 * @return true if the queen can be placed safely, false otherwise
 */
private static boolean isPlacedCorrectly(int[] columns, int rowIndex, int columnIndex) {
for (int i = 0; i < columnIndex; i++) {
int diff = Math.abs(columns[i] - rowIndex);
if (diff == 0 || columnIndex - i == diff) {
return false;
}
}
return true;
}
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/backtracking/NQueens.java |
609 | /*
* Copyright 2002-2024 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.util;
import java.net.IDN;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.IntPredicate;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* Implementation of the URL parser from the Living URL standard.
*
* <p>All comments in this class refer to parts of the
* <a href="https://url.spec.whatwg.org/#url-parsing">parsing algorithm</a>.
* This implementation differs from the one defined in the specification in
* these areas:
* <ul>
* <li>Support for URI templates has been added, through the
* {@link State#URL_TEMPLATE} state</li>
 * <li>Consequently, the {@linkplain UrlRecord#port() URL port} has been
 * changed from an integer to a string.</li>
* <li>To ensure that trailing slashes are significant, this implementation
* prepends a '/' to each segment.</li>
* </ul>
* All of these modifications have been indicated through comments that start
* with {@code EXTRA}.
*
* @author Arjen Poutsma
* @since 6.2
* @see <a href="https://url.spec.whatwg.org/#url-parsing">URL parsing</a>
*/
final class UrlParser {
private static final int EOF = -1;
private static final int MAX_PORT = 65535;
private static final Log logger = LogFactory.getLog(UrlParser.class);
private final StringBuilder input;
@Nullable
private final UrlRecord base;
@Nullable
private Charset encoding;
@Nullable
private final Consumer<String> validationErrorHandler;
private int pointer;
private final StringBuilder buffer;
@Nullable
private State state;
@Nullable
private State previousState;
@Nullable
private State stateOverride;
private boolean atSignSeen;
private boolean passwordTokenSeen;
private boolean insideBrackets;
private boolean stopMainLoop = false;
private UrlParser(String input, @Nullable UrlRecord base, @Nullable Charset encoding, @Nullable Consumer<String> validationErrorHandler) {
this.input = new StringBuilder(input);
this.base = base;
this.encoding = encoding;
this.validationErrorHandler = validationErrorHandler;
this.buffer = new StringBuilder(this.input.length() / 2);
}
/**
* Parse the given input into a URL record.
* @param input the scalar value string
* @param base the optional base URL to resolve relative URLs against. If
* {@code null}, relative URLs cannot be parsed.
* @param encoding the optional encoding to use. If {@code null}, no
* encoding is performed.
* @param validationErrorHandler optional consumer for non-fatal URL
* validation messages
* @return a URL record, as defined in the
* <a href="https://url.spec.whatwg.org/#concept-url">living URL
* specification</a>
* @throws InvalidUrlException if the {@code input} does not contain a
* parsable URL
*/
public static UrlRecord parse(String input, @Nullable UrlRecord base,
@Nullable Charset encoding, @Nullable Consumer<String> validationErrorHandler)
throws InvalidUrlException {
Assert.notNull(input, "Input must not be null");
UrlParser parser = new UrlParser(input, base, encoding, validationErrorHandler);
return parser.basicUrlParser(null, null);
}
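    // A minimal usage sketch, assuming a caller inside this package (the class
    // is package-private, so external code reaches it indirectly). The URL
    // value and the error handler below are hypothetical.
    static UrlRecord parseExample() {
        return parse("https://example.com:8080/a%20b?q=1#frag",
                null, StandardCharsets.UTF_8, msg -> logger.trace(msg));
    }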
/**
* The basic URL parser takes a scalar value string input, with an optional
* null or base URL base (default null), an optional encoding
* {@code encoding}
* (default UTF-8), an optional URL {@code url}, and an optional state
* override {@code state override}.
*/
private UrlRecord basicUrlParser(@Nullable UrlRecord url, @Nullable State stateOverride) {
// If url is not given:
if (url == null) {
// Set url to a new URL.
url = new UrlRecord();
sanitizeInput(true);
}
else {
sanitizeInput(false);
}
// Let state be state override if given, or scheme start state otherwise.
this.state = stateOverride != null ? stateOverride : State.SCHEME_START;
this.stateOverride = stateOverride;
// Keep running the following state machine by switching on state.
// If after a run pointer points to the EOF code point, go to the next step.
// Otherwise, increase pointer by 1 and continue with the state machine.
while (!this.stopMainLoop && this.pointer <= this.input.length()) {
int c;
if (this.pointer < this.input.length()) {
c = this.input.codePointAt(this.pointer);
}
else {
c = EOF;
}
if (logger.isTraceEnabled()) {
String cStr = c != EOF ? Character.toString(c) : "EOF";
logger.trace("current: " + cStr + " ptr: " + this.pointer + " Buffer: " + this.buffer + " State: " + this.state);
}
this.state.handle(c, url, this);
this.pointer++;
}
return url;
}
void sanitizeInput(boolean removeC0ControlOrSpace) {
boolean strip = true;
for (int i = 0; i < this.input.length(); i++) {
int c = this.input.codePointAt(i);
boolean isSpaceOrC0 = c == ' ' || isC0Control(c);
boolean isTabOrNL = c == '\t' || isNewline(c);
if ((strip && isSpaceOrC0) || isTabOrNL) {
if (validate()) {
// If input contains any leading (or trailing) C0 control or space, invalid-URL-unit validation error.
// If input contains any ASCII tab or newline, invalid-URL-unit validation error.
validationError("Code point \"" + c + "\" is not a URL unit.");
}
// Remove any leading C0 control or space from input.
if (removeC0ControlOrSpace && isSpaceOrC0) {
this.input.deleteCharAt(i);
}
else if (isTabOrNL) {
// Remove all ASCII tab or newline from input.
this.input.deleteCharAt(i);
}
i--;
}
else {
strip = false;
}
}
if (removeC0ControlOrSpace) {
for (int i = this.input.length() - 1; i >= 0; i--) {
int c = this.input.codePointAt(i);
if (c == ' ' || isC0Control(c)) {
if (validate()) {
// If input contains any (leading or) trailing C0 control or space, invalid-URL-unit validation error.
validationError("Code point \"" + c + "\" is not a URL unit.");
}
// Remove any trailing C0 control or space from input.
this.input.deleteCharAt(i);
}
else {
break;
}
}
}
}
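    // Sketch of sanitizeInput behavior under stated assumptions: with
    // removeC0ControlOrSpace set to true, leading and trailing spaces and C0
    // controls are stripped, and ASCII tabs/newlines are removed everywhere.
    // The input value is hypothetical.
    private static String sanitizeInputExample() {
        UrlParser p = new UrlParser("  ht\ntp://x  ", null, null, null);
        p.sanitizeInput(true);
        return p.input.toString(); // yields "http://x"
    }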
private void setState(State newState) {
if (logger.isTraceEnabled()) {
String c;
if (this.pointer < this.input.length()) {
c = Character.toString(this.input.codePointAt(this.pointer));
}
else {
c = "EOF";
}
logger.trace("Changing state from " + this.state + " to " + newState + " (cur: " + c + " prev: " + this.previousState + ")");
}
// EXTRA: we keep the previous state, to ensure that the parser can escape from malformed URI templates
this.previousState = this.state;
this.state = newState;
}
private static LinkedList<String> strictSplit(String input, int delimiter) {
// Let position be a position variable for input, initially pointing at the start of input.
int position = 0;
// Let tokens be a list of strings, initially empty.
LinkedList<String> tokens = new LinkedList<>();
// Let token be the result of collecting a sequence of code points that are not equal to delimiter from input, given position.
int delIdx = input.indexOf(delimiter, position);
String token = (delIdx != EOF) ? input.substring(position, delIdx) : input.substring(position);
position = delIdx;
// Append token to tokens.
tokens.add(token);
// While position is not past the end of input:
while (position != EOF) {
// Assert: the code point at position within input is delimiter.
Assert.state(input.codePointAt(position) == delimiter, "Codepoint is not a delimiter");
// Advance position by 1.
position++;
delIdx = input.indexOf(delimiter, position);
// Let token be the result of collecting a sequence of code points that are not equal to delimiter from input, given position.
token = (delIdx != EOF) ? input.substring(position, delIdx) : input.substring(position);
position = delIdx;
// Append token to tokens.
tokens.add(token);
}
return tokens;
}
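    // Illustrative check of the strict-split semantics above (hypothetical
    // helper, not in the original source): empty tokens between consecutive
    // delimiters are preserved, so "a..b" split on '.' yields ["a", "", "b"].
    private static List<String> strictSplitExample() {
        List<String> tokens = strictSplit("a..b", '.');
        Assert.state(tokens.equals(Arrays.asList("a", "", "b")), "Unexpected tokens");
        return tokens;
    }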
private static String domainToAscii(String domain, boolean beStrict) {
// If beStrict is false, domain is an ASCII string, and strictly splitting domain on U+002E (.) does not produce any item that starts with an ASCII case-insensitive match for "xn--", this step is equivalent to ASCII lowercasing domain.
if (!beStrict && containsOnlyAscii(domain)) {
int dotIdx = domain.indexOf('.');
boolean onlyLowerCase = true;
while (dotIdx != -1) {
if (domain.length() - dotIdx > 4) {
// ASCII case-insensitive match for "xn--"
int ch0 = domain.codePointAt(dotIdx + 1);
int ch1 = domain.codePointAt(dotIdx + 2);
int ch2 = domain.codePointAt(dotIdx + 3);
int ch3 = domain.codePointAt(dotIdx + 4);
if ((ch0 == 'x' || ch0 == 'X') &&
(ch1 == 'n' || ch1 == 'N') &&
ch2 == '-' && ch3 == '_') {
onlyLowerCase = false;
break;
}
}
dotIdx = domain.indexOf('.', dotIdx + 1);
}
if (onlyLowerCase) {
return domain.toLowerCase(Locale.ENGLISH);
}
}
// Let result be the result of running Unicode ToASCII (https://www.unicode.org/reports/tr46/#ToASCII) with domain_name set to domain, UseSTD3ASCIIRules set to beStrict, CheckHyphens set to false, CheckBidi set to true, CheckJoiners set to true, Transitional_Processing set to false, and VerifyDnsLength set to beStrict. [UTS46]
int flag = 0;
if (beStrict) {
flag |= IDN.USE_STD3_ASCII_RULES;
}
// Implementation note: implementing Unicode ToASCII is beyond the scope of this parser, we use java.net.IDN.toASCII
try {
return IDN.toASCII(domain, flag);
}
catch (IllegalArgumentException ex) {
throw new InvalidUrlException("Could not convert \"" + domain + "\" to ASCII: " + ex.getMessage(), ex);
}
}
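    // Hedged illustration of the fast path above: an all-ASCII domain with no
    // "xn--" label is simply lowercased, while non-ASCII input falls through
    // to java.net.IDN. The second value is the standard IDNA example for
    // "bücher.de", shown only for intuition.
    private static void domainToAsciiExample() {
        Assert.state("example.com".equals(domainToAscii("EXAMPLE.com", false)), "lowercasing");
        Assert.state("xn--bcher-kva.de".equals(domainToAscii("bücher.de", false)), "IDN encoding");
    }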
private boolean validate() {
return this.validationErrorHandler != null;
}
private void validationError(@Nullable String additionalInfo) {
if (this.validationErrorHandler != null) {
StringBuilder message = new StringBuilder("URL validation error for URL [");
message.append(this.input);
message.append("]@");
message.append(this.pointer);
if (additionalInfo != null) {
message.append(". ");
message.append(additionalInfo);
}
this.validationErrorHandler.accept(message.toString());
}
}
private void failure(@Nullable String additionalInfo) {
StringBuilder message = new StringBuilder("URL parsing failure for URL [");
message.append(this.input);
message.append("] @ ");
message.append(this.pointer);
if (additionalInfo != null) {
message.append(". ");
message.append(additionalInfo);
}
throw new InvalidUrlException(message.toString());
}
/**
* The C0 control percent-encode set are the C0 controls and all code points greater than U+007E (~).
*/
private static boolean c0ControlPercentEncodeSet(int ch) {
return isC0Control(ch) || Integer.compareUnsigned(ch, '~') > 0;
}
/**
* The fragment percent-encode set is the C0 control percent-encode set and U+0020 SPACE, U+0022 ("), U+003C (<), U+003E (>), and U+0060 (`).
*/
private static boolean fragmentPercentEncodeSet(int ch) {
return c0ControlPercentEncodeSet(ch) || ch == ' ' || ch == '"' || ch == '<' || ch == '>' || ch == '`';
}
/**
* The query percent-encode set is the C0 control percent-encode set and U+0020 SPACE, U+0022 ("), U+0023 (#), U+003C (<), and U+003E (>).
*/
private static boolean queryPercentEncodeSet(int ch) {
return c0ControlPercentEncodeSet(ch) || ch == ' ' || ch == '"' || ch == '#' || ch == '<' || ch == '>';
}
/**
* The special-query percent-encode set is the query percent-encode set and U+0027 (').
*/
private static boolean specialQueryPercentEncodeSet(int ch) {
return queryPercentEncodeSet(ch) || ch == '\'';
}
/**
* The path percent-encode set is the query percent-encode set and U+003F (?), U+0060 (`), U+007B ({), and U+007D (}).
*/
private static boolean pathPercentEncodeSet(int ch) {
return queryPercentEncodeSet(ch) || ch == '?' || ch == '`' || ch == '{' || ch == '}';
}
/**
* The userinfo percent-encode set is the path percent-encode set and U+002F (/), U+003A (:), U+003B (;), U+003D (=), U+0040 (@), U+005B ([) to U+005E (^), inclusive, and U+007C (|).
*/
private static boolean userinfoPercentEncodeSet(int ch) {
return pathPercentEncodeSet(ch) || ch == '/' || ch == ':' || ch == ';' || ch == '=' || ch == '@' ||
(Integer.compareUnsigned(ch, '[') >= 0 && Integer.compareUnsigned(ch, '^') <= 0) || ch == '|';
}
private static boolean isC0Control(int ch) {
return ch >= 0 && ch <= 0x1F;
}
private static boolean isNewline(int ch) {
return ch == '\r' || ch == '\n';
}
private static boolean isAsciiAlpha(int ch) {
return (ch >= 'A' && ch <= 'Z') ||
(ch >= 'a' && ch <= 'z');
}
private static boolean containsOnlyAsciiDigits(CharSequence string) {
for (int i=0; i< string.length(); i++ ) {
int ch = codePointAt(string, i);
if (!isAsciiDigit(ch)) {
return false;
}
}
return true;
}
private static boolean containsOnlyAscii(String string) {
for (int i = 0; i < string.length(); i++) {
int ch = string.codePointAt(i);
if (!isAsciiCodePoint(ch)) {
return false;
}
}
return true;
}
private static boolean isAsciiCodePoint(int ch) {
// An ASCII code point is a code point in the range U+0000 NULL to U+007F DELETE, inclusive.
return Integer.compareUnsigned(ch, 0) >= 0 && Integer.compareUnsigned(ch, 127) <= 0;
}
private static boolean isAsciiDigit(int ch) {
return (ch >= '0' && ch <= '9');
}
private static boolean isAsciiAlphaNumeric(int ch) {
return isAsciiAlpha(ch) || isAsciiDigit(ch);
}
private static boolean isAsciiHexDigit(int ch) {
return isAsciiDigit(ch) ||
(ch >= 'A' && ch <= 'F') ||
(ch >= 'a' && ch <= 'f');
}
private static boolean isForbiddenDomain(int ch) {
return isForbiddenHost(ch) || isC0Control(ch) || ch == '%' || ch == 0x7F;
}
private static boolean isForbiddenHost(int ch) {
return ch == 0x00 || ch == '\t' || isNewline(ch) || ch == ' ' || ch == '#' || ch == '/' || ch == ':' ||
ch == '<' || ch == '>' || ch == '?' || ch == '@' || ch == '[' || ch == '\\' || ch == ']' || ch == '^' ||
ch == '|';
}
private static boolean isNonCharacter(int ch) {
return (ch >= 0xFDD0 && ch <= 0xFDEF) || ch == 0xFFFE || ch == 0xFFFF || ch == 0x1FFFE || ch == 0x1FFFF ||
ch == 0x2FFFE || ch == 0x2FFFF || ch == 0x3FFFE || ch == 0x3FFFF || ch == 0x4FFFE || ch == 0x4FFFF ||
ch == 0x5FFFE || ch == 0x5FFFF || ch == 0x6FFFE || ch == 0x6FFFF || ch == 0x7FFFE || ch == 0x7FFFF ||
ch == 0x8FFFE || ch == 0x8FFFF || ch == 0x9FFFE || ch == 0x9FFFF || ch == 0xAFFFE || ch == 0xAFFFF ||
ch == 0xBFFFE || ch == 0xBFFFF || ch == 0xCFFFE || ch == 0xCFFFF || ch == 0xDFFFE || ch == 0xDFFFF ||
ch == 0xEFFFE || ch == 0xEFFFF || ch == 0xFFFFE || ch == 0xFFFFF || ch == 0x10FFFE || ch == 0x10FFFF;
}
private static boolean isUrlCodePoint(int ch) {
return isAsciiAlphaNumeric(ch) ||
ch == '!' || ch == '$' || ch == '&' || ch == '\'' || ch == '(' || ch == ')' || ch == '*' || ch == '+'
|| ch == ',' || ch == '-' || ch == '.' || ch == '/' || ch == ':' || ch == ';' || ch == '=' || ch == '?'
|| ch == '@' || ch == '_' || ch == '~' ||
(ch >= 0x00A0 && ch <= 0x10FFFD && !Character.isSurrogate((char) ch) && !isNonCharacter(ch));
}
private static boolean isSpecialScheme(String scheme) {
return "ftp".equals(scheme) ||
"file".equals(scheme) ||
"http".equals(scheme) ||
"https".equals(scheme) ||
"ws".equals(scheme) ||
"wss".equals(scheme);
}
private static int defaultPort(@Nullable String scheme) {
if (scheme != null) {
return switch (scheme) {
case "ftp" -> 21;
case "http" -> 80;
case "https" -> 443;
case "ws" -> 80;
case "wss" -> 443;
default -> -1;
};
}
else {
return -1;
}
}
private void append(String s) {
this.buffer.append(s);
}
private void append(char ch) {
this.buffer.append(ch);
}
private void append(int ch) {
this.buffer.appendCodePoint(ch);
}
private void prepend(String s) {
this.buffer.insert(0, s);
}
private void emptyBuffer() {
this.buffer.setLength(0);
}
private int remaining(int deltaPos) {
int pos = this.pointer + deltaPos + 1;
if (pos < this.input.length()) {
return this.input.codePointAt(pos);
}
else {
return EOF;
}
}
private static String percentDecode(String input) {
try {
return UriUtils.decode(input, StandardCharsets.UTF_8);
}
catch (IllegalArgumentException ex) {
throw new InvalidUrlException("Could not decode \"" + input + "\": " + ex.getMessage(), ex);
}
}
@Nullable
private String percentEncode(int c, IntPredicate percentEncodeSet) {
if (this.encoding == null) {
return null;
}
else {
return percentEncode(Character.toString(c), percentEncodeSet);
}
}
private String percentEncode(String input, IntPredicate percentEncodeSet) {
if (this.encoding == null) {
return input;
}
else {
byte[] bytes = input.getBytes(this.encoding);
boolean original = true;
for (byte b : bytes) {
if (percentEncodeSet.test(b)) {
original = false;
break;
}
}
if (original) {
return input;
}
StringBuilder output = new StringBuilder();
for (byte b : bytes) {
if (!percentEncodeSet.test(b)) {
output.append((char)b);
}
else {
output.append('%');
char hex1 = Character.toUpperCase(Character.forDigit((b >> 4) & 0xF, 16));
char hex2 = Character.toUpperCase(Character.forDigit(b & 0xF, 16));
output.append(hex1);
output.append(hex2);
}
}
return output.toString();
}
}
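    // Illustrative percent-encoding under an assumed UTF-8 parser instance:
    // a space falls in the fragment percent-encode set and becomes "%20",
    // while plain ASCII letters pass through unchanged.
    private static String percentEncodeExample() {
        UrlParser p = new UrlParser("", null, StandardCharsets.UTF_8, null);
        return p.percentEncode("a b", UrlParser::fragmentPercentEncodeSet); // "a%20b"
    }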
/**
* A single-dot URL path segment is a URL path segment that is "[/]." or an ASCII case-insensitive match for "[/]%2e".
*/
private static boolean isSingleDotPathSegment(StringBuilder b) {
int len = b.length();
switch (len) {
case 1 -> {
int ch0 = b.codePointAt(0);
return ch0 == '.';
}
case 2 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
return ch0 == '/' && ch1 == '.';
}
case 3 -> {
// ASCII case-insensitive match for "%2e".
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
return ch0 == '%' && ch1 == '2' && (ch2 == 'e' || ch2 == 'E');
}
case 4 -> {
// ASCII case-insensitive match for "/%2e".
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
int ch3 = b.codePointAt(3);
return ch0 == '/' && ch1 == '%' && ch2 == '2' && (ch3 == 'e' || ch3 == 'E');
}
default -> {
return false;
}
}
}
/**
 * A double-dot URL path segment is a URL path segment that is "[/].." or an ASCII case-insensitive match for "[/].%2e", "[/]%2e.", or "[/]%2e%2e".
*/
private static boolean isDoubleDotPathSegment(StringBuilder b) {
int len = b.length();
switch (len) {
case 2 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
return ch0 == '.' && ch1 == '.';
}
case 3 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
return ch0 == '/' && ch1 == '.' && ch2 == '.';
}
case 4 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
int ch3 = b.codePointAt(3);
// case-insensitive match for ".%2e" or "%2e."
return (ch0 == '.' && ch1 == '%' && ch2 == '2' && (ch3 == 'e' || ch3 == 'E') ||
(ch0 == '%' && ch1 == '2' && (ch2 == 'e' || ch2 == 'E') && ch3 == '.'));
}
case 5 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
int ch3 = b.codePointAt(3);
int ch4 = b.codePointAt(4);
// case-insensitive match for "/.%2e" or "/%2e."
return ch0 == '/' &&
(ch1 == '.' && ch2 == '%' && ch3 == '2' && (ch4 == 'e' || ch4 == 'E')
|| (ch1 == '%' && ch2 == '2' && (ch3 == 'e' || ch3 == 'E') && ch4 == '.'));
}
case 6 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
int ch3 = b.codePointAt(3);
int ch4 = b.codePointAt(4);
int ch5 = b.codePointAt(5);
// case-insensitive match for "%2e%2e".
return ch0 == '%' && ch1 == '2' && (ch2 == 'e' || ch2 == 'E')
&& ch3 == '%' && ch4 == '2' && (ch5 == 'e' || ch5 == 'E');
}
case 7 -> {
int ch0 = b.codePointAt(0);
int ch1 = b.codePointAt(1);
int ch2 = b.codePointAt(2);
int ch3 = b.codePointAt(3);
int ch4 = b.codePointAt(4);
int ch5 = b.codePointAt(5);
int ch6 = b.codePointAt(6);
// case-insensitive match for "/%2e%2e".
return ch0 == '/' && ch1 == '%' && ch2 == '2' && (ch3 == 'e' || ch3 == 'E')
&& ch4 == '%' && ch5 == '2' && (ch6 == 'e' || ch6 == 'E');
}
default -> {
return false;
}
}
}
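    // Sketch of the EXTRA "[/]" convention used by the two predicates above:
    // because this parser prepends '/' to path segments, both the bare and the
    // slash-prefixed spellings (including their "%2e" forms) must match.
    private static void dotSegmentExample() {
        Assert.state(isSingleDotPathSegment(new StringBuilder("/.")), "slash-prefixed dot");
        Assert.state(isSingleDotPathSegment(new StringBuilder("%2E")), "encoded dot");
        Assert.state(isDoubleDotPathSegment(new StringBuilder("/%2e.")), "mixed double dot");
    }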
/**
* A Windows drive letter is two code points, of which the first is an ASCII alpha and the second is either U+003A (:) or U+007C (|).
*
* A normalized Windows drive letter is a Windows drive letter of which the second code point is U+003A (:).
*/
private static boolean isWindowsDriveLetter(CharSequence input, boolean normalized) {
if (input.length() != 2) {
return false;
}
return isWindowsDriveLetterInternal(input, normalized);
}
/**
* A string starts with a Windows drive letter if all of the following are true:
*
* its length is greater than or equal to 2
* its first two code points are a Windows drive letter
* its length is 2 or its third code point is U+002F (/), U+005C (\), U+003F (?), or U+0023 (#).
*/
private static boolean startsWithWindowsDriveLetter(String input) {
int len = input.length();
if (len < 2) {
return false;
}
if (!isWindowsDriveLetterInternal(input, false)) {
return false;
}
if (len == 2) {
return true;
}
else {
int ch2 = input.codePointAt(2);
return ch2 == '/' || ch2 == '\\' || ch2 == '?' || ch2 == '#';
}
}
private static boolean isWindowsDriveLetterInternal(CharSequence s, boolean normalized) {
int ch0 = codePointAt(s, 0);
if (!isAsciiAlpha(ch0)) {
return false;
}
else {
int ch1 = codePointAt(s, 1);
if (normalized) {
return ch1 == ':';
}
else {
return ch1 == ':' || ch1 == '|';
}
}
}
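    // Illustrative probes for the Windows drive letter rules above
    // (hypothetical helper): "C:" and "c|" are both drive letters, only "C:"
    // is normalized, and "C:/foo" starts with a drive letter.
    private static void windowsDriveLetterExample() {
        Assert.state(isWindowsDriveLetter("C:", false), "drive letter");
        Assert.state(isWindowsDriveLetter("c|", false), "pipe form");
        Assert.state(!isWindowsDriveLetter("c|", true), "pipe form is not normalized");
        Assert.state(startsWithWindowsDriveLetter("C:/foo"), "prefix form");
    }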
private static int codePointAt(CharSequence s, int index) {
if (s instanceof String string) {
return string.codePointAt(index);
}
else if (s instanceof StringBuilder builder) {
return builder.codePointAt(index);
}
else {
throw new IllegalStateException();
}
}
private enum State {
SCHEME_START {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is an ASCII alpha, append c, lowercased, to buffer, and set state to scheme state.
if (isAsciiAlpha(c)) {
p.append(Character.toLowerCase((char) c));
p.setState(SCHEME);
}
// EXTRA: if c is '{', then append c to buffer, set previous state to scheme state, and state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.previousState = SCHEME;
p.state = URL_TEMPLATE;
}
// Otherwise, if state override is not given, set state to no scheme state and decrease pointer by 1.
else if (p.stateOverride == null) {
p.setState(NO_SCHEME);
p.pointer--;
}
// Otherwise, return failure.
else {
p.failure(null);
}
}
},
SCHEME {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is an ASCII alphanumeric, U+002B (+), U+002D (-), or U+002E (.), append c, lowercased, to buffer.
if (isAsciiAlphaNumeric(c) || (c == '+' || c == '-' || c == '.')) {
p.append(Character.toLowerCase((char) c));
}
// EXTRA: if c is '{', then append c to buffer, set state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.setState(URL_TEMPLATE);
}
// Otherwise, if c is U+003A (:), then:
else if (c == ':') {
// If state override is given, then:
if (p.stateOverride != null) {
boolean urlSpecialScheme = url.isSpecial();
String bufferString = p.buffer.toString();
boolean bufferSpecialScheme = isSpecialScheme(bufferString);
// If url’s scheme is a special scheme and buffer is not a special scheme, then return.
if (urlSpecialScheme && !bufferSpecialScheme) {
return;
}
// If url’s scheme is not a special scheme and buffer is a special scheme, then return.
if (!urlSpecialScheme && bufferSpecialScheme) {
return;
}
// If url includes credentials or has a non-null port, and buffer is "file", then return.
if ((url.includesCredentials() || url.port() != null) && "file".equals(bufferString)) {
return;
}
// If url’s scheme is "file" and its host is an empty host, then return.
if ("file".equals(url.scheme()) && (url.host() == null || url.host() == EmptyHost.INSTANCE)) {
return;
}
}
// Set url’s scheme to buffer.
url.scheme = p.buffer.toString();
// If state override is given, then:
if (p.stateOverride != null) {
// If url’s port is url’s scheme’s default port, then set url’s port to null.
if (url.port instanceof IntPort intPort &&
intPort.value() == defaultPort(url.scheme)) {
url.port = null;
// Return.
p.stopMainLoop = true;
return;
}
}
// Set buffer to the empty string.
p.emptyBuffer();
// If url’s scheme is "file", then:
if (url.scheme.equals("file")) {
// If remaining does not start with "//", special-scheme-missing-following-solidus validation error.
if (p.validate() && (p.remaining(0) != '/' || p.remaining(1) != '/')) {
p.validationError("\"file\" scheme not followed by \"//\".");
}
// Set state to file state.
p.setState(FILE);
}
// Otherwise, if url is special, base is non-null, and base’s scheme is url’s scheme:
else if (url.isSpecial() && p.base != null && p.base.scheme().equals(url.scheme)) {
// Assert: base is special (and therefore does not have an opaque path).
Assert.state(!p.base.path().isOpaque(), "Opaque path not expected");
// Set state to special relative or authority state.
p.setState(SPECIAL_RELATIVE_OR_AUTHORITY);
}
// Otherwise, if url is special, set state to special authority slashes state.
else if (url.isSpecial()) {
p.setState(SPECIAL_AUTHORITY_SLASHES);
}
// Otherwise, if remaining starts with an U+002F (/), set state to path or authority state and increase pointer by 1.
else if (p.remaining(0) == '/') {
p.setState(PATH_OR_AUTHORITY);
p.pointer++;
}
// Otherwise, set url’s path to the empty string and set state to opaque path state.
else {
url.path = new PathSegment("");
p.setState(OPAQUE_PATH);
}
}
// Otherwise, if state override is not given, set buffer to the empty string, state to no scheme state, and start over (from the first code point in input).
else if (p.stateOverride == null) {
p.emptyBuffer();
p.setState(NO_SCHEME);
p.pointer = -1;
}
// Otherwise, return failure.
else {
p.failure(null);
}
}
},
NO_SCHEME {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If base is null, or base has an opaque path and c is not U+0023 (#), missing-scheme-non-relative-URL
// validation error, return failure.
if (p.base == null || p.base.path().isOpaque() && c != '#') {
p.failure("The input is missing a scheme, because it does not begin with an ASCII alpha \"" +
(c != EOF ? Character.toString(c) : "") + "\", and no base URL was provided.");
}
// Otherwise, if base has an opaque path and c is U+0023 (#), set url’s scheme to base’s scheme, url’s
// path to base’s path, url’s query to base’s query, url’s fragment to the empty string, and set state to fragment state.
else if (p.base.path().isOpaque() && c == '#') {
url.scheme = p.base.scheme();
url.path = p.base.path();
url.query = p.base.query;
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
// Otherwise, if base’s scheme is not "file", set state to relative state and decrease pointer by 1.
else if (!"file".equals(p.base.scheme())) {
p.setState(RELATIVE);
p.pointer--;
}
// Otherwise, set state to file state and decrease pointer by 1.
else {
p.setState(FILE);
p.pointer--;
}
}
},
SPECIAL_RELATIVE_OR_AUTHORITY {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is U+002F (/) and remaining starts with U+002F (/), then set state to special authority ignore slashes state and increase pointer by 1.
if (c == '/' && p.remaining(0) == '/') {
p.setState(SPECIAL_AUTHORITY_IGNORE_SLASHES);
p.pointer++;
}
// Otherwise, special-scheme-missing-following-solidus validation error, set state to relative state and decrease pointer by 1.
else {
if (p.validate()) {
p.validationError("The input’s scheme is not followed by \"//\".");
}
p.setState(RELATIVE);
p.pointer--;
}
}
},
PATH_OR_AUTHORITY {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is U+002F (/), then set state to authority state.
if (c == '/') {
p.setState(AUTHORITY);
}
// Otherwise, set state to path state, and decrease pointer by 1.
else {
p.setState(PATH);
p.pointer--;
}
}
},
RELATIVE {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// Assert: base’s scheme is not "file".
Assert.state(p.base != null && !"file".equals(p.base.scheme()), "Base scheme not provided or supported");
// Set url’s scheme to base’s scheme.
url.scheme = p.base.scheme;
// If c is U+002F (/), then set state to relative slash state.
if (c == '/') {
// EXTRA : append '/' to let the path segment start with /
p.append('/');
p.setState(RELATIVE_SLASH);
}
// Otherwise, if url is special and c is U+005C (\), invalid-reverse-solidus validation error, set state to relative slash state.
else if (url.isSpecial() && c == '\\') {
if (p.validate()) {
p.validationError("URL uses \\ instead of /.");
}
// EXTRA : append '/' to let the path segment start with /
p.append('/');
p.setState(RELATIVE_SLASH);
}
// Otherwise
else {
// Set url’s username to base’s username, url’s password to base’s password, url’s host to base’s host,
// url’s port to base’s port, url’s path to a clone of base’s path, and url’s query to base’s query.
url.username = (p.base.username != null) ? new StringBuilder(p.base.username) : null;
url.password = (p.base.password != null) ? new StringBuilder(p.base.password) : null;
url.host = p.base.host();
url.port = p.base.port();
url.path = p.base.path().clone();
url.query = p.base.query;
// If c is U+003F (?), then set url’s query to the empty string, and state to query state.
if (c == '?') {
url.query = new StringBuilder();
p.setState(QUERY);
}
// Otherwise, if c is U+0023 (#), set url’s fragment to the empty string and state to fragment state.
else if (c == '#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
// Otherwise, if c is not the EOF code point:
else if (c != EOF) {
// Set url’s query to null.
url.query = null;
// Shorten url’s path.
url.shortenPath();
// Set state to path state and decrease pointer by 1.
p.setState(PATH);
p.pointer--;
}
}
}
},
RELATIVE_SLASH {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If url is special and c is U+002F (/) or U+005C (\), then:
if (url.isSpecial() && (c == '/' || c == '\\')) {
// If c is U+005C (\), invalid-reverse-solidus validation error.
if (p.validate() && c == '\\') {
p.validationError("URL uses \\ instead of /.");
}
// Set state to special authority ignore slashes state.
p.setState(SPECIAL_AUTHORITY_IGNORE_SLASHES);
}
// Otherwise, if c is U+002F (/), then set state to authority state.
else if (c == '/') {
// EXTRA: empty buffer to remove appended slash, since this is not a path
p.emptyBuffer();
p.setState(AUTHORITY);
}
// Otherwise, set url’s username to base’s username, url’s password to base’s password, url’s host
// to base’s host, url’s port to base’s port, state to path state, and then, decrease pointer by 1.
else {
Assert.state(p.base != null, "No base URL available");
url.username = (p.base.username != null) ? new StringBuilder(p.base.username) : null;
url.password = (p.base.password != null) ? new StringBuilder(p.base.password) : null;
url.host = p.base.host();
url.port = p.base.port();
p.setState(PATH);
p.pointer--;
}
}
},
SPECIAL_AUTHORITY_SLASHES {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is U+002F (/) and remaining starts with U+002F (/), then set state to special authority ignore slashes state and increase pointer by 1.
if (c == '/' && p.remaining(0) == '/') {
p.setState(SPECIAL_AUTHORITY_IGNORE_SLASHES);
p.pointer++;
}
// Otherwise, special-scheme-missing-following-solidus validation error, set state to special authority ignore slashes state and decrease pointer by 1.
else {
if (p.validate()) {
p.validationError("Scheme \"" + url.scheme + "\" not followed by \"//\".");
}
p.setState(SPECIAL_AUTHORITY_IGNORE_SLASHES);
p.pointer--;
}
}
},
SPECIAL_AUTHORITY_IGNORE_SLASHES {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is neither U+002F (/) nor U+005C (\), then set state to authority state and decrease pointer by 1.
if (c != '/' && c != '\\') {
p.setState(AUTHORITY);
p.pointer--;
}
// Otherwise, special-scheme-missing-following-solidus validation error.
else {
if (p.validate()) {
p.validationError("Scheme \"" + url.scheme + "\" not followed by \"//\".");
}
}
}
},
AUTHORITY {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is U+0040 (@), then:
if (c == '@') {
// Invalid-credentials validation error.
if (p.validate()) {
p.validationError("Invalid credentials");
}
// If atSignSeen is true, then prepend "%40" to buffer.
if (p.atSignSeen) {
p.prepend("%40");
}
// Set atSignSeen to true.
p.atSignSeen = true;
int bufferLen = p.buffer.length();
// For each codePoint in buffer:
for (int i = 0; i < bufferLen; i++) {
int codePoint = p.buffer.codePointAt(i);
// If codePoint is U+003A (:) and passwordTokenSeen is false, then set passwordTokenSeen to true and continue.
if (codePoint == ':' && !p.passwordTokenSeen) {
p.passwordTokenSeen = true;
continue;
}
// Let encodedCodePoints be the result of running UTF-8 percent-encode codePoint using the userinfo percent-encode set.
String encodedCodePoints = p.percentEncode(codePoint, UrlParser::userinfoPercentEncodeSet);
// If passwordTokenSeen is true, then append encodedCodePoints to url’s password.
if (p.passwordTokenSeen) {
if (encodedCodePoints != null) {
url.appendToPassword(encodedCodePoints);
}
else {
url.appendToPassword(codePoint);
}
}
// Otherwise, append encodedCodePoints to url’s username.
else {
if (encodedCodePoints != null) {
url.appendToUsername(encodedCodePoints);
}
else {
url.appendToUsername(codePoint);
}
}
}
// Set buffer to the empty string.
p.emptyBuffer();
}
// Otherwise, if one of the following is true:
// - c is the EOF code point, U+002F (/), U+003F (?), or U+0023 (#)
// - url is special and c is U+005C (\)
else if ((c == EOF || c == '/' || c == '?' || c == '#') ||
(url.isSpecial() && c == '\\')) {
// If atSignSeen is true and buffer is the empty string, host-missing validation error, return failure.
if (p.atSignSeen && p.buffer.isEmpty()) {
p.failure("Missing host.");
}
// Decrease pointer by buffer’s code point length + 1, set buffer to the empty string, and set state to host state.
p.pointer -= p.buffer.length() + 1;
p.emptyBuffer();
p.setState(HOST);
}
// Otherwise, append c to buffer.
else {
p.append(c);
}
}
},
HOST {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If state override is given and url’s scheme is "file", then decrease pointer by 1 and set state to file host state.
if (p.stateOverride != null && "file".equals(url.scheme())) {
p.pointer--;
p.setState(FILE_HOST);
}
// Otherwise, if c is U+003A (:) and insideBrackets is false, then:
else if (c == ':' && !p.insideBrackets) {
// If buffer is the empty string, host-missing validation error, return failure.
if (p.buffer.isEmpty()) {
p.failure("Missing host.");
}
// If state override is given and state override is hostname state, then return.
if (p.stateOverride == HOST) {
p.stopMainLoop = true;
return;
}
// Let host be the result of host parsing buffer with url is not special.
Host host = Host.parse(p.buffer.toString(), !url.isSpecial(), p);
// Set url’s host to host, buffer to the empty string, and state to port state.
url.host = host;
p.emptyBuffer();
p.setState(PORT);
}
// Otherwise, if one of the following is true:
// - c is the EOF code point, U+002F (/), U+003F (?), or U+0023 (#)
// - url is special and c is U+005C (\)
else if ((c == EOF || c == '/' || c == '?' || c == '#') ||
(url.isSpecial() && c == '\\')) {
// then decrease pointer by 1, and then:
p.pointer--;
// If url is special and buffer is the empty string, host-missing validation error, return failure.
if (url.isSpecial() && p.buffer.isEmpty()) {
p.failure("The input has a special scheme, but does not contain a host.");
}
// Otherwise, if state override is given, buffer is the empty string, and either url includes credentials or url’s port is non-null, return.
else if (p.stateOverride != null && p.buffer.isEmpty() &&
(url.includesCredentials() || url.port() != null )) {
p.stopMainLoop = true;
return;
}
// EXTRA: if buffer is not empty
if (!p.buffer.isEmpty()) {
// Let host be the result of host parsing buffer with url is not special.
Host host = Host.parse(p.buffer.toString(), !url.isSpecial(), p);
// Set url’s host to host, buffer to the empty string, and state to path start state.
url.host = host;
}
else {
url.host = EmptyHost.INSTANCE;
}
p.emptyBuffer();
p.setState(PATH_START);
// If state override is given, then return.
if (p.stateOverride != null) {
p.stopMainLoop = true;
return;
}
}
// Otherwise:
else {
// If c is U+005B ([), then set insideBrackets to true.
if (c == '[') {
p.insideBrackets = true;
}
// If c is U+005D (]), then set insideBrackets to false.
else if (c == ']') {
p.insideBrackets = false;
}
// Append c to buffer.
p.append(c);
}
}
},
PORT {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is an ASCII digit, append c to buffer.
if (isAsciiDigit(c)) {
p.append(c);
}
// EXTRA: if c is '{', then append c to buffer, set state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.setState(URL_TEMPLATE);
}
// Otherwise, if one of the following is true:
// - c is the EOF code point, U+002F (/), U+003F (?), or U+0023 (#)
// - url is special and c is U+005C (\)
// - state override is given
else if (c == EOF || c == '/' || c == '?' || c == '#' ||
(url.isSpecial() && c == '\\') ||
(p.stateOverride != null)) {
// If buffer is not the empty string, then:
if (!p.buffer.isEmpty()) {
// EXTRA: if buffer contains only ASCII digits, then
if (containsOnlyAsciiDigits(p.buffer)) {
try {
// Let port be the mathematical integer value that is represented by buffer in radix-10 using ASCII digits for digits with values 0 through 9.
int port = Integer.parseInt(p.buffer, 0, p.buffer.length(), 10);
// If port is greater than 2^16 − 1, port-out-of-range validation error, return failure.
if (port > MAX_PORT) {
p.failure("Port \"" + port + "\" is out of range");
}
int defaultPort = defaultPort(url.scheme);
// Set url’s port to null, if port is url’s scheme’s default port; otherwise to port.
if (defaultPort != -1 && port == defaultPort) {
url.port = null;
}
else {
url.port = new IntPort(port);
}
}
catch (NumberFormatException ex) {
p.failure(ex.getMessage());
}
}
// EXTRA: otherwise, set url's port to buffer
else {
url.port = new StringPort(p.buffer.toString());
}
// Set buffer to the empty string.
p.emptyBuffer();
}
// If state override is given, then return.
if (p.stateOverride != null) {
p.stopMainLoop = true;
return;
}
// Set state to path start state and decrease pointer by 1.
p.setState(PATH_START);
p.pointer--;
}
// Otherwise, port-invalid validation error, return failure.
else {
p.failure("Invalid port: \"" + Character.toString(c) + "\"");
}
}
},
FILE {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// Set url’s scheme to "file".
url.scheme = "file";
// Set url’s host to the empty string.
url.host = EmptyHost.INSTANCE;
// If c is U+002F (/) or U+005C (\), then:
if (c == '/' || c == '\\') {
// If c is U+005C (\), invalid-reverse-solidus validation error.
if (p.validate() && c == '\\') {
p.validationError("URL uses \\ instead of /.");
}
// Set state to file slash state.
p.setState(FILE_SLASH);
}
// Otherwise, if base is non-null and base’s scheme is "file":
else if (p.base != null && p.base.scheme().equals("file")) {
// Set url’s host to base’s host, url’s path to a clone of base’s path, and url’s query to base’s query.
url.host = p.base.host;
url.path = p.base.path().clone();
url.query = p.base.query;
// If c is U+003F (?), then set url’s query to the empty string and state to query state.
if (c == '?') {
url.query = new StringBuilder();
p.setState(QUERY);
}
// Otherwise, if c is U+0023 (#), set url’s fragment to the empty string and state to fragment state.
else if (c == '#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
// Otherwise, if c is not the EOF code point:
else if (c != EOF) {
// Set url’s query to null.
url.query = null;
// If the code point substring from pointer to the end of input does not start with a Windows drive letter, then shorten url’s path.
String substring = p.input.substring(p.pointer);
if (!startsWithWindowsDriveLetter(substring)) {
url.shortenPath();
}
// Otherwise:
else {
// File-invalid-Windows-drive-letter validation error.
if (p.validate()) {
p.validationError("The input is a relative-URL string that starts with a Windows " +
"drive letter and the base URL’s scheme is \"file\".");
}
// Set url’s path to « ».
url.path = new PathSegments();
}
// Set state to path state and decrease pointer by 1.
p.setState(PATH);
p.pointer--;
}
}
// Otherwise, set state to path state, and decrease pointer by 1.
else {
p.setState(PATH);
p.pointer--;
}
}
},
FILE_SLASH {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is U+002F (/) or U+005C (\), then:
if (c == '/' || c == '\\') {
// If c is U+005C (\), invalid-reverse-solidus validation error.
if (p.validate() && c == '\\') {
p.validationError("URL uses \\ instead of /.");
}
// Set state to file host state.
p.setState(FILE_HOST);
}
// Otherwise:
else {
// If base is non-null and base’s scheme is "file", then:
if (p.base != null && p.base.scheme.equals("file")) {
// Set url’s host to base’s host.
url.host = p.base.host;
// If the code point substring from pointer to the end of input does not start with a Windows drive letter and base’s path[0] is a normalized Windows drive letter, then append base’s path[0] to url’s path.
String substring = p.input.substring(p.pointer);
if (!startsWithWindowsDriveLetter(substring) &&
p.base.path instanceof PathSegments basePath &&
!basePath.isEmpty() &&
isWindowsDriveLetter(basePath.get(0), true)) {
url.path.append(basePath.get(0));
}
}
// Set state to path state, and decrease pointer by 1.
p.setState(PATH);
p.pointer--;
}
}
},
FILE_HOST {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is the EOF code point, U+002F (/), U+005C (\), U+003F (?), or U+0023 (#), then decrease pointer by 1 and then:
if (c == EOF || c == '/' || c == '\\' || c == '?' || c == '#') {
p.pointer--;
// If state override is not given and buffer is a Windows drive letter, file-invalid-Windows-drive-letter-host validation error, set state to path state.
if (p.stateOverride == null && isWindowsDriveLetter(p.buffer, false)) {
p.validationError("A file: URL’s host is a Windows drive letter.");
p.setState(PATH);
}
// Otherwise, if buffer is the empty string, then:
else if (p.buffer.isEmpty()) {
// Set url’s host to the empty string.
url.host = EmptyHost.INSTANCE;
// If state override is given, then return.
if (p.stateOverride != null) {
p.stopMainLoop = true;
return;
}
// Set state to path start state.
p.setState(PATH_START);
}
// Otherwise, run these steps:
else {
// Let host be the result of host parsing buffer with url is not special.
Host host = Host.parse(p.buffer.toString(), !url.isSpecial(), p);
// If host is "localhost", then set host to the empty string.
if (host instanceof Domain domain && domain.domain().equals("localhost")) {
host = EmptyHost.INSTANCE;
}
// Set url’s host to host.
url.host = host;
// If state override is given, then return.
if (p.stateOverride != null) {
p.stopMainLoop = true;
return;
}
// Set buffer to the empty string and state to path start state.
p.emptyBuffer();
p.setState(PATH_START);
}
}
// Otherwise, append c to buffer.
else {
p.append(c);
}
}
},
PATH_START {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If url is special, then:
if (url.isSpecial()) {
// If c is U+005C (\), invalid-reverse-solidus validation error.
if (p.validate() && c == '\\') {
p.validationError("URL uses \"\\\" instead of \"/\"");
}
// Set state to path state.
p.setState(PATH);
// If c is neither U+002F (/) nor U+005C (\), then decrease pointer by 1.
if (c != '/' && c != '\\') {
p.pointer--;
}
else {
p.append('/');
}
}
// Otherwise, if state override is not given and if c is U+003F (?), set url’s query to the empty string and state to query state.
else if (p.stateOverride == null && c == '?') {
url.query = new StringBuilder();
p.setState(QUERY);
}
// Otherwise, if state override is not given and if c is U+0023 (#), set url’s fragment to the empty string and state to fragment state.
else if (p.stateOverride == null && c =='#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
// Otherwise, if c is not the EOF code point:
else if (c != EOF) {
// Set state to path state.
p.setState(PATH);
// If c is not U+002F (/), then decrease pointer by 1.
if (c != '/') {
p.pointer--;
}
// EXTRA: otherwise append '/' to let the path segment start with /
else {
p.append('/');
}
}
// Otherwise, if state override is given and url’s host is null, append the empty string to url’s path.
else if (p.stateOverride != null && url.host() == null) {
url.path().append("");
}
}
},
PATH {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If one of the following is true:
// - c is the EOF code point or U+002F (/)
// - url is special and c is U+005C (\)
// - state override is not given and c is U+003F (?) or U+0023 (#)
// then:
if (c == EOF || c == '/' ||
(url.isSpecial() && c == '\\') ||
(p.stateOverride == null && (c == '?' || c == '#'))) {
// If url is special and c is U+005C (\), invalid-reverse-solidus validation error.
if (p.validate() && url.isSpecial() && c == '\\') {
p.validationError("URL uses \"\\\" instead of \"/\"");
}
// If buffer is a double-dot URL path segment, then:
if (isDoubleDotPathSegment(p.buffer)) {
// Shorten url’s path.
url.shortenPath();
// If neither c is U+002F (/), nor url is special and c is U+005C (\), append the empty string to url’s path.
if (c != '/' && !(url.isSpecial() && c == '\\')) {
url.path.append("");
}
}
else {
boolean singlePathSegment = isSingleDotPathSegment(p.buffer);
// Otherwise, if buffer is a single-dot URL path segment and if neither c is U+002F (/), nor url is special and c is U+005C (\), append the empty string to url’s path.
if (singlePathSegment && c != '/' && !(url.isSpecial() && c == '\\')) {
url.path.append("");
}
// Otherwise, if buffer is not a single-dot URL path segment, then:
else if (!singlePathSegment) {
// If url’s scheme is "file", url’s path is empty, and buffer is a Windows drive letter, then replace the second code point in buffer with U+003A (:).
if ("file".equals(url.scheme) && url.path.isEmpty() && isWindowsDriveLetter(p.buffer, false)) {
p.buffer.setCharAt(1, ':');
}
// Append buffer to url’s path.
url.path.append(p.buffer.toString());
}
}
// Set buffer to the empty string.
p.emptyBuffer();
if (c == '/' || (url.isSpecial() && c == '\\')) {
p.append('/');
}
// If c is U+003F (?), then set url’s query to the empty string and state to query state.
if (c == '?') {
url.query = new StringBuilder();
p.setState(QUERY);
}
// If c is U+0023 (#), then set url’s fragment to the empty string and state to fragment state.
if (c == '#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
}
// EXTRA: Otherwise, if c is '{', then append c to buffer, set state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.setState(URL_TEMPLATE);
}
// Otherwise, run these steps:
else {
if (p.validate()) {
// If c is not a URL code point and not U+0025 (%), invalid-URL-unit validation error.
if (!isUrlCodePoint(c) && c != '%') {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
// If c is U+0025 (%) and remaining does not start with two ASCII hex digits, invalid-URL-unit validation error.
else if (c == '%' &&
(p.pointer >= p.input.length() - 2 ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 1)) ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 2)))) {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
}
// UTF-8 percent-encode c using the path percent-encode set and append the result to buffer.
String encoded = p.percentEncode(c, UrlParser::pathPercentEncodeSet);
if (encoded != null) {
p.append(encoded);
}
else {
p.append(c);
}
}
}
},
OPAQUE_PATH {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// EXTRA: if the previous state is URL template and the buffer is not empty, append buffer to url's path and empty the buffer
if (p.previousState == URL_TEMPLATE && !p.buffer.isEmpty()) {
url.path.append(p.buffer.toString());
p.emptyBuffer();
}
// If c is U+003F (?), then set url’s query to the empty string and state to query state.
if (c == '?') {
url.query = new StringBuilder();
p.setState(QUERY);
}
// Otherwise, if c is U+0023 (#), then set url’s fragment to the empty string and state to fragment state.
else if (c == '#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
// EXTRA: Otherwise, if c is '{', then append c to buffer, set state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.setState(URL_TEMPLATE);
}
// Otherwise:
else {
if (p.validate()) {
// If c is not the EOF code point, not a URL code point, and not U+0025 (%), invalid-URL-unit validation error.
if (c != EOF && !isUrlCodePoint(c) && c != '%') {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
// If c is U+0025 (%) and remaining does not start with two ASCII hex digits, invalid-URL-unit validation error.
else if (c == '%' &&
(p.pointer >= p.input.length() - 2 ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 1)) ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 2)))) {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
}
// If c is not the EOF code point, UTF-8 percent-encode c using the C0 control percent-encode set and append the result to url’s path.
if (c != EOF) {
String encoded = p.percentEncode(c, UrlParser::c0ControlPercentEncodeSet);
if (encoded != null) {
url.path.append(encoded);
}
else {
url.path.append(c);
}
}
}
}
},
QUERY {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If encoding is not UTF-8 and one of the following is true:
// - url is not special
// - url’s scheme is "ws" or "wss"
// then set encoding to UTF-8.
if (p.encoding != null &&
!StandardCharsets.UTF_8.equals(p.encoding) &&
(!url.isSpecial() || "ws".equals(url.scheme) || "wss".equals(url.scheme))) {
p.encoding = StandardCharsets.UTF_8;
}
// If one of the following is true:
// - state override is not given and c is U+0023 (#)
// - c is the EOF code point
if ((p.stateOverride == null && c == '#') || c == EOF) {
// Let queryPercentEncodeSet be the special-query percent-encode set if url is special; otherwise the query percent-encode set.
IntPredicate queryPercentEncodeSet = url.isSpecial() ? UrlParser::specialQueryPercentEncodeSet : UrlParser::queryPercentEncodeSet;
// Percent-encode after encoding, with encoding, buffer, and queryPercentEncodeSet, and append the result to url’s query.
String encoded = p.percentEncode(p.buffer.toString(), queryPercentEncodeSet);
Assert.state(url.query != null, "Url's query should not be null");
url.query.append(encoded);
// Set buffer to the empty string.
p.emptyBuffer();
// If c is U+0023 (#), then set url’s fragment to the empty string and state to fragment state.
if (c == '#') {
url.fragment = new StringBuilder();
p.setState(FRAGMENT);
}
}
// EXTRA: Otherwise, if c is '{', then append c to buffer, set state to url template state.
else if (p.previousState != URL_TEMPLATE && c == '{') {
p.append(c);
p.setState(URL_TEMPLATE);
}
// Otherwise, if c is not the EOF code point:
else if (c != EOF) {
if (p.validate()) {
// If c is not a URL code point and not U+0025 (%), invalid-URL-unit validation error.
if (!isUrlCodePoint(c) && c != '%') {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
// If c is U+0025 (%) and remaining does not start with two ASCII hex digits, invalid-URL-unit validation error.
else if (c == '%' &&
(p.pointer >= p.input.length() - 2 ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 1)) ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 2)))) {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
}
// Append c to buffer.
p.append(c);
}
}
},
FRAGMENT {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
// If c is not the EOF code point, then:
if (c != EOF) {
if (p.validate()) {
// If c is not a URL code point and not U+0025 (%), invalid-URL-unit validation error.
if (!isUrlCodePoint(c) && c != '%') {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
// If c is U+0025 (%) and remaining does not start with two ASCII hex digits, invalid-URL-unit validation error.
else if (c == '%' &&
(p.pointer >= p.input.length() - 2 ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 1)) ||
!isAsciiHexDigit(p.input.codePointAt(p.pointer + 2)))) {
p.validationError("Invalid URL Unit: \"" + (char) c + "\"");
}
}
// UTF-8 percent-encode c using the fragment percent-encode set and append the result to url’s fragment.
String encoded = p.percentEncode(c, UrlParser::fragmentPercentEncodeSet);
Assert.state(url.fragment != null, "Url's fragment should not be null");
if (encoded != null) {
url.fragment.append(encoded);
}
else {
url.fragment.appendCodePoint(c);
}
}
}
},
URL_TEMPLATE {
@Override
public void handle(int c, UrlRecord url, UrlParser p) {
Assert.state(p.previousState != null, "No previous state set");
if (c == '}') {
p.append(c);
p.setState(p.previousState);
}
else if (c == EOF) {
p.pointer -= p.buffer.length() + 1;
p.emptyBuffer();
p.setState(p.previousState);
}
else {
p.append(c);
}
}
};
public abstract void handle(int c, UrlRecord url, UrlParser p);
}
/**
* A URL is a struct that represents a universal identifier. To disambiguate from a valid URL string it can also be
* referred to as a
* <em>URL record</em>.
*/
static final class UrlRecord {
private String scheme = "";
@Nullable
private StringBuilder username = null;
@Nullable
private StringBuilder password = null;
@Nullable
private Host host = null;
@Nullable
private Port port = null;
private Path path = new PathSegments();
@Nullable
private StringBuilder query = null;
@Nullable
private StringBuilder fragment = null;
public UrlRecord() {
}
/**
* A URL is special if its scheme is a special scheme. A URL is not special if its scheme is not a special scheme.
*/
public boolean isSpecial() {
return isSpecialScheme(this.scheme);
}
/**
* A URL includes credentials if its username or password is not the empty string.
*/
public boolean includesCredentials() {
return this.username != null && !this.username.isEmpty() || this.password != null && !this.password.isEmpty();
}
/**
* A URL has an opaque path if its path is a URL path segment.
*/
public boolean hasOpaquePath() {
return path().isOpaque();
}
/**
* The serialization of an origin is the string obtained by applying the following algorithm to the given origin origin:
* If origin is an opaque origin, then return "null".
* Otherwise, let result be origin's scheme.
* Append "://" to result.
* Append origin's host, serialized, to result.
* If origin's port is non-null, append a U+003A COLON character (:), and origin's port, serialized, to result.
* Return result.
*/
public String origin() {
String scheme = scheme();
if (scheme.equals("ftp") || scheme.equals("http") || scheme.equals("https") || scheme.equals("ws") || scheme.equals("wss")) {
StringBuilder builder = new StringBuilder(scheme);
builder.append("://");
builder.append(host());
Port port = port();
if (port != null) {
builder.append(':');
builder.append(port);
}
return builder.toString();
}
else {
return "null";
}
}
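        // Illustrative origin serialization per the steps above (the input URL
        // is hypothetical): a special scheme with a non-default port keeps the
        // port, while anything outside the listed schemes serializes to the
        // opaque origin "null".
        static String originExample() {
            UrlRecord url = UrlParser.parse("http://host:8080/x", null, null, null);
            return url.origin(); // "http://host:8080"
        }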
/**
* A URL’s scheme is an ASCII string that identifies the type of URL and can be used to dispatch a URL for
* further processing after parsing. It is initially the empty string.
*/
public String scheme() {
return this.scheme;
}
/**
* The protocol getter steps are to return this’s URL’s scheme, followed by U+003A (:).
*/
public String protocol() {
return scheme() + ":";
}
/**
* A URL’s username is an ASCII string identifying a username. It is initially the empty string.
*/
public String username() {
if (this.username != null) {
return this.username.toString();
}
else {
return "";
}
}
void appendToUsername(int codePoint) {
if (this.username == null) {
this.username = new StringBuilder(2);
}
this.username.appendCodePoint(codePoint);
}
public void appendToUsername(String s) {
if (this.username == null) {
this.username = new StringBuilder(s);
}
else {
this.username.append(s);
}
}
/**
* A URL’s password is an ASCII string identifying a password. It is initially the empty string.
*/
public String password() {
if (this.password != null) {
return this.password.toString();
}
else {
return "";
}
}
void appendToPassword(int codePoint) {
if (this.password == null) {
this.password = new StringBuilder(2);
}
this.password.appendCodePoint(codePoint);
}
void appendToPassword(String s) {
if (this.password == null) {
this.password = new StringBuilder(s);
}
else {
this.password.append(s);
}
}
/**
* A URL’s host is {@code null} or a {@linkplain Host host}. It is initially {@code null}.
*/
@Nullable
public Host host() {
return this.host;
}
/**
* The host getter steps are:
* Let url be this’s URL.
* If url’s host is null, then return the empty string.
* If url’s port is null, return url’s host, serialized.
* Return url’s host, serialized, followed by U+003A (:) and url’s port, serialized.
*/
public String hostString() {
if (host() == null) {
return "";
}
StringBuilder builder = new StringBuilder(hostname());
Port port = port();
if (port != null) {
builder.append(':');
builder.append(port);
}
return builder.toString();
}
public String hostname() {
Host host = host();
if (host == null) {
return "";
}
else {
return host.toString();
}
}
/**
* A URL’s port is either null, a string representing a 16-bit unsigned integer that identifies a networking
* port, or a string containing a URI template. It is initially {@code null}.
*/
@Nullable
public Port port() {
return this.port;
}
public String portString() {
if (port() == null) {
return "";
}
else {
return port().toString();
}
}
/**
* A URL’s path is a URL {@linkplain Path path}, usually identifying a location. It is initially {@code « »}.
*/
public Path path() {
return this.path;
}
public String pathname() {
return path().name();
}
/**
* To shorten a url’s path:
* <ol>
* <li>Assert: url does not have an opaque path.</li>
* <li>Let path be url’s path.</li>
* <li>If url’s scheme is "file", path’s size is 1, and path[0] is a
* normalized Windows drive letter, then return.</li>
* <li>Remove path’s last item, if any.</li>
* </ol>
*/
public void shortenPath() {
this.path.shorten(this.scheme);
}
/**
* A URL’s query is either {@code null} or an ASCII string. It is initially {@code null}.
*/
@Nullable
public String query() {
if (this.query == null) {
return null;
}
else {
return this.query.toString();
}
}
/**
* The search getter steps are:
* If this’s URL’s query is either null or the empty string, then return the empty string.
* Return U+003F (?), followed by this’s URL’s query.
*/
public String search() {
String query = query();
if (query == null) {
return "";
}
else {
return "?" + query;
}
}
/**
* A URL’s fragment is either {@code null} or an ASCII string that can be used for further processing on the
* resource the URL’s other components identify. It is initially {@code null}.
*/
@Nullable
public String fragment() {
if (this.fragment == null) {
return null;
}
else {
return this.fragment.toString();
}
}
/**
* The hash getter steps are:
* If this’s URL’s fragment is either null or the empty string, then return the empty string.
* Return U+0023 (#), followed by this’s URL’s fragment.
*/
public String hash() {
String fragment = fragment();
if (fragment == null || fragment.isEmpty()) {
return "";
}
else {
return "#" + fragment;
}
}
public String href() {
// Let output be url’s scheme and U+003A (:) concatenated.
StringBuilder output = new StringBuilder(scheme());
output.append(':');
Host host = host();
// If url’s host is non-null:
if (host != null) {
// Append "//" to output.
output.append("//");
// If url includes credentials, then:
if (includesCredentials()) {
// Append url’s username to output.
output.append(username());
String password = password();
// If url’s password is not the empty string, then append U+003A (:), followed by url’s password, to output.
if (!password.isEmpty()) {
output.append(':');
output.append(password);
}
// Append U+0040 (@) to output.
output.append('@');
}
// Append url’s host, serialized, to output.
output.append(hostname());
Port port = port();
// If url’s port is non-null, append U+003A (:) followed by url’s port, serialized, to output.
if (port != null) {
output.append(':');
output.append(port());
}
}
// If url’s host is null, url does not have an opaque path, url’s path’s size is greater than 1, and url’s path[0] is the empty string, then append U+002F (/) followed by U+002E (.) to output.
else if (!hasOpaquePath() &&
path() instanceof PathSegments pathSegments &&
pathSegments.size() > 1 &&
pathSegments.get(0).isEmpty()) {
output.append("/.");
}
// Append the result of URL path serializing url to output.
output.append(pathname());
// If url’s query is non-null, append U+003F (?), followed by url’s query, to output.
String query = query();
if (query != null) {
output.append('?');
output.append(query);
}
// If exclude fragment is false and url’s fragment is non-null, then append U+0023 (#), followed by url’s fragment, to output.
String fragment = fragment();
if (fragment != null) {
output.append('#');
output.append(fragment);
}
// Return output.
return output.toString();
}
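// For example, a record with scheme "https", username "user", password "pw",
// host "example.com", path segments "a" and "b", query "q=1", and fragment "frag"
// serializes to "https://user:pw@example.com/a/b?q=1#frag".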
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || obj.getClass() != this.getClass()) {
return false;
}
UrlRecord that = (UrlRecord) obj;
return Objects.equals(this.scheme(), that.scheme()) &&
Objects.equals(this.username(), that.username()) &&
Objects.equals(this.password(), that.password()) &&
Objects.equals(this.host(), that.host()) &&
Objects.equals(this.port(), that.port()) &&
Objects.equals(this.path(), that.path()) &&
Objects.equals(this.query(), that.query()) &&
Objects.equals(this.fragment(), that.fragment());
}
@Override
public int hashCode() {
return Objects.hash(this.scheme, this.username, this.password, this.host, this.port, this.path, this.query, this.fragment);
}
@Override
public String toString() {
return "UrlRecord[" +
"scheme=" + this.scheme + ", " +
"username=" + this.username + ", " +
"password=" + this.password + ", " +
"host=" + this.host + ", " +
"port=" + this.port + ", " +
"path=" + this.path + ", " +
"query=" + this.query + ", " +
"fragment=" + this.fragment + ']';
}
}
/**
* A host is a domain, an IP address, an opaque host, or an empty host.
* Typically a host serves as a network address, but it is sometimes used as
* opaque identifier in URLs where a network address is not necessary.
*/
sealed interface Host permits Domain, EmptyHost, IpAddressHost, OpaqueHost {
/**
* The host parser takes a scalar value string input with an optional
* boolean isOpaque (default false), and then runs these steps. They return failure or a host.
*/
static Host parse(String input, boolean isOpaque, UrlParser p) {
// If input starts with U+005B ([), then:
if (!input.isEmpty() && input.codePointAt(0) == '[') {
int last = input.length() - 1;
// If input does not end with U+005D (]), IPv6-unclosed validation error, return failure.
if (input.codePointAt(last) != ']') {
throw new InvalidUrlException("IPv6 address is missing the closing \"]\").");
}
// Return the result of IPv6 parsing input with its leading U+005B ([) and trailing U+005D (]) removed.
String ipv6Host = input.substring(1, last);
return new IpAddressHost(Ipv6Address.parse(ipv6Host));
}
// If isOpaque is true, then return the result of opaque-host parsing input.
if (isOpaque) {
return OpaqueHost.parse(input, p);
}
// Assert: input is not the empty string.
Assert.state(!input.isEmpty(), "Input should not be empty");
// Let domain be the result of running UTF-8 decode without BOM on the percent-decoding of input.
String domain = percentDecode(input);
// Let asciiDomain be the result of running domain to ASCII with domain and false.
String asciiDomain = domainToAscii(domain, false);
for (int i = 0; i < asciiDomain.length(); i++) {
int ch = asciiDomain.codePointAt(i);
// If asciiDomain contains a forbidden domain code point, domain-invalid-code-point validation error, return failure.
if (isForbiddenDomain(ch)) {
throw new InvalidUrlException("Invalid character \"" + ch + "\" in domain \"" + input + "\"");
}
}
// If asciiDomain ends in a number, then return the result of IPv4 parsing asciiDomain.
if (endsInNumber(asciiDomain)) {
Ipv4Address address = Ipv4Address.parse(asciiDomain, p);
return new IpAddressHost(address);
}
// Return asciiDomain.
else {
return new Domain(asciiDomain);
}
}
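// For example, parsing "[::1]" yields an IpAddressHost wrapping an IPv6 address,
// "127.0.0.1" yields an IpAddressHost wrapping an IPv4 address (it ends in a number),
// and "example.com" yields a Domain.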
private static boolean endsInNumber(String input) {
// Let parts be the result of strictly splitting input on U+002E (.).
LinkedList<String> parts = strictSplit(input, '.');
if (parts.isEmpty()) {
return false;
}
// If the last item in parts is the empty string, then:
if (parts.getLast().isEmpty()) {
// If parts’s size is 1, then return false.
if (parts.size() == 1) {
return false;
}
// Remove the last item from parts.
parts.removeLast();
}
// Let last be the last item in parts.
String last = parts.getLast();
// If last is non-empty and contains only ASCII digits, then return true.
if (!last.isEmpty() && containsOnlyAsciiDigits(last)) {
return true;
}
// If parsing last as an IPv4 number does not return failure, then return true.
ParseIpv4NumberResult result = Ipv4Address.parseIpv4Number(last);
return result != ParseIpv4NumberFailure.INSTANCE;
}
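// For example, endsInNumber("1.2.3.4") and endsInNumber("foo.0x7f") return true,
// because the last dot-separated part parses as an IPv4 number, while
// endsInNumber("example.com") and endsInNumber("foo.1x") return false.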
}
/**
* A domain is a non-empty ASCII string that identifies a realm within a
* network. [RFC1034].
*/
static final class Domain implements Host {
private final String domain;
Domain(String domain) {
this.domain = domain;
}
public String domain() {
return this.domain;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
else if (o instanceof Domain other) {
return this.domain.equals(other.domain);
}
else {
return false;
}
}
@Override
public int hashCode() {
return this.domain.hashCode();
}
@Override
public String toString() {
return this.domain;
}
}
static final class IpAddressHost implements Host {
private final IpAddress address;
private final String addressString;
IpAddressHost(IpAddress address) {
this.address = address;
if (address instanceof Ipv6Address) {
this.addressString = "[" + address + "]";
}
else {
this.addressString = address.toString();
}
}
public IpAddress address() {
return this.address;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
else if (obj instanceof IpAddressHost other) {
return this.address.equals(other.address);
}
else {
return false;
}
}
@Override
public int hashCode() {
return this.address.hashCode();
}
@Override
public String toString() {
return this.addressString;
}
}
static final class OpaqueHost implements Host {
private final String host;
private OpaqueHost(String host) {
this.host = host;
}
/**
* The opaque-host parser takes a scalar value string input, and then runs these steps. They return failure or
* an opaque host.
*/
public static OpaqueHost parse(String input, UrlParser p) {
for (int i = 0; i < input.length(); i++) {
int ch = input.codePointAt(i);
// If input contains a forbidden host code point, host-invalid-code-point validation error, return failure.
if (isForbiddenHost(ch)) {
throw new InvalidUrlException("An opaque host contains a forbidden host code point.");
}
// If input contains a code point that is not a URL code point and not U+0025 (%), invalid-URL-unit validation error.
if (p.validate() && !isUrlCodePoint(ch) && ch != '%') {
p.validationError("Code point \"" + ch + "\" is not a URL unit.");
}
// If input contains a U+0025 (%) and the two code points following it are not ASCII hex digits, invalid-URL-unit validation error.
if (p.validate() && ch == '%' && (input.length() - i < 3 || !isAsciiHexDigit(input.codePointAt(i + 1)) || !isAsciiHexDigit(input.codePointAt(i + 2)))) {
p.validationError("Code point \"" + ch + "\" is not a URL unit.");
}
}
//Return the result of running UTF-8 percent-encode on input using the C0 control percent-encode set.
String encoded = p.percentEncode(input, UrlParser::c0ControlPercentEncodeSet);
return new OpaqueHost(encoded);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
else if (obj instanceof OpaqueHost other) {
return this.host.equals(other.host);
}
else {
return false;
}
}
@Override
public int hashCode() {
return this.host.hashCode();
}
@Override
public String toString() {
return this.host;
}
}
static final class EmptyHost implements Host {
static final EmptyHost INSTANCE = new EmptyHost();
private EmptyHost() {
}
@Override
public boolean equals(Object obj) {
return obj == this || obj != null && obj.getClass() == this.getClass();
}
@Override
public int hashCode() {
return 1;
}
@Override
public String toString() {
return "";
}
}
sealed interface IpAddress permits Ipv4Address, Ipv6Address {
}
static final class Ipv4Address implements IpAddress {
private final int address;
private final String string;
Ipv4Address(int address) {
this.address = address;
this.string = serialize(address);
}
/**
* The IPv4 serializer takes an IPv4 address {@code address} and then runs these steps. They return an ASCII string.
*/
private static String serialize(int address) {
//Let output be the empty string.
StringBuilder output = new StringBuilder();
//Let n be the value of address.
int n = address;
//For each i in the range 1 to 4, inclusive:
for (int i = 1; i <= 4; i++) {
// Prepend n % 256, serialized, to output.
output.insert(0, Integer.toUnsignedString(Integer.remainderUnsigned(n, 256)));
//If i is not 4, then prepend U+002E (.) to output.
if (i != 4) {
output.insert(0, '.');
}
//Set n to floor(n / 256).
n = Math.floorDiv(n, 256);
}
//Return output.
return output.toString();
}
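// For example, serialize(0x7F000001) walks the four bytes from least to most
// significant and returns "127.0.0.1". The unsigned remainder together with
// floorDiv (an arithmetic shift for negative values) also keeps addresses
// above 2^31, which are stored as negative ints, correct.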
public static Ipv4Address parse(String input, UrlParser p) {
// Let parts be the result of strictly splitting input on U+002E (.).
List<String> parts = strictSplit(input, '.');
int partsSize = parts.size();
// If the last item in parts is the empty string, then:
if (parts.get(partsSize - 1).isEmpty()) {
// IPv4-empty-part validation error.
p.validationError("IPv4 address ends with \".\"");
// If parts’s size is greater than 1, then remove the last item from parts.
if (partsSize > 1) {
parts.remove(partsSize - 1);
partsSize--;
}
}
// If parts’s size is greater than 4, IPv4-too-many-parts validation error, return failure.
if (partsSize > 4) {
throw new InvalidUrlException("IPv4 address does not consist of exactly 4 parts.");
}
// Let numbers be an empty list.
List<Integer> numbers = new ArrayList<>(partsSize);
// For each part of parts:
for (int i = 0; i < partsSize; i++) {
String part = parts.get(i);
// Let result be the result of parsing part.
ParseIpv4NumberResult result = parseIpv4Number(part);
// If result is failure, IPv4-non-numeric-part validation error, return failure.
if (result == ParseIpv4NumberFailure.INSTANCE) {
p.failure("An IPv4 address part is not numeric.");
}
else {
ParseIpv4NumberSuccess success = (ParseIpv4NumberSuccess) result;
if (p.validate() && success.validationError()) {
p.validationError("The IPv4 address contains numbers expressed using hexadecimal or octal digits.");
}
// Append result to numbers.
numbers.add(success.number());
}
}
for (Iterator<Integer> iterator = numbers.iterator(); iterator.hasNext(); ) {
Integer number = iterator.next();
// If any item in numbers is greater than 255, IPv4-out-of-range-part validation error.
if (p.validate() && number > 255) {
p.validationError("An IPv4 address part exceeds 255.");
}
if (iterator.hasNext()) {
// If any but the last item in numbers is greater than 255, then return failure.
if (number > 255) {
throw new InvalidUrlException("An IPv4 address part exceeds 255.");
}
}
else {
// If the last item in numbers is greater than or equal to 256^(5 − numbers’s size), then return failure.
double limit = Math.pow(256, (5 - numbers.size()));
if (number >= limit) {
throw new InvalidUrlException("IPv4 address part " + number + " exceeds " + limit + ".'");
}
}
}
// Let ipv4 be the last item in numbers.
int ipv4 = numbers.get(numbers.size() - 1);
// Remove the last item from numbers.
numbers.remove(numbers.size() - 1);
// Let counter be 0.
int counter = 0;
// For each n of numbers:
for (Integer n : numbers) {
// Increment ipv4 by n × 256^(3 − counter).
int increment = n * (int) Math.pow(256, 3 - counter);
ipv4 += increment;
// Increment counter by 1.
counter++;
}
// Return ipv4.
return new Ipv4Address(ipv4);
}
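// For example, parse("127.0.0.1", p) produces numbers [127, 0, 0, 1] and combines
// them as 1 + 0 * 256 + 0 * 256^2 + 127 * 256^3 = 2130706433 (0x7F000001).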
/**
* The IPv4 number parser takes an ASCII string input and then runs these steps. They return failure or a tuple of a number and a boolean.
*/
private static ParseIpv4NumberResult parseIpv4Number(String input) {
// If input is the empty string, then return failure.
if (input.isEmpty()) {
return ParseIpv4NumberFailure.INSTANCE;
}
// Let validationError be false.
boolean validationError = false;
// Let R be 10.
int r = 10;
int len = input.length();
// If input contains at least two code points and the first two code points are either "0X" or "0x", then:
if (len >= 2) {
int ch0 = input.codePointAt(0);
int ch1 = input.codePointAt(1);
if (ch0 == '0' && (ch1 == 'X' || ch1 == 'x')) {
// Set validationError to true.
validationError = true;
// Remove the first two code points from input.
input = input.substring(2);
// Set R to 16.
r = 16;
}
// Otherwise, if input contains at least two code points and the first code point is U+0030 (0), then:
else if (ch0 == '0') {
// Set validationError to true.
validationError = true;
// Remove the first code point from input.
input = input.substring(1);
// Set R to 8.
r = 8;
}
}
// If input is the empty string, then return (0, true).
if (input.isEmpty()) {
return new ParseIpv4NumberSuccess(0, true);
}
// If input contains a code point that is not a radix-R digit, then return failure.
for (int i = 0; i < input.length(); i++) {
int c = input.codePointAt(i);
int digit = Character.digit(c, r);
if (digit == -1) {
return ParseIpv4NumberFailure.INSTANCE;
}
}
try {
// Let output be the mathematical integer value that is represented by input in radix-R notation, using ASCII hex digits for digits with values 0 through 15.
int output = Integer.parseInt(input, r);
// Return (output, validationError).
return new ParseIpv4NumberSuccess(output, validationError);
}
catch (NumberFormatException ex) {
return ParseIpv4NumberFailure.INSTANCE;
}
}
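// For example, parseIpv4Number("0x7F") strips the "0x" prefix, parses "7F" in
// radix 16, and returns (127, true), where the boolean flags a validation error
// for the non-decimal notation; parseIpv4Number("08") strips the octal "0"
// prefix and fails, because '8' is not a radix-8 digit.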
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
else if (o instanceof Ipv4Address other) {
return this.address == other.address;
}
else {
return false;
}
}
@Override
public int hashCode() {
return this.address;
}
@Override
public String toString() {
return this.string;
}
}
static final class Ipv6Address implements IpAddress {
private final int[] pieces;
private final String string;
private Ipv6Address(int[] pieces) {
Assert.state(pieces.length == 8, "Invalid amount of IPv6 pieces");
this.pieces = pieces;
this.string = serialize(pieces);
}
/**
* The IPv6 parser takes a scalar value string input and then runs these steps. They return failure or an IPv6 address.
*/
public static Ipv6Address parse(String input) {
// Let address be a new IPv6 address whose IPv6 pieces are all 0.
int[] address = new int[8];
// Let pieceIndex be 0.
int pieceIndex = 0;
// Let compress be null.
Integer compress = null;
// Let pointer be a pointer for input.
int pointer = 0;
int inputLength = input.length();
int c = (inputLength > 0) ? input.codePointAt(0) : EOF;
// If c is U+003A (:), then:
if (c == ':') {
// If remaining does not start with U+003A (:), IPv6-invalid-compression validation error, return failure.
if (inputLength > 1 && input.codePointAt(1) != ':') {
throw new InvalidUrlException("IPv6 address begins with improper compression.");
}
// Increase pointer by 2.
pointer += 2;
// Increase pieceIndex by 1 and then set compress to pieceIndex.
pieceIndex++;
compress = pieceIndex;
}
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
// While c is not the EOF code point:
while (c != EOF) {
// If pieceIndex is 8, IPv6-too-many-pieces validation error, return failure.
if (pieceIndex == 8) {
throw new InvalidUrlException("IPv6 address contains more than 8 pieces.");
}
// If c is U+003A (:), then:
if (c == ':') {
// If compress is non-null, IPv6-multiple-compression validation error, return failure.
if (compress != null) {
throw new InvalidUrlException("IPv6 address is compressed in more than one spot.");
}
// Increase pointer and pieceIndex by 1, set compress to pieceIndex, and then continue.
pointer++;
pieceIndex++;
compress = pieceIndex;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
continue;
}
// Let value and length be 0.
int value = 0;
int length = 0;
// While length is less than 4 and c is an ASCII hex digit, set value to value × 0x10 + c interpreted as hexadecimal number, and increase pointer and length by 1.
while (length < 4 && isAsciiHexDigit(c)) {
int cHex = Character.digit(c, 16);
value = (value * 0x10) + cHex;
pointer++;
length++;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
}
// If c is U+002E (.), then:
if (c == '.') {
// If length is 0, IPv4-in-IPv6-invalid-code-point validation error, return failure.
if (length == 0) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv4 part is empty.");
}
// Decrease pointer by length.
pointer -= length;
// If pieceIndex is greater than 6, IPv4-in-IPv6-too-many-pieces validation error, return failure.
if (pieceIndex > 6) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv6 address has more than 6 pieces.");
}
// Let numbersSeen be 0.
int numbersSeen = 0;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
// While c is not the EOF code point:
while (c != EOF) {
// Let ipv4Piece be null.
Integer ipv4Piece = null;
// If numbersSeen is greater than 0, then:
if (numbersSeen > 0) {
// If c is a U+002E (.) and numbersSeen is less than 4, then increase pointer by 1.
if (c == '.' && numbersSeen < 4) {
pointer++;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
}
// Otherwise, IPv4-in-IPv6-invalid-code-point validation error, return failure.
else {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: " +
"IPv4 part is empty or contains a non-ASCII digit.");
}
}
// If c is not an ASCII digit, IPv4-in-IPv6-invalid-code-point validation error, return failure.
if (!isAsciiDigit(c)) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv4 part contains a non-ASCII digit.");
}
// While c is an ASCII digit:
while (isAsciiDigit(c)) {
// Let number be c interpreted as decimal number.
int number = Character.digit(c, 10);
// If ipv4Piece is null, then set ipv4Piece to number.
if (ipv4Piece == null) {
ipv4Piece = number;
}
// Otherwise, if ipv4Piece is 0, IPv4-in-IPv6-invalid-code-point validation error, return failure.
else if (ipv4Piece == 0) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv4 part contains a non-ASCII digit.");
}
// Otherwise, set ipv4Piece to ipv4Piece × 10 + number.
else {
ipv4Piece = ipv4Piece * 10 + number;
}
// If ipv4Piece is greater than 255, IPv4-in-IPv6-out-of-range-part validation error, return failure.
if (ipv4Piece > 255) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv4 part exceeds 255.");
}
// Increase pointer by 1.
pointer++;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
}
// Set address[pieceIndex] to address[pieceIndex] × 0x100 + ipv4Piece.
address[pieceIndex] = address[pieceIndex] * 0x100 + (ipv4Piece != null ? ipv4Piece : 0);
// Increase numbersSeen by 1.
numbersSeen++;
// If numbersSeen is 2 or 4, then increase pieceIndex by 1.
if (numbersSeen == 2 || numbersSeen == 4) {
pieceIndex++;
}
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
}
// If numbersSeen is not 4, IPv4-in-IPv6-too-few-parts validation error, return failure.
if (numbersSeen != 4) {
throw new InvalidUrlException("IPv6 address with IPv4 address syntax: IPv4 address contains too few parts.");
}
// Break.
break;
}
// Otherwise, if c is U+003A (:):
else if (c == ':') {
// Increase pointer by 1.
pointer++;
c = (pointer < inputLength) ? input.codePointAt(pointer) : EOF;
// If c is the EOF code point, IPv6-invalid-code-point validation error, return failure.
if (c == EOF) {
throw new InvalidUrlException("IPv6 address unexpectedly ends.");
}
}
// Otherwise, if c is not the EOF code point, IPv6-invalid-code-point validation error, return failure.
else if (c != EOF) {
throw new InvalidUrlException("IPv6 address contains \"" + Character.toString(c) + "\", which is neither an ASCII hex digit nor a ':'.");
}
// Set address[pieceIndex] to value.
address[pieceIndex] = value;
// Increase pieceIndex by 1.
pieceIndex++;
}
// If compress is non-null, then:
if (compress != null) {
// Let swaps be pieceIndex − compress.
int swaps = pieceIndex - compress;
// Set pieceIndex to 7.
pieceIndex = 7;
// While pieceIndex is not 0 and swaps is greater than 0, swap address[pieceIndex] with address[compress + swaps − 1], and then decrease both pieceIndex and swaps by 1.
while (pieceIndex != 0 && swaps > 0) {
int tmp = address[pieceIndex];
address[pieceIndex] = address[compress + swaps - 1];
address[compress + swaps - 1] = tmp;
pieceIndex--;
swaps--;
}
}
// Otherwise, if compress is null and pieceIndex is not 8, IPv6-too-few-pieces validation error, return failure.
else if (compress == null && pieceIndex != 8) {
throw new InvalidUrlException("An uncompressed IPv6 address contains fewer than 8 pieces.");
}
// Return address.
return new Ipv6Address(address);
}
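// For example, parse("::1") records the compression at piece index 1, reads the
// single piece 1, and then swaps it to the end, yielding the pieces
// [0, 0, 0, 0, 0, 0, 0, 1], i.e. the loopback address "::1".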
/**
* The IPv6 serializer takes an IPv6 address {@code address} and then runs these steps. They return an ASCII string.
*/
private static String serialize(int[] address) {
// Let output be the empty string.
StringBuilder output = new StringBuilder();
// Let compress be an index to the first IPv6 piece in the first longest sequences of address’s IPv6 pieces that are 0.
int compress = longestSequenceOf0Pieces(address);
// Let ignore0 be false.
boolean ignore0 = false;
// For each pieceIndex in the range 0 to 7, inclusive:
for (int pieceIndex = 0; pieceIndex <= 7; pieceIndex++) {
// If ignore0 is true and address[pieceIndex] is 0, then continue.
if (ignore0 && address[pieceIndex] == 0) {
continue;
}
// Otherwise, if ignore0 is true, set ignore0 to false.
else if (ignore0) {
ignore0 = false;
}
// If compress is pieceIndex, then:
if (compress == pieceIndex) {
// Let separator be "::" if pieceIndex is 0, and U+003A (:) otherwise.
String separator = (pieceIndex == 0) ? "::" : ":";
// Append separator to output.
output.append(separator);
// Set ignore0 to true and continue.
ignore0 = true;
continue;
}
// Append address[pieceIndex], represented as the shortest possible lowercase hexadecimal number, to output.
output.append(Integer.toHexString(address[pieceIndex]));
// If pieceIndex is not 7, then append U+003A (:) to output.
if (pieceIndex != 7) {
output.append(':');
}
}
// Return output.
return output.toString();
}
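// For example, serialize([0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]) compresses the
// longest run of zero pieces (indices 2 through 6) and returns "2001:db8::1".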
private static int longestSequenceOf0Pieces(int[] pieces) {
int longestStart = -1;
int longestLength = -1;
int start = -1;
for (int i = 0; i < pieces.length + 1; i++) {
if (i < pieces.length && pieces[i] == 0) {
if (start < 0) {
start = i;
}
}
else if (start >= 0) {
int length = i - start;
if (length > longestLength) {
longestStart = start;
longestLength = length;
}
start = -1;
}
}
// If there is no sequence of address’s IPv6 pieces that are 0 that is longer than 1, then set compress to null.
if (longestLength > 1) {
return longestStart;
}
else {
return -1;
}
}
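// For example, {0, 1, 0, 0, 0, 1, 0, 0} has its longest zero run starting at
// index 2 (length 3), so 2 is returned; a lone zero piece is never compressed,
// so {0, 1, 1, 1, 1, 1, 1, 1} returns -1.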
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
else if (obj instanceof Ipv6Address other) {
return Arrays.equals(this.pieces, other.pieces);
}
else {
return false;
}
}
@Override
public int hashCode() {
return Arrays.hashCode(this.pieces);
}
@Override
public String toString() {
return this.string;
}
}
sealed interface Port permits StringPort, IntPort {
}
static final class StringPort implements Port {
private final String port;
public StringPort(String port) {
this.port = port;
}
public String value() {
return this.port;
}
@Override
public String toString() {
return this.port;
}
}
static final class IntPort implements Port {
private final int port;
public IntPort(int port) {
this.port = port;
}
public int value() {
return this.port;
}
@Override
public String toString() {
return Integer.toString(this.port);
}
}
sealed interface Path permits PathSegment, PathSegments {
void append(int codePoint);
void append(String s);
boolean isEmpty();
void shorten(String scheme);
boolean isOpaque();
Path clone();
String name();
}
static final class PathSegment implements Path {
@Nullable
private StringBuilder builder = null;
@Nullable
String segment;
PathSegment(String segment) {
this.segment = segment;
}
PathSegment(int codePoint) {
append(codePoint);
}
public String segment() {
String result = this.segment;
if (result == null) {
Assert.state(this.builder != null, "String nor StringBuilder available");
result = this.builder.toString();
this.segment = result;
}
return result;
}
@Override
public void append(int codePoint) {
this.segment = null;
if (this.builder == null) {
this.builder = new StringBuilder(2);
}
this.builder.appendCodePoint(codePoint);
}
@Override
public void append(String s) {
this.segment = null;
if (this.builder == null) {
this.builder = new StringBuilder(s);
}
else {
this.builder.append(s);
}
}
@Override
public String name() {
String name = segment();
if (name.startsWith("/")) {
name = name.substring(1);
}
return name;
}
@Override
public boolean isEmpty() {
if (this.segment != null) {
return this.segment.isEmpty();
}
else {
Assert.state(this.builder != null, "String nor StringBuilder available");
return this.builder.isEmpty();
}
}
@Override
public void shorten(String scheme) {
throw new IllegalStateException("Opaque path not expected");
}
@Override
public boolean isOpaque() {
return true;
}
@Override
public Path clone() {
return new PathSegment(segment());
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
else if (o instanceof PathSegment other) {
return segment().equals(other.segment());
}
else {
return false;
}
}
@Override
public int hashCode() {
return segment().hashCode();
}
@Override
public String toString() {
return segment();
}
}
static final class PathSegments implements Path {
private final List<PathSegment> segments;
public PathSegments() {
this.segments = new ArrayList<>();
}
public PathSegments(List<PathSegment> segments) {
this.segments = new ArrayList<>(segments);
}
@Override
public void append(int codePoint) {
this.segments.add(new PathSegment(codePoint));
}
@Override
public void append(String segment) {
this.segments.add(new PathSegment(segment));
}
public int size() {
return this.segments.size();
}
public String get(int i) {
return this.segments.get(i).segment();
}
@Override
public boolean isEmpty() {
return this.segments.isEmpty();
}
@Override
public void shorten(String scheme) {
int size = size();
if ("file".equals(scheme) &&
size == 1 &&
isWindowsDriveLetter(get(0), true)) {
return;
}
if (!isEmpty()) {
this.segments.remove(size - 1);
}
}
@Override
public boolean isOpaque() {
return false;
}
@Override
public Path clone() {
return new PathSegments(this.segments);
}
@Override
public String name() {
StringBuilder output = new StringBuilder();
for (PathSegment segment : this.segments) {
output.append('/');
output.append(segment.name());
}
return output.toString();
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
else if (o instanceof PathSegments other) {
return this.segments.equals(other.segments);
}
else {
return false;
}
}
@Override
public int hashCode() {
return this.segments.hashCode();
}
@Override
public String toString() {
StringBuilder output = new StringBuilder();
for (PathSegment segment : this.segments) {
output.append(segment);
}
return output.toString();
}
}
private sealed interface ParseIpv4NumberResult permits ParseIpv4NumberFailure, ParseIpv4NumberSuccess {
}
private record ParseIpv4NumberSuccess(int number, boolean validationError) implements ParseIpv4NumberResult {
}
private static final class ParseIpv4NumberFailure implements ParseIpv4NumberResult {
public static final ParseIpv4NumberFailure INSTANCE = new ParseIpv4NumberFailure();
private ParseIpv4NumberFailure() {
}
}
}
| spring-projects/spring-framework | spring-web/src/main/java/org/springframework/web/util/UrlParser.java |
612 | // Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
package com.google.protobuf.util;
import static com.google.common.math.IntMath.checkedAdd;
import static com.google.common.math.IntMath.checkedSubtract;
import static com.google.common.math.LongMath.checkedAdd;
import static com.google.common.math.LongMath.checkedMultiply;
import static com.google.common.math.LongMath.checkedSubtract;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.errorprone.annotations.CompileTimeConstant;
import com.google.j2objc.annotations.J2ObjCIncompatible;
import com.google.protobuf.Duration;
import com.google.protobuf.Timestamp;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Comparator;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Locale;
import java.util.TimeZone;
import javax.annotation.Nullable;
/**
* Utilities to help create/manipulate {@code protobuf/timestamp.proto}. All operations throw an
* {@link IllegalArgumentException} if the input(s) are not {@linkplain #isValid(Timestamp) valid}.
*/
public final class Timestamps {
// Timestamp for "0001-01-01T00:00:00Z"
static final long TIMESTAMP_SECONDS_MIN = -62135596800L;
// Timestamp for "9999-12-31T23:59:59Z"
static final long TIMESTAMP_SECONDS_MAX = 253402300799L;
static final int NANOS_PER_SECOND = 1000000000;
static final int NANOS_PER_MILLISECOND = 1000000;
static final int NANOS_PER_MICROSECOND = 1000;
static final int MILLIS_PER_SECOND = 1000;
static final int MICROS_PER_SECOND = 1000000;
/** A constant holding the minimum valid {@link Timestamp}, {@code 0001-01-01T00:00:00Z}. */
public static final Timestamp MIN_VALUE =
Timestamp.newBuilder().setSeconds(TIMESTAMP_SECONDS_MIN).setNanos(0).build();
/**
* A constant holding the maximum valid {@link Timestamp}, {@code 9999-12-31T23:59:59.999999999Z}.
*/
public static final Timestamp MAX_VALUE =
Timestamp.newBuilder().setSeconds(TIMESTAMP_SECONDS_MAX).setNanos(999999999).build();
/**
* A constant holding the {@link Timestamp} of epoch time, {@code 1970-01-01T00:00:00.000000000Z}.
*/
public static final Timestamp EPOCH = Timestamp.newBuilder().setSeconds(0).setNanos(0).build();
private static final ThreadLocal<SimpleDateFormat> timestampFormat =
new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return createTimestampFormat();
}
};
private static SimpleDateFormat createTimestampFormat() {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.ENGLISH);
GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
// We use Proleptic Gregorian Calendar (i.e., Gregorian calendar extends
// backwards to year one) for timestamp formatting.
calendar.setGregorianChange(new Date(Long.MIN_VALUE));
sdf.setCalendar(calendar);
return sdf;
}
private Timestamps() {}
private enum TimestampComparator implements Comparator<Timestamp>, Serializable {
INSTANCE;
@Override
public int compare(Timestamp t1, Timestamp t2) {
checkValid(t1);
checkValid(t2);
int secDiff = Long.compare(t1.getSeconds(), t2.getSeconds());
return (secDiff != 0) ? secDiff : Integer.compare(t1.getNanos(), t2.getNanos());
}
}
/**
* Returns a {@link Comparator} for {@link Timestamp Timestamps} which sorts in increasing
* chronological order. Nulls and invalid {@link Timestamp Timestamps} are not allowed (see {@link
* #isValid}). The returned comparator is serializable.
*/
public static Comparator<Timestamp> comparator() {
return TimestampComparator.INSTANCE;
}
/**
* Compares two timestamps. The value returned is identical to what would be returned by: {@code
* Timestamps.comparator().compare(x, y)}.
*
* @return the value {@code 0} if {@code x == y}; a value less than {@code 0} if {@code x < y};
* and a value greater than {@code 0} if {@code x > y}
*/
public static int compare(Timestamp x, Timestamp y) {
return TimestampComparator.INSTANCE.compare(x, y);
}
/**
* Returns true if the given {@link Timestamp} is valid. The {@code seconds} value must be in the
* range [-62,135,596,800, +253,402,300,799] (i.e., between 0001-01-01T00:00:00Z and
* 9999-12-31T23:59:59Z). The {@code nanos} value must be in the range [0, +999,999,999].
*
* <p><b>Note:</b> Negative second values with fractional seconds must still have non-negative
* nanos values that count forward in time.
*/
public static boolean isValid(Timestamp timestamp) {
return isValid(timestamp.getSeconds(), timestamp.getNanos());
}
/**
* Returns true if the given number of seconds and nanos is a valid {@link Timestamp}. The {@code
* seconds} value must be in the range [-62,135,596,800, +253,402,300,799] (i.e., between
* 0001-01-01T00:00:00Z and 9999-12-31T23:59:59Z). The {@code nanos} value must be in the range
* [0, +999,999,999].
*
* <p><b>Note:</b> Negative second values with fractional seconds must still have non-negative
* nanos values that count forward in time.
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static boolean isValid(long seconds, int nanos) {
if (!isValidSeconds(seconds)) {
return false;
}
if (nanos < 0 || nanos >= NANOS_PER_SECOND) {
return false;
}
return true;
}
/**
* Returns true if the given number of seconds is valid, if combined with a valid number of nanos.
* The {@code seconds} value must be in the range [-62,135,596,800, +253,402,300,799] (i.e.,
* between 0001-01-01T00:00:00Z and 9999-12-31T23:59:59Z).
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
private static boolean isValidSeconds(long seconds) {
return seconds >= TIMESTAMP_SECONDS_MIN && seconds <= TIMESTAMP_SECONDS_MAX;
}
/** Throws an {@link IllegalArgumentException} if the given {@link Timestamp} is not valid. */
@CanIgnoreReturnValue
public static Timestamp checkValid(Timestamp timestamp) {
long seconds = timestamp.getSeconds();
int nanos = timestamp.getNanos();
if (!isValid(seconds, nanos)) {
throw new IllegalArgumentException(
String.format(
"Timestamp is not valid. See proto definition for valid values. "
+ "Seconds (%s) must be in range [-62,135,596,800, +253,402,300,799]. "
+ "Nanos (%s) must be in range [0, +999,999,999].",
seconds, nanos));
}
return timestamp;
}
/**
* Builds the given builder and throws an {@link IllegalArgumentException} if it is not valid. See
* {@link #checkValid(Timestamp)}.
*
* @return A valid, built {@link Timestamp}.
*/
public static Timestamp checkValid(Timestamp.Builder timestampBuilder) {
return checkValid(timestampBuilder.build());
}
/**
* Convert Timestamp to RFC 3339 date string format. The output will always be Z-normalized and
* uses 0, 3, 6 or 9 fractional digits as required to represent the exact value. Note that
* Timestamp can only represent time from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
* See https://www.ietf.org/rfc/rfc3339.txt
*
* <p>Example of generated format: "1972-01-01T10:00:20.021Z"
*
* @return The string representation of the given timestamp.
* @throws IllegalArgumentException if the given timestamp is not in the valid range.
*/
public static String toString(Timestamp timestamp) {
checkValid(timestamp);
long seconds = timestamp.getSeconds();
int nanos = timestamp.getNanos();
StringBuilder result = new StringBuilder();
// Format the seconds part.
Date date = new Date(seconds * MILLIS_PER_SECOND);
result.append(timestampFormat.get().format(date));
// Format the nanos part.
if (nanos != 0) {
result.append(".");
result.append(formatNanos(nanos));
}
result.append("Z");
return result.toString();
}
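// For example, a timestamp with seconds = 63108020 and nanos = 21000000 formats
// as "1972-01-01T10:00:20.021Z"; with nanos = 0 the fractional part is omitted
// entirely, yielding "1972-01-01T10:00:20Z".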
/**
* Parse from RFC 3339 date string to Timestamp. This method accepts all outputs of {@link
* #toString(Timestamp)} and it also accepts any fractional digits (or none) and any offset as
* long as they fit into nano-seconds precision.
*
* <p>Example of accepted format: "1972-01-01T10:00:20.021-05:00"
*
* @return a Timestamp parsed from the string
* @throws ParseException if parsing fails
*/
public static Timestamp parse(String value) throws ParseException {
int dayOffset = value.indexOf('T');
if (dayOffset == -1) {
throw new ParseException("Failed to parse timestamp: invalid timestamp \"" + value + "\"", 0);
}
int timezoneOffsetPosition = value.indexOf('Z', dayOffset);
if (timezoneOffsetPosition == -1) {
timezoneOffsetPosition = value.indexOf('+', dayOffset);
}
if (timezoneOffsetPosition == -1) {
timezoneOffsetPosition = value.indexOf('-', dayOffset);
}
if (timezoneOffsetPosition == -1) {
throw new ParseException("Failed to parse timestamp: missing valid timezone offset.", 0);
}
// Parse seconds and nanos.
String timeValue = value.substring(0, timezoneOffsetPosition);
String secondValue = timeValue;
String nanoValue = "";
int pointPosition = timeValue.indexOf('.');
if (pointPosition != -1) {
secondValue = timeValue.substring(0, pointPosition);
nanoValue = timeValue.substring(pointPosition + 1);
}
Date date = timestampFormat.get().parse(secondValue);
long seconds = date.getTime() / MILLIS_PER_SECOND;
int nanos = nanoValue.isEmpty() ? 0 : parseNanos(nanoValue);
// Parse timezone offsets.
if (value.charAt(timezoneOffsetPosition) == 'Z') {
if (value.length() != timezoneOffsetPosition + 1) {
throw new ParseException(
"Failed to parse timestamp: invalid trailing data \""
+ value.substring(timezoneOffsetPosition)
+ "\"",
0);
}
} else {
String offsetValue = value.substring(timezoneOffsetPosition + 1);
long offset = parseTimezoneOffset(offsetValue);
if (value.charAt(timezoneOffsetPosition) == '+') {
seconds -= offset;
} else {
seconds += offset;
}
}
try {
return normalizedTimestamp(seconds, nanos);
} catch (IllegalArgumentException e) {
ParseException ex =
new ParseException(
"Failed to parse timestamp " + value + " Timestamp is out of range.", 0);
ex.initCause(e);
throw ex;
}
}
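// For example, parse("1972-01-01T10:00:20.021-05:00") reads the local time as
// 63108020 seconds, then adds the 18000-second offset (a "-05:00" zone is
// behind UTC), yielding seconds = 63126020 and nanos = 21000000.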
/**
* Parses a string in RFC 3339 format into a {@link Timestamp}.
*
* <p>Identical to {@link #parse(String)}, but throws an {@link IllegalArgumentException} instead
* of a {@link ParseException} if parsing fails.
*
* @return a {@link Timestamp} parsed from the string
* @throws IllegalArgumentException if parsing fails
*/
public static Timestamp parseUnchecked(@CompileTimeConstant String value) {
try {
return parse(value);
} catch (ParseException e) {
// While `java.time.format.DateTimeParseException` is a more accurate representation of the
// failure, this library is currently not JDK8 ready because of Android dependencies.
throw new IllegalArgumentException(e);
}
}
// the following 3 constants contain references to java.time.Instant methods (if that class is
// available at runtime); otherwise, they are null.
@Nullable private static final Method INSTANT_NOW = instantMethod("now");
@Nullable private static final Method INSTANT_GET_EPOCH_SECOND = instantMethod("getEpochSecond");
@Nullable private static final Method INSTANT_GET_NANO = instantMethod("getNano");
@Nullable
private static Method instantMethod(String methodName) {
try {
return Class.forName("java.time.Instant").getMethod(methodName);
} catch (Exception e) {
return null;
}
}
/**
* Create a {@link Timestamp} using the best-available (in terms of precision) system clock.
*
* <p><b>Note:</b> that while this API is convenient, it may harm the testability of your code, as
* you're unable to mock the current time. Instead, you may want to consider injecting a clock
* instance to read the current time.
*/
public static Timestamp now() {
if (INSTANT_NOW != null) {
try {
Object now = INSTANT_NOW.invoke(null);
long epochSecond = (long) INSTANT_GET_EPOCH_SECOND.invoke(now);
int nanoAdjustment = (int) INSTANT_GET_NANO.invoke(now);
return normalizedTimestamp(epochSecond, nanoAdjustment);
} catch (Throwable fallThrough) {
throw new AssertionError(fallThrough);
}
}
// otherwise, fall back on millisecond precision
return fromMillis(System.currentTimeMillis());
}
/** Create a Timestamp from the number of seconds elapsed from the epoch. */
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static Timestamp fromSeconds(long seconds) {
return normalizedTimestamp(seconds, 0);
}
/**
* Convert a Timestamp to the number of seconds elapsed from the epoch.
*
* <p>The result will be rounded down to the nearest second. E.g., if the timestamp represents
* "1969-12-31T23:59:59.999999999Z", it will be rounded to -1 second.
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static long toSeconds(Timestamp timestamp) {
return checkValid(timestamp).getSeconds();
}
/** Create a Timestamp from the number of milliseconds elapsed from the epoch. */
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static Timestamp fromMillis(long milliseconds) {
return normalizedTimestamp(
milliseconds / MILLIS_PER_SECOND,
(int) (milliseconds % MILLIS_PER_SECOND * NANOS_PER_MILLISECOND));
}
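// For example (informal usage sketch):
//   Timestamp t = Timestamps.fromMillis(-1);
//   // t.getSeconds() == -1, t.getNanos() == 999000000
// Negative inputs normalize so that nanos always counts forward in time.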
/**
* Create a Timestamp from a {@link Date}. If the {@link Date} is a {@link java.sql.Timestamp},
* full nanosecond precision is retained.
*
* @throws IllegalArgumentException if the year is before 1 CE or after 9999 CE
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
@J2ObjCIncompatible
public static Timestamp fromDate(Date date) {
if (date instanceof java.sql.Timestamp) {
java.sql.Timestamp sqlTimestamp = (java.sql.Timestamp) date;
long time = sqlTimestamp.getTime();
long integralSeconds =
(time < 0 && time % 1000 != 0)
? time / 1000L - 1
: time / 1000L; // truncate the fractional seconds
return Timestamp.newBuilder()
.setSeconds(integralSeconds)
.setNanos(sqlTimestamp.getNanos())
.build();
} else {
return fromMillis(date.getTime());
}
}
/**
* Convert a Timestamp to the number of milliseconds elapsed from the epoch.
*
* <p>The result will be rounded down to the nearest millisecond. For instance, if the timestamp
* represents "1969-12-31T23:59:59.999999999Z", it will be rounded to -1 millisecond.
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static long toMillis(Timestamp timestamp) {
checkValid(timestamp);
return checkedAdd(
checkedMultiply(timestamp.getSeconds(), MILLIS_PER_SECOND),
timestamp.getNanos() / NANOS_PER_MILLISECOND);
}
/** Create a Timestamp from the number of microseconds elapsed from the epoch. */
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static Timestamp fromMicros(long microseconds) {
return normalizedTimestamp(
microseconds / MICROS_PER_SECOND,
(int) (microseconds % MICROS_PER_SECOND * NANOS_PER_MICROSECOND));
}
/**
* Convert a Timestamp to the number of microseconds elapsed from the epoch.
*
* <p>The result will be rounded down to the nearest microsecond. E.g., if the timestamp
* represents "1969-12-31T23:59:59.999999999Z", it will be rounded to -1 microsecond.
*/
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static long toMicros(Timestamp timestamp) {
checkValid(timestamp);
return checkedAdd(
checkedMultiply(timestamp.getSeconds(), MICROS_PER_SECOND),
timestamp.getNanos() / NANOS_PER_MICROSECOND);
}
/** Create a Timestamp from the number of nanoseconds elapsed from the epoch. */
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static Timestamp fromNanos(long nanoseconds) {
return normalizedTimestamp(
nanoseconds / NANOS_PER_SECOND, (int) (nanoseconds % NANOS_PER_SECOND));
}
/** Convert a Timestamp to the number of nanoseconds elapsed from the epoch. */
@SuppressWarnings("GoodTime") // this is a legacy conversion API
public static long toNanos(Timestamp timestamp) {
checkValid(timestamp);
return checkedAdd(
checkedMultiply(timestamp.getSeconds(), NANOS_PER_SECOND), timestamp.getNanos());
}
/**
* Calculate the difference between two timestamps.
*
* <!-- MOE:begin_intracomment_strip -->
* @deprecated Do not use this method for new code. Instead, convert to {@link java.time.Instant}
* using {@link com.google.protobuf.util.JavaTimeConversions#toJavaInstant}, do the arithmetic
* there, and convert back using {@link
* com.google.protobuf.util.JavaTimeConversions#toProtoDuration}.
* <!-- MOE:end_intracomment_strip -->
*/
@Deprecated // MOE:strip_line
public static Duration between(Timestamp from, Timestamp to) {
checkValid(from);
checkValid(to);
return Durations.normalizedDuration(
checkedSubtract(to.getSeconds(), from.getSeconds()),
checkedSubtract(to.getNanos(), from.getNanos()));
}
/**
* Add a duration to a timestamp.
*
* <!-- MOE:begin_intracomment_strip -->
* @deprecated Do not use this method for new code. Instead, convert to {@link java.time.Instant}
* and {@link java.time.Duration} using {@link
* com.google.protobuf.util.JavaTimeConversions#toJavaInstant} and {@link
* com.google.protobuf.util.JavaTimeConversions#toJavaDuration}, do the arithmetic there, and
* convert back using {@link com.google.protobuf.util.JavaTimeConversions#toProtoTimestamp}.
* <!-- MOE:end_intracomment_strip -->
*/
@Deprecated // MOE:strip_line
public static Timestamp add(Timestamp start, Duration length) {
checkValid(start);
Durations.checkValid(length);
return normalizedTimestamp(
checkedAdd(start.getSeconds(), length.getSeconds()),
checkedAdd(start.getNanos(), length.getNanos()));
}
/**
* Subtract a duration from a timestamp.
*
* <!-- MOE:begin_intracomment_strip -->
* @deprecated Do not use this method for new code. Instead, convert to {@link java.time.Instant}
* and {@link java.time.Duration} using {@link
* com.google.protobuf.util.JavaTimeConversions#toJavaInstant} and {@link
* com.google.protobuf.util.JavaTimeConversions#toJavaDuration}, do the arithmetic there, and
* convert back using {@link com.google.protobuf.util.JavaTimeConversions#toProtoTimestamp}.
* <!-- MOE:end_intracomment_strip -->
*/
@Deprecated // MOE:strip_line
public static Timestamp subtract(Timestamp start, Duration length) {
checkValid(start);
Durations.checkValid(length);
return normalizedTimestamp(
checkedSubtract(start.getSeconds(), length.getSeconds()),
checkedSubtract(start.getNanos(), length.getNanos()));
}
static Timestamp normalizedTimestamp(long seconds, int nanos) {
// This only checks seconds, because nanos can intentionally overflow to increment the seconds
// when normalized.
if (!isValidSeconds(seconds)) {
throw new IllegalArgumentException(
String.format(
"Timestamp is not valid. Input seconds is too large. "
+ "Seconds (%s) must be in range [-62,135,596,800, +253,402,300,799]. ",
seconds));
}
if (nanos <= -NANOS_PER_SECOND || nanos >= NANOS_PER_SECOND) {
seconds = checkedAdd(seconds, nanos / NANOS_PER_SECOND);
nanos = (int) (nanos % NANOS_PER_SECOND);
}
if (nanos < 0) {
nanos =
(int)
(nanos + NANOS_PER_SECOND); // no overflow since nanos is negative (and we're adding)
seconds = checkedSubtract(seconds, 1);
}
Timestamp timestamp = Timestamp.newBuilder().setSeconds(seconds).setNanos(nanos).build();
return checkValid(timestamp);
}
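// For example, normalizedTimestamp(10, 1500000000) carries the excess nanos into
// the seconds field, yielding (11, 500000000); normalizedTimestamp(0, -1)
// borrows one second, yielding (-1, 999999999).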
private static long parseTimezoneOffset(String value) throws ParseException {
int pos = value.indexOf(':');
if (pos == -1) {
throw new ParseException("Invalid offset value: " + value, 0);
}
String hours = value.substring(0, pos);
String minutes = value.substring(pos + 1);
try {
return (Long.parseLong(hours) * 60 + Long.parseLong(minutes)) * 60;
} catch (NumberFormatException e) {
ParseException ex = new ParseException("Invalid offset value: " + value, 0);
ex.initCause(e);
throw ex;
}
}
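// For example, parseTimezoneOffset("05:30") returns (5 * 60 + 30) * 60 = 19800 seconds.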
static int parseNanos(String value) throws ParseException {
int result = 0;
for (int i = 0; i < 9; ++i) {
result = result * 10;
if (i < value.length()) {
if (value.charAt(i) < '0' || value.charAt(i) > '9') {
throw new ParseException("Invalid nanoseconds.", 0);
}
result += value.charAt(i) - '0';
}
}
return result;
}
/** Format the nano part of a timestamp or a duration. */
static String formatNanos(int nanos) {
// Determine whether to use 3, 6, or 9 digits for the nano part.
if (nanos % NANOS_PER_MILLISECOND == 0) {
return String.format(Locale.ENGLISH, "%1$03d", nanos / NANOS_PER_MILLISECOND);
} else if (nanos % NANOS_PER_MICROSECOND == 0) {
return String.format(Locale.ENGLISH, "%1$06d", nanos / NANOS_PER_MICROSECOND);
} else {
return String.format(Locale.ENGLISH, "%1$09d", nanos);
}
}
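// For example, formatNanos(21000000) returns "021" (millisecond precision),
// formatNanos(21000) returns "000021" (microsecond precision), and
// formatNanos(1) returns "000000001" (full nanosecond precision).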
}
| protocolbuffers/protobuf | java/util/src/main/java/com/google/protobuf/util/Timestamps.java |
613 | package mindustry;
import arc.*;
import arc.assets.*;
import arc.files.*;
import arc.graphics.*;
import arc.scene.ui.layout.*;
import arc.struct.*;
import arc.util.*;
import arc.util.Log.*;
import mindustry.ai.*;
import mindustry.async.*;
import mindustry.core.*;
import mindustry.ctype.*;
import mindustry.editor.*;
import mindustry.entities.*;
import mindustry.game.EventType.*;
import mindustry.game.*;
import mindustry.gen.*;
import mindustry.graphics.*;
import mindustry.input.*;
import mindustry.io.*;
import mindustry.logic.*;
import mindustry.maps.Map;
import mindustry.maps.*;
import mindustry.mod.*;
import mindustry.net.*;
import mindustry.service.*;
import mindustry.ui.dialogs.*;
import mindustry.world.*;
import mindustry.world.blocks.storage.*;
import mindustry.world.meta.*;
import java.io.*;
import java.nio.charset.*;
import java.util.*;
import java.util.concurrent.*;
import static arc.Core.*;
public class Vars implements Loadable{
/** Whether the game failed to launch last time. */
public static boolean failedToLaunch = false;
/** Whether to load locales.*/
public static boolean loadLocales = true;
/** Whether the logger is loaded. */
public static boolean loadedLogger = false, loadedFileLogger = false;
/** Whether to enable various experimental features (e.g. spawn positions for spawn groups) TODO change */
public static boolean experimental = true;
/** Name of current Steam player. */
public static String steamPlayerName = "";
/** Default accessible content types used for player-selectable icons. */
public static final ContentType[] defaultContentIcons = {ContentType.item, ContentType.liquid, ContentType.block, ContentType.unit};
/** Default rule environment. */
public static final int defaultEnv = Env.terrestrial | Env.spores | Env.groundOil | Env.groundWater | Env.oxygen;
/** Wall darkness radius. */
public static final int darkRadius = 4;
/** Maximum extra padding around deployment schematics. */
public static final int maxLoadoutSchematicPad = 5;
/** All schematic base64 starts with this string.*/
public static final String schematicBaseStart ="bXNjaA";
/** IO buffer size. */
public static final int bufferSize = 8192;
/** global charset, since Android doesn't support the Charsets class */
public static final Charset charset = Charset.forName("UTF-8");
/** main application name, capitalized */
public static final String appName = "Mindustry";
/** Github API URL. */
public static final String ghApi = "https://api.github.com";
/** URL for discord invite. */
public static final String discordURL = "https://discord.gg/mindustry";
/** URL that links to the wiki's modding guide. */
public static final String modGuideURL = "https://mindustrygame.github.io/wiki/modding/1-modding/";
/** URL to the JSON file containing all the BE servers. Only queried in BE. */
public static final String serverJsonBeURL = "https://raw.githubusercontent.com/Anuken/Mindustry/master/servers_be.json";
/** URL to the JSON file containing all the stable servers. */
//TODO merge with v6 list upon release
public static final String serverJsonURL = "https://raw.githubusercontent.com/Anuken/Mindustry/master/servers_v7.json";
/** URL of the github issue report template.*/
public static final String reportIssueURL = "https://github.com/Anuken/Mindustry/issues/new?labels=bug&template=bug_report.md";
/** list of built-in servers.*/
public static final Seq<ServerGroup> defaultServers = Seq.with();
/** maximum size of any block, do not change unless you know what you're doing */
public static final int maxBlockSize = 16;
/** maximum distance between mine and core that supports automatic transferring */
public static final float mineTransferRange = 220f;
/** max chat message length */
public static final int maxTextLength = 150;
/** max player name length in bytes */
public static final int maxNameLength = 40;
/** displayed item size when ingame. */
public static final float itemSize = 5f;
/** units outside this bound will die instantly */
public static final float finalWorldBounds = 250;
/** default range for building */
public static final float buildingRange = 220f;
/** range for moving items */
public static final float itemTransferRange = 220f;
/** range for moving items for logic units */
public static final float logicItemTransferRange = 45f;
/** duration of time between turns in ticks */
public static final float turnDuration = 2 * Time.toMinutes;
/** chance of an invasion per turn, 1 = 100% */
public static final float baseInvasionChance = 1f / 100f;
/** how many minutes have to pass before invasions in a *captured* sector start */
public static final float invasionGracePeriod = 20;
/** min armor fraction damage; e.g. 0.05 = at least 5% damage */
public static final float minArmorDamage = 0.1f;
/** @deprecated see {@link CoreBlock#landDuration} instead! */
@Deprecated public static final float coreLandDuration = 160f;
/** size of tiles in units */
public static final int tilesize = 8;
/** size of one tile payload (tilesize squared) */
public static final float tilePayload = tilesize * tilesize;
/** icon sizes for UI */
public static final float iconXLarge = 8*6f, iconLarge = 8*5f, iconMed = 8*4f, iconSmall = 8*3f;
/** macbook screen notch height */
public static float macNotchHeight = 32f;
/** for map generator dialog */
public static boolean updateEditorOnChange = false;
/** all choosable player colors in join/host dialog */
public static final Color[] playerColors = {
Color.valueOf("82759a"),
Color.valueOf("c0c1c5"),
Color.valueOf("ffffff"),
Color.valueOf("7d2953"),
Color.valueOf("ff074e"),
Color.valueOf("ff072a"),
Color.valueOf("ff76a6"),
Color.valueOf("a95238"),
Color.valueOf("ffa108"),
Color.valueOf("feeb2c"),
Color.valueOf("ffcaa8"),
Color.valueOf("008551"),
Color.valueOf("00e339"),
Color.valueOf("423c7b"),
Color.valueOf("4b5ef1"),
Color.valueOf("2cabfe"),
};
/** maximum TCP packet size */
public static final int maxTcpSize = 900;
/** default server port */
public static final int port = 6567;
/** multicast discovery port.*/
public static final int multicastPort = 20151;
/** Maximum char length of mod subtitles in browser/viewer. */
public static final int maxModSubtitleLength = 40;
/** multicast group for discovery.*/
public static final String multicastGroup = "227.2.7.7";
/** whether the graphical game client has loaded */
public static boolean clientLoaded = false;
/** max GL texture size */
public static int maxTextureSize = 2048;
/** Maximum schematic size.*/
public static int maxSchematicSize = 64;
/** Whether to show sector info upon landing. */
public static boolean showSectorLandInfo = true;
/** Whether to check for memory use before taking screenshots. */
public static boolean checkScreenshotMemory = true;
/** Whether to prompt the user to confirm exiting. */
public static boolean confirmExit = true;
/** if true, UI is not drawn */
public static boolean disableUI;
/** if true, game is set up in mobile mode, even on desktop. used for debugging */
public static boolean testMobile;
/** whether the game is running on a mobile device */
public static boolean mobile;
/** whether the game is running on an iOS device */
public static boolean ios;
/** whether the game is running on an Android device */
public static boolean android;
/** whether the game is running on a headless server */
public static boolean headless;
/** whether steam is enabled for this game */
public static boolean steam;
/** whether to clear sector saves when landing */
public static boolean clearSectors = false;
/** whether any light rendering is enabled */
public static boolean enableLight = true;
/** Whether to draw shadows of blocks at map edges and static blocks.
* Do not change unless you know exactly what you are doing.*/
public static boolean enableDarkness = true;
/** application data directory, equivalent to {@link Settings#getDataDirectory()} */
public static Fi dataDirectory;
/** data subdirectory used for screenshots */
public static Fi screenshotDirectory;
/** data subdirectory used for custom maps */
public static Fi customMapDirectory;
/** data subdirectory used for custom map previews */
public static Fi mapPreviewDirectory;
/** tmp subdirectory for map conversion */
public static Fi tmpDirectory;
/** data subdirectory used for saves */
public static Fi saveDirectory;
/** data subdirectory used for mods */
public static Fi modDirectory;
/** data subdirectory used for schematics */
public static Fi schematicDirectory;
/** data subdirectory used for bleeding edge build versions */
public static Fi bebuildDirectory;
/** file used to store launch ID */
public static Fi launchIDFile;
/** empty map, indicates no current map */
public static Map emptyMap;
/** empty tile for payloads */
public static Tile emptyTile;
/** map file extension */
public static final String mapExtension = "msav";
/** save file extension */
public static final String saveExtension = "msav";
/** schematic file extension */
public static final String schematicExtension = "msch";
/** path to the java executable */
public static String javaPath;
/** list of all locales that can be switched to */
public static Locale[] locales;
//the main executor will have at most [cores] threads active at any time
public static ExecutorService mainExecutor = Threads.executor("Main Executor", OS.cores);
public static FileTree tree = new FileTree();
public static Net net;
public static ContentLoader content;
public static GameState state;
public static EntityCollisions collisions;
public static Waves waves;
public static Platform platform = new Platform(){};
public static Mods mods;
public static Schematics schematics;
public static BeControl becontrol;
public static AsyncCore asyncCore;
public static BaseRegistry bases;
public static GlobalVars logicVars;
public static MapEditor editor;
public static GameService service = new GameService();
public static Universe universe;
public static World world;
public static Maps maps;
public static WaveSpawner spawner;
public static BlockIndexer indexer;
public static Pathfinder pathfinder;
public static ControlPathfinder controlPath;
public static FogControl fogControl;
public static Control control;
public static Logic logic;
public static Renderer renderer;
public static UI ui;
public static NetServer netServer;
public static NetClient netClient;
public static Player player;
@Override
public void loadAsync(){
loadSettings();
init();
}
public static void init(){
Groups.init();
if(loadLocales){
//load locales
String[] stra = Core.files.internal("locales").readString().split("\n");
locales = new Locale[stra.length];
for(int i = 0; i < locales.length; i++){
String code = stra[i];
if(code.contains("_")){
locales[i] = new Locale(code.split("_")[0], code.split("_")[1]);
}else{
locales[i] = new Locale(code);
}
}
Arrays.sort(locales, Structs.comparing(LanguageDialog::getDisplayName, String.CASE_INSENSITIVE_ORDER));
locales = Seq.with(locales).add(new Locale("router")).toArray(Locale.class);
}
Version.init();
CacheLayer.init();
if(!headless){
Log.info("[Mindustry] Version: @", Version.buildString());
}
dataDirectory = settings.getDataDirectory();
screenshotDirectory = dataDirectory.child("screenshots/");
customMapDirectory = dataDirectory.child("maps/");
mapPreviewDirectory = dataDirectory.child("previews/");
saveDirectory = dataDirectory.child("saves/");
tmpDirectory = dataDirectory.child("tmp/");
modDirectory = dataDirectory.child("mods/");
schematicDirectory = dataDirectory.child("schematics/");
bebuildDirectory = dataDirectory.child("be_builds/");
emptyMap = new Map(new StringMap());
if(tree == null) tree = new FileTree();
if(mods == null) mods = new Mods();
content = new ContentLoader();
waves = new Waves();
collisions = new EntityCollisions();
world = new World();
universe = new Universe();
becontrol = new BeControl();
asyncCore = new AsyncCore();
if(!headless) editor = new MapEditor();
maps = new Maps();
spawner = new WaveSpawner();
indexer = new BlockIndexer();
pathfinder = new Pathfinder();
controlPath = new ControlPathfinder();
fogControl = new FogControl();
bases = new BaseRegistry();
logicVars = new GlobalVars();
javaPath =
new Fi(OS.prop("java.home")).child("bin/java").exists() ? new Fi(OS.prop("java.home")).child("bin/java").absolutePath() :
Core.files.local("jre/bin/java").exists() ? Core.files.local("jre/bin/java").absolutePath() : // Unix
Core.files.local("jre/bin/java.exe").exists() ? Core.files.local("jre/bin/java.exe").absolutePath() : // Windows
"java";
state = new GameState();
mobile = Core.app.isMobile() || testMobile;
ios = Core.app.isIOS();
android = Core.app.isAndroid();
modDirectory.mkdirs();
Events.on(ContentInitEvent.class, e -> {
emptyTile = new Tile(Short.MAX_VALUE - 20, Short.MAX_VALUE - 20);
});
mods.load();
maps.load();
}
/** Checks if a launch failure occurred.
* If this is the case, failedToLaunch is set to true. */
public static void checkLaunch(){
settings.setAppName(appName);
launchIDFile = settings.getDataDirectory().child("launchid.dat");
if(launchIDFile.exists()){
failedToLaunch = true;
}else{
failedToLaunch = false;
launchIDFile.writeString("go away");
}
}
/** Cleans up after a successful launch. */
public static void finishLaunch(){
if(launchIDFile != null){
launchIDFile.delete();
}
}
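/* Usage sketch (illustrative; the safe-mode step is hypothetical, not part of this file):
* a platform launcher pairs these calls so a leftover launchid.dat marks the previous
* boot as crashed. */
static void launchSequenceSketch(){
checkLaunch(); //sets failedToLaunch if the sentinel file survived the last run
if(failedToLaunch){
//a launcher could offer a safe mode here, e.g. skipping mod loading
}
//...once the game has finished loading successfully...
finishLaunch(); //delete the sentinel so the next boot starts clean
}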
public static void loadLogger(){
if(loadedLogger) return;
String[] tags = {"[green][D][]", "[royal][I][]", "[yellow][W][]", "[scarlet][E][]", ""};
String[] stags = {"&lc&fb[D]", "&lb&fb[I]", "&ly&fb[W]", "&lr&fb[E]", ""};
Seq<String> logBuffer = new Seq<>();
Log.logger = (level, text) -> {
synchronized(logBuffer){
String result = text;
String rawText = Log.format(stags[level.ordinal()] + "&fr " + text);
System.out.println(rawText);
result = tags[level.ordinal()] + " " + result;
if(!headless && (ui == null || ui.consolefrag == null)){
logBuffer.add(result);
}else if(!headless){
if(!OS.isWindows){
for(String code : ColorCodes.values){
result = result.replace(code, "");
}
}
ui.consolefrag.addMessage(Log.removeColors(result));
}
}
};
Events.on(ClientLoadEvent.class, e -> logBuffer.each(ui.consolefrag::addMessage));
loadedLogger = true;
}
public static void loadFileLogger(){
if(loadedFileLogger) return;
settings.setAppName(appName);
try{
Writer writer = settings.getDataDirectory().child("last_log.txt").writer(false);
LogHandler log = Log.logger;
Log.logger = (level, text) -> {
log.log(level, text);
try{
writer.write("[" + Character.toUpperCase(level.name().charAt(0)) + "] " + Log.removeColors(text) + "\n");
writer.flush();
}catch(IOException e){
e.printStackTrace();
//ignore it
}
};
}catch(Exception e){
//handle log file not being found
Log.err(e);
}
loadedFileLogger = true;
}
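/* A minimal sketch of extending the same wrap-and-delegate pattern used above; the
* error-forwarding target is an assumption, not part of this file. LogHandler and
* LogLevel are assumed accessible via the same Log import this file already uses. */
static void chainLoggerSketch(){
LogHandler previous = Log.logger;
Log.logger = (level, text) -> {
previous.log(level, text); //preserve existing console/file behavior
if(level == LogLevel.err){
//forward errors to some external crash reporter here (hypothetical)
}
};
}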
public static void loadSettings(){
settings.setJson(JsonIO.json);
settings.setAppName(appName);
if(steam || (Version.modifier != null && Version.modifier.contains("steam"))){
settings.setDataDirectory(Core.files.local("saves/"));
}
settings.defaults("locale", "default", "blocksync", true);
keybinds.setDefaults(Binding.values());
settings.setAutosave(false);
settings.load();
//https://github.com/Anuken/Mindustry/issues/8483
if(settings.getInt("uiscale") == 5){
settings.put("uiscale", 100);
}
Scl.setProduct(Math.max(settings.getInt("uiscale", 100), 25) / 100f);
if(!loadLocales) return;
try{
//try loading external bundle
Fi handle = Core.files.local("bundle");
Locale locale = Locale.ENGLISH;
Core.bundle = I18NBundle.createBundle(handle, locale);
Log.info("NOTE: external translation bundle has been loaded.");
if(!headless){
Time.run(10f, () -> ui.showInfo("Note: You have successfully loaded an external translation bundle.\n[accent]" + handle.absolutePath()));
}
}catch(Throwable e){
//no external bundle found
Fi handle = Core.files.internal("bundles/bundle");
Locale locale;
String loc = settings.getString("locale");
if(loc.equals("default")){
locale = Locale.getDefault();
}else{
Locale lastLocale;
if(loc.contains("_")){
String[] split = loc.split("_");
lastLocale = new Locale(split[0], split[1]);
}else{
lastLocale = new Locale(loc);
}
locale = lastLocale;
}
Locale.setDefault(locale);
Core.bundle = I18NBundle.createBundle(handle, locale);
//router
if(locale.toString().equals("router")){
bundle.debug("router");
}
}
}
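/* Locale string convention handled above: the "locale" setting stores either a bare
* language code or language_COUNTRY, e.g. "de" -> new Locale("de") and
* "pt_BR" -> new Locale("pt", "BR"); the value "default" defers to Locale.getDefault(). */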
}
| Anuken/Mindustry | core/src/mindustry/Vars.java |
614 | /* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.*;
import java.util.*;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import db.buffers.*;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.FileInUseException;
import ghidra.util.task.TaskMonitor;
/**
* <code>Database</code> facilitates the creation of a DBHandle for accessing
* a database.
* <p>
* Public constructors are only provided for use with "Non-Versioned" databases.
* This class should be extended when additional management features are needed,
* such as for a "Versioned" database.
* <p>
* This class assumes exclusive control of the files contained within the
* associated database directory and relies on the proper establishment of a
* syncObject to mitigate potential concurrent modification issues.
*/
public abstract class Database {
static final Logger log = LogManager.getLogger(Database.class);
protected static final String DATABASE_FILE_PREFIX = "db.";
protected static final String VERSION_FILE_PREFIX = "ver.";
protected static final String CHANGE_FILE_PREFIX = "change.";
protected static final String CUMULATIVE_CHANGE_FILENAME =
CHANGE_FILE_PREFIX + "data" + LocalBufferFile.BUFFER_FILE_EXTENSION;
protected static final String CUMULATIVE_MODMAP_FILENAME =
CHANGE_FILE_PREFIX + "map" + LocalBufferFile.BUFFER_FILE_EXTENSION;
protected int minVersion;
protected int currentVersion;
protected long lastModified;
protected boolean isVersioned = false;
protected boolean isCheckOutCopy = false;
protected boolean updateAllowed = true;
protected BufferFileManager bfMgr;
protected File dbDir;
protected DBFileListener dbFileListener;
protected boolean dbDirCreated = false;
protected Object syncObject = this;
/**
* General Database Constructor.
* @param dbDir database directory
* @param isVersioned true if this is a versioned database
* @param create if true the database will be created.
* @throws IOException if an IO error occurs
*/
protected Database(File dbDir, boolean isVersioned, boolean create) throws IOException {
this.dbDir = dbDir;
this.isVersioned = isVersioned;
if (create && !dbDir.exists()) {
if (!dbDir.mkdirs()) {
throw new IOException("Failed to create Database directory: " + dbDir);
}
dbDirCreated = true;
}
else {
checkDbDir();
}
}
/**
* Constructor for a new or existing "Non-Versioned" Database.
* @param dbDir database directory
* @param dbFileListener file version listener
* @param create if true the database will be created
* @throws IOException if an IO error occurs, or if create is true and the database already exists
*/
protected Database(File dbDir, DBFileListener dbFileListener, boolean create)
throws IOException {
this(dbDir, false, create);
bfMgr = new DBBufferFileManager();
this.dbFileListener = dbFileListener;
scanFiles(false);
if (create && currentVersion != 0) {
throw new IOException("Database already exists");
}
}
/**
* Constructor for an existing "Non-Versioned" Database.
* @param dbDir database directory
* @throws IOException if an IO error occurs
*/
protected Database(File dbDir) throws IOException {
this(dbDir, false, false);
bfMgr = new DBBufferFileManager();
scanFiles(false);
}
/**
* Set the object to be used for synchronization.
* @param syncObject object to be synchronized on
*/
public void setSynchronizationObject(Object syncObject) {
this.syncObject = syncObject;
}
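/* Sketch of the intended locking pattern (illustrative): the caller shares one lock
* between this Database and the surrounding code, e.g.
*
*   Object lock = new Object();
*   db.setSynchronizationObject(lock);
*   synchronized (lock) {
*       db.refresh(); //now mutually exclusive with open()/openForUpdate() on this db
*   }
*/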
/**
* Returns the time at which this database was last saved.
* @return last modified time in milliseconds
*/
public long lastModified() {
return lastModified;
}
/**
* Delete a directory and all of its contents.
* @param dir directory to be deleted
* @return true if delete was successful.
* If false is returned, a partial delete may have occurred.
*/
protected final static boolean deleteDir(File dir) {
File[] flist = dir.listFiles();
if (flist == null) {
return false;
}
for (int i = 0; i < flist.length; i++) {
if (flist[i].isDirectory()) {
if (!deleteDir(flist[i]))
return false;
}
else {
if (!flist[i].delete())
return false;
}
}
return dir.delete();
}
/**
* Returns the version number associated with the latest buffer file version.
*/
public int getCurrentVersion() {
return currentVersion;
}
/**
* Open the stored database for non-update use.
* The returned handle does not support the Save operation.
* @param monitor task monitor (may be null)
* @return database handle
* @throws FileInUseException thrown if unable to obtain the required database lock(s).
* @throws IOException thrown if IO error occurs.
* @throws CancelledException if cancelled by monitor
*/
public DBHandle open(TaskMonitor monitor) throws IOException, CancelledException {
synchronized (syncObject) {
return new DBHandle(new LocalManagedBufferFile(bfMgr, false, -1, -1));
}
}
/**
* Open the stored database for update use.
* @param monitor task monitor (may be null)
* @return database handle
* @throws FileInUseException thrown if unable to obtain the required database lock(s).
* @throws IOException thrown if IO error occurs.
* @throws CancelledException if cancelled by monitor
*/
public DBHandle openForUpdate(TaskMonitor monitor) throws IOException, CancelledException {
if (!updateAllowed) {
throw new IOException("Update use not permitted");
}
synchronized (syncObject) {
return new DBHandle(new LocalManagedBufferFile(bfMgr, true, -1, -1));
}
}
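/* A minimal usage sketch; this class is abstract with protected constructors, so the
* instance must come from some concrete subclass. The javadoc above permits a null
* task monitor. */
static void openForUpdateSketch(Database db) throws IOException, CancelledException {
DBHandle handle = db.openForUpdate(null); //read-write handle
try {
//...perform updates through the handle...
} finally {
handle.close();
}
}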
/**
* Returns the length of this domain file. This size is the minimum disk space
* used for storing this file, but does not account for additional storage space
* used to track changes, etc.
* @return file length
* @throws IOException thrown if IO or access error occurs
*/
public long length() throws IOException {
return bfMgr.getBufferFile(getCurrentVersion()).length();
}
private void checkDbDir() throws IOException {
String[] fileList = dbDir.list();
if (fileList == null) {
throw new IOException("Database directory not found: " + dbDir);
}
// boolean bufferFileFound = false;
boolean versionFileFound = false;
for (int i = 0; i < fileList.length; i++) {
// Identify files of interest
String fname = fileList[i];
if (fname.endsWith(LocalBufferFile.BUFFER_FILE_EXTENSION)) {
if (fname.startsWith(DATABASE_FILE_PREFIX)) {
// bufferFileFound = true;
}
else if (fname.equals(CUMULATIVE_CHANGE_FILENAME)) {
// TODO: This check is not reliable
// If the database is checked-out and not yet modified, this file will not yet exist
isCheckOutCopy = true;
}
else if (fname.startsWith(VERSION_FILE_PREFIX)) {
versionFileFound = true;
}
}
}
// if (!bufferFileFound) {
// throw new IOException("Bad database directory: " + dbDir);
// }
if (!isVersioned && versionFileFound) {
updateAllowed = false;
}
if (isVersioned && isCheckOutCopy) {
throw new IOException("Versioned Database also appears to be a checkout copy");
}
}
/**
* Scan files and update state.
* @throws FileNotFoundException if database files are not found
*/
public void refresh() throws FileNotFoundException {
scanFiles(false);
if (currentVersion == 0) {
throw new FileNotFoundException("Database files not found");
}
}
/**
* Scan files and update state.
* @param repair if true files are repaired if needed.
* @throws FileNotFoundException if the database directory is not found
*/
protected void scanFiles(boolean repair) throws FileNotFoundException {
synchronized (syncObject) {
// TODO: May need to make repair an option (may not have write privilege)
ArrayList<String> bufFiles = new ArrayList<>();
ArrayList<String> verFiles = new ArrayList<>();
ArrayList<String> changeFiles = new ArrayList<>();
// ArrayList delFiles = new ArrayList();
String[] fileList = dbDir.list();
if (fileList == null) {
throw new FileNotFoundException(dbDir + " not found");
}
for (int i = 0; i < fileList.length; i++) {
// Identify files of interest
String fname = fileList[i];
if (fname.endsWith(LocalBufferFile.BUFFER_FILE_EXTENSION)) {
if (fname.startsWith(DATABASE_FILE_PREFIX)) {
bufFiles.add(fname);
}
else if (fname.startsWith(VERSION_FILE_PREFIX)) {
verFiles.add(fname);
}
else if (fname.startsWith(CHANGE_FILE_PREFIX)) {
changeFiles.add(fname);
}
// else {
// // unknown buffer file
// delFiles.add(fname);
// }
}
// else if (fname.endsWith(LocalBufferFile.PRESAVE_FILE_EXT) ||
// fname.endsWith(LocalBufferFile.TEMP_FILE_EXT)) {
// // Attempt to remove all presave and temp files
// // Open files on Windows will not be deleted, however they will under Unix
// TODO This can cause problems under UNIX since it can be deleted while open
// delFiles.add(fname);
// }
}
// Identify buffer files and current version - keep current version only
int[] bufVersions = getFileVersions(bufFiles);
currentVersion = bufVersions.length == 0 ? 0 : bufVersions[bufVersions.length - 1];
minVersion = currentVersion;
lastModified = bfMgr.getBufferFile(currentVersion).lastModified();
// Remove old buffer files
if (repair) {
for (int i = 0; i < (bufVersions.length - 1); i++) {
bfMgr.getBufferFile(bufVersions[i]).delete();
}
}
if (isVersioned) {
// Check version files
int[] versions = getFileVersions(verFiles);
boolean filesOrphaned = false;
for (int i = versions.length - 1; i >= 0; i--) {
if (versions[i] >= minVersion) {
if (repair) {
File f = bfMgr.getVersionFile(versions[i]);
log.warn(dbDir + ": removing unexpected version file: " + f);
f.delete();
}
}
else if (versions[i] == (minVersion - 1)) {
--minVersion;
}
else {
log.warn(dbDir + ": missing version file " + (minVersion - 1));
filesOrphaned = true;
break;
}
}
// Check change files
int[] changes = getFileVersions(changeFiles);
int minChangeVer = currentVersion;
for (int i = changes.length - 1; i >= 0; i--) {
if (changes[i] >= minChangeVer) {
if (repair) {
File f = bfMgr.getChangeDataFile(changes[i]);
log.warn(dbDir + ": removing unexpected change file: " + f);
f.delete();
}
}
else if (changes[i] == (minChangeVer - 1)) {
--minChangeVer;
}
else {
log.warn(dbDir + ": missing change file " + (minVersion - 1));
filesOrphaned = true;
break;
}
}
if (minChangeVer > minVersion) {
log.warn(dbDir + ": missing change files prior to " + minChangeVer);
minVersion = minChangeVer;
filesOrphaned = true;
}
if (repair && filesOrphaned) {
log.warn(dbDir + ": versions prior to " + minVersion +
" have been orphaned and will be removed");
for (int i = 0; i < versions.length && versions[i] < minVersion; ++i) {
bfMgr.getVersionFile(versions[i]).delete();
}
for (int i = 0; i < changes.length && changes[i] < minVersion; ++i) {
bfMgr.getChangeDataFile(changes[i]).delete();
}
}
}
// Attempt to remove unwanted files
// if (repair) {
// int cnt = delFiles.size();
// for (int i = 0; i < cnt; i++) {
// File f = new File(dbDir, (String) delFiles.get(i));
// f.delete();
// }
// }
}
}
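/* Worked example of the scan above (versioned case, file names per the prefixes at the
* top of this class): with buffer files db.5 and db.6 present, currentVersion becomes 6
* and repair deletes db.5; version/change files are then walked backwards from there,
* and the first gap orphans everything older, which repair also removes. */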
private int[] getFileVersions(ArrayList<String> fileList) {
ArrayList<Integer> list = new ArrayList<>();
Iterator<String> iter = fileList.iterator();
while (iter.hasNext()) {
String fname = iter.next();
int ix1 = fname.indexOf('.');
int ix2 = fname.indexOf('.', ix1 + 1);
if (ix1 < 0 || ix2 < ix1) {
log.error(dbDir + ": bad file name: " + fname);
continue;
}
String v = fname.substring(ix1 + 1, ix2);
try {
list.add(Integer.valueOf(v));
}
catch (NumberFormatException e) {
log.error(dbDir + ": bad file name: " + fname);
}
}
int[] versions = new int[list.size()];
Iterator<Integer> versionsIter = list.iterator();
int ix = 0;
while (versionsIter.hasNext()) {
versions[ix++] = versionsIter.next().intValue();
}
Arrays.sort(versions);
return versions;
}
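/* Example of the name scheme parsed above: buffer files are named
* DATABASE_FILE_PREFIX + version + BUFFER_FILE_EXTENSION, i.e. "db.<n>.<ext>", so the
* token between the two dots is the version; e.g. ["db.1.x", "db.3.x", "db.2.x"]
* (extension hypothetical) yields [1, 2, 3] sorted ascending, and malformed names are
* logged and skipped. */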
protected class DBBufferFileManager implements BufferFileManager {
@Override
public int getCurrentVersion() {
return currentVersion;
}
@Override
public File getBufferFile(int version) {
return new File(dbDir,
DATABASE_FILE_PREFIX + version + LocalBufferFile.BUFFER_FILE_EXTENSION);
}
@Override
public File getVersionFile(int version) {
return null;
}
@Override
public File getChangeMapFile() {
if (isCheckOutCopy) {
return new File(dbDir, CUMULATIVE_MODMAP_FILENAME);
}
return null;
}
@Override
public File getChangeDataFile(int version) {
if (isCheckOutCopy) {
return new File(dbDir, CUMULATIVE_CHANGE_FILENAME);
}
return null;
}
@Override
public void versionCreated(int version, String comment, long checkinId)
throws FileNotFoundException {
synchronized (syncObject) {
if (currentVersion != (version - 1)) {
log.error(dbDir + ": unexpected version created (" + version +
"), expected version " + (currentVersion + 1));
if (version > currentVersion || version < minVersion) {
getBufferFile(version).delete();
}
return;
}
scanFiles(true);
if (currentVersion == 0) {
throw new FileNotFoundException("Database files not found");
}
if (version != currentVersion) {
log.error(dbDir + ": Unexpected version found (" + currentVersion +
"), expected " + version);
}
else if (dbFileListener != null) {
dbFileListener.versionCreated(Database.this, version);
}
}
}
@Override
public void updateEnded(long checkinId) {
// do nothing
}
}
}
| NationalSecurityAgency/ghidra | Ghidra/Framework/DB/src/main/java/db/Database.java |