file_id: int64 (1 to 215k)
content: string (lengths 7 to 454k)
repo: string (lengths 6 to 113)
path: string (lengths 6 to 251)
213,841
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.json.RestJsonMapper; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.hibernate.validator.method.MethodConstraintViolationException; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.validation.ConstraintViolation; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * ExceptionMapper for ValidationException. */ @Provider @Component public class MethodConstraintViolationExceptionMapper implements ExceptionMapper<MethodConstraintViolationException> { @Autowired private RestJsonMapper jsonMapper; @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(MethodConstraintViolationException exception) { preProcessLoggingInterceptor.postProcessError(exception, "Applicatie keerde terug met een (verwachtte) ConstraintViolation:"); try { return Response.status(Response.Status.PRECONDITION_FAILED).entity(getJsonString(exception)) .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } catch (IOException e) { ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.PRECONDITION_FAILED) .entity("{ \"error\" : {\"validatie\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } } /** * Get String with exception as JSON. * * @param exception exception to convert * @return exception converted to JSON * @throws IOException oops */ public String getJsonString(MethodConstraintViolationException exception) throws IOException { return "{ \"error\" : " + jsonMapper.writeValueAsString(getViolations(exception)) + "}"; } private Map<String, List<String>> getViolations(MethodConstraintViolationException mcve) { Map<String, List<String>> res = new HashMap<String, List<String>>(); for (ConstraintViolation cv : mcve.getConstraintViolations()) { String path = ""; if (null != cv.getPropertyPath()) { path = cv.getPropertyPath().toString(); } List<String> msgs = res.get(path); if (null == msgs) { msgs = new ArrayList<String>(); res.put(path, msgs); } msgs.add(cv.getMessage()); } return res; } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/MethodConstraintViolationExceptionMapper.java
213,842
package nl.topicus.eduarte.entities.productregel; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.FetchType; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; import javax.persistence.UniqueConstraint; import nl.topicus.cobra.entities.IActiefEntiteit; import nl.topicus.cobra.templates.annotations.Exportable; import nl.topicus.cobra.web.components.form.AutoForm; import nl.topicus.eduarte.entities.organisatie.EntiteitContext; import nl.topicus.eduarte.entities.organisatie.LandelijkOfInstellingEntiteit; import nl.topicus.eduarte.entities.taxonomie.Taxonomie; import nl.topicus.eduarte.web.components.choice.TaxonomieCombobox; import org.hibernate.annotations.Cache; import org.hibernate.annotations.CacheConcurrencyStrategy; import org.hibernate.annotations.Index; /** * Product rules can be grouped per type, for example Gemeenschappelijk, * Profiel, Keuze. * * @author loite */ @Exportable @Entity() @Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "Instelling") @javax.persistence.Table(uniqueConstraints = { @UniqueConstraint(columnNames = {"naam", "taxonomie", "organisatie"}), @UniqueConstraint(columnNames = {"volgnummer", "taxonomie", "organisatie"})}) public class SoortProductregel extends LandelijkOfInstellingEntiteit implements Comparable<SoortProductregel>, IActiefEntiteit { private static final long serialVersionUID = 1L; @Column(nullable = false, length = 30) @AutoForm(htmlClasses = {"unit_max"}) private String naam; /** * Ordering of product rules (Gemeenschappelijk comes before Profiel). */ @Column(nullable = false) @AutoForm(htmlClasses = {"unit_40"}) private int volgnummer; @Column(nullable = false) private boolean actief; @ManyToOne(fetch = FetchType.LAZY) @JoinColumn(nullable = false, name = "taxonomie") @AutoForm(editorClass = TaxonomieCombobox.class) @Index(name = "idx_SoortProduct_taxonomie") private Taxonomie taxonomie; @Column(nullable = false, length = 30) @AutoForm(htmlClasses = {"unit_max"}) private String diplomanaam; protected SoortProductregel() { } public SoortProductregel(EntiteitContext context) { super(context); } @Exportable public String getNaam() { return naam; } public void setNaam(String naam) { this.naam = naam; } @Exportable public String getDiplomanaam() { return diplomanaam; } public void setDiplomanaam(String diplomanaam) { this.diplomanaam = diplomanaam; } public boolean isActief() { return actief; } public void setActief(boolean actief) { this.actief = actief; } public int getVolgnummer() { return volgnummer; } public void setVolgnummer(int volgnummer) { this.volgnummer = volgnummer; } @Override public int compareTo(SoortProductregel o) { return getVolgnummer() - o.getVolgnummer(); } @Override public String toString() { return getNaam(); } public Taxonomie getTaxonomie() { return taxonomie; } public void setTaxonomie(Taxonomie taxonomie) { this.taxonomie = taxonomie; } }
topicusonderwijs/tribe-krd-opensource
eduarte/core/src/main/java/nl/topicus/eduarte/entities/productregel/SoortProductregel.java
213,843
/* * Copyright 2007 Pieter De Rycke * * This file is part of JMTP. * * JMTP is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or any later version. * * JMTP is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with JMTP. If not, see <http://www.gnu.org/licenses/>. */ package jmtp; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.math.BigInteger; import java.util.Date; import be.derycke.pieter.com.COMException; import be.derycke.pieter.com.OleDate; // common class for storage and folder abstract class AbstractPortableDeviceContainerImplWin32 extends PortableDeviceObjectImplWin32 { AbstractPortableDeviceContainerImplWin32(String objectID, PortableDeviceContentImplWin32 content, PortableDevicePropertiesImplWin32 properties) { super(objectID, content, properties); } public PortableDeviceObject[] getChildObjects() { try { String[] childIDs = content.listChildObjects(objectID); PortableDeviceObject[] objects = new PortableDeviceObject[childIDs.length]; for(int i = 0; i < childIDs.length; i++) objects[i] = WPDImplWin32.convertToPortableDeviceObject(childIDs[i], this.content, this.properties); return objects; } catch (COMException e) { return new PortableDeviceObject[0]; } } public PortableDeviceFolderObject createFolderObject(String name) { try { PortableDeviceValuesImplWin32 values = new PortableDeviceValuesImplWin32(); values.setStringValue(Win32WPDDefines.WPD_OBJECT_PARENT_ID, this.objectID); values.setStringValue(Win32WPDDefines.WPD_OBJECT_ORIGINAL_FILE_NAME, name); values.setStringValue(Win32WPDDefines.WPD_OBJECT_NAME, name); values.setGuidValue(Win32WPDDefines.WPD_OBJECT_CONTENT_TYPE, Win32WPDDefines.WPD_CONTENT_TYPE_FOLDER); return new PortableDeviceFolderObjectImplWin32(content.createObjectWithPropertiesOnly(values), this.content, this.properties); } catch (COMException e) { e.printStackTrace(); return null; } } // TODO add support for references public PortableDevicePlaylistObject createPlaylistObject(String name, PortableDeviceObject[] references) { try { PortableDeviceValuesImplWin32 values = new PortableDeviceValuesImplWin32(); values.setStringValue(Win32WPDDefines.WPD_OBJECT_PARENT_ID, this.objectID); values.setStringValue(Win32WPDDefines.WPD_OBJECT_ORIGINAL_FILE_NAME, name + ".pla"); values.setStringValue(Win32WPDDefines.WPD_OBJECT_NAME, name); values.setGuidValue(Win32WPDDefines.WPD_OBJECT_FORMAT, Win32WPDDefines.WPD_OBJECT_FORMAT_PLA); values.setGuidValue(Win32WPDDefines.WPD_OBJECT_CONTENT_TYPE, Win32WPDDefines.WPD_CONTENT_TYPE_PLAYLIST); if(references != null) { PortableDevicePropVariantCollectionImplWin32 propVariantCollection = new PortableDevicePropVariantCollectionImplWin32(); for(PortableDeviceObject reference : references) propVariantCollection.add(new PropVariant(reference.getID())); values.setPortableDeviceValuesCollectionValue(Win32WPDDefines.WPD_OBJECT_REFERENCES, propVariantCollection); } return new PortableDevicePlaylistObjectImplWin32(content.createObjectWithPropertiesOnly(values), this.content, this.properties); } catch(COMException e) { e.printStackTrace(); return null; } } public 
PortableDeviceAudioObject addAudioObject(File file, String artist, String title, BigInteger duration) throws FileNotFoundException, IOException { return addAudioObject(file, artist, title, duration, null, null, null, -1); } public PortableDeviceAudioObject addAudioObject(File file, String artist, String title, BigInteger duration, String genre, String album, Date releaseDate, int track) throws FileNotFoundException, IOException { try { PortableDeviceValuesImplWin32 values = new PortableDeviceValuesImplWin32(); values.setStringValue(Win32WPDDefines.WPD_OBJECT_PARENT_ID, this.objectID); values.setStringValue(Win32WPDDefines.WPD_OBJECT_ORIGINAL_FILE_NAME, file.getName()); values.setGuidValue(Win32WPDDefines.WPD_OBJECT_FORMAT, Win32WPDDefines.WPD_OBJECT_FORMAT_MP3); // TODO find a way to detect the type values.setGuidValue(Win32WPDDefines.WPD_OBJECT_CONTENT_TYPE, Win32WPDDefines.WPD_CONTENT_TYPE_AUDIO); values.setStringValue(Win32WPDDefines.WPD_OBJECT_NAME, title); if(artist != null) values.setStringValue(Win32WPDDefines.WPD_MEDIA_ARTIST, artist); values.setUnsignedLargeIntegerValue(Win32WPDDefines.WPD_MEDIA_DURATION, duration); if(genre != null) values.setStringValue(Win32WPDDefines.WPD_MEDIA_GENRE, genre); if(album != null) values.setStringValue(Win32WPDDefines.WPD_MUSIC_ALBUM, album); if(releaseDate != null) values.setFloateValue(Win32WPDDefines.WPD_MEDIA_RELEASE_DATE, (float)new OleDate(releaseDate).toDouble()); if(track >= 0) values.setUnsignedIntegerValue(Win32WPDDefines.WPD_MUSIC_TRACK, track); return new PortableDeviceAudioObjectImplWin32(content.createObjectWithPropertiesAndData(values, file), this.content, this.properties); } catch(COMException e) { if(e.getHresult() == Win32WPDDefines.E_FILENOTFOUND) throw new FileNotFoundException("File " + file + " was not found."); else { throw new IOException(e); } } } }
lmckeon/jmtp
java/src/jmtp/AbstractPortableDeviceContainerImplWin32.java
213,845
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.jaxrs.cfg.Annotations; import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; import org.springframework.stereotype.Component; import javax.ws.rs.Consumes; import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.ext.Provider; /** * Register Jackson JSON provider for use by RESTEasy. */ @Provider @Component @Consumes({ MediaType.APPLICATION_JSON, "application/*+json", "text/json" }) @Produces({ MediaType.APPLICATION_JSON, "application/*+json", "text/json" }) public class RestJacksonJsonProvider extends JacksonJsonProvider { /** * No-arguments constructor. */ public RestJacksonJsonProvider() { this(new RestJsonMapper()); } /** * Constructor. * * @param annotationsToUse annotations to use */ public RestJacksonJsonProvider(Annotations... annotationsToUse) { this(new RestJsonMapper(), annotationsToUse); } /** * Constructor. * * @param mapper object mapper to use - overwritten by our object mapper. */ public RestJacksonJsonProvider(ObjectMapper mapper) { super(new RestJsonMapper()); } /** * Constructor. * * @param mapper object mapper to use - overwritten by our object mapper. * @param annotationsToUse annotations to use */ public RestJacksonJsonProvider(ObjectMapper mapper, Annotations[] annotationsToUse) { super(new RestJsonMapper(), annotationsToUse); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/RestJacksonJsonProvider.java
213,846
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.web; import be.eliwan.profiling.service.ProfilingContainer; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Service; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Random; import javax.ws.rs.Path; import javax.ws.rs.QueryParam; /** * Sample service which simulates some work and registers profiling information. */ @Path("/rest") @Service public class SampleServiceImpl implements SampleService { private static final Logger LOG = LoggerFactory.getLogger(SampleServiceImpl.class); @Autowired @Qualifier("gatewayMethodProfiling") private ProfilingContainer gatewayMethodProfilingContainer; @Autowired @Qualifier("gatewayServiceProfiling") private ProfilingContainer gatewayServiceProfilingContainer; private Random random = new Random(System.currentTimeMillis()); @Override public String sample(@QueryParam("q") String query) { return sample(query, "gateway"); } @Override public String other(@QueryParam("q") String query) { return sample(query, "starship"); } private String sample(String query, String gatewayMethod) { waitALittle(); if (null == query || (query.length() % 2) == 0) { // do a dummy gateway call callGateway(gatewayMethod); } DateFormat dateFormat = new SimpleDateFormat("HH:mm:ss.SSS"); return "done at " + dateFormat.format(new Date()); } private void waitALittle() { try { Thread.sleep(random.nextInt(2000)); } catch (InterruptedException ie) { LOG.debug("Sleep was interrupted", ie); } } private void callGateway(String gatewayMethod) { long start = System.currentTimeMillis(); try { waitALittle(); } finally { long durationMillis = System.currentTimeMillis() - start; gatewayMethodProfilingContainer.register(gatewayMethod, durationMillis); gatewayServiceProfilingContainer.register(PreProcessLoggingInterceptor.PROFILE_GROUP.get(), durationMillis); } } }
WegenenVerkeer/common-resteasy
web/src/main/java/be/wegenenverkeer/common/resteasy/web/SampleServiceImpl.java
213,847
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ConflictException; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * Exception mapper for ConflictException. */ @Provider @Component public class ConflictExceptionMapper implements ExceptionMapper<ConflictException> { @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(ConflictException exception) { preProcessLoggingInterceptor.postProcessError(exception, "Conflict:"); ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.CONFLICT) .entity("{ \"error\" : {\"\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/ConflictExceptionMapper.java
213,848
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.deser.std.FromStringDeserializer; import org.apache.commons.lang3.StringUtils; import java.io.IOException; import java.time.LocalDateTime; /** * Jackson deserializer for LocalDateTime. */ public class LocalDateTimeDeserializer extends FromStringDeserializer<LocalDateTime> { private Iso8601AndOthersLocalDateTimeFormat iso8601AndOthers = new Iso8601AndOthersLocalDateTimeFormat(); /** Constructor. */ public LocalDateTimeDeserializer() { super(LocalDateTime.class); } // CHECKSTYLE METHOD_NAME: OFF @Override protected LocalDateTime _deserialize(String value, DeserializationContext ctxt) throws IOException { if (StringUtils.isNotBlank(value)) { return iso8601AndOthers.parse(value); } return null; // empty string } // CHECKSTYLE METHOD_NAME: ON }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/LocalDateTimeDeserializer.java
213,849
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.exception; /** * Umbrella exception which is thrown whenever an error occurs in a REST service method. */ public abstract class AbstractRestException extends RuntimeException { /** * No-arguments constructor. */ protected AbstractRestException() { } /** * Constructor with message. * * @param message message */ protected AbstractRestException(String message) { super(message); } /** * Constructor with message and cause. * * @param message message * @param cause cause */ protected AbstractRestException(String message, Throwable cause) { super(message, cause); } /** * Constructor with cause. * * @param cause cause */ protected AbstractRestException(Throwable cause) { super(cause); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/exception/AbstractRestException.java
213,850
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.NotFoundException; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * Exception mapper for NotFoundException. */ @Provider @Component public class JaxRsNotFoundExceptionMapper implements ExceptionMapper<NotFoundException> { @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(NotFoundException exception) { preProcessLoggingInterceptor.postProcessError(exception, "De resource werd niet gevonden:"); ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.NOT_FOUND) .entity("{ \"error\" : {\"\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/JaxRsNotFoundExceptionMapper.java
213,852
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.logging; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Indicate that the response from the method should not be logged in the PreProcessLoggingInterceptor. */ @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) public @interface DoNotLogResponse { }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/logging/DoNotLogResponse.java
213,853
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.exception; /** * Exception which can be thrown if the service fails. This indicates a problem in the server which is not caused by * (faulty) client input. * <p/> * This is not intended for recoverable errors. For recoverable errors, it is recommended to use * {@link ValidationException} instead. * <p/> * Contrary to the other AbstractRestExceptions, ServiceExceptions are logged as errors. The other exceptions require * fixes on the client side. This exception indicates a problem which needs fixing on the server side. */ public class ServiceException extends AbstractRestException { /** * No-arguments constructor. */ public ServiceException() { super(); } /** * Constructor with message. * * @param message message */ public ServiceException(String message) { super(message); } /** * Constructor with message and cause. * * @param message message * @param cause cause */ public ServiceException(String message, Throwable cause) { super(message, cause); } /** * Constructor with cause. * * @param cause cause */ public ServiceException(Throwable cause) { super(cause); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/exception/ServiceException.java
213,856
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.exception; /** * Exception which can be thrown if a resource is not found. * <p/> * Guideline is to use this exception when an object which is referenced through a non-optional parameter cannot be * accessed. This could be because the object does not exist or because of insufficient authorization. The non-optional * parameter could be a path parameter, a query parameter or a required field in the body. */ public class NotFoundException extends AbstractRestException { /** * No-arguments constructor. */ public NotFoundException() { } /** * Constructor with message. * * @param message message */ public NotFoundException(String message) { super(message); } /** * Constructor with message and cause. * * @param message message * @param cause cause */ public NotFoundException(String message, Throwable cause) { super(message, cause); } /** * Constructor with cause. * * @param cause cause */ public NotFoundException(Throwable cause) { super(cause); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/exception/NotFoundException.java
213,857
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.SerializerProvider; import java.io.IOException; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; /** * Jackson serializer for LocalDateTime. */ public class LocalDateTimeSerializer extends JsonSerializer<LocalDateTime> { private static final DateTimeFormatter FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss"); @Override public void serialize(LocalDateTime value, JsonGenerator gen, SerializerProvider arg2) throws IOException { gen.writeString(FORMATTER.format(value)); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/LocalDateTimeSerializer.java
213,859
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.security.core.AuthenticationException; import org.springframework.stereotype.Component; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * Exception mapper for AuthenticationException. */ @Provider @Component public class AuthenticationExceptionMapper implements ExceptionMapper<AuthenticationException> { @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(AuthenticationException exception) { preProcessLoggingInterceptor.postProcessError(exception, "Onvoldoende rechten:"); ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.UNAUTHORIZED) .entity("{ \"error\" : {\"authenticatie\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/AuthenticationExceptionMapper.java
213,860
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.jboss.resteasy.spi.NotFoundException; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * Exception mapper for NotFoundException. */ @Provider @Component public class ResteasyNotFoundExceptionMapper implements ExceptionMapper<NotFoundException> { @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(NotFoundException exception) { preProcessLoggingInterceptor.postProcessError(exception, "De resource werd niet gevonden:"); ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.NOT_FOUND) .entity("{ \"error\" : {\"\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/ResteasyNotFoundExceptionMapper.java
213,861
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.core.Version; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.Module; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.module.SimpleModule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.stereotype.Component; import java.time.LocalDate; import java.time.LocalDateTime; /** * Main entry for the Json serializer/deserializer. */ @Component public class RestJsonMapper extends ObjectMapper { private static final Logger LOG = LoggerFactory.getLogger(RestJsonMapper.class); /** * No-arguments constructor. */ public RestJsonMapper() { super(); this.setSerializationInclusion(JsonInclude.Include.NON_NULL); this.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); this.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); this.setDateFormat(new Iso8601AndOthersDateFormat()); SimpleModule testModule = new SimpleModule("jsr310", new Version(1, 0, 0, "", "be.wegenenverkeer.common", "common-resteasy")); testModule.addDeserializer(LocalDate.class, new LocalDateDeserializer()); testModule.addDeserializer(LocalDateTime.class, new LocalDateTimeDeserializer()); testModule.addSerializer(LocalDate.class, new LocalDateSerializer()); testModule.addSerializer(LocalDateTime.class, new LocalDateTimeSerializer()); this.registerModule(testModule); ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); if (null == classLoader) { classLoader = this.getClass().getClassLoader(); } try { Class clazz = classLoader.loadClass("com.fasterxml.jackson.datatype.joda.JodaModule"); Object instance = clazz.newInstance(); this.registerModule((Module) instance); } catch (Exception ex) { // ignore, we do not require joda-time, but inform the user LOG.warn("Add jackson-datatype-joda dependency for joda-time support."); } } /** * Add a custom serializer. * * @param classToMap class to map * @param classSerializer serializer * @param <T> class to map */ public <T> void addClassSerializer(Class<? extends T> classToMap, JsonSerializer<T> classSerializer) { SimpleModule testModule = new SimpleModule("MyModule", new Version(1, 0, 0, null)); testModule.addSerializer(classToMap, classSerializer); this.registerModule(testModule); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/RestJsonMapper.java
213,862
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.exception; import java.io.InvalidClassException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.Locale; /** * ExceptionUtil allows you to get more information about an exception, without having to investigate the causes. * <p/> * Originally from equanda, http://equanda.svn.sourceforge.net/, class org/equanda/util/ExceptionUtil.java. */ public class ExceptionUtil { private static final String[] FILTER_PREFIX = { "java.lang.reflect.Method", "org.apache.catalina", "org.eclipse.jetty", "org.springframework.aop", "org.springframework.cglib", "org.springframework.security", "org.springframework.transaction", "org.springframework.web", "sun.reflect", "net.sf.cglib", }; private static final String[] FILTER_CONTAINS = { "ByCGLIB$$", }; private boolean isRetryable; private boolean isRecoverable; private String shortMessage; private String concatenatedMessage; private Throwable originalException; /** * Construct instance which allows investigating the exception. * * @param originalException exception to investigate */ public ExceptionUtil(Throwable originalException) { this.originalException = originalException; isRetryable = false; isRecoverable = true; Throwable exc = originalException; Throwable lastCause = exc; StringBuilder concat = new StringBuilder(); if (exc instanceof InvocationTargetException && null != exc.getCause()) { exc = exc.getCause(); } for (Throwable c = exc; c != null; c = getCause(c)) { if (concat.length() > 0) { concat.append("; "); } concat.append(getMessage(c)); String cn = c.getClass().getName().toLowerCase(Locale.ENGLISH); String em = c.getMessage(); if (em == null) { em = ""; } em = em.toLowerCase(Locale.ENGLISH); if (c instanceof java.rmi.UnmarshalException || c instanceof InvalidClassException || c instanceof ClassNotFoundException || c instanceof LinkageError || c instanceof VirtualMachineError || c instanceof IllegalAccessException) { isRecoverable = false; } else { if (cn.contains("deadlock") || em.contains("deadlock") || cn.contains("staleobjectexception") || em.contains("staleobjectexception") || cn.contains("sockettimeoutexception") || em.contains("sockettimeoutexception") || cn.contains("communicationexception") || em.contains("communicationexception") || cn.contains("concurrentmodificationexception")) { isRetryable = true; } } } concatenatedMessage = concat.toString(); shortMessage = getMessage(lastCause); } /** * Is the exception recoverable? * * @return is it likely that the error can be recovered from, or should we exit? */ public boolean isRecoverable() { return isRecoverable; } /** * Can the operation which caused the exception be retried? * * @return would it be useful to retry the operation? */ public boolean isRetryable() { return isRetryable; } /** * Get the compact error message. * * @return compact error message */ public String getMessage() { return shortMessage; } /** * Get concatenated message, a sequence of the (short) messages for the exception and the causes. * * @return concatenated messages for the exception and causes. 
*/ public String getConcatenatedMessage() { return concatenatedMessage; } /** * Get concatenated message, a sequence of the (short) messages for the exception and the causes. * <p/> * The response is usable inside double quotes. * * @return concatenated messages for the exception and causes. */ public String getEscapedConcatenatedMessage() { return getConcatenatedMessage(). replace('"', '\''). replace('\t', ' '). replace("\n", "\\n"). replace("\r", "\\r"); } /** * Get the stack trace as a string. Filters framework lines from the trace. * * @return stack trace */ public String getStackTrace() { StringBuilder sb = new StringBuilder(); Throwable exception = originalException; while (null != exception) { int filteredCount = 0; sb.append(exception.getMessage()).append('\n'); for (StackTraceElement ste : exception.getStackTrace()) { if (shouldDisplay(ste.getClassName())) { sb.append(" "); while (filteredCount > 0) { sb.append(','); filteredCount--; } sb.append(ste.toString()).append('\n'); } else { filteredCount++; } } exception = exception.getCause(); } return sb.toString(); } private boolean shouldDisplay(String className) { for (String filter : FILTER_PREFIX) { if (className.startsWith(filter)) { return false; } } for (String filter : FILTER_CONTAINS) { if (className.contains(filter)) { return false; } } return true; } /** * Extract a "sensible", reasonably compact message. * * @param exc exception to get message for * @return error message */ private String getMessage(Throwable exc) { String msg; // see if there is a "getShortMessage" method Class clazz = exc.getClass(); try { Method method = clazz.getMethod("getShortMessage"); msg = (String) method.invoke(exc); } catch (Exception ex) { msg = null; /*ignore*/ } // fallback to normal message or else class name if (msg == null || "null".equals(msg)) { msg = exc.getMessage(); } if (msg == null || "null".equals(msg)) { msg = exc.toString(); } return msg; } /** * Return the cause exception. * * @param exc exception to get message for * @return cause exception if any */ private Throwable getCause(Throwable exc) { Throwable cause = exc.getCause(); // use the roundabout way to figure out if there is a cause exception if none found easily // some classes, like EJBException need to be java 1.3 compatible and have a getCausedByException method if (cause == null) { Class clazz = exc.getClass(); try { Method method = clazz.getMethod("getCausedByException"); cause = (Throwable) method.invoke(exc); } catch (Exception ex) { cause = null; /*ignore*/ } } return cause; } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/exception/ExceptionUtil.java
213,863
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import java.text.DateFormat; import java.text.FieldPosition; import java.text.ParsePosition; import java.time.LocalDateTime; import java.time.ZoneId; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.Locale; import java.util.TimeZone; /** * Provide a fast thread-safe formatter/parser DateFormat for ISO8601 dates ONLY. * It was mainly done to be used with Jackson JSON Processor. * <p/> * Watch out for the clone implementation, which returns itself. * <p/> * All methods other than parse, format and clone have undefined behavior. * * @see com.fasterxml.jackson.databind.util.ISO8601Utils */ public class Iso8601NozoneFormat extends DateFormat { private static final long serialVersionUID = 1L; private static final String GMT_ID = "GMT"; private static final String PARSE_FAILED = "Failed to parse date "; @Override public StringBuffer format(Date date, StringBuffer toAppendTo, FieldPosition fieldPosition) { String value = format(date, false); toAppendTo.append(value); return toAppendTo; } @Override public Date parse(String source, ParsePosition pos) { // index must be set to other than 0, I would swear this requirement is not there in // some version of jdk 6. pos.setIndex(source.length()); return parse(source, false); } // CHECKSTYLE CLONE: OFF @Override public Object clone() { return this; // jackson calls clone every time. We are thread-safe so just return the instance } // CHECKSTYLE CLONE: ON /** * Format date into yyyy-MM-ddTHH:mm:ss[.sss]. It is expected that the timezone on both ends is the * same. The timezone is not included. * * @param date the date to format * @param millis include millis? * @return the date formatted as 'yyyy-MM-ddTHH:mm:ss[.sss]' */ public String format(Date date, boolean millis) { Calendar calendar = new GregorianCalendar(Locale.US); calendar.setTime(date); // estimate capacity of buffer as close as we can (yeah, that's pedantic ;) int capacity = "yyyy-MM-ddTHH:mm:ss".length(); capacity += millis ? ".sss".length() : 0; StringBuilder formatted = new StringBuilder(capacity); padInt(formatted, calendar.get(Calendar.YEAR), "yyyy".length()); formatted.append('-'); padInt(formatted, calendar.get(Calendar.MONTH) + 1, "MM".length()); formatted.append('-'); padInt(formatted, calendar.get(Calendar.DAY_OF_MONTH), "dd".length()); formatted.append('T'); padInt(formatted, calendar.get(Calendar.HOUR_OF_DAY), "hh".length()); formatted.append(':'); padInt(formatted, calendar.get(Calendar.MINUTE), "mm".length()); formatted.append(':'); padInt(formatted, calendar.get(Calendar.SECOND), "ss".length()); if (millis) { formatted.append('.'); padInt(formatted, calendar.get(Calendar.MILLISECOND), "sss".length()); } // no timezone return formatted.toString(); } /** * Parse a date from an ISO-8601 formatted string. It expects the format yyyy-MM-ddTHH:mm:ss[.sss][Z|[+-]HH:mm] * The timezone is not applied when present. It is expected that the timezone on both ends is the same. * * @param date ISO string to parse in the appropriate format. 
* @param parseTimezone true when the timezone also needs to be parsed for correctness * @return the parsed date * @throws IllegalArgumentException if the date is not in the appropriate format */ // CHECKSTYLE INNER_ASSIGNMENT: OFF public Date parse(String date, boolean parseTimezone) throws IllegalArgumentException { int max = date.length(); try { int offset = 0; // extract year int year = parseInt(date, offset, offset + 4); offset += 4; checkOffset(date, offset, '-'); offset += 1; // extract month int month = parseInt(date, offset, offset + 2); offset += 2; checkOffset(date, offset, '-'); offset += 1; // extract day int day = parseInt(date, offset, offset + 2); offset += 2; int hour = 0; int minutes = 0; int seconds = 0; int milliseconds = 0; // always use 0 otherwise returned date will include millis of current time if (offset < max) { // time can be optional checkOffset(date, offset, 'T'); offset += 1; // extract hours, minutes, seconds and milliseconds hour = parseInt(date, offset, offset + 2); offset += 2; checkOffset(date, offset, ':'); offset += 1; minutes = parseInt(date, offset, offset + 2); offset += 2; checkOffset(date, offset, ':'); offset += 1; seconds = parseInt(date, offset, offset + 2); offset += 2; // milliseconds can be optional in the format if (offset < max) { // milliseconds are optional if (date.charAt(offset) == '.') { checkOffset(date, offset, '.'); offset += 1; milliseconds = parseInt(date, offset, offset + 3); offset += 3; } } } // extract timezone if (parseTimezone && offset < max) { String timezoneId; char timezoneIndicator = date.charAt(offset); if (timezoneIndicator == '+' || timezoneIndicator == '-') { timezoneId = GMT_ID + date.substring(offset); } else if (timezoneIndicator == 'Z') { timezoneId = GMT_ID; } else { throw new IndexOutOfBoundsException("Invalid time zone indicator " + timezoneIndicator); } TimeZone timezone = TimeZone.getTimeZone(timezoneId); if (!timezone.getID().equals(timezoneId)) { throw new IndexOutOfBoundsException(); } } return Date.from(LocalDateTime.of(year, month, day, hour, minutes, seconds, milliseconds * 1_000_000) // last argument is nanoOfSecond, so convert millis to nanos .atZone(ZoneId.systemDefault()).toInstant()); } catch (IndexOutOfBoundsException | IllegalArgumentException e) { throw new IllegalArgumentException(PARSE_FAILED + date, e); } } // CHECKSTYLE INNER_ASSIGNMENT: ON /** * Check if the expected character exists at the given offset of the given string. * * @param value the string to check at the specified offset * @param offset the offset to look for the expected character * @param expected the expected character * @throws IndexOutOfBoundsException if the expected character is not found */ private static void checkOffset(String value, int offset, char expected) throws IndexOutOfBoundsException { char found = value.charAt(offset); if (found != expected) { throw new IndexOutOfBoundsException("Expected '" + expected + "' character but found '" + found + "'"); } } /** * Parse an integer located between 2 given offsets in a string * * @param value the string to parse * @param beginIndex the start index for the integer in the string * @param endIndex the end index for the integer in the string * @return the int * @throws NumberFormatException if the value is not a number */ private static int parseInt(String value, int beginIndex, int endIndex) throws NumberFormatException { if (beginIndex < 0 || endIndex > value.length() || beginIndex > endIndex) { throw new NumberFormatException(value); } // use the same logic as in Integer.parseInt(), but less generic: we're not supporting negative values int i = beginIndex; 
int result = 0; int digit; if (i < endIndex) { digit = Character.digit(value.charAt(i++), 10); if (digit < 0) { throw new NumberFormatException("Invalid number: " + value); } result = -digit; } while (i < endIndex) { digit = Character.digit(value.charAt(i++), 10); if (digit < 0) { throw new NumberFormatException("Invalid number: " + value); } result *= 10; result -= digit; } return -result; } /** * Zero pad a number to a specified length * * @param buffer buffer to use for padding * @param value the integer value to pad if necessary. * @param length the length of the string we should zero pad */ private static void padInt(StringBuilder buffer, int value, int length) { String strValue = Integer.toString(value); for (int i = length - strValue.length(); i > 0; i--) { buffer.append('0'); } buffer.append(strValue); } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/Iso8601NozoneFormat.java
213,864
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.mapper; import be.wegenenverkeer.common.resteasy.exception.ExceptionUtil; import be.wegenenverkeer.common.resteasy.exception.ValidationException; import be.wegenenverkeer.common.resteasy.json.RestJsonMapper; import be.wegenenverkeer.common.resteasy.logging.PreProcessLoggingInterceptor; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import java.io.IOException; import java.util.List; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; /** * ExceptionMapper for ValidationException. */ @Provider @Component public class ValidationExceptionMapper implements ExceptionMapper<ValidationException> { @Autowired private RestJsonMapper jsonMapper; @Autowired private PreProcessLoggingInterceptor preProcessLoggingInterceptor; @Context private HttpServletRequest request; @Override public Response toResponse(ValidationException exception) { try { StringBuilder msg = new StringBuilder("Applicatie gaf een (verwachtte) ValidationException:"); for (Map.Entry<String, List<String>> entry : exception.getExceptions().entrySet()) { msg.append('\n').append(entry.getKey()).append(": "); String indent = ""; for (String m : entry.getValue()) { msg.append(indent).append(m); indent = "\n "; } } preProcessLoggingInterceptor.postProcessError(exception, msg.toString()); return Response.status(Response.Status.PRECONDITION_FAILED).entity("{ \"error\" : " + jsonMapper.writeValueAsString(exception.getExceptions()) + "}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } catch (IOException e) { ExceptionUtil eu = new ExceptionUtil(exception); return Response.status(Response.Status.PRECONDITION_FAILED) .entity("{ \"error\" : {\"validatie\":[\"" + eu.getEscapedConcatenatedMessage() + "\"]}}") .header("Access-Control-Allow-Origin", request.getHeader("Origin")) .header("Access-Control-Allow-Credentials", true) .build(); } } }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/mapper/ValidationExceptionMapper.java
213,865
/* * This file is part of wegenenverkeer common-resteasy. * Copyright (c) AWV Agentschap Wegen en Verkeer, Vlaamse Gemeenschap * The program is available in open source according to the Apache License, Version 2.0. * For full licensing details, see LICENSE.txt in the project root. */ package be.wegenenverkeer.common.resteasy.json; import org.apache.commons.lang3.StringUtils; import org.jboss.resteasy.util.DateUtil; import java.text.DateFormat; import java.text.FieldPosition; import java.text.ParseException; import java.text.ParsePosition; import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Date; import java.util.Locale; /** * Provide a fast thread-safe formatter/parser DateFormat for ISO8601 dates and several other common formats. * It was mainly done to be used with Jackson JSON Processor. * <p/> * Watch out for the clone implementation, which returns itself. * <p/> * All methods other than parse, format and clone have undefined behavior. * * @see com.fasterxml.jackson.databind.util.ISO8601Utils */ public class Iso8601AndOthersDateFormat extends DateFormat { private static final long serialVersionUID = 1L; private Iso8601NozoneFormat iso8601NozoneFormat = new Iso8601NozoneFormat(); /** * Date formats. */ private static final String[] FORMATS = { "dd/MM/yyyy", "yyyy-MM-dd H:m:s", "yyyy-MM-dd H:m", DateUtil.PATTERN_RFC1036, DateUtil.PATTERN_RFC1123, DateUtil.PATTERN_ASCTIME, "EEE MMM d HH:mm:ss zzz yyyy", "yyyyMMddHHmmss", }; @Override public StringBuffer format(Date date, StringBuffer toAppendTo, FieldPosition fieldPosition) { return iso8601NozoneFormat.format(date, toAppendTo, fieldPosition); } @Override public Date parse(String str, ParsePosition pos) { Date date = null; if (!StringUtils.isBlank(str)) { // try ISO 8601 format first try { return iso8601NozoneFormat.parse(str, pos); } catch (IllegalArgumentException iae) { // ignore, try next format date = null; // dummy } // then try a list of formats for (String format : FORMATS) { DateFormat formatter = new SimpleDateFormat(format, Locale.US); try { return formatter.parse(str); } catch (ParseException e) { // ignore, try next format date = null; // dummy } } throw new IllegalArgumentException("Could not parse date " + str + " using ISO 8601 or any of the formats " + Arrays.asList(FORMATS) + "."); } return date; // empty string } // CHECKSTYLE CLONE: OFF @Override public Object clone() { return this; // jackson calls clone every time. We are thread-safe so just return the instance } // CHECKSTYLE CLONE: ON }
WegenenVerkeer/common-resteasy
resteasy/src/main/java/be/wegenenverkeer/common/resteasy/json/Iso8601AndOthersDateFormat.java
213,866
import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; /** * Created by Dmitry on 30.06.2015. */ public class MAAGAIKReader extends AbstractReaderFromUrl { private static Elements tables; public static final String URL_MIIGAIK = "http://priem.miigaik.ru/konkyrs/spiski/och/budzhet/20150623135902-4183.htm"; @Override public void readData() throws IOException { Document doc = Jsoup.connect(URL_MIIGAIK).get(); tables = doc.select("table"); } @Override public Map<String, ArrayList<EnrolleeData>> getData() { return null; } private ArrayList<EnrolleeData> getCourseData() { return null; } public static void main(String[] args) throws IOException { MAAGAIKReader maagaikReader = new MAAGAIKReader(); maagaikReader.readData(); for (Element temp : tables) { } } }
bvc3at/SupplyApp
src/MAAGAIKReader.java
213,867
/* * Copyright 2016 KairosDB Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kairosdb.core.aggregator; import com.google.inject.Inject; import static com.google.common.base.Preconditions.checkState; import java.util.ArrayList; import java.util.List; import java.util.Set; import org.kairosdb.core.DataPoint; import org.kairosdb.core.annotation.FeatureComponent; import org.kairosdb.core.annotation.FeatureProperty; import org.kairosdb.core.annotation.ValidationProperty; import org.kairosdb.core.datapoints.DoubleDataPointFactory; import org.kairosdb.core.datastore.DataPointGroup; import org.kairosdb.core.groupby.GroupByResult; import org.kairosdb.plugin.Aggregator; @FeatureComponent( name = "sma", label = "SMA", description = "Simple moving average." ) public class SmaAggregator implements Aggregator { private DoubleDataPointFactory m_dataPointFactory; //@NonZero @FeatureProperty( label = "Size", description = "The period of the moving average. This is the number of data points to use each time the average is calculated.", default_value = "10", validations = { @ValidationProperty( expression = "value > 0", message = "Size must be greater than 0." ) } ) private int m_size; @Inject public SmaAggregator(DoubleDataPointFactory dataPointFactory) { m_dataPointFactory = dataPointFactory; } @Override public boolean canAggregate(String groupType) { return DataPoint.GROUP_NUMBER.equals(groupType); } @Override public String getAggregatedGroupType(String groupType) { return m_dataPointFactory.getGroupType(); } @Override public void init() { } @Override public DataPointGroup aggregate(DataPointGroup dataPointGroup) { checkState(m_size != 0); return new SmaDataPointGroup(dataPointGroup); } public void setSize(int size) { m_size = size; } private class SmaDataPointGroup implements DataPointGroup { private DataPointGroup m_innerDataPointGroup; ArrayList<DataPoint> subSet = new ArrayList<DataPoint>(); public SmaDataPointGroup(DataPointGroup innerDataPointGroup) { m_innerDataPointGroup = innerDataPointGroup; for(int i=0;i<m_size-1;i++){ if (innerDataPointGroup.hasNext()){ subSet.add(innerDataPointGroup.next()); } } } @Override public boolean hasNext() { return (m_innerDataPointGroup.hasNext()); } @Override public DataPoint next() { DataPoint dp = m_innerDataPointGroup.next(); subSet.add(dp); if(subSet.size()>m_size){ subSet.remove(0); } double sum = 0; for(int i=0;i<subSet.size();i++){ DataPoint dpt = subSet.get(i); sum += dpt.getDoubleValue(); } dp = m_dataPointFactory.createDataPoint(dp.getTimestamp(), sum / subSet.size()); //System.out.println(new SimpleDateFormat("MM/dd/yyyy HH:mm").format(dp.getTimestamp())+" "+sum+" "+subSet.size()); return (dp); } @Override public void remove() { m_innerDataPointGroup.remove(); } @Override public String getName() { return (m_innerDataPointGroup.getName()); } @Override public List<GroupByResult> getGroupByResult() { return (m_innerDataPointGroup.getGroupByResult()); } @Override public void close() { m_innerDataPointGroup.close(); } @Override public Set<String> 
getTagNames() { return (m_innerDataPointGroup.getTagNames()); } @Override public Set<String> getTagValues(String tag) { return (m_innerDataPointGroup.getTagValues(tag)); } } }
kairosdb/kairosdb
src/main/java/org/kairosdb/core/aggregator/SmaAggregator.java
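For reference, the windowed-average logic that SmaDataPointGroup.next() implements above reduces to the following standalone sketch. The Sma class and the sample values are illustrative only and are not part of the KairosDB sources.

import java.util.ArrayDeque;
import java.util.Deque;

class Sma {
    private final int size;
    private final Deque<Double> window = new ArrayDeque<>();
    private double sum;

    Sma(int size) { this.size = size; }

    // Add one value and return the average of the last `size` values seen so far
    // (a partial window is averaged over however many values it holds, as above).
    double next(double value) {
        window.addLast(value);
        sum += value;
        if (window.size() > size) {
            sum -= window.removeFirst(); // evict the oldest value
        }
        return sum / window.size();
    }
}
// new Sma(3) fed 1, 2, 3, 4 yields 1.0, 1.5, 2.0, 3.0.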
213,870
/*
 * *****************************************************************************
 * Copyright (C) 2019 Testsigma Technologies Inc.
 * All rights reserved.
 * *****************************************************************************
 */

package com.testsigma.agent;

import com.testsigma.agent.init.WrapperConnector;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;

@EnableScheduling
@EnableWebMvc
@SpringBootApplication(scanBasePackages = {"com.testsigma.agent", "com.testsigma.automator"})
@Log4j2
public class TestsigmaAgent {

  public static void main(String[] args) {
    System.setProperty("webdriver.http.factory", "jdk-http-client");

    String wrapperPort = System.getProperty("agent.wrapper.port");
    if (StringUtils.isNotBlank(wrapperPort)) {
      WrapperConnector.getInstance().disconnectHook();
    }

    Thread.currentThread().setName("TestsigmaAgent");
    ConfigurableApplicationContext c = SpringApplication.run(TestsigmaAgent.class, args);

    if (StringUtils.isNotBlank(wrapperPort)) {
      WrapperConnector.getInstance().connect();
    }
  }
}
testsigmahq/testsigma
agent/src/main/java/com/testsigma/agent/TestsigmaAgent.java
213,872
/* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dremio.exec.store.metadatarefresh.schemaagg; import com.dremio.common.exceptions.UserException; import com.dremio.common.types.SupportsTypeCoercionsAndUpPromotions; import com.dremio.exec.catalog.CatalogOptions; import com.dremio.exec.catalog.ColumnCountTooLargeException; import com.dremio.exec.exception.NoSupportedUpPromotionOrCoercionException; import com.dremio.exec.physical.config.TableFunctionConfig; import com.dremio.exec.record.BatchSchema; import com.dremio.exec.record.VectorAccessible; import com.dremio.exec.store.dfs.AbstractTableFunction; import com.dremio.exec.store.metadatarefresh.MetadataRefreshExecConstants; import com.dremio.exec.util.VectorUtil; import com.dremio.sabot.exec.context.OperatorContext; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; import org.apache.arrow.vector.VarBinaryVector; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.util.TransferPair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Table function which given a list of BatchSchema's will merge them into one final batch schema * applying up promotions for fields if needed. 
* * <p>Input Vector: 1) A VarBinaryVector with filed name #{@link * MetadataRefreshExecConstants.SchemaAgg.INPUT_SCHEMA} which should have serialized batch schema's * * <p>Output Vector - 1) A varbinaryVector of size 1 with the merged schema */ public class SchemaAggTableFunction extends AbstractTableFunction implements SupportsTypeCoercionsAndUpPromotions { private static final Logger logger = LoggerFactory.getLogger(SchemaAggTableFunction.class); private VarBinaryVector outputFileSchemaVector; private VarBinaryVector inputFileSchemaVector; private BatchSchema reconciledSchema = new BatchSchema(Collections.EMPTY_LIST); private BatchSchema currentSchema; private boolean processedRow; private List<TransferPair> transferPairs = new ArrayList<>(); public SchemaAggTableFunction(OperatorContext context, TableFunctionConfig functionConfig) { super(context, functionConfig); } @Override public VectorAccessible setup(VectorAccessible accessible) throws Exception { this.incoming = accessible; this.inputFileSchemaVector = (VarBinaryVector) VectorUtil.getVectorFromSchemaPath( incoming, MetadataRefreshExecConstants.FooterRead.OUTPUT_SCHEMA.FILE_SCHEMA); this.outgoing = context.createOutputVectorContainer(incoming.getSchema()); List<Field> fieldList = incoming.getSchema().getFields(); Optional<Field> schemaField = fieldList.stream() .filter( field -> field .getName() .equals(MetadataRefreshExecConstants.SchemaAgg.INPUT_SCHEMA.SCHEMA)) .findFirst(); if (!schemaField.isPresent()) { throw new IllegalStateException( String.format( "%s having schema not found in the input list of fields provided", MetadataRefreshExecConstants.FooterRead.OUTPUT_SCHEMA.FILE_SCHEMA)); } incoming.forEach( vw -> { String fieldName = vw.getField().getName(); if (!fieldName.equals(MetadataRefreshExecConstants.SchemaAgg.INPUT_SCHEMA.SCHEMA)) { transferPairs.add( vw.getValueVector() .makeTransferPair(VectorUtil.getVectorFromSchemaPath(outgoing, fieldName))); } }); this.outputFileSchemaVector = (VarBinaryVector) VectorUtil.getVectorFromSchemaPath( outgoing, MetadataRefreshExecConstants.FooterRead.OUTPUT_SCHEMA.FILE_SCHEMA); return outgoing; } @Override public void startRow(int row) throws Exception { this.currentSchema = getBatchSchemaFrom(row); logger.debug("Processing row {}", row); this.processedRow = false; } @Override public int processRow(int startOutIndex, int maxRecords) throws Exception { if (this.processedRow) { return 0; } logger.debug("Processing schema {}", currentSchema.toJSONString()); // Will just try and reconcile the schema try { this.reconciledSchema = reconciledSchema.mergeWithUpPromotion(currentSchema, this); } catch (NoSupportedUpPromotionOrCoercionException e) { throw UserException.unsupportedError(e).message(e.getMessage()).build(logger); } logger.debug( "Merged schema after processing row {} is {}", startOutIndex, reconciledSchema.toJSONString()); if (reconciledSchema.getTotalFieldCount() > context.getOptions().getOption(CatalogOptions.METADATA_LEAF_COLUMN_MAX)) { throw new ColumnCountTooLargeException( (int) context.getOptions().getOption(CatalogOptions.METADATA_LEAF_COLUMN_MAX)); } this.processedRow = true; // Output table function if (startOutIndex == inputFileSchemaVector.getValueCount() - 1 || maxRecords == 1) { logger.debug( "Writing final reconciledSchema to the output. 
ReconciledSchema = {}", reconciledSchema.toJSONString()); this.outputFileSchemaVector.setSafe(0, this.reconciledSchema.serialize()); this.outputFileSchemaVector.setValueCount(1); transferPairs.forEach(TransferPair::transfer); } return 1; } @Override public void closeRow() throws Exception {} private BatchSchema getBatchSchemaFrom(int index) { byte[] bytes = inputFileSchemaVector.get(index); if (bytes == null) { throw new IllegalStateException( String.format( "Schema not found at index %s of %s vector", index, inputFileSchemaVector.getField().getName())); } return BatchSchema.deserialize(bytes); } }
dremio/dremio-oss
sabot/kernel/src/main/java/com/dremio/exec/store/metadatarefresh/schemaagg/SchemaAggTableFunction.java
213,874
package com.dataiku.hive.udf.maths; /** * Author: Matthieu Scordia * Date: 04/03/14 * Time: 15:12 */ import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; import org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; /** * UDFExponentialSmoothingMovingAverage * */ @Description(name = "moving_avg", value = "_FUNC_(p, x, windows, div) - Returns the moving mean of a set of numbers over a window of n observations 1/pow(div,i)") public class UDFExponentialSmoothingMovingAverage extends AbstractGenericUDAFResolver { static final Log LOG = LogFactory.getLog(UDFExponentialSmoothingMovingAverage.class.getName()); @Override public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { //System.out.println("check getEvaluator in"); //We need exactly three parameters if (parameters.length != 5) { throw new UDFArgumentTypeException(parameters.length - 1, "Moving Average requires 5 parameters"); } //check the first parameter to make sure they type is numeric if (parameters[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(0, "Only primitive, numeric types can have a moving average but "+ parameters[0].getTypeName() + "was passed."); } // if it's a primative, let's make sure it's numeric switch(((PrimitiveTypeInfo) parameters[0]).getPrimitiveCategory()) { //fall through all numeric primitives case FLOAT: case DOUBLE: case INT: case LONG: case SHORT: break; default: throw new UDFArgumentTypeException(0, "Only numeric type arguments (excluding bytes and timestamps) are accepted"+ "but " + parameters[0].getTypeName() + " was passed."); } // check the second parameter if (parameters[1].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(0, "Only primitive, numeric types can have a moving average but "+ parameters[1].getTypeName() + "was passed."); } // if it's a primative, let's make sure it's numeric switch(((PrimitiveTypeInfo) parameters[1]).getPrimitiveCategory()) { //fall through all numeric primitives case FLOAT: case DOUBLE: case INT: case LONG: case SHORT: break; default: throw new UDFArgumentTypeException(0, "Only numeric type arguments (excluding bytes and timestamps) are accepted"+ "but " + parameters[1].getTypeName() + " was passed."); } // ensure that the window size is an integer if (parameters[2].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(1, "ensure that the window size is an integer"); } if 
(((PrimitiveTypeInfo) parameters[2]).getPrimitiveCategory() != PrimitiveObjectInspector.PrimitiveCategory.INT) { throw new UDFArgumentTypeException(1, "ensure that the window size is an integer"); } // ensure that the diviseur is a double if (parameters[3].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(1, "ensure that the diviseur is a double"); } if (((PrimitiveTypeInfo) parameters[3]).getPrimitiveCategory() != PrimitiveObjectInspector.PrimitiveCategory.DOUBLE) { throw new UDFArgumentTypeException(1, "ensure that the diviseur is a double"); } // ensure that the position is a int. if (parameters[4].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(1, "ensure that the position is a int."); } if (((PrimitiveTypeInfo) parameters[4]).getPrimitiveCategory() != PrimitiveObjectInspector.PrimitiveCategory.INT) { throw new UDFArgumentTypeException(1, "ensure that the position is a int."); } //System.out.println("check getEvaluator out"); return new GenericUDAFMovingAverageEvaluator(); } public static class GenericUDAFMovingAverageEvaluator extends GenericUDAFEvaluator { // input inspectors for PARTIAL1 and COMPLETE private PrimitiveObjectInspector periodOI; private PrimitiveObjectInspector inputOI; private PrimitiveObjectInspector windowSizeOI; private PrimitiveObjectInspector diviseurOI; private PrimitiveObjectInspector positionOI; // input inspectors for PARTIAL2 and FINAL // list for MAs and one for residuals private StandardListObjectInspector loi; @Override public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { super.init(m, parameters); // initialize input inspectors if (m == Mode.PARTIAL1 || m == Mode.COMPLETE) { assert(parameters.length == 5); periodOI = (PrimitiveObjectInspector) parameters[0]; inputOI = (PrimitiveObjectInspector) parameters[1]; windowSizeOI = (PrimitiveObjectInspector) parameters[2]; diviseurOI = (PrimitiveObjectInspector) parameters[3]; positionOI = (PrimitiveObjectInspector) parameters[4]; } else { loi = (StandardListObjectInspector) parameters[0]; } // init output object inspectors if (m == Mode.PARTIAL1 || m == Mode.PARTIAL2) { // The output of a partial aggregation is a list of doubles representing the // moving average being constructed. 
// the first element in the list will be the window size // return ObjectInspectorFactory.getStandardListObjectInspector( PrimitiveObjectInspectorFactory.writableDoubleObjectInspector); } else { return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector; } } @Override public Object terminatePartial(AggregationBuffer agg) throws HiveException { // return an ArrayList where the first parameter is the window size MaAgg myagg = (MaAgg) agg; return myagg.prefixSum.serialize(); } @Override public Object terminate(AggregationBuffer agg) throws HiveException { // final return value goes here MaAgg myagg = (MaAgg) agg; if (myagg.prefixSum.tableSize() < 1) { return null; } else { ArrayList<DoubleWritable[]> result = new ArrayList<DoubleWritable[]>(); DoubleWritable[] entry = new DoubleWritable[1]; entry[0] = new DoubleWritable(myagg.prefixSum.getEntry(myagg.prefixSum.tableSize()-1).movingAverage); return entry[0]; } } @SuppressWarnings("unchecked") @Override public void merge(AggregationBuffer agg, Object partial) throws HiveException { // if we're merging two separate sets we're creating one table that's doubly long if (partial != null) { MaAgg myagg = (MaAgg) agg; List<DoubleWritable> partialMovingAverage = (List<DoubleWritable>) loi.getList(partial); myagg.prefixSum.merge(partialMovingAverage); } } @Override public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { assert (parameters.length == 5); if (parameters[0] == null || parameters[1] == null || parameters[2] == null || parameters[3] == null || parameters[4] == null) { return; } MaAgg myagg = (MaAgg) agg; // Parse out the window size just once if we haven't done so before. We need a window of at least 1, // otherwise there's no window. if (!myagg.prefixSum.isReady()) { int windowSize = PrimitiveObjectInspectorUtils.getInt(parameters[2], windowSizeOI); double diviseur = PrimitiveObjectInspectorUtils.getDouble(parameters[3], diviseurOI); int position = PrimitiveObjectInspectorUtils.getInt(parameters[4], positionOI); if (windowSize < 1) { throw new HiveException(getClass().getSimpleName() + " needs a window size >= 1"); } myagg.prefixSum.allocate(windowSize, diviseur, position); } //Add the current data point and compute the average int p = PrimitiveObjectInspectorUtils.getInt(parameters[0], periodOI); double v = PrimitiveObjectInspectorUtils.getDouble(parameters[1], inputOI); myagg.prefixSum.add(p,v); } // Aggregation buffer definition and manipulation methods static class MaAgg implements AggregationBuffer { PrefixSumMovingAverage prefixSum; }; @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { MaAgg result = new MaAgg(); reset(result); return result; } @Override public void reset(AggregationBuffer agg) throws HiveException { MaAgg myagg = (MaAgg) agg; myagg.prefixSum = new PrefixSumMovingAverage(); myagg.prefixSum.reset(); } } }
dataiku/dataiku-hive-udf
src/com/dataiku/hive/udf/maths/UDFExponentialSmoothingMovingAverage.java
213,875
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.resourcemanager.monitor.implementation; import com.azure.core.util.logging.ClientLogger; import com.azure.resourcemanager.monitor.MonitorManager; import com.azure.resourcemanager.monitor.models.DynamicMetricCriteria; import com.azure.resourcemanager.monitor.models.MetricAlert; import com.azure.resourcemanager.monitor.models.MetricAlertAction; import com.azure.resourcemanager.monitor.models.MetricAlertCondition; import com.azure.resourcemanager.monitor.models.MetricAlertCriteria; import com.azure.resourcemanager.monitor.models.MetricAlertMultipleResourceMultipleMetricCriteria; import com.azure.resourcemanager.monitor.models.MetricAlertSingleResourceMultipleMetricCriteria; import com.azure.resourcemanager.monitor.models.MetricCriteria; import com.azure.resourcemanager.monitor.models.MetricDynamicAlertCondition; import com.azure.resourcemanager.monitor.models.MultiMetricCriteria; import com.azure.resourcemanager.monitor.fluent.models.MetricAlertResourceInner; import com.azure.resourcemanager.resources.fluentcore.arm.models.HasId; import com.azure.resourcemanager.resources.fluentcore.arm.models.Resource; import com.azure.resourcemanager.resources.fluentcore.arm.models.implementation.GroupableResourceImpl; import java.time.Duration; import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.TreeMap; import reactor.core.publisher.Mono; /** Implementation for MetricAlert. */ class MetricAlertImpl extends GroupableResourceImpl<MetricAlert, MetricAlertResourceInner, MetricAlertImpl, MonitorManager> implements MetricAlert, MetricAlert.Definition, MetricAlert.DefinitionMultipleResource, MetricAlert.Update, MetricAlert.UpdateStages.WithMetricUpdate { private final ClientLogger logger = new ClientLogger(getClass()); // 2019/09 at present service support 2 static criteria, or 1 dynamic criteria // static criteria private Map<String, MetricAlertCondition> conditions; // dynamic criteria private Map<String, MetricDynamicAlertCondition> dynamicConditions; private boolean multipleResource = false; MetricAlertImpl(String name, final MetricAlertResourceInner innerModel, final MonitorManager monitorManager) { super(name, innerModel, monitorManager); this.conditions = new TreeMap<>(); this.dynamicConditions = new TreeMap<>(); if (innerModel.criteria() != null) { MetricAlertCriteria innerCriteria = innerModel.criteria(); if (innerCriteria instanceof MetricAlertSingleResourceMultipleMetricCriteria) { multipleResource = false; // single resource with multiple static criteria MetricAlertSingleResourceMultipleMetricCriteria crits = (MetricAlertSingleResourceMultipleMetricCriteria) innerCriteria; List<MetricCriteria> criteria = crits.allOf(); if (criteria != null) { for (MetricCriteria crit : criteria) { this.conditions.put(crit.name(), new MetricAlertConditionImpl(crit.name(), crit, this)); } } } else if (innerCriteria instanceof MetricAlertMultipleResourceMultipleMetricCriteria) { multipleResource = true; // multiple resource with either multiple static criteria, or (currently single) dynamic criteria MetricAlertMultipleResourceMultipleMetricCriteria crits = (MetricAlertMultipleResourceMultipleMetricCriteria) innerCriteria; List<MultiMetricCriteria> criteria = crits.allOf(); if (criteria != null) { for (MultiMetricCriteria crit : criteria) { if (crit instanceof MetricCriteria) { 
this .conditions .put( crit.name(), new MetricAlertConditionImpl(crit.name(), (MetricCriteria) crit, this)); } else if (crit instanceof DynamicMetricCriteria) { this .dynamicConditions .put( crit.name(), new MetricDynamicAlertConditionImpl( crit.name(), (DynamicMetricCriteria) crit, this)); } } } } } } @Override public Mono<MetricAlert> createResourceAsync() { if (this.conditions.isEmpty() && this.dynamicConditions.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("Condition cannot be empty")); } else if (!this.conditions.isEmpty() && !this.dynamicConditions.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("Static condition and dynamic condition cannot co-exist")); } this.innerModel().withLocation("global"); if (!this.conditions.isEmpty()) { if (!multipleResource) { MetricAlertSingleResourceMultipleMetricCriteria crit = new MetricAlertSingleResourceMultipleMetricCriteria(); crit.withAllOf(new ArrayList<>()); for (MetricAlertCondition mc : conditions.values()) { crit.allOf().add(mc.innerModel()); } this.innerModel().withCriteria(crit); } else { MetricAlertMultipleResourceMultipleMetricCriteria crit = new MetricAlertMultipleResourceMultipleMetricCriteria(); crit.withAllOf(new ArrayList<>()); for (MetricAlertCondition mc : conditions.values()) { crit.allOf().add(mc.innerModel()); } this.innerModel().withCriteria(crit); } } else if (!this.dynamicConditions.isEmpty()) { MetricAlertMultipleResourceMultipleMetricCriteria crit = new MetricAlertMultipleResourceMultipleMetricCriteria(); crit.withAllOf(new ArrayList<>()); for (MetricDynamicAlertCondition mc : dynamicConditions.values()) { crit.allOf().add(mc.innerModel()); } this.innerModel().withCriteria(crit); } return this .manager() .serviceClient() .getMetricAlerts() .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<MetricAlertResourceInner> getInnerAsync() { return this.manager().serviceClient().getMetricAlerts() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public MetricAlertImpl withTargetResource(String resourceId) { multipleResource = false; this.innerModel().withScopes(new ArrayList<>()); this.innerModel().scopes().add(resourceId); return this; } @Override public MetricAlertImpl withTargetResource(HasId resource) { multipleResource = false; return this.withTargetResource(resource.id()); } @Override public MetricAlertImpl withPeriod(Duration size) { this.innerModel().withWindowSize(size); return this; } @Override public MetricAlertImpl withFrequency(Duration frequency) { this.innerModel().withEvaluationFrequency(frequency); return this; } @Override public MetricAlertImpl withSeverity(int severity) { this.innerModel().withSeverity(severity); return this; } @Override public MetricAlertImpl withAlertDetails(int severity, String description) { this.withSeverity(severity); return this.withDescription(description); } @Override public MetricAlertImpl withDescription(String description) { this.innerModel().withDescription(description); return this; } @Override public MetricAlertImpl withRuleEnabled() { this.innerModel().withEnabled(true); return this; } @Override public MetricAlertImpl withRuleDisabled() { this.innerModel().withEnabled(false); return this; } @Override public MetricAlertImpl withAutoMitigation() { this.innerModel().withAutoMitigate(true); return this; } @Override public MetricAlertImpl withoutAutoMitigation() { this.innerModel().withAutoMitigate(false); return this; 
} @Override public MetricAlertImpl withActionGroups(String... actionGroupId) { if (this.innerModel().actions() == null) { this.innerModel().withActions(new ArrayList<MetricAlertAction>()); } this.innerModel().actions().clear(); for (String agid : actionGroupId) { MetricAlertAction maa = new MetricAlertAction(); maa.withActionGroupId(agid); this.innerModel().actions().add(maa); } return this; } @Override public MetricAlertImpl withoutActionGroup(String actionGroupId) { if (this.innerModel().actions() != null) { List<MetricAlertAction> toDelete = new ArrayList<>(); for (MetricAlertAction maa : this.innerModel().actions()) { if (maa.actionGroupId().equalsIgnoreCase(actionGroupId)) { toDelete.add(maa); } } this.innerModel().actions().removeAll(toDelete); } return this; } @Override public MetricAlertConditionImpl defineAlertCriteria(String name) { return new MetricAlertConditionImpl(name, new MetricCriteria(), this); } @Override public MetricDynamicAlertConditionImpl defineDynamicAlertCriteria(String name) { return new MetricDynamicAlertConditionImpl(name, new DynamicMetricCriteria(), this); } @Override public MetricAlertConditionImpl updateAlertCriteria(String name) { return (MetricAlertConditionImpl) this.conditions.get(name); } @Override public MetricDynamicAlertConditionImpl updateDynamicAlertCriteria(String name) { return (MetricDynamicAlertConditionImpl) this.dynamicConditions.get(name); } @Override public MetricAlertImpl withoutAlertCriteria(String name) { if (this.conditions.containsKey(name)) { this.conditions.remove(name); } if (this.dynamicConditions.containsKey(name)) { this.dynamicConditions.remove(name); } return this; } MetricAlertImpl withAlertCriteria(MetricAlertConditionImpl criteria) { this.withoutAlertCriteria(criteria.name()); this.conditions.put(criteria.name(), criteria); return this; } MetricAlertImpl withDynamicAlertCriteria(MetricDynamicAlertConditionImpl criteria) { this.withoutAlertCriteria(criteria.name()); this.dynamicConditions.put(criteria.name(), criteria); return this; } @Override public MetricAlertImpl withMultipleTargetResources(Collection<String> resourceIds, String type, String region) { if (resourceIds == null || resourceIds.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("Target resource cannot be empty")); } multipleResource = true; this.innerModel().withScopes(new ArrayList<>(resourceIds)); this.innerModel().withTargetResourceType(type); this.innerModel().withTargetResourceRegion(region); return this; } @Override public MetricAlertImpl withMultipleTargetResources(Collection<? 
extends Resource> resources) { if (resources == null || resources.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("Target resource cannot be empty")); } multipleResource = true; List<String> resourceIds = new ArrayList<>(); String type = resources.iterator().next().type(); String regionName = resources.iterator().next().regionName(); for (Resource resource : resources) { if (!type.equalsIgnoreCase(resource.type()) || !regionName.equalsIgnoreCase(resource.regionName())) { throw logger.logExceptionAsError(new IllegalArgumentException( "Target resource must be of the same resource type and in the same region")); } resourceIds.add(resource.id()); } return this.withMultipleTargetResources(resourceIds, type, regionName); } @Override public String description() { return this.innerModel().description(); } @Override public int severity() { return this.innerModel().severity(); } @Override public boolean enabled() { return this.innerModel().enabled(); } @Override public Duration evaluationFrequency() { return this.innerModel().evaluationFrequency(); } @Override public Duration windowSize() { return this.innerModel().windowSize(); } @Override public boolean autoMitigate() { return this.innerModel().autoMitigate(); } @Override public OffsetDateTime lastUpdatedTime() { return this.innerModel().lastUpdatedTime(); } @Override public Collection<String> scopes() { return Collections.unmodifiableCollection(this.innerModel().scopes()); } @Override public Collection<String> actionGroupIds() { if (this.innerModel().actions() != null && this.innerModel().actions() != null) { List<String> ids = new ArrayList<>(); for (MetricAlertAction maag : this.innerModel().actions()) { ids.add(maag.actionGroupId()); } return Collections.unmodifiableCollection(ids); } return Collections.emptyList(); } @Override public Map<String, MetricAlertCondition> alertCriterias() { return Collections.unmodifiableMap(this.conditions); } @Override public Map<String, MetricDynamicAlertCondition> dynamicAlertCriterias() { return Collections.unmodifiableMap(this.dynamicConditions); } }
Azure/azure-sdk-for-java
sdk/resourcemanagerhybrid/azure-resourcemanager-monitor/src/main/java/com/azure/resourcemanager/monitor/implementation/MetricAlertImpl.java
213,876
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.sdk.schemas.transforms; import com.google.auto.value.AutoValue; import java.io.Serializable; import java.util.List; import java.util.stream.Collectors; import org.apache.beam.sdk.coders.CannotProvideCoderException; import org.apache.beam.sdk.coders.Coder; import org.apache.beam.sdk.coders.CoderRegistry; import org.apache.beam.sdk.schemas.FieldAccessDescriptor; import org.apache.beam.sdk.schemas.FieldTypeDescriptors; import org.apache.beam.sdk.schemas.Schema; import org.apache.beam.sdk.schemas.Schema.Field; import org.apache.beam.sdk.schemas.Schema.FieldType; import org.apache.beam.sdk.schemas.SchemaCoder; import org.apache.beam.sdk.schemas.utils.RowSelector; import org.apache.beam.sdk.schemas.utils.SelectHelpers; import org.apache.beam.sdk.schemas.utils.SelectHelpers.RowSelectorContainer; import org.apache.beam.sdk.transforms.Combine.CombineFn; import org.apache.beam.sdk.transforms.CombineFns; import org.apache.beam.sdk.transforms.CombineFns.CoCombineResult; import org.apache.beam.sdk.transforms.CombineFns.ComposedCombineFn; import org.apache.beam.sdk.transforms.SimpleFunction; import org.apache.beam.sdk.values.Row; import org.apache.beam.sdk.values.TupleTag; import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.Preconditions; import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.Lists; import org.checkerframework.checker.nullness.qual.Nullable; /** This is the builder used by {@link Group} to build up a composed {@link CombineFn}. */ @SuppressWarnings({ "nullness", // TODO(https://github.com/apache/beam/issues/20497) "rawtypes" }) class SchemaAggregateFn { static Inner create() { return new AutoValue_SchemaAggregateFn_Inner.Builder() .setFieldAggregations(Lists.newArrayList()) .build(); } /** Implementation of {@link #create}. */ @AutoValue @AutoValue.CopyAnnotations @SuppressWarnings("rawtypes") abstract static class Inner extends CombineFn<Row, Object[], Row> { // Represents an aggregation of one or more fields. static class FieldAggregation<FieldT, AccumT, OutputT> implements Serializable { FieldAccessDescriptor fieldsToAggregate; private final boolean aggregateBaseValues; // The specification of the output field. private final Field outputField; // The combine function. private final CombineFn<FieldT, AccumT, OutputT> fn; // The TupleTag identifying this aggregation element in the composed combine fn. private final TupleTag<Object> combineTag; // The schema corresponding to the subset of input fields being aggregated. private final @Nullable Schema inputSubSchema; private final @Nullable FieldAccessDescriptor flattenedFieldAccessDescriptor; // The flattened version of inputSubSchema. 
private final @Nullable Schema flattenedInputSubSchema; // The output schema resulting from the aggregation. private final Schema aggregationSchema; private final boolean needsFlattening; FieldAggregation( FieldAccessDescriptor fieldsToAggregate, boolean aggregateBaseValues, Field outputField, CombineFn<FieldT, AccumT, OutputT> fn, TupleTag<Object> combineTag) { this( fieldsToAggregate, aggregateBaseValues, outputField, fn, combineTag, Schema.builder().addField(outputField).build(), null); } FieldAggregation( FieldAccessDescriptor fieldsToAggregate, boolean aggregateBaseValues, Field outputField, CombineFn<FieldT, AccumT, OutputT> fn, TupleTag<Object> combineTag, Schema aggregationSchema, @Nullable Schema inputSchema) { this.aggregateBaseValues = aggregateBaseValues; if (inputSchema != null) { this.fieldsToAggregate = fieldsToAggregate.resolve(inputSchema); if (aggregateBaseValues) { Preconditions.checkArgument(fieldsToAggregate.referencesSingleField()); } this.inputSubSchema = SelectHelpers.getOutputSchema(inputSchema, this.fieldsToAggregate); this.flattenedFieldAccessDescriptor = SelectHelpers.allLeavesDescriptor(inputSubSchema, SelectHelpers.CONCAT_FIELD_NAMES); this.flattenedInputSubSchema = SelectHelpers.getOutputSchema(inputSubSchema, flattenedFieldAccessDescriptor); this.needsFlattening = !inputSchema.equals(flattenedInputSubSchema); } else { this.fieldsToAggregate = fieldsToAggregate; this.inputSubSchema = null; this.flattenedFieldAccessDescriptor = null; this.flattenedInputSubSchema = null; this.needsFlattening = false; } this.outputField = outputField; this.fn = fn; this.combineTag = combineTag; this.aggregationSchema = aggregationSchema; } // The Schema is not necessarily known when the SchemaAggregateFn is created. Once the schema // is known, resolve will be called with the proper schema. FieldAggregation<FieldT, AccumT, OutputT> resolve(Schema schema) { return new FieldAggregation<>( fieldsToAggregate, aggregateBaseValues, outputField, fn, combineTag, aggregationSchema, schema); } } abstract Builder toBuilder(); @AutoValue.Builder @AutoValue.CopyAnnotations @SuppressWarnings("rawtypes") abstract static class Builder { abstract Builder setInputSchema(@Nullable Schema inputSchema); abstract Builder setOutputSchema(@Nullable Schema outputSchema); abstract Builder setComposedCombineFn(@Nullable ComposedCombineFn composedCombineFn); abstract Builder setFieldAggregations(List<FieldAggregation> fieldAggregations); abstract Inner build(); } abstract @Nullable Schema getInputSchema(); abstract @Nullable Schema getOutputSchema(); abstract @Nullable ComposedCombineFn getComposedCombineFn(); abstract List<FieldAggregation> getFieldAggregations(); /** Once the schema is known, this function is called by the {@link Group} transform. 
*/ Inner withSchema(Schema inputSchema) { List<FieldAggregation> fieldAggregations = getFieldAggregations().stream() .map(f -> f.resolve(inputSchema)) .collect(Collectors.toList()); ComposedCombineFn composedCombineFn = null; for (int i = 0; i < fieldAggregations.size(); ++i) { FieldAggregation fieldAggregation = fieldAggregations.get(i); SimpleFunction<Row, ?> extractFunction; Coder extractOutputCoder; if (fieldAggregation.fieldsToAggregate.referencesSingleField()) { extractFunction = new ExtractSingleFieldFunction( inputSchema, fieldAggregation.aggregateBaseValues, fieldAggregation); FieldType fieldType = fieldAggregation.flattenedInputSubSchema.getField(0).getType(); if (fieldAggregation.aggregateBaseValues) { while (fieldType.getTypeName().isLogicalType()) { fieldType = fieldType.getLogicalType().getBaseType(); } } extractOutputCoder = SchemaCoder.coderForFieldType(fieldType); } else { extractFunction = new ExtractFieldsFunction(inputSchema, fieldAggregation); extractOutputCoder = SchemaCoder.of(fieldAggregation.inputSubSchema); } if (i == 0) { composedCombineFn = CombineFns.compose() .with( extractFunction, extractOutputCoder, fieldAggregation.fn, fieldAggregation.combineTag); } else { composedCombineFn = ((ComposedCombineFn) composedCombineFn) .with( extractFunction, extractOutputCoder, fieldAggregation.fn, fieldAggregation.combineTag); } } return toBuilder() .setInputSchema(inputSchema) .setComposedCombineFn(composedCombineFn) .setFieldAggregations(fieldAggregations) .build(); } /** Aggregate all values of a set of fields into an output field. */ <CombineInputT, AccumT, CombineOutputT> Inner aggregateFields( FieldAccessDescriptor fieldsToAggregate, boolean aggregateBaseValues, CombineFn<CombineInputT, AccumT, CombineOutputT> fn, String outputFieldName) { return aggregateFields( fieldsToAggregate, aggregateBaseValues, fn, Field.of(outputFieldName, FieldTypeDescriptors.fieldTypeForJavaType(fn.getOutputType()))); } /** Aggregate all values of a set of fields into an output field. */ <CombineInputT, AccumT, CombineOutputT> Inner aggregateFields( FieldAccessDescriptor fieldsToAggregate, boolean aggregateBaseValues, CombineFn<CombineInputT, AccumT, CombineOutputT> fn, Field outputField) { List<FieldAggregation> fieldAggregations = getFieldAggregations(); TupleTag<Object> combineTag = new TupleTag<>(Integer.toString(fieldAggregations.size())); FieldAggregation fieldAggregation = new FieldAggregation<>( fieldsToAggregate, aggregateBaseValues, outputField, fn, combineTag); fieldAggregations.add(fieldAggregation); return toBuilder() .setOutputSchema(getOutputSchema(fieldAggregations)) .setFieldAggregations(fieldAggregations) .build(); } private Schema getOutputSchema(List<FieldAggregation> fieldAggregations) { Schema.Builder outputSchema = Schema.builder(); for (FieldAggregation aggregation : fieldAggregations) { outputSchema.addField(aggregation.outputField); } return outputSchema.build(); } /** Extract a single field from an input {@link Row}. 
*/ private static class ExtractSingleFieldFunction<OutputT> extends SimpleFunction<Row, OutputT> { private final RowSelector rowSelector; private final boolean extractBaseValue; private final @Nullable RowSelector flatteningSelector; private final FieldAggregation fieldAggregation; private ExtractSingleFieldFunction( Schema inputSchema, boolean extractBaseValue, FieldAggregation fieldAggregation) { rowSelector = new RowSelectorContainer(inputSchema, fieldAggregation.fieldsToAggregate, true); this.extractBaseValue = extractBaseValue; flatteningSelector = fieldAggregation.needsFlattening ? new RowSelectorContainer( fieldAggregation.inputSubSchema, fieldAggregation.flattenedFieldAccessDescriptor, true) : null; this.fieldAggregation = fieldAggregation; } @Override public OutputT apply(Row row) { Row selected = rowSelector.select(row); if (fieldAggregation.needsFlattening) { selected = flatteningSelector.select(selected); } if (extractBaseValue && selected.getSchema().getField(0).getType().getTypeName().isLogicalType()) { return (OutputT) selected.getBaseValue(0, Object.class); } return selected.getValue(0); } } /** Extract multiple fields from an input {@link Row}. */ private static class ExtractFieldsFunction extends SimpleFunction<Row, Row> { private final RowSelector rowSelector; private ExtractFieldsFunction(Schema inputSchema, FieldAggregation fieldAggregation) { rowSelector = new RowSelectorContainer(inputSchema, fieldAggregation.fieldsToAggregate, true); } @Override public Row apply(Row row) { return rowSelector.select(row); } } @Override public Object[] createAccumulator() { return getComposedCombineFn().createAccumulator(); } @Override public Object[] addInput(Object[] accumulator, Row input) { return getComposedCombineFn().addInput(accumulator, input); } @Override public Object[] mergeAccumulators(Iterable<Object[]> accumulator) { return getComposedCombineFn().mergeAccumulators(accumulator); } @Override public Coder<Object[]> getAccumulatorCoder(CoderRegistry registry, Coder<Row> inputCoder) throws CannotProvideCoderException { return getComposedCombineFn().getAccumulatorCoder(registry, inputCoder); } @Override public Coder<Row> getDefaultOutputCoder(CoderRegistry registry, Coder<Row> inputCoder) { return SchemaCoder.of(getOutputSchema()); } @Override public Row extractOutput(Object[] accumulator) { // Build a row containing a field for every aggregate that was registered. CoCombineResult coCombineResult = getComposedCombineFn().extractOutput(accumulator); Row.Builder output = Row.withSchema(getOutputSchema()); for (FieldAggregation fieldAggregation : getFieldAggregations()) { Object aggregate = coCombineResult.get(fieldAggregation.combineTag); output.addValue(aggregate); } return output.build(); } } }
apache/beam
sdks/java/core/src/main/java/org/apache/beam/sdk/schemas/transforms/SchemaAggregateFn.java
213,877
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.lego.minddroid;

import android.app.Activity;
import android.app.AlertDialog;
import android.content.SharedPreferences;

import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStreamReader;

/**
 * Displays a LAMA ("LEGO Application MINDdroid Agreement") that the user has to accept before
 * using the application. Your application should call {@link Lama#show(android.app.Activity)}
 * in the onCreate() method of the first activity. If the user accepts the LAMA, it will never
 * be shown again. If the user refuses, {@link android.app.Activity#finish()} is invoked
 * on your activity.
 */
class Lama {

    private static final String ASSET_LAMA = "LAMA";
    private static final String PREFERENCE_LAMA_ACCEPTED = "lama.accepted";
    private static final String PREFERENCES_LAMA = "lama";

    /**
     * Callback to let the activity know when the user has accepted the LAMA.
     */
    interface OnLamaAgreedTo {

        /**
         * Called when the user has accepted the LAMA and the dialog closes.
         */
        void onLamaAgreedTo();
    }

    /**
     * Displays the LAMA if necessary. This method should be called from the onCreate()
     * method of your main Activity.
     *
     * @param activity The Activity to finish if the user rejects the LAMA.
     * @return Whether the user has agreed already.
     */
    static boolean show(final Activity activity) {
        final SharedPreferences preferences =
                activity.getSharedPreferences(PREFERENCES_LAMA, Activity.MODE_PRIVATE);
        if (!preferences.getBoolean(PREFERENCE_LAMA_ACCEPTED, false)) {
            final AlertDialog.Builder builder = new AlertDialog.Builder(activity);
            builder.setTitle(R.string.lama_title);
            builder.setCancelable(true);
            builder.setPositiveButton(R.string.lama_accept, (dialog, which) -> {
                accept(preferences);
                if (activity instanceof OnLamaAgreedTo) {
                    ((OnLamaAgreedTo) activity).onLamaAgreedTo();
                }
            });
            builder.setNegativeButton(R.string.lama_refuse, (dialog, which) -> refuse(activity));
            builder.setOnCancelListener(dialog -> refuse(activity));
            builder.setMessage(readLama(activity));
            builder.create().show();
            return false;
        }
        return true;
    }

    private static void accept(SharedPreferences preferences) {
        preferences.edit().putBoolean(PREFERENCE_LAMA_ACCEPTED, true).apply();
    }

    private static void refuse(Activity activity) {
        activity.finish();
    }

    private static CharSequence readLama(Activity activity) {
        BufferedReader in = null;
        try {
            in = new BufferedReader(new InputStreamReader(activity.getAssets().open(ASSET_LAMA)));
            String line;
            StringBuilder buffer = new StringBuilder();
            while ((line = in.readLine()) != null) {
                buffer.append(line).append('\n');
            }
            return buffer;
        } catch (IOException e) {
            return "";
        } finally {
            closeStream(in);
        }
    }

    /**
     * Closes the specified stream.
     *
     * @param stream The stream to close.
     */
    private static void closeStream(Closeable stream) {
        if (stream != null) {
            try {
                stream.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}
NXT/LEGO-MINDSTORMS-MINDdroid
MindDroid/src/main/java/com/lego/minddroid/Lama.java
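The Javadoc above prescribes calling Lama.show() from the first activity's onCreate(). A minimal sketch of such an activity follows; the StartActivity name, the layout ID, and the startApp() helper are hypothetical, and the class is assumed to live in the same package as the package-private Lama class.

import android.app.Activity;
import android.os.Bundle;

public class StartActivity extends Activity implements Lama.OnLamaAgreedTo {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main); // hypothetical layout

        // Shows the agreement dialog unless it was accepted earlier; finish()
        // is called on refusal, onLamaAgreedTo() on acceptance.
        if (Lama.show(this)) {
            startApp(); // agreement was already accepted in a previous run
        }
    }

    @Override
    public void onLamaAgreedTo() {
        startApp();
    }

    private void startApp() {
        // hypothetical: proceed with normal application start-up
    }
}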
213,878
package com.forfan.bigbang.util;

import android.content.Context;

import com.forfan.bigbang.BigBangApp;
import com.umeng.analytics.MobclickAgent;

import java.util.HashMap;

/**
 * Created by penglu on 2015/12/17.
 */
public class UrlCountUtil {
    // All analytics event IDs are defined in this one place.
    public static final String CLICK_SETTINGS_OPEN_OCR = "click_settings_open_ocr";
    public static final String CLICK_SETTINGS_FEEDBACK = "click_settings_feedback";
    public static final String CLICK_SETTINGS_SET_STYLE_BIGBANG = "click_settings_set_style_bigbang";
    public static final String CLICK_SETTINGS_OPEN_FROM_OUTSIDE = "click_settings_open_from_outside";
    public static final String CLICK_SETTINGS_FLOAT_WHITE_LIST = "click_settings_float_white_list";
    public static final String CLICK_SETTINGS_SEARCH_ENGINE = "click_settings_search_engine";
    public static final String CLICK_SETTINGS_CHECK_FOR_UPDATE = "click_settings_check_for_update";
    public static final String CLICK_SETTINGS_HOW_TO_USE = "click_settings_how_to_use";
    public static final String CLICK_SETTINGS_ABOUT = "click_settings_about";
    public static final String CLICK_SETTINGS_JOIN_QQ = "click_settings_join_qq";
    public static final String CLICK_SETTINGS_ABOUT_DONATE = "click_settings_about_donate";
    public static final String CLICK_SETTINGS_PROBLEM = "click_settings_problem";
    public static final String CLICK_SETTINGS_SHARE = "click_settings_share";
    public static final String CLICK_SETTINGS_WHITELIST = "click_settings_whitelist";
    public static final String CLICK_SETTINGS_DOUBLECLICK_SETTING = "click_settings_doubleclick_setting";
    public static final String CLICK_SETTINGS_DOUBLECLICK_SETTING_CONFORM = "click_settings_doubleclick_setting_conform";
    public static final String CLICK_SETTINGS_DONATE = "click_settings_donate";
    public static final String CLICK_DIY_OCR_KEY = "click_diy_ocr_key";
    public static final String CLICK_SETTINGS_XPOSED_WHITELIST = "click_settings_xposed_whitelist";
    public static final String CLICK_SETTINGS_NOTIFY_FL_WIN = "click_settings_notify_fl_win";
    public static final String CLICK_SETTINGS_MOTIFICATION = "click_settings_motification";
    public static final String CLICK_SHARE_CARD_LIKE = "click_share_card_like";
    public static final String CLICK_SHARE_CARD_SHARE = "click_share_card_share";
    public static final String CLICK_SHARE_CARD_CANCEL = "click_share_card_cancel";
    public static final String CLICK_SHARE_CARD_DISLIKE = "click_share_card_dislike";
    public static final String CLICK_SHARE_CARD_FEEDBACK = "click_share_card_feedback";
    public static final String CLICK_INTRO_CARD_GO = "click_intro_card_go";
    public static final String CLICK_INTRO_CARD_CANCEL = "click_intro_card_cancel";
    public static final String STATUS_ACCESSABILITY = "status_accessability";
    public static final String STATUS_CLIPBOARD = "status_clipboard";
    public static final String STATUS_SHOW_FLOAT_WINDOW = "status_show_float_window";
    public static final String STATUS_SHOW_NOTIFY = "status_show_notify";
    public static final String STATUS_TOTAL_SWITCH = "status_total_switch";
    public static final String STATUS_PUNCTUATION = "status_punctuation";
    public static final String STATUS_USE_BUILTIN_BROWSER = "status_use_built_in_browser";
    public static final String STATUS_FLOAT_VIEW_TRIGGER = "status_float_view_trigger";
    public static final String STATUS_ONLY_TEXT_MONITOR = "status_only_text_monitor";
    public static final String CLICK_OCR_PICK_FROM_GALLERY = "click_ocr_pick_from_gallery";
    public static final String CLICK_OCR_TAKEPICTURE = "click_ocr_takepicture";
    public static final String CLICK_OCR_FROM_SHARE = "click_ocr_from_share";
    public static final String CLICK_OCR_REOCR = "click_ocr_re_ocr";
    public static final String CLICK_OCR_TO_BIGBANG_ACTIVITY = "click_ocr_to_bigbang_activity";
    public static final String CLICK_TIPVIEW_BACK = "click_tipview_back";
    public static final String CLICK_TIPVIEW_SCREEN = "click_tipview_screen";
    public static final String CLICK_TIPVIEW_COPY = "click_tipview_copy";
    public static final String CLICK_TIPVIEW_SWITCH = "click_tipview_switch";
    public static final String CLICK_TIPVIEW_IMAAGEVIEW = "click_tipview_imaageview";
    public static final String CLICK_TIPVIEW_SETTING_ACTICITY = "longclick_tipview_setting_acticity";
    public static final String CLICK_BROWSER_EXIT = "click_browser_exit";
    public static final String CLICK_BROWSER_TO_SYS_BROWSER = "click_browser_to_sys_browser";
    public static final String CLICK_SETTINGS_SEARCH_ENGINE_WEB = "click_settings_search_engine_web";
    public static final String STATE_BROWSER_ENGINES = "state_browser_engines";
    public static final String CLICK_UNIVERSAL_COPY_EXIT_RETUN = "click_universal_copy_exit_retun";
    public static final String CLICK_UNIVERSAL_COPY_EXIT = "click_universal_copy_exit";
    public static final String CLICK_UNIVERSAL_COPY_EDIT = "click_universal_copy_edit";
    public static final String CLICK_UNIVERSAL_COPY_EXIT_FAB = "click_universal_copy_exit_fab";
    public static final String CLICK_UNIVERSAL_COPY_EXIT_FULLSCREEN_ACTION = "click_universal_copy_exit_fullscreen_action";
    public static final String CLICK_UNIVERSAL_COPY_EXIT_FULLSCREEN_FAB = "click_universal_copy_exit_fullscreen_fab";
    public static final String CLICK_UNIVERSAL_COPY_COPY_FAB = "click_universal_copy_copy_fab";
    public static final String CLICK_UNIVERSAL_COPY_COPY_ACTION = "click_universal_copy_copy_action";
    public static final String CLICK_UNIVERSAL_COPY_SELECT_ALL = "click_universal_copy_select_all";
    public static final String CLICK_DONATE_WECHAT_SAVE = "click_donate_wechat_save";
    public static final String CLICK_DONATE_ALIPAY_SAVE = "click_donate_alipay_save";
    public static final String CLICK_BIGBANG_COPY = "click_bigbang_copy";
    public static final String CLICK_BIGBANG_SHARAE = "click_bigbang_sharae";
    public static final String CLICK_BIGBANG_TRANSLATE = "click_bigbang_translate";
    public static final String CLICK_BIGBANG_DRAG = "click_bigbang_drag";
    public static final String CLICK_BIGBANG_SEARCH = "click_bigbang_search";
    public static final String CLICK_BIGBANG_SWITCH_TYPE = "click_bigbang_switch_type";
    public static final String CLICK_BIGBANG_REMAIN_SYMBOL = "click_bigbang_remain_symbol";
    public static final String CLICK_BIGBANG_REMAIN_SECTION = "click_bigbang_remain_section";
    public static final String CLICK_BIGBANG_DRAG_SELECTION = "click_bigbang_drag_selection";
    public static final String CLICK_PRE_CONFIRM = "click_pre_confirm";
    public static final String CLICK_PRE_CONFIRM_IN_DIALOG = "click_pre_confirm_in_dialog";
    public static final String CLICK_PRE_CANCEL_IN_DIALOG = "click_pre_cancel_in_dialog";
    public static final String PRE__FLOATVIEW = "pre__floatview";
    public static final String PRE__NOTIFY = "pre__notify";
    public static final String PRE__TRIGGER = "pre__trigger";
    public static final String CLICK_CAPTURERESULT_OCR = "click_captureresult_ocr";
    public static final String CLICK_CAPTURERESULT_SHARE = "click_captureresult_share";
    public static final String CLICK_CAPTURERESULT_SAVE = "click_captureresult_save";
    public static final String CLICK_CAPTURERESULT_BIGBANG = "click_captureresult_bigbang";
    public static final String CLICK_CAPTURERESULT_OCRRESULT = "click_captureresult_ocrresult";
    public static final String CLICK_SHOW_BEYOND_QUOTE = "click_show_beyond_quote";
    public static final String CLICK_SET_BB_BGCOLOR_DIY = "click_set_bb_bgcolor_diy";
    public static final String STATUS_SET_BB_TEXT_SIZE = "status_set_bb_text_size";
    public static final String STATUS_SET_BB_LINE_MARGIN = "status_set_bb_line_margin";
    public static final String STATUS_SET_BB_ITEM_MARGIN = "status_set_bb_item_margin";
    public static final String STATUS_SET_BB_ITEM_PADDING = "status_set_bb_item_padding";
    public static final String STATUS_SET_BB_ALPHA = "status_set_bb_alpha";
    public static final String STATUS_SET_BB_BGCOLOR = "status_set_bb_bgcolor";
    public static final String STATUS_SET_BB_FULL_SCREEN = "status_set_bb_full_screen";
    public static final String STATUS_SET_BB_STICK_HEAD = "status_set_bb_stick_head";
    public static final String STATUS_SET_BB_STICK_SHAREBAR = "status_set_bb_stick_sharebar";
    public static final String STATUS_SET_BB_DEFAULT_LOCAL = "status_set_bb_default_local";
    public static final String STATUS_SET_BB_ADD_BLANKS = "status_set_bb_add_blanks";
    public static final String STATUS_SET_BB_BLANKS_IS_SYMBOL = "status_set_bb_blanks_is_symbol";
    public static final String STATUS_DIY_OCR_KEY = "status_diy_ocr_key";
    public static final String CLICK_COPY_OCR_URL = "click_copy_ocr_url";
    public static final String CLICK_SEARCH_ENGINE_ADD = "click_search_engine_add";
    public static final String CLICK_SEARCH_ENGINE_DEL = "click_search_engine_del";
    public static final String CLICK_SEARCH_ENGINE_EDIT = "click_search_engine_edit";
    public static final String STATUS_NOFITY_SWITCH = "status_nofity_switch";
    public static final String STATUS_NOFITY_CLICK = "status_nofity_click";
    public static final String STATUS_NOFITY_CLIPBOARD = "status_nofity_clipboard";
    public static final String CLICK_NOFITY_COPY = "click_nofity_copy";
    public static final String CLICK_NOFITY_SCREEN = "click_nofity_screen";

    // WhiteListActivity
    public static final String STATUS_WL_SELECT_ALL = "status_wl_select_all";
    public static final String CLICK_WL_SELECT_MODE = "click_wl_select_mode";
    public static final String STATUS_WL_SELECTION = "status_wl_selection";
    public static final String CLICK_FRAGMENT_SWITCHES = "click_fragment_switches";
    public static final String STATUS_FLOATVIEW_SET_SIZE = "status_floatview_set_size";
    public static final String STATUS_FLOATVIEW_SET_ALPHA = "status_floatview_set_alpha";
    public static final String STATUS_SET_FLOATVIEW_BGCOLOR = "status_set_floatview_bgcolor";
    public static final String STATUS_SET_FLOATVIEW_IS_STICK = "status_set_floatview_is_stick";
    public static final String CLICK_KEY_PRESS_TIPVIEW_SWITCH = "click_key_press_tipview_switch";
    public static final String CLICK_KEY_PRESS_TIPVIEW_CLICK = "click_key_press_tipview_click";
    public static final String CLICK_KEY_PRESS_TIPVIEW_CLIPBOARD = "click_key_press_tipview_clipboard";
    public static final String CLICK_KEY_PRESS_TIPVIEW_COPY = "click_key_press_tipview_copy";
    public static final String CLICK_KEY_PRESS_TIPVIEW_SCREEN = "click_key_press_tipview_screen";

    public static Context mContext = BigBangApp.getInstance();

    /*
     * context is the current Activity; eventId is the ID of the event being counted.
     * Example: to count how often the "forward" event occurs in a microblog app,
     * call this inside the forward function:
     *     MobclickAgent.onEvent(mContext, "Forward");
     */
    public static void onEvent(String eventId) {
        MobclickAgent.onEvent(mContext, eventId);
    }

    /*
     * map holds the attributes and values of the event (key-value pairs).
     * Example: to count "purchase" events in an e-commerce app, together with the
     * product type and quantity bought, call this inside the purchase function:
     *     HashMap<String, String> map = new HashMap<String, String>();
     *     map.put("type", "book");
     *     map.put("quantity", "3");
     *     MobclickAgent.onEvent(mContext, "purchase", map);
     */
    public static void onEvent(String eventId, HashMap<String, String> map) {
        MobclickAgent.onEvent(mContext, eventId, map);
    }

    public static void onEvent(String eventId, String value) {
        MobclickAgent.onEvent(mContext, eventId, value);
    }

    public static void onEvent(String eventId, boolean value) {
        MobclickAgent.onEvent(mContext, eventId, String.valueOf(value));
    }

    /*
     * Tracks a continuous numeric variable (which must be an integer), i.e. the
     * distribution of the value each time the user triggers the event, such as an
     * event's duration or the amount paid each time:
     *     MobclickAgent.onEventValue(Context context, String id, Map<String, String> m, int du)
     * id is the event ID, map holds the event's attributes and values, and du is the
     * event's numeric value, a signed 32-bit integer between -2,147,483,648 and
     * +2,147,483,647; values outside that range are dropped and skew the statistics.
     * Example: to track one music playback, including music type, artist and play
     * duration, call this after playback finishes:
     *     int duration = 12000; // developers must compute the play duration themselves
     *     Map<String, String> map_value = new HashMap<String, String>();
     *     map_value.put("type", "popular");
     *     map_value.put("artist", "JJLin");
     *     MobclickAgent.onEventValue(this, "music", map_value, duration);
     */
    public static void onEventValue(String id, HashMap<String, String> map, int value) {
        MobclickAgent.onEventValue(mContext, id, map, value);
    }
}
penglu20/Bigbang
app/src/main/java/com/forfan/bigbang/util/UrlCountUtil.java
213,879
package io.smartcat.migration.exceptions;

/**
 * Schema agreement exception which wraps exceptions while schema is propagated on all nodes.
 */
public class SchemaAgreementException extends MigrationException {

    private static final long serialVersionUID = 4672095868449483293L;

    /**
     * Create schema agreement exception with provided message.
     * @param message Message for this exception
     */
    public SchemaAgreementException(final String message) {
        super(message);
    }

    /**
     * Create schema agreement exception with provided message and original cause.
     * @param message Message for this exception
     * @param throwable Throwable wrapping original cause
     */
    public SchemaAgreementException(final String message, final Throwable throwable) {
        super(message, throwable);
    }
}
smartcat-labs/cassandra-migration-tool-java
src/main/java/io/smartcat/migration/exceptions/SchemaAgreementException.java
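A hedged sketch of how a caller might use the wrapping constructor above; the waitForSchemaAgreement() helper is an assumption for illustration, not part of this file or the migration tool's API.

try {
    waitForSchemaAgreement(); // hypothetical helper that polls the cluster
} catch (Exception e) {
    // Wrap the underlying driver error so callers see a migration-specific type.
    throw new SchemaAgreementException("Schema was not propagated to all nodes", e);
}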
213,880
package gcom.faturamento;

import gcom.atendimentopublico.ligacaoagua.LigacaoAguaSituacao;
import gcom.cadastro.cliente.EsferaPoder;
import gcom.cadastro.imovel.Categoria;
import gcom.cadastro.imovel.CategoriaTipo;
import gcom.cadastro.imovel.ImovelPerfil;
import gcom.cadastro.localidade.GerenciaRegional;
import gcom.cadastro.localidade.Localidade;
import gcom.cadastro.localidade.Quadra;
import gcom.cadastro.localidade.SetorComercial;
import gcom.cadastro.localidade.UnidadeNegocio;
import gcom.faturamento.consumotarifa.ConsumoTarifa;

import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Date;

import org.apache.commons.lang.builder.ToStringBuilder;

/** @author Hibernate CodeGenerator */
public class HistogramaAguaLigacao implements Serializable {

    private static final long serialVersionUID = 1L;

    /** identifier field */
    private Integer id;

    /** persistent field */
    private int anoMesReferencia;

    /** persistent field */
    private int codigoSetorComercial;

    /** persistent field */
    private int numeroQuadra;

    /** persistent field */
    private short indicadorLigacaoMista;

    /** persistent field */
    private short indicadorConsumoReal;

    /** persistent field */
    private short indicadorHidrometro;

    /** persistent field */
    private short indicadorPoco;

    /** persistent field */
    private short indicadorVolFixadoagua;

    /** persistent field */
    private int quantidadeConsumo;

    /** persistent field */
    private int quantidadeLigacao;

    /** persistent field */
    private int quantidadeEconomiaLigacao;

    /** persistent field */
    private BigDecimal valorFaturadoLigacao;

    /** persistent field */
    private int volumeFaturadoLigacao;

    /** persistent field */
    private Date ultimaAlteracao;

    /** persistent field */
    private GerenciaRegional gerenciaRegional;

    /** persistent field */
    private Localidade localidade;

    /** persistent field */
    private Localidade localidadeEelo;

    /** persistent field */
    private Quadra quadra;

    /** persistent field */
    private ConsumoTarifa consumoTarifa;

    /** persistent field */
    private ImovelPerfil imovelPerfil;

    /** persistent field */
    private LigacaoAguaSituacao ligacaoAguaSituacao;

    /** persistent field */
    private UnidadeNegocio unidadeNegocio;

    /** persistent field */
    private EsferaPoder esferaPoder;

    /** persistent field */
    private Categoria categoria;

    /** persistent field */
    private CategoriaTipo categoriaTipo;

    /** persistent field */
    private SetorComercial setorComercial;

    /** full constructor */
    public HistogramaAguaLigacao(Integer id, int anoMesReferencia, int codigoSetorComercial, int numeroQuadra,
            short indicadorLigacaoMista, short indicadorConsumoReal, short indicadorHidrometro, short indicadorPoco,
            short indicadorVolFixadoagua, int quantidadeConsumo, int quantidadeLigacao, int quantidadeEconomiaLigacao,
            BigDecimal valorFaturadoLigacao, int volumeFaturadoLigacao, Date ultimaAlteracao,
            GerenciaRegional gerenciaRegional, Localidade localidade, Localidade localidadeEelo, Quadra quadra,
            ConsumoTarifa consumoTarifa, ImovelPerfil imovelPerfil, LigacaoAguaSituacao ligacaoAguaSituacao,
            UnidadeNegocio unidadeNegocio, EsferaPoder esferaPoder, Categoria categoria, CategoriaTipo categoriaTipo,
            SetorComercial setorComercial) {
        this.id = id;
        this.anoMesReferencia = anoMesReferencia;
        this.codigoSetorComercial = codigoSetorComercial;
        this.numeroQuadra = numeroQuadra;
        this.indicadorLigacaoMista = indicadorLigacaoMista;
        this.indicadorConsumoReal = indicadorConsumoReal;
        this.indicadorHidrometro = indicadorHidrometro;
        this.indicadorPoco = indicadorPoco;
        this.indicadorVolFixadoagua = indicadorVolFixadoagua;
        this.quantidadeConsumo = quantidadeConsumo;
        this.quantidadeLigacao = quantidadeLigacao;
        this.quantidadeEconomiaLigacao = quantidadeEconomiaLigacao;
        this.valorFaturadoLigacao = valorFaturadoLigacao;
        this.volumeFaturadoLigacao = volumeFaturadoLigacao;
        this.ultimaAlteracao = ultimaAlteracao;
        this.gerenciaRegional = gerenciaRegional;
        this.localidade = localidade;
        this.localidadeEelo = localidadeEelo;
        this.quadra = quadra;
        this.consumoTarifa = consumoTarifa;
        this.imovelPerfil = imovelPerfil;
        this.ligacaoAguaSituacao = ligacaoAguaSituacao;
        this.unidadeNegocio = unidadeNegocio;
        this.esferaPoder = esferaPoder;
        this.categoria = categoria;
        this.categoriaTipo = categoriaTipo;
        this.setorComercial = setorComercial;
    }

    /** default constructor */
    public HistogramaAguaLigacao() {
    }

    public Integer getId() { return this.id; }
    public void setId(Integer id) { this.id = id; }

    public int getAnoMesReferencia() { return this.anoMesReferencia; }
    public void setAnoMesReferencia(int anoMesReferencia) { this.anoMesReferencia = anoMesReferencia; }

    public int getCodigoSetorComercial() { return this.codigoSetorComercial; }
    public void setCodigoSetorComercial(int codigoSetorComercial) { this.codigoSetorComercial = codigoSetorComercial; }

    public int getNumeroQuadra() { return this.numeroQuadra; }
    public void setNumeroQuadra(int numeroQuadra) { this.numeroQuadra = numeroQuadra; }

    public short getIndicadorLigacaoMista() { return this.indicadorLigacaoMista; }
    public void setIndicadorLigacaoMista(short indicadorLigacaoMista) { this.indicadorLigacaoMista = indicadorLigacaoMista; }

    public short getIndicadorConsumoReal() { return this.indicadorConsumoReal; }
    public void setIndicadorConsumoReal(short indicadorConsumoReal) { this.indicadorConsumoReal = indicadorConsumoReal; }

    public short getIndicadorHidrometro() { return this.indicadorHidrometro; }
    public void setIndicadorHidrometro(short indicadorHidrometro) { this.indicadorHidrometro = indicadorHidrometro; }

    public short getIndicadorPoco() { return this.indicadorPoco; }
    public void setIndicadorPoco(short indicadorPoco) { this.indicadorPoco = indicadorPoco; }

    public short getIndicadorVolFixadoagua() { return this.indicadorVolFixadoagua; }
    public void setIndicadorVolFixadoagua(short indicadorVolFixadoagua) { this.indicadorVolFixadoagua = indicadorVolFixadoagua; }

    public int getQuantidadeConsumo() { return this.quantidadeConsumo; }
    public void setQuantidadeConsumo(int quantidadeConsumo) { this.quantidadeConsumo = quantidadeConsumo; }

    public int getQuantidadeLigacao() { return this.quantidadeLigacao; }
    public void setQuantidadeLigacao(int quantidadeLigacao) { this.quantidadeLigacao = quantidadeLigacao; }

    public int getQuantidadeEconomiaLigacao() { return this.quantidadeEconomiaLigacao; }
    public void setQuantidadeEconomiaLigacao(int quantidadeEconomiaLigacao) { this.quantidadeEconomiaLigacao = quantidadeEconomiaLigacao; }

    public BigDecimal getValorFaturadoLigacao() { return this.valorFaturadoLigacao; }
    public void setValorFaturadoLigacao(BigDecimal valorFaturadoLigacao) { this.valorFaturadoLigacao = valorFaturadoLigacao; }

    public int getVolumeFaturadoLigacao() { return this.volumeFaturadoLigacao; }
    public void setVolumeFaturadoLigacao(int volumeFaturadoLigacao) { this.volumeFaturadoLigacao = volumeFaturadoLigacao; }

    public Date getUltimaAlteracao() { return this.ultimaAlteracao; }
    public void setUltimaAlteracao(Date ultimaAlteracao) { this.ultimaAlteracao = ultimaAlteracao; }

    public GerenciaRegional getGerenciaRegional() { return this.gerenciaRegional; }
    public void setGerenciaRegional(GerenciaRegional gerenciaRegional) { this.gerenciaRegional = gerenciaRegional; }

    public Localidade getLocalidade() { return this.localidade; }
    public void setLocalidade(Localidade localidade) { this.localidade = localidade; }

    public Localidade getLocalidadeEelo() { return this.localidadeEelo; }
    public void setLocalidadeEelo(Localidade localidadeEelo) { this.localidadeEelo = localidadeEelo; }

    public Quadra getQuadra() { return this.quadra; }
    public void setQuadra(Quadra quadra) { this.quadra = quadra; }

    public ConsumoTarifa getConsumoTarifa() { return this.consumoTarifa; }
    public void setConsumoTarifa(ConsumoTarifa consumoTarifa) { this.consumoTarifa = consumoTarifa; }

    public ImovelPerfil getImovelPerfil() { return this.imovelPerfil; }
    public void setImovelPerfil(ImovelPerfil imovelPerfil) { this.imovelPerfil = imovelPerfil; }

    public LigacaoAguaSituacao getLigacaoAguaSituacao() { return this.ligacaoAguaSituacao; }
    public void setLigacaoAguaSituacao(LigacaoAguaSituacao ligacaoAguaSituacao) { this.ligacaoAguaSituacao = ligacaoAguaSituacao; }

    public UnidadeNegocio getUnidadeNegocio() { return this.unidadeNegocio; }
    public void setUnidadeNegocio(UnidadeNegocio unidadeNegocio) { this.unidadeNegocio = unidadeNegocio; }

    public EsferaPoder getEsferaPoder() { return this.esferaPoder; }
    public void setEsferaPoder(EsferaPoder esferaPoder) { this.esferaPoder = esferaPoder; }

    public Categoria getCategoria() { return this.categoria; }
    public void setCategoria(Categoria categoria) { this.categoria = categoria; }

    public CategoriaTipo getCategoriaTipo() { return this.categoriaTipo; }
    public void setCategoriaTipo(CategoriaTipo categoriaTipo) { this.categoriaTipo = categoriaTipo; }

    public SetorComercial getSetorComercial() { return this.setorComercial; }
    public void setSetorComercial(SetorComercial setorComercial) { this.setorComercial = setorComercial; }

    public String toString() {
        return new ToStringBuilder(this)
                .append("id", getId())
                .toString();
    }
}
prodigasistemas/gsan
src/gcom/faturamento/HistogramaAguaLigacao.java
213,881
/**
 * Extracts the lookup key of type {@code K} from a stored value of type {@code V}.
 *
 * @author MAAG
 */
public interface IGetKey<K, V> {
    K getKeyFromValue(V value);
}
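// Illustrative sketch (assumed example types, not part of the file above):
// one possible IGetKey implementation that keys Person records by their id,
// the way a key-ordered structure might extract keys when inserting values.
class Person {
    int id;
    String name;

    Person(int id, String name) {
        this.id = id;
        this.name = name;
    }
}

class PersonKeyExtractor implements IGetKey<Integer, Person> {
    @Override
    public Integer getKeyFromValue(Person value) {
        return value.id; // the id field serves as the lookup/ordering key
    }
}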
malonso-uvg/uvg2023ed10
10_ABB/src/IGetKey.java
213,882
package com.flansmod.common.guns;

import java.util.ArrayList;

import com.flansmod.common.teams.TeamsManager;

import net.minecraft.client.renderer.texture.IIconRegister;
import net.minecraft.entity.Entity;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.Item;
import net.minecraft.item.ItemStack;
import net.minecraft.util.IIcon;
import net.minecraft.util.MathHelper;
import net.minecraft.util.MovingObjectPosition;
import net.minecraft.util.MovingObjectPosition.MovingObjectType;
import net.minecraft.util.Vec3;
import net.minecraft.world.World;
import net.minecraftforge.common.util.ForgeDirection;
import cpw.mods.fml.common.registry.GameRegistry;
import cpw.mods.fml.relauncher.Side;
import cpw.mods.fml.relauncher.SideOnly;

import com.flansmod.common.FlansMod;
import com.flansmod.common.types.IFlanItem;
import com.flansmod.common.types.InfoType;

public class ItemAAGun extends Item implements IFlanItem {
    public static final ArrayList<String> names = new ArrayList<String>();
    @SideOnly(Side.CLIENT)
    private ArrayList<IIcon> icons;
    public AAGunType type;

    public ItemAAGun(AAGunType type1) {
        maxStackSize = 1;
        type = type1;
        type.item = this;
        setCreativeTab(FlansMod.tabFlanGuns);
        GameRegistry.registerItem(this, type.shortName, FlansMod.MODID);
    }

    @Override
    public ItemStack onItemRightClick(ItemStack itemstack, World world, EntityPlayer entityplayer) {
        if (!(TeamsManager.survivalCanPlaceVehicles || entityplayer.capabilities.isCreativeMode)) {
            // player isn't allowed to place vehicles.
            return itemstack;
        }

        //Raytracing
        float cosYaw = MathHelper.cos(-entityplayer.rotationYaw * 0.01745329F - 3.141593F);
        float sinYaw = MathHelper.sin(-entityplayer.rotationYaw * 0.01745329F - 3.141593F);
        float cosPitch = -MathHelper.cos(-entityplayer.rotationPitch * 0.01745329F);
        float sinPitch = MathHelper.sin(-entityplayer.rotationPitch * 0.01745329F);
        double length = 5D;
        Vec3 posVec = Vec3.createVectorHelper(entityplayer.posX, entityplayer.posY + 1.62D - entityplayer.yOffset, entityplayer.posZ);
        Vec3 lookVec = posVec.addVector(sinYaw * cosPitch * length, sinPitch * length, cosYaw * cosPitch * length);
        MovingObjectPosition movingobjectposition = world.rayTraceBlocks(posVec, lookVec, true);

        //Result check
        if (movingobjectposition == null) {
            return itemstack;
        }
        if (movingobjectposition.typeOfHit == MovingObjectType.BLOCK) {
            int i = movingobjectposition.blockX;
            int j = movingobjectposition.blockY;
            int k = movingobjectposition.blockZ;
            if (!world.isRemote && world.isSideSolid(i, j, k, ForgeDirection.UP)) {
                EntityAAGun aaGun = new EntityAAGun(world, type, (double) i + 0.5F, (double) j + 1F, (double) k + 0.5F, entityplayer);
                if (!world.isRemote) {
                    FlansMod.log("Player %s placed AA Gun %s (%d) at (%d, %d, %d)", entityplayer.getDisplayName(), type.shortName, aaGun.getEntityId(), i, j, k);
                }
                world.spawnEntityInWorld(aaGun);
            }
            if (!entityplayer.capabilities.isCreativeMode) {
                itemstack.stackSize--;
            }
        }
        return itemstack;
    }

    public Entity spawnAAGun(World world, double x, double y, double z, ItemStack stack) {
        Entity entity = new EntityAAGun(world, type, x, y, z, null);
        if (!world.isRemote) {
            world.spawnEntityInWorld(entity);
        }
        return entity;
    }

    @Override
    @SideOnly(Side.CLIENT)
    public int getColorFromItemStack(ItemStack par1ItemStack, int par2) {
        return type.colour;
    }

    @Override
    @SideOnly(Side.CLIENT)
    public void registerIcons(IIconRegister icon) {
        itemIcon = icon.registerIcon("FlansMod:" + type.iconPath);
    }

    @Override
    public InfoType getInfoType() {
        return type;
    }
}
Unknown025/Flans-Mod-Plus
src/main/java/com/flansmod/common/guns/ItemAAGun.java
213,884
/* Copyright 2008 - 2021 Hochschule Offenburg
 * For a list of authors see README.md
 * This software of HSOAutonomy is released under GPL-3 License (see gpl.txt).
 */
package kdo.ebnDevKit.rcMagma;

import java.util.HashMap;
import java.util.Map;

import kdo.ebn.ExtendedBehaviorNetwork;
import kdo.ebn.IEBNAction;
import kdo.ebn.IEBNPerception;
import kdo.ebn.IResourceBelief;
import kdo.ebnDevKit.agent.IEbnAgent;
import kdo.ebnDevKit.agent.util.AgentNameHelper;

/**
 * Agent for the magma robocup agents. This integration would need to be placed
 * into the RoboCup code.
 * @author Thomas Rinklin
 */
public class RCMagmaAgent implements IEbnAgent {
    private final String name;

    @SuppressWarnings("unused")
    private ExtendedBehaviorNetwork ebn;

    // private final AgentRuntime rcClient;

    private Map<String, IEBNPerception> beliefs;

    private Map<String, IEBNAction> behaviors;

    public RCMagmaAgent(AgentNameHelper nameHelper) {
        name = nameHelper.getName("RCMagma");

        // ComponentFactory cf = new NAOComponentFactory() {
        // @Override
        // public IDecisionMaker createDecisionMaker(
        // Map<String, IEBNAction> behaviors, IThoughtModel thoughtModel,
        // int playerNumber, int serverVersion, String decisionMakerName,
        // ParameterMap learningParam)
        // {
        // RCMagmaAgent.this.beliefs = beliefs;
        // RCMagmaAgent.this.behaviors = behaviors;
        // return new EBNProxy();
        // }
        // };
        //
        // PlayerParameters pp = new PlayerParameters(
        // IMagmaConstants.DEFAULT_TEAMNAME, (byte) 0, 0,
        // IServerConnection.SERVER_IP, IServerConnection.SERVER_PORT, null,
        // Level.FINE, IServerConfigFilesConstants.DEFAULT_SERVER_VERSION, cf,
        // IMagmaConstants.DEFAULT_DECISION_MAKER);
        //
        // rcClient = new AgentRuntime(pp);
    }

    @Override
    public void connectEbn(ExtendedBehaviorNetwork ebn) {
        this.ebn = ebn;
    }

    @Override
    public Map<String, IEBNAction> getBehaviors() {
        return behaviors;
    }

    @Override
    public Map<String, IEBNPerception> getBeliefs() {
        return beliefs;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public Map<String, IResourceBelief> getResources() {
        return new HashMap<String, IResourceBelief>();
    }

    @Override
    public AgentStatus getStatus() {
        // if (rcClient.isConnected()) {
        // return AgentStatus.Running;
        // }
        return AgentStatus.Both;
    }

    @Override
    public boolean isStartable() {
        return true;
    }

    @Override
    public void start() {
        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                // rcClient.startClient();
            }
        });
        t.start();
    }

    @Override
    public void stop() {
        // rcClient.stopClient();
    }

    // private class EBNProxy implements IDecisionMaker
    // {
    //
    // @Override
    // public boolean decide()
    // {
    // System.out.println("decide");
    //
    // if (ebn != null) {
    // return ebn.decide();
    // }
    // return false;
    // }
    //
    // @Override
    // public IEBNAction getBehavior(String name)
    // {
    // return behaviors.get(name);
    // }
    //
    // public IEBNPerception getBelief(String name)
    // {
    // return beliefs.get(name);
    // }
    //
    // @Override
    // public IEBNAction getCurrentBehavior()
    // {
    // return ebn.getCurrentBehavior();
    // }
    //
    // @Override
    // public IEBNAction getDesiredBehavior()
    // {
    // return null;
    // }
    // }
}
magmaOffenburg/magmaRelease
kdolib/src/main/java/kdo/ebnDevKit/rcMagma/RCMagmaAgent.java
213,886
package net.stargraph.rest;

/*-
 * ==========================License-Start=============================
 * stargraph-model
 * --------------------------------------------------------------------
 * Copyright (C) 2017 Lambda^3
 * --------------------------------------------------------------------
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * ==========================License-End===============================
 */

import net.stargraph.query.InteractionMode;

import java.util.List;
import java.util.Map;
import java.util.Objects;

public final class SchemaAgnosticUserResponse extends UserResponse {
    private String sparqlQuery;
    private List<EntityEntry> answers;
    private Map<String, List<EntityEntry>> mappings;

    public SchemaAgnosticUserResponse(String query, InteractionMode interactionMode, String sparqlQuery) {
        super(query, interactionMode);
        this.sparqlQuery = Objects.requireNonNull(sparqlQuery);
    }

    public void setAnswers(List<EntityEntry> entries) {
        this.answers = Objects.requireNonNull(entries);
    }

    public void setMappings(Map<String, List<EntityEntry>> mappings) {
        this.mappings = Objects.requireNonNull(mappings);
    }

    public String getSparqlQuery() {
        return sparqlQuery;
    }

    public List<EntityEntry> getAnswers() {
        return answers;
    }

    public Map<String, List<EntityEntry>> getMappings() {
        return mappings;
    }
}
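// Illustrative usage sketch (hypothetical values; the interactionMode and
// answerEntries names are assumptions, not part of the file above):
//
// SchemaAgnosticUserResponse response = new SchemaAgnosticUserResponse(
//         "who directed Titanic", interactionMode,
//         "SELECT ?o WHERE { ?titanic ?director ?o }");
// response.setAnswers(answerEntries); // List<EntityEntry> built by the query pipeline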
Lambda-3/Stargraph
stargraph-model/src/main/java/net/stargraph/rest/SchemaAgnosticUserResponse.java
213,887
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */
package org.apache.cassandra.cql;

import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;

import org.antlr.runtime.*;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cli.CliUtils;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.CounterColumn;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.AsciiType;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.marshal.TypeParser;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.thrift.*;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;

public class QueryProcessor {
    public static final String CQL_VERSION = "2.0.0";

    private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);

    private static final long timeLimitForSchemaAgreement = 10 * 1000;

    public static final String DEFAULT_KEY_NAME = bufferToString(CFMetaData.DEFAULT_KEY_NAME);

    private static List<org.apache.cassandra.db.Row> getSlice(CFMetaData metadata, SelectStatement select, List<String> variables)
            throws InvalidRequestException, TimedOutException, UnavailableException {
        QueryPath queryPath = new QueryPath(select.getColumnFamily());
        List<ReadCommand> commands = new ArrayList<ReadCommand>();

        // ...of a list of column names
        if (!select.isColumnRange()) {
            Collection<ByteBuffer> columnNames = getColumnNames(select, metadata, variables);
            validateColumnNames(columnNames);

            for (Term rawKey : select.getKeys()) {
                ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);
                validateKey(key);
                commands.add(new SliceByNamesReadCommand(metadata.ksName, key, queryPath, columnNames));
            }
        }
        // ...a range (slice) of column names
        else {
            AbstractType<?> comparator = select.getComparator(metadata.ksName);
            ByteBuffer start = select.getColumnStart().getByteBuffer(comparator, variables);
            ByteBuffer finish = select.getColumnFinish().getByteBuffer(comparator, variables);

            for (Term rawKey : select.getKeys()) {
                ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);
                validateKey(key);
                validateSliceRange(metadata, start, finish, select.isColumnsReversed());
                commands.add(new SliceFromReadCommand(metadata.ksName,
                                                      key,
                                                      queryPath,
                                                      start,
                                                      finish,
                                                      select.isColumnsReversed(),
                                                      select.getColumnsLimit()));
            }
        }

        try {
            return StorageProxy.read(commands, select.getConsistencyLevel());
        } catch (TimeoutException e) {
            throw new TimedOutException();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    private static List<ByteBuffer> getColumnNames(SelectStatement select, CFMetaData metadata, List<String> variables)
            throws InvalidRequestException {
        String keyString = getKeyString(metadata);
        List<ByteBuffer> columnNames = new ArrayList<ByteBuffer>();
        for (Term column : select.getColumnNames()) {
            // skip the key for the slice op; we'll add it to the resultset in extractThriftColumns
            if (!column.getText().equalsIgnoreCase(keyString))
                columnNames.add(column.getByteBuffer(metadata.comparator, variables));
        }
        return columnNames;
    }

    private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables)
            throws TimedOutException, UnavailableException, InvalidRequestException {
        List<org.apache.cassandra.db.Row> rows;
        IPartitioner<?> p = StorageService.getPartitioner();

        AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();

        ByteBuffer startKeyBytes = (select.getKeyStart() != null)
                                   ? select.getKeyStart().getByteBuffer(keyType, variables)
                                   : null;
        ByteBuffer finishKeyBytes = (select.getKeyFinish() != null)
                                    ? select.getKeyFinish().getByteBuffer(keyType, variables)
                                    : null;

        RowPosition startKey = RowPosition.forKey(startKeyBytes, p), finishKey = RowPosition.forKey(finishKeyBytes, p);
        if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p)) {
            if (p instanceof RandomPartitioner)
                throw new InvalidRequestException("Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
            else
                throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
        }
        AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);

        // XXX: Our use of Thrift structs internally makes me Sad. :(
        SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
        validateSlicePredicate(metadata, thriftSlicePredicate);

        List<IndexExpression> expressions = new ArrayList<IndexExpression>();
        for (Relation columnRelation : select.getColumnRelations()) {
            // Left and right side of relational expression encoded according to comparator/validator.
            ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
            ByteBuffer value = columnRelation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);
            expressions.add(new IndexExpression(entity, IndexOperator.valueOf(columnRelation.operator().toString()), value));
        }

        int limit = select.isKeyRange() && select.getKeyStart() != null
                    ? select.getNumRecords() + 1
                    : select.getNumRecords();

        try {
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName,
                                                                    select.getColumnFamily(),
                                                                    null,
                                                                    thriftSlicePredicate,
                                                                    bounds,
                                                                    expressions,
                                                                    limit),
                                              select.getConsistencyLevel());
        } catch (IOException e) {
            throw new RuntimeException(e);
        } catch (org.apache.cassandra.thrift.UnavailableException e) {
            throw new UnavailableException();
        } catch (TimeoutException e) {
            throw new TimedOutException();
        }

        // if start key was set and relation was "greater than"
        if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty()) {
            if (rows.get(0).key.key.equals(startKeyBytes))
                rows.remove(0);
        }

        // if finish key was set and relation was "less than"
        if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty()) {
            int lastIndex = rows.size() - 1;
            if (rows.get(lastIndex).key.key.equals(finishKeyBytes))
                rows.remove(lastIndex);
        }

        return rows.subList(0, select.getNumRecords() < rows.size() ? select.getNumRecords() : rows.size());
    }

    private static void batchUpdate(ClientState clientState, List<UpdateStatement> updateStatements, ConsistencyLevel consistency, List<String> variables)
            throws InvalidRequestException, UnavailableException, TimedOutException {
        String globalKeyspace = clientState.getKeyspace();
        List<IMutation> rowMutations = new ArrayList<IMutation>();
        List<String> cfamsSeen = new ArrayList<String>();

        for (UpdateStatement update : updateStatements) {
            String keyspace = update.keyspace == null ? globalKeyspace : update.keyspace;

            // Avoid unnecessary authorizations.
            if (!(cfamsSeen.contains(update.getColumnFamily()))) {
                clientState.hasColumnFamilyAccess(keyspace, update.getColumnFamily(), Permission.WRITE);
                cfamsSeen.add(update.getColumnFamily());
            }

            rowMutations.addAll(update.prepareRowMutations(keyspace, clientState, variables));
        }

        try {
            StorageProxy.mutate(rowMutations, consistency);
        } catch (org.apache.cassandra.thrift.UnavailableException e) {
            throw new UnavailableException();
        } catch (TimeoutException e) {
            throw new TimedOutException();
        }
    }

    private static SlicePredicate slicePredicateFromSelect(SelectStatement select, CFMetaData metadata, List<String> variables)
            throws InvalidRequestException {
        SlicePredicate thriftSlicePredicate = new SlicePredicate();

        if (select.isColumnRange() || select.getColumnNames().size() == 0) {
            SliceRange sliceRange = new SliceRange();
            sliceRange.start = select.getColumnStart().getByteBuffer(metadata.comparator, variables);
            sliceRange.finish = select.getColumnFinish().getByteBuffer(metadata.comparator, variables);
            sliceRange.reversed = select.isColumnsReversed();
            sliceRange.count = select.getColumnsLimit();
            thriftSlicePredicate.slice_range = sliceRange;
        } else {
            thriftSlicePredicate.column_names = getColumnNames(select, metadata, variables);
        }

        return thriftSlicePredicate;
    }

    /* Test for SELECT-specific taboos */
    private static void validateSelect(String keyspace, SelectStatement select, List<String> variables) throws InvalidRequestException {
        ThriftValidation.validateConsistencyLevel(keyspace, select.getConsistencyLevel(), RequestType.READ);

        // Finish key w/o start key (KEY < foo)
        if (!select.isKeyRange() && (select.getKeyFinish() != null))
            throw new InvalidRequestException("Key range clauses must include a start key (i.e. KEY > term)");

        // Key range and by-key(s) combined (KEY > foo AND KEY = bar)
        if (select.isKeyRange() && select.getKeys().size() > 0)
            throw new InvalidRequestException("You cannot combine key range and by-key clauses in a SELECT");

        // Start and finish keys, *and* column relations (KEY > foo AND KEY < bar and name1 = value1).
        if (select.isKeyRange() && (select.getKeyFinish() != null) && (select.getColumnRelations().size() > 0))
            throw new InvalidRequestException("You cannot combine key range and by-column clauses in a SELECT");

        // Can't use more than one KEY =
        if (!select.isMultiKey() && select.getKeys().size() > 1)
            throw new InvalidRequestException("You cannot use more than one KEY = in a SELECT");

        if (select.getColumnRelations().size() > 0) {
            AbstractType<?> comparator = select.getComparator(keyspace);
            Set<ByteBuffer> indexed = Table.open(keyspace).getColumnFamilyStore(select.getColumnFamily()).indexManager.getIndexedColumns();
            for (Relation relation : select.getColumnRelations()) {
                if ((relation.operator() == RelationType.EQ) && indexed.contains(relation.getEntity().getByteBuffer(comparator, variables)))
                    return;
            }
            throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator");
        }
    }

    // Copypasta from o.a.c.thrift.CassandraDaemon
    private static void applyMigrationOnStage(final Migration m) throws SchemaDisagreementException, InvalidRequestException {
        Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                m.apply();
                m.announce();
                return null;
            }
        });

        try {
            f.get();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            // this means call() threw an exception. deal with it directly.
            if (e.getCause() != null) {
                InvalidRequestException ex = new InvalidRequestException(e.getCause().getMessage());
                ex.initCause(e.getCause());
                throw ex;
            } else {
                InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                ex.initCause(e);
                throw ex;
            }
        }

        validateSchemaIsSettled();
    }

    public static void validateKey(ByteBuffer key) throws InvalidRequestException {
        if (key == null || key.remaining() == 0) {
            throw new InvalidRequestException("Key may not be empty");
        }

        // check that key can be handled by FBUtilities.writeShortByteArray
        if (key.remaining() > FBUtilities.MAX_UNSIGNED_SHORT) {
            throw new InvalidRequestException("Key length of " + key.remaining() +
                                              " is longer than maximum of " + FBUtilities.MAX_UNSIGNED_SHORT);
        }
    }

    public static void validateKeyAlias(CFMetaData cfm, String key) throws InvalidRequestException {
        assert key.toUpperCase().equals(key); // should always be uppercased by caller
        String realKeyAlias = bufferToString(cfm.getKeyName()).toUpperCase();
        if (!realKeyAlias.equals(key))
            throw new InvalidRequestException(String.format("Expected key '%s' to be present in WHERE clause for '%s'", realKeyAlias, cfm.cfName));
    }

    private static void validateColumnNames(Iterable<ByteBuffer> columns) throws InvalidRequestException {
        for (ByteBuffer name : columns) {
            if (name.remaining() > IColumn.MAX_NAME_LENGTH)
                throw new InvalidRequestException(String.format("column name is too long (%s > %s)", name.remaining(), IColumn.MAX_NAME_LENGTH));
            if (name.remaining() == 0)
                throw new InvalidRequestException("zero-length column name");
        }
    }

    public static void validateColumnName(ByteBuffer column) throws InvalidRequestException {
        validateColumnNames(Arrays.asList(column));
    }

    public static void validateColumn(CFMetaData metadata, ByteBuffer name, ByteBuffer value) throws InvalidRequestException {
        validateColumnName(name);
        AbstractType<?> validator = metadata.getValueValidator(name);

        try {
            if (validator != null)
                validator.validate(value);
        } catch (MarshalException me) {
            throw new InvalidRequestException(String.format("Invalid column value for column (name=%s); %s",
                                                            ByteBufferUtil.bytesToHex(name),
                                                            me.getMessage()));
        }
    }

    private static void validateSlicePredicate(CFMetaData metadata, SlicePredicate predicate) throws InvalidRequestException {
        if (predicate.slice_range != null)
            validateSliceRange(metadata, predicate.slice_range);
        else
            validateColumnNames(predicate.column_names);
    }

    private static void validateSliceRange(CFMetaData metadata, SliceRange range) throws InvalidRequestException {
        validateSliceRange(metadata, range.start, range.finish, range.reversed);
    }

    private static void validateSliceRange(CFMetaData metadata, ByteBuffer start, ByteBuffer finish, boolean reversed)
            throws InvalidRequestException {
        AbstractType<?> comparator = metadata.getComparatorFor(null);
        Comparator<ByteBuffer> orderedComparator = reversed ? comparator.reverseComparator : comparator;
        if (start.remaining() > 0 && finish.remaining() > 0 && orderedComparator.compare(start, finish) > 0)
            throw new InvalidRequestException("range finish must come after start in traversal order");
    }

    // Copypasta from CassandraServer (where it is private).
    private static void validateSchemaAgreement() throws SchemaDisagreementException {
        if (describeSchemaVersions().size() > 1)
            throw new SchemaDisagreementException();
    }

    private static Map<String, List<String>> describeSchemaVersions() {
        // unreachable hosts don't count towards disagreement
        return Maps.filterKeys(StorageProxy.describeSchemaVersions(),
                               Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
    }

    public static CqlResult processStatement(CQLStatement statement, ClientState clientState, List<String> variables)
            throws UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException {
        String keyspace = null;

        // Some statements won't have (or don't need) a keyspace (think USE, or CREATE).
        if (statement.type != StatementType.SELECT && StatementType.requiresKeyspace.contains(statement.type))
            keyspace = clientState.getKeyspace();

        CqlResult result = new CqlResult();

        if (logger.isDebugEnabled())
            logger.debug("CQL statement type: {}", statement.type.toString());
        CFMetaData metadata;
        switch (statement.type) {
            case SELECT:
                SelectStatement select = (SelectStatement) statement.statement;

                final String oldKeyspace = clientState.getRawKeyspace();

                if (select.isSetKeyspace()) {
                    keyspace = CliUtils.unescapeSQLString(select.getKeyspace());
                    ThriftValidation.validateTable(keyspace);
                } else if (oldKeyspace == null)
                    throw new InvalidRequestException("no keyspace has been specified");
                else
                    keyspace = oldKeyspace;

                clientState.hasColumnFamilyAccess(keyspace, select.getColumnFamily(), Permission.READ);
                metadata = validateColumnFamily(keyspace, select.getColumnFamily());

                // need to do this in here because we need a CFMD.getKeyName()
                select.extractKeyAliasFromColumns(metadata);

                if (select.getKeys().size() > 0)
                    validateKeyAlias(metadata, select.getKeyAlias());

                validateSelect(keyspace, select, variables);

                List<org.apache.cassandra.db.Row> rows;

                // By-key
                if (!select.isKeyRange() && (select.getKeys().size() > 0)) {
                    rows = getSlice(metadata, select, variables);
                } else {
                    rows = multiRangeSlice(metadata, select, variables);
                }

                // count resultset is a single column named "count"
                result.type = CqlResultType.ROWS;
                if (select.isCountOperation()) {
                    validateCountOperation(select);

                    ByteBuffer countBytes = ByteBufferUtil.bytes("count");
                    result.schema = new CqlMetadata(Collections.<ByteBuffer, String>emptyMap(),
                                                    Collections.<ByteBuffer, String>emptyMap(),
                                                    "AsciiType",
                                                    "LongType");
                    List<Column> columns = Collections.singletonList(new Column(countBytes).setValue(ByteBufferUtil.bytes((long) rows.size())));
                    result.rows = Collections.singletonList(new CqlRow(countBytes, columns));
                    return result;
                }

                // otherwise create resultset from query results
                result.schema = new CqlMetadata(new HashMap<ByteBuffer, String>(),
                                                new HashMap<ByteBuffer, String>(),
                                                TypeParser.getShortName(metadata.comparator),
                                                TypeParser.getShortName(metadata.getDefaultValidator()));
                List<CqlRow> cqlRows = new ArrayList<CqlRow>();
                for (org.apache.cassandra.db.Row row : rows) {
                    List<Column> thriftColumns = new ArrayList<Column>();
                    if (select.isColumnRange()) {
                        if (select.isFullWildcard()) {
                            // prepend key
                            thriftColumns.add(new Column(metadata.getKeyName()).setValue(row.key.key).setTimestamp(-1));
                            result.schema.name_types.put(metadata.getKeyName(), TypeParser.getShortName(AsciiType.instance));
                            result.schema.value_types.put(metadata.getKeyName(), TypeParser.getShortName(metadata.getKeyValidator()));
                        }

                        // preserve comparator order
                        if (row.cf != null) {
                            for (IColumn c : row.cf.getSortedColumns()) {
                                if (c.isMarkedForDelete())
                                    continue;

                                ColumnDefinition cd = metadata.getColumnDefinition(c.name());
                                if (cd != null)
                                    result.schema.value_types.put(c.name(), TypeParser.getShortName(cd.getValidator()));

                                thriftColumns.add(thriftify(c));
                            }
                        }
                    } else {
                        String keyString = getKeyString(metadata);

                        // order columns in the order they were asked for
                        for (Term term : select.getColumnNames()) {
                            if (term.getText().equalsIgnoreCase(keyString)) {
                                // preserve case of key as it was requested
                                ByteBuffer requestedKey = ByteBufferUtil.bytes(term.getText());
                                thriftColumns.add(new Column(requestedKey).setValue(row.key.key).setTimestamp(-1));
                                result.schema.name_types.put(requestedKey, TypeParser.getShortName(AsciiType.instance));
                                result.schema.value_types.put(requestedKey, TypeParser.getShortName(metadata.getKeyValidator()));
                                continue;
                            }

                            if (row.cf == null)
                                continue;

                            ByteBuffer name;
                            try {
                                name = term.getByteBuffer(metadata.comparator, variables);
                            } catch (InvalidRequestException e) {
                                throw new AssertionError(e);
                            }

                            ColumnDefinition cd = metadata.getColumnDefinition(name);
                            if (cd != null)
                                result.schema.value_types.put(name, TypeParser.getShortName(cd.getValidator()));

                            IColumn c = row.cf.getColumn(name);
                            if (c == null || c.isMarkedForDelete())
                                thriftColumns.add(new Column().setName(name));
                            else
                                thriftColumns.add(thriftify(c));
                        }
                    }

                    // Create a new row, add the columns to it, and then add it to the list of rows
                    CqlRow cqlRow = new CqlRow();
                    cqlRow.key = row.key.key;
                    cqlRow.columns = thriftColumns;
                    if (select.isColumnsReversed())
                        Collections.reverse(cqlRow.columns);
                    cqlRows.add(cqlRow);
                }

                result.rows = cqlRows;
                return result;

            case INSERT: // insert uses UpdateStatement
            case UPDATE:
                UpdateStatement update = (UpdateStatement) statement.statement;
                ThriftValidation.validateConsistencyLevel(keyspace, update.getConsistencyLevel(), RequestType.WRITE);
                batchUpdate(clientState, Collections.singletonList(update), update.getConsistencyLevel(), variables);
                result.type = CqlResultType.VOID;
                return result;

            case BATCH:
                BatchStatement batch = (BatchStatement) statement.statement;
                ThriftValidation.validateConsistencyLevel(keyspace, batch.getConsistencyLevel(), RequestType.WRITE);

                if (batch.getTimeToLive() != 0)
                    throw new InvalidRequestException("Global TTL on the BATCH statement is not supported.");

                for (AbstractModification up : batch.getStatements()) {
                    if (up.isSetConsistencyLevel())
                        throw new InvalidRequestException("Consistency level must be set on the BATCH, not individual statements");

                    if (batch.isSetTimestamp() && up.isSetTimestamp())
                        throw new InvalidRequestException("Timestamp must be set either on BATCH or individual statements");
                }

                try {
                    StorageProxy.mutate(batch.getMutations(keyspace, clientState, variables), batch.getConsistencyLevel());
                } catch (org.apache.cassandra.thrift.UnavailableException e) {
                    throw new UnavailableException();
                } catch (TimeoutException e) {
                    throw new TimedOutException();
                }

                result.type = CqlResultType.VOID;
                return result;

            case USE:
                clientState.setKeyspace(CliUtils.unescapeSQLString((String) statement.statement));
                result.type = CqlResultType.VOID;
                return result;

            case TRUNCATE:
                Pair<String, String> columnFamily = (Pair<String, String>) statement.statement;
                keyspace = columnFamily.left == null ? clientState.getKeyspace() : columnFamily.left;

                validateColumnFamily(keyspace, columnFamily.right);
                clientState.hasColumnFamilyAccess(keyspace, columnFamily.right, Permission.WRITE);

                try {
                    StorageProxy.truncateBlocking(keyspace, columnFamily.right);
                } catch (TimeoutException e) {
                    throw (UnavailableException) new UnavailableException().initCause(e);
                } catch (IOException e) {
                    throw (UnavailableException) new UnavailableException().initCause(e);
                }

                result.type = CqlResultType.VOID;
                return result;

            case DELETE:
                DeleteStatement delete = (DeleteStatement) statement.statement;
                keyspace = delete.keyspace == null ? clientState.getKeyspace() : delete.keyspace;

                try {
                    StorageProxy.mutate(delete.prepareRowMutations(keyspace, clientState, variables), delete.getConsistencyLevel());
                } catch (TimeoutException e) {
                    throw new TimedOutException();
                }

                result.type = CqlResultType.VOID;
                return result;

            case CREATE_KEYSPACE:
                CreateKeyspaceStatement create = (CreateKeyspaceStatement) statement.statement;
                create.validate();
                clientState.hasKeyspaceSchemaAccess(Permission.WRITE);
                validateSchemaAgreement();

                try {
                    KsDef ksd = new KsDef(create.getName(),
                                          create.getStrategyClass(),
                                          Collections.<CfDef>emptyList())
                                .setStrategy_options(create.getStrategyOptions());
                    ThriftValidation.validateKsDef(ksd);
                    ThriftValidation.validateKeyspaceNotYetExisting(create.getName());
                    applyMigrationOnStage(new AddKeyspace(KSMetaData.fromThrift(ksd)));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case CREATE_COLUMNFAMILY:
                CreateColumnFamilyStatement createCf = (CreateColumnFamilyStatement) statement.statement;
                clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
                validateSchemaAgreement();
                CFMetaData cfmd = createCf.getCFMetaData(keyspace, variables);
                ThriftValidation.validateCfDef(cfmd.toThrift(), null);

                try {
                    applyMigrationOnStage(new AddColumnFamily(cfmd));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case CREATE_INDEX:
                CreateIndexStatement createIdx = (CreateIndexStatement) statement.statement;
                clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
                validateSchemaAgreement();
                CFMetaData oldCfm = Schema.instance.getCFMetaData(keyspace, createIdx.getColumnFamily());
                if (oldCfm == null)
                    throw new InvalidRequestException("No such column family: " + createIdx.getColumnFamily());

                boolean columnExists = false;
                ByteBuffer columnName = createIdx.getColumnName().getByteBuffer();
                // mutating oldCfm directly would be bad, but mutating a Thrift copy is fine. This also
                // sets us up to use validateCfDef to check for index name collisions.
                CfDef cf_def = oldCfm.toThrift();
                for (ColumnDef cd : cf_def.column_metadata) {
                    if (cd.name.equals(columnName)) {
                        if (cd.index_type != null)
                            throw new InvalidRequestException("Index already exists");
                        if (logger.isDebugEnabled())
                            logger.debug("Updating column {} definition for index {}",
                                         oldCfm.comparator.getString(columnName),
                                         createIdx.getIndexName());
                        cd.setIndex_type(IndexType.KEYS);
                        cd.setIndex_name(createIdx.getIndexName());
                        columnExists = true;
                        break;
                    }
                }
                if (!columnExists)
                    throw new InvalidRequestException("No column definition found for column " + oldCfm.comparator.getString(columnName));

                CFMetaData.addDefaultIndexNames(cf_def);
                ThriftValidation.validateCfDef(cf_def, oldCfm);
                try {
                    org.apache.cassandra.db.migration.avro.CfDef result1;
                    try {
                        result1 = CFMetaData.fromThrift(cf_def).toAvro();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                    applyMigrationOnStage(new UpdateColumnFamily(result1));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case DROP_INDEX:
                DropIndexStatement dropIdx = (DropIndexStatement) statement.statement;
                clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
                validateSchemaAgreement();

                try {
                    applyMigrationOnStage(dropIdx.generateMutation(clientState.getKeyspace()));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.toString());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case DROP_KEYSPACE:
                String deleteKeyspace = (String) statement.statement;
                clientState.hasKeyspaceSchemaAccess(Permission.WRITE);
                validateSchemaAgreement();

                try {
                    applyMigrationOnStage(new DropKeyspace(deleteKeyspace));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case DROP_COLUMNFAMILY:
                String deleteColumnFamily = (String) statement.statement;
                clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
                validateSchemaAgreement();

                try {
                    applyMigrationOnStage(new DropColumnFamily(keyspace, deleteColumnFamily));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;

            case ALTER_TABLE:
                AlterTableStatement alterTable = (AlterTableStatement) statement.statement;

                validateColumnFamily(keyspace, alterTable.columnFamily);
                clientState.hasColumnFamilyAccess(alterTable.columnFamily, Permission.WRITE);
                validateSchemaAgreement();

                try {
                    applyMigrationOnStage(new UpdateColumnFamily(alterTable.getCfDef(keyspace)));
                } catch (ConfigurationException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                } catch (IOException e) {
                    InvalidRequestException ex = new InvalidRequestException(e.getMessage());
                    ex.initCause(e);
                    throw ex;
                }

                result.type = CqlResultType.VOID;
                return result;
        }

        return null; // We should never get here.
    }

    public static CqlResult process(String queryString, ClientState clientState)
            throws RecognitionException, UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException {
        logger.trace("CQL QUERY: {}", queryString);
        return processStatement(getStatement(queryString), clientState, new ArrayList<String>());
    }

    public static CqlPreparedResult prepare(String queryString, ClientState clientState)
            throws RecognitionException, InvalidRequestException {
        logger.trace("CQL QUERY: {}", queryString);

        CQLStatement statement = getStatement(queryString);
        int statementId = makeStatementId(queryString);
        logger.trace("Discovered " + statement.boundTerms + " bound variables.");

        clientState.getPrepared().put(statementId, statement);
        logger.trace(String.format("Stored prepared statement #%d with %d bind markers", statementId, statement.boundTerms));

        return new CqlPreparedResult(statementId, statement.boundTerms);
    }

    public static CqlResult processPrepared(CQLStatement statement, ClientState clientState, List<String> variables)
            throws UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException {
        // Check to see if there are any bound variables to verify
        if (!(variables.isEmpty() && (statement.boundTerms == 0))) {
            if (variables.size() != statement.boundTerms)
                throw new InvalidRequestException(String.format("there were %d markers(?) in CQL but %d bound variables",
                                                                statement.boundTerms, variables.size()));

            // at this point there is a match in count between markers and variables that is non-zero
            if (logger.isTraceEnabled())
                for (int i = 0; i < variables.size(); i++)
                    logger.trace("[{}] '{}'", i + 1, variables.get(i));
        }

        return processStatement(statement, clientState, variables);
    }

    private static final int makeStatementId(String cql) {
        // use the hash of the string till something better is provided
        return cql.hashCode();
    }

    private static Column thriftify(IColumn c) {
        ByteBuffer value = (c instanceof CounterColumn)
                           ? ByteBufferUtil.bytes(((CounterColumn) c).total())
                           : c.value();
        return new Column(c.name()).setValue(value).setTimestamp(c.timestamp());
    }

    private static String getKeyString(CFMetaData metadata) {
        String keyString;
        try {
            keyString = ByteBufferUtil.string(metadata.getKeyName());
        } catch (CharacterCodingException e) {
            throw new AssertionError(e);
        }
        return keyString;
    }

    private static CQLStatement getStatement(String queryStr) throws InvalidRequestException, RecognitionException {
        // Lexer and parser
        CharStream stream = new ANTLRStringStream(queryStr);
        CqlLexer lexer = new CqlLexer(stream);
        TokenStream tokenStream = new CommonTokenStream(lexer);
        CqlParser parser = new CqlParser(tokenStream);

        // Parse the query string to a statement instance
        CQLStatement statement = parser.query();

        // The lexer and parser queue up any errors they may have encountered
        // along the way, if necessary, we turn them into exceptions here.
        lexer.throwLastRecognitionError();
        parser.throwLastRecognitionError();

        return statement;
    }

    private static void validateSchemaIsSettled() throws SchemaDisagreementException {
        long limit = System.currentTimeMillis() + timeLimitForSchemaAgreement;

        outer:
        while (limit - System.currentTimeMillis() >= 0) {
            String currentVersionId = Schema.instance.getVersion().toString();
            for (String version : describeSchemaVersions().keySet()) {
                if (!version.equals(currentVersionId))
                    continue outer;
            }

            // schemas agree
            return;
        }

        throw new SchemaDisagreementException();
    }

    private static void validateCountOperation(SelectStatement select) throws InvalidRequestException {
        if (select.isWildcard())
            return; // valid count(*)

        if (!select.isColumnRange()) {
            List<Term> columnNames = select.getColumnNames();
            String firstColumn = columnNames.get(0).getText();

            if (columnNames.size() == 1 && (firstColumn.equals("*") || firstColumn.equals("1")))
                return; // valid count(*) || count(1)
        }

        throw new InvalidRequestException("Only COUNT(*) and COUNT(1) operations are currently supported.");
    }

    private static String bufferToString(ByteBuffer string) {
        try {
            return ByteBufferUtil.string(string);
        } catch (CharacterCodingException e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
}
wlloyd/eiger
src/java/org/apache/cassandra/cql/QueryProcessor.java
213,888
/** * Copyright (c) 2018 Cisco Systems * * Author: Steven Barth <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cisco.stbarth.netconf.anx; import com.cisco.stbarth.netconf.anc.Netconf; import com.cisco.stbarth.netconf.anc.XMLElement; import com.vaadin.data.TreeData; import com.vaadin.ui.Tree; import org.opendaylight.yangtools.yang.common.QName; import org.opendaylight.yangtools.yang.model.api.*; import org.opendaylight.yangtools.yang.model.api.Module; import org.opendaylight.yangtools.yang.model.api.type.LeafrefTypeDefinition; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Optional; import java.util.stream.Stream; /** * wrapper utility class to work with ODL yangtools schema nodes */ class WrappedYangNode { private Module module; private DataSchemaNode node; private WrappedYangNode parent; private String namespace; private String name; private String description; private boolean expand; private boolean configuration; private static HashMap<String,String> prefixes = new HashMap<>(); WrappedYangNode(WrappedYangNode parent, DataSchemaNode node) { this.parent = parent; this.node = node; this.namespace = node.getQName().getNamespace().toString(); this.name = node.getQName().getLocalName(); this.description = node.getDescription().orElse(""); this.configuration = node.isConfiguration(); } WrappedYangNode(Module module) { this.module = module; this.namespace = module.getNamespace().toString(); this.name = module.getName(); this.description = module.getDescription().orElse(""); prefixes.put(namespace, module.getPrefix()); } // Lookup YANG node from path static Optional<WrappedYangNode> byPath(WrappedYangNode module, String path) { Optional<WrappedYangNode> node = Optional.of(module); for (String element: path.split("/")) { //FIXME: we need to filter any choice-nodes here? 
node = node.flatMap(n -> n.getChild(element)); } return node; } String getName() { return name; } String getCaption() { String caption = getName(); if (parent != null && (parent.node == null || !parent.namespace.equals(namespace))) caption += " (" + namespace + ")"; return caption; } String getSensorPath(boolean keyed, XMLElement data) { String path = name; if (keyed && node instanceof ListSchemaNode) { // For lists, we need to include key leafs for (QName key : ((ListSchemaNode)node).getKeyDefinition()) { String value = '{' + key.getLocalName() + '}'; if (data != null) value = data.getTextOrDefault(key.getNamespace().toString(), key.getLocalName(), value); path = path + "[" + key.getLocalName() + "=" + value + "]"; } } for (WrappedYangNode node = this.parent; node != null; node = node.parent) { if (node.node instanceof CaseSchemaNode || node.node instanceof ChoiceSchemaNode) continue; if (data != null) data = data.getParent(); String keys = ""; if (keyed && node.node instanceof ListSchemaNode) { // For lists, we need to include key leafs for (QName key : ((ListSchemaNode)(node.node)).getKeyDefinition()) { String value = '{' + key.getLocalName() + '}'; if (data != null) value = data.getTextOrDefault(key.getNamespace().toString(), key.getLocalName(), value); keys = keys + "[" + key.getLocalName() + "=" + value + "]"; } } path = node.name + keys + (node.module != null ? ':' : '/') + path; } return path; } String getXPath() { String path = String.format("%s:%s", prefixes.get(namespace), name); for (WrappedYangNode node = this.parent; node != null && node.node != null; node = node.parent) { if (!(node.node instanceof CaseSchemaNode) && !(node.node instanceof ChoiceSchemaNode)) path = String.format("%s:%s/%s", prefixes.get(node.namespace), node.name, path); } return "/" + path; } String getMaagic(boolean qualified) { String nodeName = name.replaceAll("[-.]", "_"); if (node instanceof ListSchemaNode) nodeName += "[...]"; String path = !qualified ? nodeName : String.format("%s__%s", prefixes.get(namespace).replaceAll("[-.]", "_"), nodeName); for (WrappedYangNode node = this.parent; node != null && node.node != null; node = node.parent) { if (!(node.node instanceof CaseSchemaNode) && !(node.node instanceof ChoiceSchemaNode)) { nodeName = node.name.replaceAll("[-.]", "_"); if (node.node instanceof ListSchemaNode) nodeName += "[...]"; path = !qualified ? String.format("%s.%s", nodeName, path) : String.format("%s__%s.%s", prefixes.get(node.namespace).replaceAll("[-.]", "_"), nodeName, path); } } return path; } Module getModule() { return module; } String getType() { if (node instanceof AnyxmlSchemaNode) return "anyxml"; else if (node instanceof CaseSchemaNode) return "case"; else if (node instanceof ChoiceSchemaNode) return "choice"; else if (node instanceof ContainerSchemaNode) return "container"; else if (node instanceof LeafSchemaNode) return "leaf"; else if (node instanceof LeafListSchemaNode) return "leaf-list"; else if (node instanceof ListSchemaNode) return "list"; else return "module"; } String getLeafRef() { TypeDefinition<?> type = (node instanceof TypedDataSchemaNode) ? ((TypedDataSchemaNode)node).getType() : null; return (type instanceof LeafrefTypeDefinition) ? ((LeafrefTypeDefinition)type).getPathStatement().getOriginalString() : ""; } String getDataType() { return (node instanceof TypedDataSchemaNode) ? ((TypedDataSchemaNode)node).getType().getPath().getLastComponent().getLocalName() : ""; } // Recursively transform YANG schema node into XML pattern (e.g. 
subtree filters) private void addChildren(XMLElement element, DataSchemaNode node) { if (node instanceof DataNodeContainer) { DataNodeContainer container = (DataNodeContainer)node; for (DataSchemaNode child: container.getChildNodes()) { String childNS = child.getQName().getNamespace().toString(); String childName = child.getQName().getLocalName(); Optional<XMLElement> dataChildElement = element.getFirst(childNS, childName); XMLElement childElement = dataChildElement.isPresent() ? dataChildElement.get() : element.createChild(childNS, childName); childElement.withAttribute("expand", null); childElement.withAttribute("root", null); addChildren(childElement, child); if (!dataChildElement.isPresent()) { if (child instanceof LeafSchemaNode || child instanceof LeafListSchemaNode) childElement.withText(child.getDescription().orElse("")); element.withComment(childElement.toString().replaceAll("\\s+$", "")); childElement.remove(); } } } } // Create XML template (e.g. subtree filter) from current YANG node Optional<XMLElement> createNetconfTemplate(String operation, XMLElement data) { XMLElement element = new XMLElement(namespace, name); if (operation != null && operation.isEmpty()) { if (data != null) element = data; // Remove any meta-attributes we may have added elsewhere element.withAttribute("expand", null); element.withAttribute("root", null); addChildren(element, node); } else if (operation != null || data != null) { if (operation != null && !operation.isEmpty()) element.withAttribute(Netconf.NS_NETCONF, "operation", operation); if (node instanceof ListSchemaNode) { // For lists, we need to include key leafs for (QName key : ((ListSchemaNode) node).getKeyDefinition()) { String keyNS = key.getNamespace().toString(); String keyName = key.getLocalName(); Optional<XMLElement> keyE = Optional.ofNullable(data).flatMap(x -> x.getFirst(keyNS, keyName)); if (keyE.isPresent()) element.withChild(keyE.get().clone()); else element.createChild(keyNS, keyName); } } } // If we have associated data from the peer populate it in the XML template for (WrappedYangNode node = this.parent; node != null && node.node != null; node = node.parent) { if (!(node.node instanceof CaseSchemaNode) && !(node.node instanceof ChoiceSchemaNode)) { if (data != null) data = data.getParent(); element = new XMLElement(node.namespace, node.name).withChild(element); if (node.node instanceof ListSchemaNode) { ListSchemaNode listNode = (ListSchemaNode)node.node; for (QName key: listNode.getKeyDefinition()) { String keyName = key.getLocalName(); String keyNS = key.getNamespace().toString(); Optional<XMLElement> keyV = Optional.ofNullable(data).flatMap(d -> d.getFirst(keyNS, keyName)); if (keyV.isPresent()) element.withChild(keyV.get().clone()); else element.withChild(keyNS, keyName); } } } } return Optional.ofNullable(module == null ? element : null); } Optional<XMLElement> createNetconfTemplate() { return createNetconfTemplate(null, null); } // Get the names of the key leafs Stream<String> getKeys() { return (node instanceof ListSchemaNode) ? 
((ListSchemaNode)node).getKeyDefinition().stream().map(QName::getLocalName) : Stream.empty(); } boolean isKey() { return parent != null && parent.node instanceof ListSchemaNode && ((ListSchemaNode)parent.node).getKeyDefinition().contains(node.getQName()); } boolean isMandatory() { return node instanceof MandatoryAware && ((MandatoryAware)node).isMandatory(); } DataSchemaNode getNode() { return node; } WrappedYangNode getParent() { return parent; } // Populate YANG schema nodes, based on a filter applied to names and descriptions boolean addToTree(TreeData<WrappedYangNode> data, Collection<String> filter) { String nodeName = name.toLowerCase(); String nodeDescription = description.toLowerCase(); boolean okay = filter.stream().filter(nodeName::contains).count() == filter.size() || filter.stream().filter(nodeDescription::contains).count() == filter.size(); if (node != null) data.addItem(parent.node != null ? parent : null, this); if (!filter.isEmpty()) { if (parent != null) parent.expand = true; if (okay) filter = Collections.emptyList(); } // Recursively add child nodes to YANG schema tree if (module != null) { for (Module submodule: module.getSubmodules()) { for (DataSchemaNode childNode: submodule.getChildNodes()) { if (new WrappedYangNode(this, childNode).addToTree(data, filter)) okay = true; // if a child matches the filter, propagate the match up to the current element } } } DataNodeContainer container = module != null ? module : node instanceof DataNodeContainer ? (DataNodeContainer)node : null; if (container != null) { HashSet<DataSchemaNode> addedNodes = new HashSet<>(); for (DataSchemaNode childNode : container.getChildNodes()) { WrappedYangNode child = new WrappedYangNode(this, childNode); if (child.addToTree(data, filter)) { addedNodes.add(childNode); okay = true; } } // Some of the children have matched; add adjacent leafs for context if (!filter.isEmpty() && okay) { for (DataSchemaNode childNode : container.getChildNodes()) if (!addedNodes.contains(childNode) && (childNode instanceof LeafSchemaNode || childNode instanceof LeafListSchemaNode)) new WrappedYangNode(this, childNode).addToTree(data, Collections.emptyList()); } } // If neither current element nor children match filter, remove current try { if (!okay) data.removeItem(this); } catch (IllegalArgumentException e) { // ignore if element is not in tree anyway } return okay; } String getDescription() { return description; } boolean isConfiguration() { return configuration; } String getNamespace() { return namespace; } public Stream<WrappedYangNode> getChildren() { if (node instanceof ChoiceSchemaNode) { return ((ChoiceSchemaNode)node).getCases().values().stream().map(n -> new WrappedYangNode(this, n)); } else { DataNodeContainer container = module != null ? module : node instanceof DataNodeContainer ? (DataNodeContainer) node : null; return (container == null) ? Stream.empty() : container.getChildNodes().stream().map(n -> new WrappedYangNode(this, n)); } } Optional<WrappedYangNode> getChild(String name) { return getChildren().filter(node -> node.getName().equals(name)).findAny(); } // Apply limited expansion int applyExpand(Tree<WrappedYangNode> tree, int limit) { if (expand && limit > 0) { int limitBefore = limit; tree.expand(this); for (WrappedYangNode child: tree.getTreeData().getChildren(this)) limit = child.applyExpand(tree, limit); if (limit == limitBefore) --limit; } return limit; } }
cisco-ie/anx
explorer/src/main/java/com/cisco/stbarth/netconf/anx/WrappedYangNode.java
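A minimal usage sketch (not part of the repository) showing how the lookup and generator methods above fit together. It assumes a yangtools Module parsed elsewhere, and it must live in the same package since WrappedYangNode is package-private; the path string "interfaces/interface" is hypothetical.

import java.util.Optional;
import org.opendaylight.yangtools.yang.model.api.Module;

class WrappedYangNodeExample {
    // Print the keyed sensor path and a NETCONF subtree-filter template for one node.
    static void describe(Module module) {
        WrappedYangNode root = new WrappedYangNode(module);
        Optional<WrappedYangNode> node = WrappedYangNode.byPath(root, "interfaces/interface");
        node.ifPresent(n -> System.out.println(n.getSensorPath(true, null))); // with null data, keys render as {key} placeholders
        node.flatMap(WrappedYangNode::createNetconfTemplate)
            .ifPresent(template -> System.out.println(template));
    }
}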
213,889
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.DataInputStream; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.RateLimiter; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.DebuggableScheduledThreadPoolExecutor; import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.config.Schema; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.composites.CellName; import org.apache.cassandra.db.composites.Composite; import org.apache.cassandra.db.composites.Composites; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.db.marshal.UUIDType; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.exceptions.WriteFailureException; import org.apache.cassandra.exceptions.WriteTimeoutException; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.metrics.HintedHandoffMetrics; import org.apache.cassandra.net.MessageOut; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.StorageProxy; import org.apache.cassandra.service.StorageService; import org.apache.cassandra.service.WriteResponseHandler; import org.apache.cassandra.utils.*; import org.cliffc.high_scale_lib.NonBlockingHashSet; import java.util.List; import org.apache.cassandra.utils.MBeanWrapper; import static org.apache.cassandra.utils.ExecutorUtils.awaitTermination; import static org.apache.cassandra.utils.ExecutorUtils.shutdown; /** * The hint schema looks like this: * * CREATE TABLE hints ( * target_id uuid, * hint_id timeuuid, * message_version int, * mutation blob, * PRIMARY KEY (target_id, hint_id, message_version) * ) WITH COMPACT STORAGE; * * Thus, for each node in the cluster we treat its uuid as the partition key; each hint is a logical row * (physical composite column) containing the mutation to replay and 
associated metadata. * * When FailureDetector signals that a node that was down is back up, we page through * the hinted mutations and send them over one at a time, waiting for * hinted_handoff_throttle_delay in between each. * * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. */ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=HintedHandoffManager"; public static final HintedHandOffManager instance = new HintedHandOffManager(); private static final Logger logger = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int PAGE_SIZE = 128; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. public final HintedHandoffMetrics metrics = new HintedHandoffMetrics(); private volatile boolean hintedHandOffPaused = false; static final int maxHintTTL = Integer.parseInt(System.getProperty("cassandra.maxHintTTL", String.valueOf(Integer.MAX_VALUE))); private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<>(); // To keep metrics consistent with earlier versions, where periodic tasks were run on a shared executor, // we run them on this executor and so keep counts separate from those for hint delivery tasks. See CASSANDRA-9129 private final DebuggableScheduledThreadPoolExecutor executor = new DebuggableScheduledThreadPoolExecutor(1, new NamedThreadFactory("HintedHandoffManager", Thread.MIN_PRIORITY)); // Non-scheduled executor to run the actual hint delivery tasks. // Per CASSANDRA-9129, this is where the values displayed in nodetool tpstats // and via the HintedHandoff mbean are obtained. private final ThreadPoolExecutor hintDeliveryExecutor = new JMXEnabledThreadPoolExecutor( DatabaseDescriptor.getMaxHintsThread(), Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("HintedHandoff", Thread.MIN_PRIORITY), "internal"); private final ColumnFamilyStore hintStore = Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.HINTS); /** * Returns a mutation representing a Hint to be sent to <code>targetId</code> * as soon as it becomes available again. 
*/ public Mutation hintFor(Mutation mutation, long now, int ttl, Pair<InetAddress, UUID> target) { assert ttl > 0; InetAddress endpoint = target.left; UUID targetId = target.right; metrics.incrCreatedHints(endpoint); UUID hintId = UUIDGen.getTimeUUID(); // serialize the hint with id and version as a composite column name CellName name = SystemKeyspace.Hints.comparator.makeCellName(hintId, MessagingService.current_version); ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, Mutation.serializer, MessagingService.current_version)); ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Schema.instance.getCFMetaData(SystemKeyspace.NAME, SystemKeyspace.HINTS)); cf.addColumn(name, value, now, ttl); return new Mutation(SystemKeyspace.NAME, UUIDType.instance.decompose(targetId), cf); } /* * determine the TTL for the hint Mutation * this is set at the smallest GCGraceSeconds for any of the CFs in the RM * this ensures that deletes aren't "undone" by delivery of an old hint */ public static int calculateHintTTL(Mutation mutation) { int ttl = maxHintTTL; for (ColumnFamily cf : mutation.getColumnFamilies()) ttl = Math.min(ttl, cf.metadata().getGcGraceSeconds()); return ttl; } public void start() { MBeanWrapper.instance.registerMBean(this, MBEAN_NAME); logger.trace("Created HHOM instance, registered MBean."); Runnable runnable = new Runnable() { public void run() { scheduleAllDeliveries(); metrics.log(); } }; executor.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES); } private static void deleteHint(ByteBuffer tokenBytes, CellName columnName, long timestamp) { Mutation mutation = new Mutation(SystemKeyspace.NAME, tokenBytes); mutation.delete(SystemKeyspace.HINTS, columnName, timestamp); mutation.applyUnsafe(); // don't bother with commitlog since we're going to flush as soon as we're done with delivery } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint, "Truncating hints (requested via nodetool)"); } catch (UnknownHostException e) { logger.warn("Unable to find {}, not a hostname or ipaddr of a node", ipOrHostname); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint, String reason) { if (!StorageService.instance.getTokenMetadata().isMember(endpoint)) return; UUID hostId = StorageService.instance.getTokenMetadata().getHostId(endpoint); if (hostId == null) return; ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); final Mutation mutation = new Mutation(SystemKeyspace.NAME, hostIdBytes); mutation.delete(SystemKeyspace.HINTS, System.currentTimeMillis()); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger.info("Deleting any stored hints for {}", endpoint); mutation.apply(); hintStore.forceBlockingFlush(reason); compact(); } catch (Exception e) { JVMStabilityInspector.inspectThrowable(e); logger.warn("Could not delete hints for {}: {}", endpoint, e); } } }; executor.submit(runnable); } public void truncateAllHints() throws ExecutionException, InterruptedException { Runnable runnable = new Runnable() { public void run() { try { logger.info("Truncating all stored hints."); Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.HINTS).truncateBlocking(); } catch (Exception e) { logger.warn("Could not truncate all hints.", e); } } }; executor.submit(runnable).get(); }
@VisibleForTesting protected synchronized void compact() { ArrayList<Descriptor> descriptors = new ArrayList<>(); for (SSTable sstable : hintStore.getTracker().getUncompacting()) descriptors.add(sstable.descriptor); if (descriptors.isEmpty()) return; try { CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000)).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } private static boolean pagingFinished(ColumnFamily hintColumnFamily, Composite startColumn) { // done if no hints found or the start column (same as last column processed in previous iteration) is the only one return hintColumnFamily == null || (!startColumn.isEmpty() && hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn((CellName)startColumn) != null); } private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint) != null && gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Didn't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) while (gossiper.getEndpointStateForEndpoint(endpoint) != null && !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); logger.trace("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) { if (hintStore.isEmpty()) return; // nothing to do, don't confuse users by logging a no-op handoff // check if hints delivery has been paused if (hintedHandOffPaused) { logger.trace("Hints delivery process is paused, aborting"); return; } logger.trace("Checking remote({}) schema before delivering hints", endpoint); try { waitForSchemaAgreement(endpoint); } catch (TimeoutException e) { return; } if (!FailureDetector.instance.isAlive(endpoint)) { logger.trace("Endpoint {} died before hint delivery, aborting", endpoint); return; } doDeliverHintsToEndpoint(endpoint); } /* * 1. Get the key of the endpoint we need to handoff * 2. For each column, deserialize the mutation and send it to the endpoint * 3. Delete the column if the write was successful * 4.
Force a flush */ private void doDeliverHintsToEndpoint(InetAddress endpoint) { // find the hints for the node using its token. UUID hostId = Gossiper.instance.getHostId(endpoint); logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint); final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes); final AtomicInteger rowsReplayed = new AtomicInteger(0); Composite startColumn = Composites.EMPTY; int pageSize = calculatePageSize(); logger.trace("Using pageSize of {}", pageSize); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024); delivery: while (true) { long now = System.currentTimeMillis(); QueryFilter filter = QueryFilter.getSliceFilter(epkey, SystemKeyspace.HINTS, startColumn, Composites.EMPTY, false, pageSize, now); ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000)); if (pagingFinished(hintsPage, startColumn)) { logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint); break; } // check if node is still alive and we should continue delivery process if (!FailureDetector.instance.isAlive(endpoint)) { logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed); break; } List<WriteResponseHandler<Mutation>> responseHandlers = Lists.newArrayList(); for (final Cell hint : hintsPage) { // check if hints delivery has been paused during the process if (hintedHandOffPaused) { logger.trace("Hints delivery process is paused, aborting"); break delivery; } // Skip tombstones: // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond // in which the local deletion timestamp was generated on the last column in the old page, in which // case the hint will have no columns (since it's deleted) but will still be included in the resultset // since (even with gcgs=0) it's still a "relevant" tombstone. 
if (!hint.isLive()) continue; startColumn = hint.name(); int version = Int32Type.instance.compose(hint.name().get(1)); DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value())); Mutation mutation; try { mutation = Mutation.serializer.deserialize(in, version); } catch (UnknownColumnFamilyException e) { logger.trace("Skipping delivery of hint for deleted table", e); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } catch (IOException e) { throw new AssertionError(e); } for (UUID cfId : mutation.getColumnFamilyIds()) { if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId)) { logger.trace("Skipping delivery of hint for truncated table {}", cfId); mutation = mutation.without(cfId); } } if (mutation.isEmpty()) { deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } MessageOut<Mutation> message = mutation.createMessage(); rateLimiter.acquire(message.serializedSize(MessagingService.current_version)); Runnable callback = new Runnable() { public void run() { rowsReplayed.incrementAndGet(); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); } }; WriteResponseHandler<Mutation> responseHandler = new WriteResponseHandler<>(endpoint, WriteType.SIMPLE, callback); MessagingService.instance().sendRR(message, endpoint, responseHandler, false); responseHandlers.add(responseHandler); } for (WriteResponseHandler<Mutation> handler : responseHandlers) { try { handler.get(); } catch (WriteTimeoutException|WriteFailureException e) { logger.info("Failed replaying hints to {}; aborting ({} delivered), error : {}", endpoint, rowsReplayed, e.getMessage()); break delivery; } } } // Flush all the tombstones to disk hintStore.forceBlockingFlush("Forcing a flush after delivering hints"); } // read less columns (mutations) per page if they are very large private int calculatePageSize() { int meanColumnCount = hintStore.getMeanColumns(); if (meanColumnCount <= 0) return PAGE_SIZE; int averageColumnSize = (int) (hintStore.metric.meanRowSize.getValue() / meanColumnCount); if (averageColumnSize <= 0) return PAGE_SIZE; // page size of 1 does not allow actual paging b/c of >= behavior on startColumn return Math.max(2, Math.min(PAGE_SIZE, 4 * 1024 * 1024 / averageColumnSize)); } /** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { logger.trace("Started scheduleAllDeliveries"); // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints // to deliver to). compact(); IPartitioner p = StorageService.getPartitioner(); RowPosition minPos = p.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<>(minPos, minPos); IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of()); List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis()); for (Row row : rows) { UUID hostId = UUIDGen.getUUID(row.key.getKey()); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target, false); } logger.trace("Finished scheduleAllDeliveries"); } /* * This method is used to deliver hints to a particular endpoint. 
* When we learn that some endpoint is back up we deliver the data * to him via an event driven mechanism. */ public void scheduleHintDelivery(final InetAddress to, final boolean precompact) { // We should not deliver hints to the same host in 2 different threads if (!queuedDeliveries.add(to)) return; logger.trace("Scheduling delivery of Hints to {}", to); hintDeliveryExecutor.execute(new Runnable() { public void run() { try { // If it's an individual node hint replay (triggered by Gossip or via JMX), and not the global scheduled replay // (every 10 minutes), force a major compaction to get rid of the tombstones and expired hints. if (precompact) compact(); deliverHintsToEndpoint(to); } finally { queuedDeliveries.remove(to); } } }); } public void scheduleHintDelivery(String to) throws UnknownHostException { scheduleHintDelivery(InetAddress.getByName(to), true); } public void pauseHintsDelivery(boolean b) { hintedHandOffPaused = b; } public List<String> listEndpointsPendingHints() { Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory(); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<>(); for (Row row : getHintsSlice(1)) { if (row.cf != null) //ignore removed rows result.addFirst(tokenFactory.toString(row.key.getToken())); } return result; } private List<Row> getHintsSlice(int columnCount) { // Get count # of columns... SliceQueryFilter predicate = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, columnCount); // From keys "" to ""... IPartitioner partitioner = StorageService.getPartitioner(); RowPosition minPos = partitioner.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<>(minPos, minPos); try { RangeSliceCommand cmd = new RangeSliceCommand(SystemKeyspace.NAME, SystemKeyspace.HINTS, System.currentTimeMillis(), predicate, range, null, LARGE_NUMBER); return StorageProxy.getRangeSlice(cmd, ConsistencyLevel.ONE); } catch (Exception e) { logger.info("HintsCF getEPPendingHints timed out."); throw new RuntimeException(e); } } @VisibleForTesting public void shutdownAndWait(long timeout, TimeUnit units) throws InterruptedException, TimeoutException { shutdown(executor, hintDeliveryExecutor); awaitTermination(timeout, units, executor, hintDeliveryExecutor); } }
palantir/cassandra
src/java/org/apache/cassandra/db/HintedHandOffManager.java
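A hedged operational sketch of the public entry points above (not part of the file); the address is a placeholder. Pausing flips the volatile flag checked inside the delivery loop, and scheduleHintDelivery(String) queues an asynchronous, compaction-preceded replay on the hint delivery executor.

import java.net.UnknownHostException;

class HintedHandoffOps {
    // Pause replay, drop stored hints for one endpoint, then queue a manual delivery.
    static void resetAndRedeliver(String address) throws UnknownHostException {
        HintedHandOffManager hhom = HintedHandOffManager.instance;
        hhom.pauseHintsDelivery(true);        // in-flight delivery loops observe the flag and abort
        hhom.deleteHintsForEndpoint(address); // async: tombstones that endpoint's hint partition
        hhom.pauseHintsDelivery(false);
        hhom.scheduleHintDelivery(address);   // precompacts, then pages through remaining hints
    }
}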
213,890
/* * Copyright (C) 2015 Archie L. Cobbs. All rights reserved. */ package io.permazen.cli.cmd; import io.permazen.Permazen; import io.permazen.cli.Session; import io.permazen.core.Schema; import io.permazen.core.SchemaMismatchException; import io.permazen.core.Transaction; import io.permazen.core.TransactionConfig; import io.permazen.schema.SchemaId; import io.permazen.schema.SchemaModel; abstract class AbstractSchemaCommand extends AbstractCommand { AbstractSchemaCommand(String spec) { super(spec); } // Get the schema having the specified ID protected static SchemaModel getSchemaModel(Session session, final SchemaId schemaId) { // Null means "the configured schema" if (schemaId == null) { SchemaModel schemaModel = session.getSchemaModel(); if (schemaModel == null) { final Permazen pdb = session.getPermazen(); if (pdb != null) schemaModel = pdb.getSchemaModel(); } if (schemaModel == null) { session.getOutput().println("No schema configured on this session"); return null; } return schemaModel; } // Read schema from the database return AbstractSchemaCommand.runWithoutSchema(session, (session1, tx) -> { final Schema schema = tx.getSchemaBundle().getSchema(schemaId); if (schema == null) { session1.getOutput().println(String.format( "Schema \"%s\" not found (known versions: %s)", schemaId, tx.getSchemaBundle().getSchemasBySchemaId().keySet())); return null; } return schema.getSchemaModel(); }); } // Perform action in a transaction that doesn't have any preconceived notion of what schema(s) should be in there protected static <R> R runWithoutSchema(Session session, SchemaAgnosticAction<R> action) { final Transaction tx; try { tx = TransactionConfig.builder().build().newTransaction(session.getDatabase()); } catch (SchemaMismatchException e) { // must be an uninitialized database session.getOutput().println("Database is uninitialized"); return null; } boolean success = false; try { final R result = action.runWithoutSchema(session, tx); tx.commit(); success = true; return result; } finally { if (!success) tx.rollback(); } } protected interface SchemaAgnosticAction<R> { R runWithoutSchema(Session session, Transaction tx); } }
permazen/permazen
permazen-cli/src/main/java/io/permazen/cli/cmd/AbstractSchemaCommand.java
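A sketch of how a concrete command might use runWithoutSchema (hedged: the subclass and its spec string are hypothetical, and AbstractCommand's other abstract members are omitted). The lambda receives a raw Transaction with no schema expectations; commit and rollback are handled by the wrapper, and a null result signals an uninitialized database.

class CountSchemasCommand extends AbstractSchemaCommand {
    CountSchemasCommand() {
        super("count-schemas"); // hypothetical command spec
    }

    int countRecordedSchemas(Session session) {
        // Count whatever schema versions are recorded in the database
        final Integer count = runWithoutSchema(session,
          (session1, tx) -> tx.getSchemaBundle().getSchemasBySchemaId().size());
        return count != null ? count : 0; // null means the database was uninitialized
    }
}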
213,892
/* * Copyright (C) 2017 atulgpt <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package subtitlemanager; import java.io.File; import javax.swing.JFileChooser; /** * * @author atulgpt <[email protected]> */ public class Settings extends javax.swing.JFrame { private JFileChooser mFileChooser; private final UserPreferences mUserPreferences = new UserPreferences(); /** * Creates new form Settings */ public Settings() { initComponents(); initComponents2(); } /** * This method is called from within the constructor to initialize the form. * WARNING: Do NOT modify this code. The content of this method is always * regenerated by the Form Editor. */ @SuppressWarnings("unchecked") // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents private void initComponents() { javax.swing.ButtonGroup langInfoSettings = new javax.swing.ButtonGroup(); lookAndFeelbuttonGroup = new javax.swing.ButtonGroup(); jLabel1 = new javax.swing.JLabel(); defaultLocTextField = new javax.swing.JTextField(); openFileMaagerButton = new javax.swing.JButton(); jLabel2 = new javax.swing.JLabel(); yesRadioButton = new javax.swing.JRadioButton(); noRadioButton = new javax.swing.JRadioButton(); jLabel3 = new javax.swing.JLabel(); sysLNFRadioButton = new javax.swing.JRadioButton(); javaLNFRadioButton = new javax.swing.JRadioButton(); jLabel4 = new javax.swing.JLabel(); setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE); setTitle("Settings"); jLabel1.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N jLabel1.setText("Default location:"); defaultLocTextField.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { defaultLocTextFieldActionPerformed(evt); } }); openFileMaagerButton.setText("Open"); openFileMaagerButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { openFileMaagerButtonActionPerformed(evt); } }); jLabel2.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N jLabel2.setText("Append language info:"); langInfoSettings.add(yesRadioButton); yesRadioButton.setText("Yes"); yesRadioButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { radioButtonActionPerformed(evt); } }); langInfoSettings.add(noRadioButton); noRadioButton.setText("No"); noRadioButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { radioButtonActionPerformed(evt); } }); jLabel3.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N jLabel3.setText("Look and Feel:"); lookAndFeelbuttonGroup.add(sysLNFRadioButton); sysLNFRadioButton.setText("System"); sysLNFRadioButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { radioButtonActionPerformed(evt); } }); 
lookAndFeelbuttonGroup.add(javaLNFRadioButton); javaLNFRadioButton.setText("Java"); javaLNFRadioButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { radioButtonActionPerformed(evt); } }); jLabel4.setFont(new java.awt.Font("Tahoma", 0, 10)); // NOI18N jLabel4.setText("(Restart Required)"); javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); getContentPane().setLayout(layout); layout.setHorizontalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addGap(28, 28, 28) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addComponent(jLabel1) .addGap(24, 24, 24) .addComponent(defaultLocTextField, javax.swing.GroupLayout.PREFERRED_SIZE, 176, javax.swing.GroupLayout.PREFERRED_SIZE) .addGap(18, 18, 18) .addComponent(openFileMaagerButton)) .addGroup(layout.createSequentialGroup() .addComponent(jLabel2) .addGap(24, 24, 24) .addComponent(yesRadioButton) .addGap(18, 18, 18) .addComponent(noRadioButton))) .addContainerGap(20, Short.MAX_VALUE)) .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() .addComponent(jLabel3) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) .addComponent(sysLNFRadioButton) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED) .addComponent(javaLNFRadioButton) .addGap(126, 126, 126)))) .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() .addContainerGap() .addComponent(jLabel4) .addGap(306, 306, 306)) ); layout.setVerticalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addGap(19, 19, 19) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(jLabel1) .addComponent(defaultLocTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) .addComponent(openFileMaagerButton)) .addGap(26, 26, 26) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(jLabel2) .addComponent(yesRadioButton)) .addComponent(noRadioButton)) .addGap(26, 26, 26) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING) .addComponent(jLabel3) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(sysLNFRadioButton) .addComponent(javaLNFRadioButton))) .addGap(0, 0, 0) .addComponent(jLabel4) .addContainerGap(117, Short.MAX_VALUE)) ); pack(); }// </editor-fold>//GEN-END:initComponents private void defaultLocTextFieldActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_defaultLocTextFieldActionPerformed // TODO add your handling code here: }//GEN-LAST:event_defaultLocTextFieldActionPerformed private void openFileMaagerButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_openFileMaagerButtonActionPerformed String dirName = UserPreferences.getDefaultFileLoc(); if (mFileChooser == null) { mFileChooser = new JFileChooser(dirName); 
mFileChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY); } mFileChooser.setCurrentDirectory(new File(dirName)); mFileChooser.showOpenDialog(this); File defaultDirectory = mFileChooser.getSelectedFile(); if (defaultDirectory != null) { defaultLocTextField.setText(defaultDirectory.getAbsolutePath()); UserPreferences.setDefaultFileLoc(defaultDirectory.getAbsolutePath()); } }//GEN-LAST:event_openFileMaagerButtonActionPerformed private void radioButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_radioButtonActionPerformed boolean langInfoChosen = false; int lnfChosen = UserPreferences.SYS_LOOK_AND_FEEL; if (evt.getSource() == yesRadioButton) { langInfoChosen = true; } else if (evt.getSource() == noRadioButton) { langInfoChosen = false; } UserPreferences.setEmbedLangInfo(langInfoChosen); if (evt.getSource() == sysLNFRadioButton) { lnfChosen = UserPreferences.SYS_LOOK_AND_FEEL; } else if (evt.getSource() == javaLNFRadioButton) { lnfChosen = UserPreferences.JAVA_LOOK_AND_FEEL; } UserPreferences.setLookAndFeel(lnfChosen); }//GEN-LAST:event_radioButtonActionPerformed /** * @param args the command line arguments */ public static void main(String args[]) { /* Set the Nimbus look and feel */ //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) "> /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel. * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html */ try { for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) { if ("Nimbus".equals(info.getName())) { javax.swing.UIManager.setLookAndFeel(info.getClassName()); break; } } } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | javax.swing.UnsupportedLookAndFeelException ex) { java.util.logging.Logger.getLogger(Settings.class.getName()).log(java.util.logging.Level.SEVERE, null, ex); } //</editor-fold> /* Create and display the form */ java.awt.EventQueue.invokeLater(() -> { new Settings().setVisible(true); }); } // Variables declaration - do not modify//GEN-BEGIN:variables private javax.swing.JTextField defaultLocTextField; private javax.swing.JLabel jLabel1; private javax.swing.JLabel jLabel2; private javax.swing.JLabel jLabel3; private javax.swing.JLabel jLabel4; private javax.swing.JRadioButton javaLNFRadioButton; private javax.swing.ButtonGroup lookAndFeelbuttonGroup; private javax.swing.JRadioButton noRadioButton; private javax.swing.JButton openFileMaagerButton; private javax.swing.JRadioButton sysLNFRadioButton; private javax.swing.JRadioButton yesRadioButton; // End of variables declaration//GEN-END:variables private void initComponents2() { this.setVisible(true); boolean isLangInfoChosen = UserPreferences.isEmbedLangInfo(); if (isLangInfoChosen) { yesRadioButton.setSelected(isLangInfoChosen); } else { noRadioButton.setSelected(!isLangInfoChosen); } defaultLocTextField.setText(UserPreferences.getDefaultFileLoc()); if (UserPreferences.getLookAndFeel() == UserPreferences.SYS_LOOK_AND_FEEL) { sysLNFRadioButton.setSelected(true); } else if (UserPreferences.getLookAndFeel() == UserPreferences.JAVA_LOOK_AND_FEEL) { javaLNFRadioButton.setSelected(true); } } }
atulgpt/SubtitleDownloader
src/subtitlemanager/Settings.java
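UserPreferences itself is not shown in this file; below is a plausible minimal backing based on java.util.prefs. Only the method names and the two look-and-feel constants are taken from the calls above — the key strings, default values, and constant values are assumptions.

package subtitlemanager;

import java.util.prefs.Preferences;

public class UserPreferences {
    public static final int SYS_LOOK_AND_FEEL = 0;  // assumed value
    public static final int JAVA_LOOK_AND_FEEL = 1; // assumed value

    private static final Preferences PREFS =
            Preferences.userNodeForPackage(UserPreferences.class);

    public static String getDefaultFileLoc() {
        return PREFS.get("defaultFileLoc", System.getProperty("user.home")); // assumed key and default
    }

    public static void setDefaultFileLoc(String loc) {
        PREFS.put("defaultFileLoc", loc);
    }

    public static boolean isEmbedLangInfo() {
        return PREFS.getBoolean("embedLangInfo", false);
    }

    public static void setEmbedLangInfo(boolean embed) {
        PREFS.putBoolean("embedLangInfo", embed);
    }

    public static int getLookAndFeel() {
        return PREFS.getInt("lookAndFeel", SYS_LOOK_AND_FEEL);
    }

    public static void setLookAndFeel(int lookAndFeel) {
        PREFS.putInt("lookAndFeel", lookAndFeel);
    }
}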
213,893
package com.makemoji.mojilib; import android.content.Context; import android.graphics.Color; import android.graphics.ColorFilter; import android.graphics.PorterDuff; import android.graphics.drawable.BitmapDrawable; import android.graphics.drawable.Drawable; import android.support.annotation.IntDef; import android.support.v4.content.ContextCompat; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.FrameLayout; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.TextView; import com.makemoji.mojilib.gif.GifImageView; import com.makemoji.mojilib.model.MojiModel; import com.makemoji.mojilib.model.SpaceMojiModel; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.util.ArrayList; import java.util.List; /** * Created by Scott Baar on 2/5/2016. */ public class MojiGridAdapter extends RecyclerView.Adapter<MojiGridAdapter.Holder> { List<MojiModel> mojiModels = new ArrayList<>(); boolean vertical; int spanSize; Drawable phraseBg; boolean enablePulse = true; boolean imaagesSizedToSpan = true; boolean useKbLifecycle; public static final int ITEM_NORMAL = 0; public static final int ITEM_GIF = 1; public static final int ITEM_PHRASE = 2; public static final int ITEM_VIDEO = 3; public static final int ITEM_HSPACE = 4; @Retention(RetentionPolicy.SOURCE) @IntDef({ITEM_NORMAL,ITEM_GIF,ITEM_PHRASE,ITEM_VIDEO}) public @interface ItemType {} IMojiSelected iMojiSelected; public void setEnablePulse(boolean enable){ enablePulse = enable; } //force gif image views to have the mmkb hash and NOT the open activity's. public void useKbLifecycle(){ useKbLifecycle = true; } public MojiGridAdapter (List<MojiModel> models, IMojiSelected iMojiSelected,boolean vertical, int spanSize) { mojiModels = models; this.iMojiSelected = iMojiSelected; this.spanSize = spanSize; this.vertical =vertical; phraseBg = ContextCompat.getDrawable(Moji.context,R.drawable.mm_phrase_bg); } public void setMojiModels(List<MojiModel> models){ mojiModels = new ArrayList<>(models); notifyDataSetChanged(); } public List<MojiModel> getMojiModels(){ return mojiModels; } public void setImagesSizedtoSpan(boolean enable){ imaagesSizedToSpan = enable; } @Override public int getItemCount() { return mojiModels.size(); } @Override public int getItemViewType(int position){ if (mojiModels.get(position) instanceof SpaceMojiModel) return ITEM_HSPACE; if (mojiModels.get(position).gif==1)return ITEM_GIF; if (mojiModels.get(position).isPhrase()) return ITEM_PHRASE; if (mojiModels.get(position).isVideo()) return ITEM_VIDEO; return 0; } @Override public Holder onCreateViewHolder(ViewGroup parent, int viewType) { View v; if (viewType==ITEM_NORMAL) v = LayoutInflater.from(parent.getContext()) .inflate(R.layout.mm_rv_moji_item, parent, false); else if (viewType==ITEM_GIF){ v = LayoutInflater.from(parent.getContext()) .inflate(vertical?R.layout.mm_gif_iv_vertical:R.layout.mm_gif_iv,parent,false); } else if (viewType==ITEM_PHRASE){ v = LayoutInflater.from(parent.getContext()) .inflate(R.layout.mm_rv_phrase_item, parent, false); v.setBackgroundDrawable(phraseBg); } else if (viewType == ITEM_HSPACE){ v = LayoutInflater.from(parent.getContext()) .inflate(R.layout.mm_item_hspace, parent, false); } else{ v = LayoutInflater.from(parent.getContext()) .inflate(vertical?R.layout.mm_video_moji_item_vertical:R.layout.mm_video_moji_item, parent, false); } //v.getLayoutParams().height = 
parent.getHeight()/2; return new Holder(v,parent); } @Override public void onBindViewHolder(final Holder holder, int position) { final MojiModel model = mojiModels.get(position); Mojilytics.trackView(model.id); if (getItemViewType(position)==ITEM_NORMAL) { holder.imageView.setPulseEnabled(enablePulse); holder.imageView.forceDimen(holder.dimen); holder.imageView.sizeImagesToSpanSize(imaagesSizedToSpan); holder.imageView.setModel(model); holder.imageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { iMojiSelected.mojiSelected(model, null); } }); } else if (getItemViewType(position)==ITEM_GIF){ holder.gifImageView.getFromUrl(model.image_url); holder.gifImageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { iMojiSelected.mojiSelected(model, null); } }); } else if (getItemViewType(position)==ITEM_VIDEO){ holder.imageView.setModel(model); holder.imageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { iMojiSelected.mojiSelected(model, null); } }); holder.title.setText(model.name); FrameLayout.LayoutParams lp = (FrameLayout.LayoutParams) holder.overlay.getLayoutParams(); lp.width = holder.dimen/2; lp.height = holder.dimen/2; holder.overlay.setLayoutParams(lp); } else if (getItemViewType(position)==ITEM_PHRASE) { LinearLayout ll = (LinearLayout) holder.itemView; ll.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { for (MojiModel emoji : model.emoji) iMojiSelected.mojiSelected(model, null); } }); while (holder.mojiImageViews.size()<model.emoji.size()) { MojiImageView v = (MojiImageView)LayoutInflater.from(holder.itemView.getContext()) .inflate(R.layout.mm_rv_moji_item, ll, false); v.setPulseEnabled(enablePulse); v.sizeImagesToSpanSize(false); //v.setPadding(0,(int)(2*Moji.density),(int)(-5*Moji.density),(int)(2*Moji.density)); ll.addView(v); holder.mojiImageViews.add((MojiImageView)v); } for (int i = 0; i < ll.getChildCount(); i++) { MojiImageView mojiImageView = (MojiImageView) ll.getChildAt(i); MojiModel sequence = model.emoji.size()>i?model.emoji.get(i):null; if (sequence!=null) { mojiImageView.forceDimen(holder.dimen); mojiImageView.setModel(sequence); mojiImageView.setVisibility(View.VISIBLE); } else mojiImageView.setVisibility(View.GONE); } } } class Holder extends RecyclerView.ViewHolder { MojiImageView imageView; int dimen; List<MojiImageView> mojiImageViews = new ArrayList<>(); GifImageView gifImageView; TextView title; ImageView overlay; ViewGroup parent; public Holder(View v, ViewGroup parent) { super(v); this.parent = parent; if (v instanceof MojiImageView)imageView = (MojiImageView) v; else if (v instanceof GifImageView) { gifImageView = (GifImageView) v; if (useKbLifecycle) gifImageView.useKbLifecycle = true; } else{ imageView = (MojiImageView)v.findViewById(R.id._mm_moji_iv); title = (TextView) v.findViewById(R.id.mm_item_title); overlay = (ImageView) v.findViewById(R.id._mm_play_overlay); } dimen = spanSize; } } }
makemoji/MakemojiSDK-Android
makemoji-sdk-android/src/main/java/com/makemoji/mojilib/MojiGridAdapter.java
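A wiring sketch (hedged, not from the SDK) for attaching the adapter inside an Activity. Note that the constructor's spanSize parameter is consumed as a per-item pixel dimension (the Holder assigns dimen = spanSize and forces it onto each MojiImageView), not a column count; the layout id, the span count of 6, and the models/callback variables are assumptions.

// Inside an Activity; R.id.moji_recycler, models, and mojiSelectedCallback are hypothetical.
RecyclerView recycler = (RecyclerView) findViewById(R.id.moji_recycler);
recycler.setLayoutManager(new GridLayoutManager(this, 6)); // 6 columns, vertical
int spanPx = getResources().getDisplayMetrics().widthPixels / 6; // per-item size in pixels
MojiGridAdapter adapter = new MojiGridAdapter(models, mojiSelectedCallback, true, spanPx);
adapter.setEnablePulse(false); // optionally disable the pulse animation
recycler.setAdapter(adapter);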
213,894
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package view; /** * * @author Wendler */ public class ConfirmaAgenda extends javax.swing.JFrame { /** * Creates new form ConfirmaAgenda */ public ConfirmaAgenda() { initComponents(); } /** * This method is called from within the constructor to initialize the form. * WARNING: Do NOT modify this code. The content of this method is always * regenerated by the Form Editor. */ @SuppressWarnings("unchecked") // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents private void initComponents() { setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE); javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); getContentPane().setLayout(layout); layout.setHorizontalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGap(0, 400, Short.MAX_VALUE) ); layout.setVerticalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGap(0, 300, Short.MAX_VALUE) ); pack(); }// </editor-fold>//GEN-END:initComponents /** * @param args the command line arguments */ public static void main(String args[]) { /* Set the Nimbus look and feel */ //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) "> /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel. * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html */ try { for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) { if ("Nimbus".equals(info.getName())) { javax.swing.UIManager.setLookAndFeel(info.getClassName()); break; } } } catch (ClassNotFoundException ex) { java.util.logging.Logger.getLogger(ConfirmaAgenda.class.getName()).log(java.util.logging.Level.SEVERE, null, ex); } catch (InstantiationException ex) { java.util.logging.Logger.getLogger(ConfirmaAgenda.class.getName()).log(java.util.logging.Level.SEVERE, null, ex); } catch (IllegalAccessException ex) { java.util.logging.Logger.getLogger(ConfirmaAgenda.class.getName()).log(java.util.logging.Level.SEVERE, null, ex); } catch (javax.swing.UnsupportedLookAndFeelException ex) { java.util.logging.Logger.getLogger(ConfirmaAgenda.class.getName()).log(java.util.logging.Level.SEVERE, null, ex); } //</editor-fold> /* Create and display the form */ java.awt.EventQueue.invokeLater(new Runnable() { public void run() { new ConfirmaAgenda().setVisible(true); } }); } // Variables declaration - do not modify//GEN-BEGIN:variables // End of variables declaration//GEN-END:variables }
gdaneluti/Fisioterapia
src/view/ConfirmaAgenda.java
213,895
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.api.core.session; import com.datastax.oss.driver.api.core.AsyncAutoCloseable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.CqlSessionBuilder; import com.datastax.oss.driver.api.core.MavenCoordinates; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.api.core.metrics.Metrics; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.internal.core.DefaultMavenCoordinates; import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Optional; import java.util.concurrent.CompletionStage; /** * A nexus to send requests to a Cassandra cluster. * * <p>This is a high-level abstraction capable of handling arbitrary request and result types. The * driver's built-in {@link CqlSession} is a more convenient subtype for most client applications. * * <p>The driver's request execution logic is pluggable (see {@code RequestProcessor} in the * internal API) to allow custom extensions. Hence the generic {@link #execute(Request, * GenericType)} method in this interface, that makes no assumptions about the request or result * type. * * @see CqlSession#builder() */ public interface Session extends AsyncAutoCloseable { /** * The Maven coordinates of the core driver artifact. * * <p>This is intended for products that wrap or extend the driver, as a way to check * compatibility if end-users override the driver version in their application. */ @NonNull MavenCoordinates OSS_DRIVER_COORDINATES = DefaultMavenCoordinates.buildFromResourceAndPrint( Session.class.getResource("/com/datastax/oss/driver/Driver.properties")); /** * The unique name identifying this session instance. This is used as a prefix for log messages * and metrics. * * <p>This gets populated from the option {@code basic.session-name} in the configuration. If that * option is absent, the driver will generate an identifier composed of the letter 's' followed by * an incrementing counter. * * <p>Note that this is purely a client-side identifier; in particular, it has no relation with * {@code system.local.cluster_name} on the server. 
*/ @NonNull String getName(); /** * Returns a snapshot of the Cassandra cluster's topology and schema metadata. * * <p>In order to provide atomic updates, this method returns an immutable object: the node list, * token map, and schema contained in a given instance will always be consistent with each other * (but note that {@link Node} itself is not immutable: some of its properties will be updated * dynamically, in particular {@link Node#getState()}). * * <p>As a consequence of the above, you should call this method each time you need a fresh view * of the metadata. <b>Do not</b> call it once and store the result, because it is a frozen * snapshot that will become stale over time. * * <p>If a metadata refresh triggers events (such as node added/removed, or schema events), then * the new version of the metadata is guaranteed to be visible by the time you receive these * events. * * <p>The returned object is never {@code null}, but may be empty if metadata has been disabled in * the configuration. */ @NonNull Metadata getMetadata(); /** Whether schema metadata is currently enabled. */ boolean isSchemaMetadataEnabled(); /** * Enable or disable schema metadata programmatically. * * <p>Use this method to override the value defined in the driver's configuration; one typical use * case is to temporarily disable schema metadata while the client issues a sequence of DDL * statements. * * <p>If calling this method re-enables the metadata (that is, {@link #isSchemaMetadataEnabled()} * was false before, and becomes true as a result of the call), a refresh is also triggered. * * @param newValue a boolean value to enable or disable schema metadata programmatically, or * {@code null} to use the driver's configuration. * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED * @return if this call triggered a refresh, a future that will complete when that refresh is * complete. Otherwise, a completed future with the current metadata. */ @NonNull CompletionStage<Metadata> setSchemaMetadataEnabled(@Nullable Boolean newValue); /** * Force an immediate refresh of the schema metadata, even if it is currently disabled (either in * the configuration or via {@link #setSchemaMetadataEnabled(Boolean)}). * * <p>The new metadata is returned in the resulting future (and will also be reflected by {@link * #getMetadata()} when that future completes). */ @NonNull CompletionStage<Metadata> refreshSchemaAsync(); /** * Convenience method to call {@link #refreshSchemaAsync()} and block for the result. * * <p>This must not be called on a driver thread. */ @NonNull default Metadata refreshSchema() { BlockingOperation.checkNotDriverThread(); return CompletableFutures.getUninterruptibly(refreshSchemaAsync()); } /** * Checks if all nodes in the cluster agree on a common schema version. * * <p>Due to the distributed nature of Cassandra, schema changes made on one node might not be * immediately visible to others. Under certain circumstances, the driver waits until all nodes * agree on a common schema version (namely: before a schema refresh, and before completing a * successful schema-altering query). To do so, it queries system tables to find out the schema * version of all nodes that are currently {@link NodeState#UP UP}. If all the versions match, the * check succeeds, otherwise it is retried periodically, until a given timeout (specified in the * configuration). 
* * <p>A schema agreement failure is not fatal, but it might produce unexpected results (for * example, getting an "unconfigured table" error for a table that you created right before, just * because the two queries went to different coordinators). * * <p>Note that schema agreement never succeeds in a mixed-version cluster (it would be * challenging because the way the schema version is computed varies across server versions); the * assumption is that schema updates are unlikely to happen during a rolling upgrade anyway. * * @return a future that completes with {@code true} if the nodes agree, or {@code false} if the * timeout fired. * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_INTERVAL * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_TIMEOUT */ @NonNull CompletionStage<Boolean> checkSchemaAgreementAsync(); /** * Convenience method to call {@link #checkSchemaAgreementAsync()} and block for the result. * * <p>This must not be called on a driver thread. */ default boolean checkSchemaAgreement() { BlockingOperation.checkNotDriverThread(); return CompletableFutures.getUninterruptibly(checkSchemaAgreementAsync()); } /** Returns a context that provides access to all the policies used by this driver instance. */ @NonNull DriverContext getContext(); /** * The keyspace that this session is currently connected to, or {@link Optional#empty()} if this * session is not connected to any keyspace. * * <p>There are two ways that this can be set: before initializing the session (either with the * {@code session-keyspace} option in the configuration, or with {@link * CqlSessionBuilder#withKeyspace(CqlIdentifier)}); or at runtime, if the client issues a request * that changes the keyspace (such as a CQL {@code USE} query). Note that this second method is * inherently unsafe, since other requests expecting the old keyspace might be executing * concurrently. Therefore it is highly discouraged, aside from trivial cases (such as a * cqlsh-style program where requests are never concurrent). */ @NonNull Optional<CqlIdentifier> getKeyspace(); /** * Returns a gateway to the driver's DropWizard metrics, or {@link Optional#empty()} if all * metrics are disabled, or if the driver has been configured to use MicroProfile or Micrometer * instead of DropWizard (see {@code advanced.metrics.factory.class} in the configuration). * * <p>{@link Metrics} was originally intended to allow programmatic access to the metrics, but it * has a hard dependency to the DropWizard API, which makes it unsuitable for alternative metric * frameworks. A workaround is to inject your own metric registry with {@link * SessionBuilder#withMetricRegistry(Object)} when you build the session. You can then use the * framework's proprietary APIs to retrieve the metrics from the registry. */ @NonNull Optional<Metrics> getMetrics(); /** * Executes an arbitrary request. * * @param resultType the type of the result, which determines the internal request processor * (built-in or custom) that will be used to handle the request. * @see Session */ @Nullable // because ResultT could be Void <RequestT extends Request, ResultT> ResultT execute( @NonNull RequestT request, @NonNull GenericType<ResultT> resultType); }
apache/cassandra-java-driver
core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java
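The Session javadoc above makes two practical recommendations: never cache the Metadata snapshot, and temporarily disable schema metadata while issuing a run of DDL statements. Below is a minimal sketch of both against the public CqlSession API; the keyspace and table names are invented for illustration.

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Metadata;

class SchemaMetadataToggleSketch {
  static void applyDdl(CqlSession session) {
    // Suspend schema refreshes for the duration of the DDL burst.
    session.setSchemaMetadataEnabled(false);
    try {
      session.execute("CREATE TABLE IF NOT EXISTS demo_ks.t1 (k int PRIMARY KEY)");
      session.execute("CREATE TABLE IF NOT EXISTS demo_ks.t2 (k int PRIMARY KEY)");
    } finally {
      // Re-enabling triggers a single refresh; passing null instead would revert to the config value.
      session.setSchemaMetadataEnabled(true);
    }
    // Always re-fetch: each call returns a fresh immutable snapshot, never a live view.
    Metadata metadata = session.getMetadata();
    metadata.getKeyspace("demo_ks").ifPresent(ks -> System.out.println(ks.getName()));
  }
}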
213,896
package gcom.faturamento; import gcom.atendimentopublico.ligacaoagua.LigacaoAguaSituacao; import gcom.cadastro.cliente.EsferaPoder; import gcom.cadastro.imovel.Categoria; import gcom.cadastro.imovel.CategoriaTipo; import gcom.cadastro.imovel.ImovelPerfil; import gcom.cadastro.imovel.Subcategoria; import gcom.cadastro.localidade.GerenciaRegional; import gcom.cadastro.localidade.Localidade; import gcom.cadastro.localidade.SetorComercial; import gcom.cadastro.localidade.UnidadeNegocio; import gcom.faturamento.consumotarifa.ConsumoTarifa; import java.io.Serializable; import java.math.BigDecimal; import java.util.Date; /** @author Hibernate CodeGenerator */ public class HistogramaAguaEconomiaSemQuadra implements Serializable { /** * */ private static final long serialVersionUID = 1L; /** identifier field */ private Integer id; /** persistent field */ private int anoMesReferencia; /** persistent field */ private int codigoSetorComercial; /** persistent field */ private int indicadorConsumoReal; /** persistent field */ private int indicadorHidrometro; /** persistent field */ private int indicadorPoco; /** persistent field */ private int indicadorVolFixadoAgua; /** persistent field */ private int quantidadeConsumo; /** persistent field */ private int quantidadeEconomia; /** persistent field */ private BigDecimal valorFaturadoEconomia; /** persistent field */ private int volumeFaturadoEconomia; /** persistent field */ private int quantidadeLigacao; /** persistent field */ private Date ultimaAlteracao; /** persistent field */ Categoria categoria; /** persistent field */ LigacaoAguaSituacao ligacaoAguaSituacao; /** persistent field */ CategoriaTipo categoriaTipo; /** persistent field */ Subcategoria subcategoria; /** persistent field */ Localidade localidadeElo; /** persistent field */ Localidade localidade; /** persistent field */ EsferaPoder esferaPoder; /** persistent field */ UnidadeNegocio unidadeNegocio; /** persistent field */ SetorComercial setorComercial; /** persistent field */ ConsumoTarifa consumoTarifa; /** persistent field */ GerenciaRegional gerenciaRegional; /** persistent field */ private BigDecimal valorSimuladoEconomia; private ImovelPerfil imovelPerfil; public int getAnoMesReferencia() { return anoMesReferencia; } public void setAnoMesReferencia(int anoMesReferencia) { this.anoMesReferencia = anoMesReferencia; } public Categoria getCategoria() { return categoria; } public void setCategoria(Categoria categoria) { this.categoria = categoria; } public CategoriaTipo getCategoriaTipo() { return categoriaTipo; } public void setCategoriaTipo(CategoriaTipo categoriaTipo) { this.categoriaTipo = categoriaTipo; } public int getCodigoSetorComercial() { return codigoSetorComercial; } public void setCodigoSetorComercial(int codigoSetorComercial) { this.codigoSetorComercial = codigoSetorComercial; } public ConsumoTarifa getConsumoTarifa() { return consumoTarifa; } public void setConsumoTarifa(ConsumoTarifa consumoTarifa) { this.consumoTarifa = consumoTarifa; } public EsferaPoder getEsferaPoder() { return esferaPoder; } public void setEsferaPoder(EsferaPoder esferaPoder) { this.esferaPoder = esferaPoder; } public GerenciaRegional getGerenciaRegional() { return gerenciaRegional; } public void setGerenciaRegional(GerenciaRegional gerenciaRegional) { this.gerenciaRegional = gerenciaRegional; } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public int getIndicadorConsumoReal() { return indicadorConsumoReal; } public void setIndicadorConsumoReal(int 
indicadorConsumoReal) { this.indicadorConsumoReal = indicadorConsumoReal; } public int getIndicadorHidrometro() { return indicadorHidrometro; } public void setIndicadorHidrometro(int indicadorHidrometro) { this.indicadorHidrometro = indicadorHidrometro; } public int getIndicadorPoco() { return indicadorPoco; } public void setIndicadorPoco(int indicadorPoco) { this.indicadorPoco = indicadorPoco; } public int getIndicadorVolFixadoAgua() { return indicadorVolFixadoAgua; } public void setIndicadorVolFixadoAgua(int indicadorVolFixadoAgua) { this.indicadorVolFixadoAgua = indicadorVolFixadoAgua; } public LigacaoAguaSituacao getLigacaoAguaSituacao() { return ligacaoAguaSituacao; } public void setLigacaoAguaSituacao(LigacaoAguaSituacao ligacaoAguaSituacao) { this.ligacaoAguaSituacao = ligacaoAguaSituacao; } public Localidade getLocalidadeElo() { return localidadeElo; } public void setLocalidadeElo(Localidade localidadeElo) { this.localidadeElo = localidadeElo; } public Localidade getLocalidade() { return localidade; } public void setLocalidade(Localidade localidade) { this.localidade = localidade; } public int getQuantidadeConsumo() { return quantidadeConsumo; } public void setQuantidadeConsumo(int quantidadeConsumo) { this.quantidadeConsumo = quantidadeConsumo; } public int getQuantidadeEconomia() { return quantidadeEconomia; } public void setQuantidadeEconomia(int quantidadeEconomia) { this.quantidadeEconomia = quantidadeEconomia; } public int getQuantidadeLigacao() { return quantidadeLigacao; } public void setQuantidadeLigacao(int quantidadeLigacao) { this.quantidadeLigacao = quantidadeLigacao; } public SetorComercial getSetorComercial() { return setorComercial; } public void setSetorComercial(SetorComercial setorComercial) { this.setorComercial = setorComercial; } public Subcategoria getSubcategoria() { return subcategoria; } public void setSubcategoria(Subcategoria subcategoria) { this.subcategoria = subcategoria; } public Date getUltimaAlteracao() { return ultimaAlteracao; } public void setUltimaAlteracao(Date ultimaAlteracao) { this.ultimaAlteracao = ultimaAlteracao; } public UnidadeNegocio getUnidadeNegocio() { return unidadeNegocio; } public void setUnidadeNegocio(UnidadeNegocio unidadeNegocio) { this.unidadeNegocio = unidadeNegocio; } public BigDecimal getValorFaturadoEconomia() { return valorFaturadoEconomia; } public void setValorFaturadoEconomia(BigDecimal valorFaturadoEconomia) { this.valorFaturadoEconomia = valorFaturadoEconomia; } public int getVolumeFaturadoEconomia() { return volumeFaturadoEconomia; } public void setVolumeFaturadoEconomia(int volumeFaturadoEconomia) { this.volumeFaturadoEconomia = volumeFaturadoEconomia; } public HistogramaAguaEconomiaSemQuadra(Integer id, int anoMesReferencia, int codigoSetorComercial, int indicadorConsumoReal, int indicadorHidrometro, int indicadorPoco, int indicadorVolFixadoAgua, int quantidadeConsumo, int quantidadeEconomia, BigDecimal valorFaturadoEconomia, int volumeFaturadoEconomia, int quantidadeLigacao, Date ultimaAlteracao, Categoria categoria, LigacaoAguaSituacao ligacaoAguaSituacao, CategoriaTipo categoriaTipo, Subcategoria subcategoria, Localidade localidadeElo, Localidade localidade, EsferaPoder esferaPoder, UnidadeNegocio unidadeNegocio, SetorComercial setorComercial, ConsumoTarifa consumoTarifa, GerenciaRegional gerenciaRegional) { super(); this.id = id; this.anoMesReferencia = anoMesReferencia; this.codigoSetorComercial = codigoSetorComercial; this.indicadorConsumoReal = indicadorConsumoReal; this.indicadorHidrometro = 
indicadorHidrometro; this.indicadorPoco = indicadorPoco; this.indicadorVolFixadoAgua = indicadorVolFixadoAgua; this.quantidadeConsumo = quantidadeConsumo; this.quantidadeEconomia = quantidadeEconomia; this.valorFaturadoEconomia = valorFaturadoEconomia; this.volumeFaturadoEconomia = volumeFaturadoEconomia; this.quantidadeLigacao = quantidadeLigacao; this.ultimaAlteracao = ultimaAlteracao; this.categoria = categoria; this.ligacaoAguaSituacao = ligacaoAguaSituacao; this.categoriaTipo = categoriaTipo; this.subcategoria = subcategoria; this.localidadeElo = localidadeElo; this.localidade = localidade; this.esferaPoder = esferaPoder; this.unidadeNegocio = unidadeNegocio; this.setorComercial = setorComercial; this.consumoTarifa = consumoTarifa; this.gerenciaRegional = gerenciaRegional; } public HistogramaAguaEconomiaSemQuadra() { super(); } public BigDecimal getValorSimuladoEconomia() { return valorSimuladoEconomia; } public void setValorSimuladoEconomia(BigDecimal valorSimuladoEconomia) { this.valorSimuladoEconomia = valorSimuladoEconomia; } public ImovelPerfil getImovelPerfil() { return imovelPerfil; } public void setImovelPerfil(ImovelPerfil imovelPerfil) { this.imovelPerfil = imovelPerfil; } }
consensotec/gsan
Gsan/src/gcom/faturamento/HistogramaAguaEconomiaSemQuadra.java
213,897
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package itemsjuego; import Combate.Agua; import Combate.Elemento; import java.io.Externalizable; import org.newdawn.slick.Image; import org.newdawn.slick.SlickException; /** * * @author victo */ public class GemaAgua1 extends Gema1 implements Externalizable{ public GemaAgua1() throws SlickException { super(new Image("resources/Gemas/Agua1.png"), "Gema Agua NL1", new Agua()); } }
TecnologiaVideojuegos/proyecto-videojuego-callister-developers
Java/itemsjuego/GemaAgua1.java
213,898
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.metadata; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.util.NanoTime; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import java.net.InetAddress; import java.net.UnknownHostException; import java.time.Duration; import java.util.Iterator; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @ThreadSafe class SchemaAgreementChecker { private static final Logger LOG = LoggerFactory.getLogger(SchemaAgreementChecker.class); private static final int INFINITE_PAGE_SIZE = -1; @VisibleForTesting static final InetAddress BIND_ALL_ADDRESS; static { try { BIND_ALL_ADDRESS = InetAddress.getByAddress(new byte[4]); } catch (UnknownHostException e) { throw new RuntimeException(e); } } private final DriverChannel channel; private final InternalDriverContext context; private final String logPrefix; private final Duration queryTimeout; private final long intervalNs; private final long timeoutNs; private final boolean warnOnFailure; private final long start; private final CompletableFuture<Boolean> result = new CompletableFuture<>(); SchemaAgreementChecker(DriverChannel channel, InternalDriverContext context, String logPrefix) { this.channel = channel; this.context = context; this.logPrefix = logPrefix; DriverExecutionProfile config = context.getConfig().getDefaultProfile(); this.queryTimeout = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); this.intervalNs = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL).toNanos(); this.timeoutNs = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT).toNanos(); this.warnOnFailure = config.getBoolean(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN); this.start = System.nanoTime(); } public 
CompletionStage<Boolean> run() { LOG.debug("[{}] Checking schema agreement", logPrefix); if (timeoutNs == 0) { result.complete(false); } else { sendQueries(); } return result; } private void sendQueries() { long elapsedNs = System.nanoTime() - start; if (elapsedNs > timeoutNs) { String message = String.format( "[%s] Schema agreement not reached after %s", logPrefix, NanoTime.format(elapsedNs)); if (warnOnFailure) { LOG.warn(message); } else { LOG.debug(message); } result.complete(false); } else { CompletionStage<AdminResult> localQuery = query("SELECT schema_version FROM system.local WHERE key='local'"); CompletionStage<AdminResult> peersQuery = query("SELECT * FROM system.peers"); localQuery .thenCombine(peersQuery, this::extractSchemaVersions) .whenComplete(this::completeOrReschedule); } } private Set<UUID> extractSchemaVersions(AdminResult controlNodeResult, AdminResult peersResult) { // Gather the versions of all the nodes that are UP ImmutableSet.Builder<UUID> schemaVersions = ImmutableSet.builder(); // Control node (implicitly UP, we've just queried it) Iterator<AdminRow> iterator = controlNodeResult.iterator(); if (iterator.hasNext()) { AdminRow localRow = iterator.next(); UUID schemaVersion = localRow.getUuid("schema_version"); if (schemaVersion == null) { LOG.warn( "[{}] Missing schema_version for control node {}, " + "excluding from schema agreement check", logPrefix, channel.getEndPoint()); } else { schemaVersions.add(schemaVersion); } } else { LOG.warn( "[{}] Missing system.local row for control node {}, " + "excluding from schema agreement check", logPrefix, channel.getEndPoint()); } Map<UUID, Node> nodes = context.getMetadataManager().getMetadata().getNodes(); for (AdminRow peerRow : peersResult) { if (isPeerValid(peerRow, nodes)) { UUID schemaVersion = Objects.requireNonNull(peerRow.getUuid("schema_version")); schemaVersions.add(schemaVersion); } } return schemaVersions.build(); } private void completeOrReschedule(Set<UUID> uuids, Throwable error) { if (error != null) { LOG.debug( "[{}] Error while checking schema agreement, completing now (false)", logPrefix, error); result.complete(false); } else if (uuids.size() == 1) { LOG.debug( "[{}] Schema agreement reached ({}), completing", logPrefix, uuids.iterator().next()); result.complete(true); } else { LOG.debug( "[{}] Schema agreement not reached yet ({}), rescheduling in {}", logPrefix, uuids, NanoTime.format(intervalNs)); channel .eventLoop() .schedule(this::sendQueries, intervalNs, TimeUnit.NANOSECONDS) .addListener( f -> { if (!f.isSuccess()) { LOG.debug( "[{}] Error while rescheduling schema agreement, completing now (false)", logPrefix, f.cause()); result.complete(false); } }); } } @VisibleForTesting protected CompletionStage<AdminResult> query(String queryString) { return AdminRequestHandler.query( channel, queryString, queryTimeout, INFINITE_PAGE_SIZE, logPrefix) .start(); } protected boolean isPeerValid(AdminRow peerRow, Map<UUID, Node> nodes) { if (PeerRowValidator.isValid(peerRow)) { UUID hostId = peerRow.getUuid("host_id"); Node node = nodes.get(hostId); if (node == null) { LOG.warn("[{}] Unknown peer {}, excluding from schema agreement check", logPrefix, hostId); return false; } else if (node.getState() != NodeState.UP) { LOG.debug("[{}] Peer {} is down, excluding from schema agreement check", logPrefix, hostId); return false; } return true; } else { LOG.warn( "[{}] Found invalid system.peers row for peer: {}, excluding from schema agreement check.", logPrefix, peerRow.getInetAddress("peer")); return false; } } }
apache/cassandra-java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java
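SchemaAgreementChecker above boils down to a reusable control flow: run a check, and if it does not pass, reschedule it at a fixed interval until a deadline expires, surfacing the outcome through a future that is completed exactly once. Here is a self-contained sketch of that pattern with invented names, independent of the driver internals.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

class AgreementPoller {
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  private final CompletableFuture<Boolean> result = new CompletableFuture<>();
  private final Supplier<Boolean> check;
  private final long deadlineNanos;
  private final long intervalNanos;

  AgreementPoller(Supplier<Boolean> check, long timeoutNanos, long intervalNanos) {
    this.check = check;
    this.deadlineNanos = System.nanoTime() + timeoutNanos;
    this.intervalNanos = intervalNanos;
  }

  CompletableFuture<Boolean> run() {
    result.whenComplete((ok, error) -> scheduler.shutdown()); // clean up once settled
    attempt();
    return result;
  }

  private void attempt() {
    if (System.nanoTime() > deadlineNanos) {
      result.complete(false); // timeout: report disagreement rather than an error
    } else if (check.get()) {
      result.complete(true);  // agreement reached
    } else {
      scheduler.schedule(this::attempt, intervalNanos, TimeUnit.NANOSECONDS); // retry later
    }
  }
}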
213,899
package polimorfismo; public class Lavadora implements Electrodomestico { private String marca; private int consumo; private boolean isTambor; private boolean isMotor; private boolean isBombaAgua; private boolean isFiltro; private boolean isTomaAgua; public Lavadora() { super(); this.marca = null; this.consumo = 0; this.isTambor = true; this.isMotor = true; this.isBombaAgua = true; this.isFiltro = true; this.isTomaAgua = true; } public Lavadora(String marca, int consumo) { super(); this.marca = marca; this.consumo = consumo; this.isTambor = true; this.isMotor = true; this.isBombaAgua = true; this.isFiltro = true; this.isTomaAgua = true; } @Override public int getConsumo() { return this.consumo; } @Override public void setConsumo(int consumo) { this.consumo = consumo; } @Override public String getMarca() { return this.marca; } @Override public void setMarca(String marca) { this.marca = marca; } @Override public boolean isAveriado() { return !this.isBombaAgua || !this.isFiltro || !this.isMotor || !this.isTambor || !this.isTomaAgua; } public boolean isTambor() { return isTambor; } public void setTambor(boolean isTambor) { this.isTambor = isTambor; } public boolean isMotor() { return isMotor; } public void setMotor(boolean isMotor) { this.isMotor = isMotor; } public boolean isBombaAgua() { return isBombaAgua; } public void setBombaAgua(boolean isBombaAgua) { this.isBombaAgua = isBombaAgua; } public boolean isFiltro() { return isFiltro; } public void setFiltro(boolean isFiltro) { this.isFiltro = isFiltro; } public boolean isTomaAgua() { return isTomaAgua; } public void setTomaAgua(boolean isTomaAgua) { this.isTomaAgua = isTomaAgua; } }
RazorXXI/JavaExamples
src/polimorfismo/Lavadora.java
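Lavadora is written as a polymorphism exercise: all interaction can go through the Electrodomestico interface, and isAveriado() derives the fault state from the five component flags. A hypothetical usage sketch (brand and consumption values are made up):

class LavadoraDemo {
  public static void main(String[] args) {
    Lavadora lavadora = new Lavadora("MarcaX", 150);
    lavadora.setBombaAgua(false);  // simulate a broken water pump
    Electrodomestico e = lavadora; // interact through the interface
    System.out.println(e.getMarca() + " averiado? " + e.isAveriado()); // prints: MarcaX averiado? true
  }
}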
213,900
/* * Copyright (c) 2016-2019 VMware, Inc. All Rights Reserved. * * This product is licensed to you under the Apache License, Version 2.0 (the "License"). * You may not use this product except in compliance with the License. * * This product may include a number of subcomponents with separate copyright notices * and license terms. Your use of these subcomponents is subject to the terms and * conditions of the subcomponent's license, as noted in the LICENSE file. */ package com.vmware.mangle.services.cassandra; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Optional; import java.util.Set; import com.datastax.driver.core.AuthProvider; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.Host; import com.datastax.driver.core.LatencyTracker; import com.datastax.driver.core.NettyOptions; import com.datastax.driver.core.PoolingOptions; import com.datastax.driver.core.ProtocolOptions.Compression; import com.datastax.driver.core.ProtocolVersion; import com.datastax.driver.core.QueryOptions; import com.datastax.driver.core.SSLOptions; import com.datastax.driver.core.Session; import com.datastax.driver.core.SocketOptions; import com.datastax.driver.core.TimestampGenerator; import com.datastax.driver.core.policies.AddressTranslator; import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.ReconnectionPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; import org.springframework.beans.factory.BeanNameAware; import org.springframework.beans.factory.DisposableBean; import org.springframework.beans.factory.FactoryBean; import org.springframework.beans.factory.InitializingBean; import org.springframework.dao.DataAccessException; import org.springframework.dao.support.PersistenceExceptionTranslator; import org.springframework.data.cassandra.config.ClusterBuilderConfigurer; import org.springframework.data.cassandra.config.CompressionType; import org.springframework.data.cassandra.config.KeyspaceActions; import org.springframework.data.cassandra.core.cql.CassandraExceptionTranslator; import org.springframework.data.cassandra.core.cql.CqlTemplate; import org.springframework.data.cassandra.core.cql.generator.AlterKeyspaceCqlGenerator; import org.springframework.data.cassandra.core.cql.generator.CreateKeyspaceCqlGenerator; import org.springframework.data.cassandra.core.cql.generator.DropKeyspaceCqlGenerator; import org.springframework.data.cassandra.core.cql.keyspace.AlterKeyspaceSpecification; import org.springframework.data.cassandra.core.cql.keyspace.CreateKeyspaceSpecification; import org.springframework.data.cassandra.core.cql.keyspace.DropKeyspaceSpecification; import org.springframework.data.cassandra.core.cql.keyspace.KeyspaceActionSpecification; import org.springframework.lang.Nullable; import org.springframework.stereotype.Component; import org.springframework.util.Assert; import org.springframework.util.ClassUtils; import org.springframework.util.CollectionUtils; import org.springframework.util.StringUtils; /** * {@link org.springframework.beans.factory.FactoryBean} for configuring a Cassandra * {@link Cluster}. * <p> * This factory bean allows configuration and creation of a {@link Cluster} bean. Most options default * to {@literal null}.
Unsupported options are configured via {@link ClusterBuilderConfigurer}. * <p/> * The factory bean initializes keyspaces, if configured, according to its lifecycle. Keyspaces can * be created after {@link #afterPropertiesSet() initialization} and dropped when this factory is * {@link #destroy() destroyed}. Keyspace actions can be configured via * {@link #setKeyspaceActions(List) XML} and {@link #setKeyspaceCreations(List) programmatically}. * Additional {@link #getStartupScripts()} and {@link #getShutdownScripts()} are executed after * running keyspace actions. * <p/> * <strong>XML configuration</strong> * * <pre class="code"> <cql:cluster contact-points="…" port="${build.cassandra.native_transport_port}" compression="SNAPPY" netty-options-ref="nettyOptions"> <cql:local-pooling-options min-simultaneous-requests="26" max-simultaneous-requests="101" core-connections="3" max-connections="9"/> <cql:remote-pooling-options min-simultaneous-requests="25" max-simultaneous-requests="100" core-connections="1" max-connections="2"/> <cql:socket-options connect-timeout-millis="5000" keep-alive="true" reuse-address="true" so-linger="60" tcp-no-delay="true" receive-buffer-size="65536" send-buffer-size="65536"/> <cql:keyspace name="${cassandra.keyspace}" action="CREATE_DROP" durable-writes="true"/> </cql:cluster> * </pre> * * @author kumargautam * @see org.springframework.beans.factory.InitializingBean * @see org.springframework.beans.factory.DisposableBean * @see org.springframework.beans.factory.FactoryBean * @see com.datastax.driver.core.Cluster */ @Component public class CassandraClusterFactoryBean implements FactoryBean<Cluster>, InitializingBean, DisposableBean, BeanNameAware, PersistenceExceptionTranslator { public static final boolean DEFAULT_JMX_REPORTING_ENABLED = true; public static final boolean DEFAULT_METRICS_ENABLED = true; public static final boolean DEFAULT_SSL_ENABLED = false; public static final int DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS = 10; public static final int DEFAULT_PORT = 9042; public static final String DEFAULT_CONTACT_POINTS = "localhost"; private boolean jmxReportingEnabled = DEFAULT_JMX_REPORTING_ENABLED; private boolean metricsEnabled = DEFAULT_METRICS_ENABLED; private boolean sslEnabled = DEFAULT_SSL_ENABLED; private int maxSchemaAgreementWaitSeconds = DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS; private String port = String.valueOf(DEFAULT_PORT); private final PersistenceExceptionTranslator exceptionTranslator = new CassandraExceptionTranslator(); private @Nullable Cluster cluster; private @Nullable ClusterBuilderConfigurer clusterBuilderConfigurer; private @Nullable AddressTranslator addressTranslator; private @Nullable AuthProvider authProvider; private @Nullable CompressionType compressionType; private @Nullable Host.StateListener hostStateListener; private @Nullable LatencyTracker latencyTracker; private List<CreateKeyspaceSpecification> keyspaceCreations = new ArrayList<>(); private List<AlterKeyspaceSpecification> keyspaceAlterations = new ArrayList<>(); private List<DropKeyspaceSpecification> keyspaceDrops = new ArrayList<>(); private Set<KeyspaceActionSpecification> keyspaceSpecifications = new HashSet<>(); private List<KeyspaceActions> keyspaceActions = new ArrayList<>(); private List<String> startupScripts = new ArrayList<>(); private List<String> shutdownScripts = new ArrayList<>(); private @Nullable LoadBalancingPolicy loadBalancingPolicy; private NettyOptions nettyOptions = NettyOptions.DEFAULT_INSTANCE; private @Nullable PoolingOptions
poolingOptions; private @Nullable ProtocolVersion protocolVersion; private @Nullable QueryOptions queryOptions; private @Nullable ReconnectionPolicy reconnectionPolicy; private @Nullable RetryPolicy retryPolicy; private @Nullable SpeculativeExecutionPolicy speculativeExecutionPolicy; private @Nullable SocketOptions socketOptions; private @Nullable SSLOptions sslOptions; private @Nullable TimestampGenerator timestampGenerator; private @Nullable String beanName; private @Nullable String clusterName; private String contactPoints = DEFAULT_CONTACT_POINTS; private @Nullable String password; private @Nullable String username; /* * (non-Javadoc) * @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet() */ @Override public void afterPropertiesSet() throws Exception { Assert.hasText(contactPoints, "At least one server is required"); Cluster.Builder clusterBuilder = newClusterBuilder(); clusterBuilder.addContactPointsWithPorts(formCassandraUrl()); clusterBuilder.withMaxSchemaAgreementWaitSeconds(maxSchemaAgreementWaitSeconds); Optional.ofNullable(compressionType).map(CassandraClusterFactoryBean::convertCompressionType) .ifPresent(clusterBuilder::withCompression); Optional.ofNullable(addressTranslator).ifPresent(clusterBuilder::withAddressTranslator); Optional.ofNullable(loadBalancingPolicy).ifPresent(clusterBuilder::withLoadBalancingPolicy); clusterBuilder.withNettyOptions(nettyOptions); Optional.ofNullable(poolingOptions).ifPresent(clusterBuilder::withPoolingOptions); Optional.ofNullable(protocolVersion).ifPresent(clusterBuilder::withProtocolVersion); Optional.ofNullable(queryOptions).ifPresent(clusterBuilder::withQueryOptions); Optional.ofNullable(reconnectionPolicy).ifPresent(clusterBuilder::withReconnectionPolicy); Optional.ofNullable(retryPolicy).ifPresent(clusterBuilder::withRetryPolicy); Optional.ofNullable(socketOptions).ifPresent(clusterBuilder::withSocketOptions); Optional.ofNullable(speculativeExecutionPolicy).ifPresent(clusterBuilder::withSpeculativeExecutionPolicy); Optional.ofNullable(timestampGenerator).ifPresent(clusterBuilder::withTimestampGenerator); if (authProvider != null) { clusterBuilder.withAuthProvider(authProvider); } else if (StringUtils.hasText(username) && StringUtils.hasText(password)) { clusterBuilder.withCredentials(username, password); } if (!jmxReportingEnabled) { clusterBuilder.withoutJMXReporting(); } if (!metricsEnabled) { clusterBuilder.withoutMetrics(); } if (sslEnabled) { if (sslOptions != null) { clusterBuilder.withSSL(sslOptions); } else { clusterBuilder.withSSL(); } } Optional.ofNullable(resolveClusterName()).filter(StringUtils::hasText) .ifPresent(clusterBuilder::withClusterName); if (clusterBuilderConfigurer != null) { clusterBuilderConfigurer.configure(clusterBuilder); } cluster = clusterBuilder.build(); Optional.ofNullable(hostStateListener).ifPresent(cluster::register); Optional.ofNullable(latencyTracker).ifPresent(cluster::register); generateSpecificationsFromFactoryBeans(); List<KeyspaceActionSpecification> startup = new ArrayList<>(keyspaceCreations.size() + keyspaceAlterations.size()); startup.addAll(keyspaceCreations); startup.addAll(keyspaceAlterations); executeSpecsAndScripts(startup, startupScripts, cluster); } /* * (non-Javadoc) * @see com.datastax.driver.core.Cluster#builder() */ Cluster.Builder newClusterBuilder() { return Cluster.builder(); } /* (non-Javadoc) */ @Nullable private String resolveClusterName() { return StringUtils.hasText(clusterName) ? 
clusterName : beanName; } /** * Resolves the comma-delimited contact points and ports into socket addresses, replacing the default: * clusterBuilder.addContactPoints(StringUtils.commaDelimitedListToStringArray(contactPoints)).withPort(port); * * @return the contact point addresses, pairing each host with its port (falling back to {@link #DEFAULT_PORT}) */ private List<InetSocketAddress> formCassandraUrl() { List<InetSocketAddress> serverAddressList = new ArrayList<>(); String[] hosts = contactPoints.split(","); String[] ports = port.split(","); for (int i = 0; i < hosts.length; i++) { InetSocketAddress inetSocketAddress; if (ports.length > i) { inetSocketAddress = new InetSocketAddress(hosts[i], Integer.parseInt(ports[i])); } else { inetSocketAddress = new InetSocketAddress(hosts[i], DEFAULT_PORT); } serverAddressList.add(inetSocketAddress); } return serverAddressList; } /* * (non-Javadoc) * @see org.springframework.beans.factory.DisposableBean#destroy() */ @Override public void destroy() { if (cluster != null) { executeSpecsAndScripts(keyspaceDrops, shutdownScripts, cluster); cluster.close(); } } /* * (non-Javadoc) * @see org.springframework.beans.factory.FactoryBean#getObject() */ @Override public Cluster getObject() { return cluster; } /* * (non-Javadoc) * @see org.springframework.beans.factory.FactoryBean#getObjectType() */ @Override public Class<? extends Cluster> getObjectType() { return (cluster != null ? cluster.getClass() : Cluster.class); } /* * (non-Javadoc) * @see org.springframework.beans.factory.FactoryBean#isSingleton() */ @Override public boolean isSingleton() { return true; } /* * (non-Javadoc) * @see org.springframework.dao.support.PersistenceExceptionTranslator#translateExceptionIfPossible(java.lang.RuntimeException) */ @Override public DataAccessException translateExceptionIfPossible(RuntimeException ex) { return exceptionTranslator.translateExceptionIfPossible(ex); } /** * Examines the contents of all the KeyspaceSpecificationFactoryBeans and generates the proper * KeyspaceSpecification from them. */ private void generateSpecificationsFromFactoryBeans() { generateSpecifications(keyspaceSpecifications); keyspaceActions.forEach(actions -> generateSpecifications(actions.getActions())); } private void generateSpecifications(Collection<KeyspaceActionSpecification> specifications) { specifications.forEach(keyspaceActionSpecification -> { if (keyspaceActionSpecification instanceof CreateKeyspaceSpecification) { keyspaceCreations.add((CreateKeyspaceSpecification) keyspaceActionSpecification); } if (keyspaceActionSpecification instanceof DropKeyspaceSpecification) { keyspaceDrops.add((DropKeyspaceSpecification) keyspaceActionSpecification); } if (keyspaceActionSpecification instanceof AlterKeyspaceSpecification) { keyspaceAlterations.add((AlterKeyspaceSpecification) keyspaceActionSpecification); } }); } private void executeSpecsAndScripts(List<?
extends KeyspaceActionSpecification> keyspaceActionSpecifications, List<String> scripts, Cluster cluster) { if (!CollectionUtils.isEmpty(keyspaceActionSpecifications) || !CollectionUtils.isEmpty(scripts)) { Session session = cluster.connect(); try { CqlTemplate template = new CqlTemplate(session); keyspaceActionSpecifications .forEach(keyspaceActionSpecification -> template.execute(toCql(keyspaceActionSpecification))); scripts.forEach(template::execute); } finally { if (session != null) { session.close(); } } } } private String toCql(KeyspaceActionSpecification specification) { if (specification instanceof CreateKeyspaceSpecification) { return new CreateKeyspaceCqlGenerator((CreateKeyspaceSpecification) specification).toCql(); } if (specification instanceof DropKeyspaceSpecification) { return new DropKeyspaceCqlGenerator((DropKeyspaceSpecification) specification).toCql(); } if (specification instanceof AlterKeyspaceSpecification) { return new AlterKeyspaceCqlGenerator((AlterKeyspaceSpecification) specification).toCql(); } throw new IllegalArgumentException( "Unsupported specification type: " + ClassUtils.getQualifiedName(specification.getClass())); } /* * (non-Javadoc) * @see org.springframework.beans.factory.BeanNameAware#setBeanName(String) * @since 1.5 */ @Override public void setBeanName(String beanName) { this.beanName = beanName; } /** * Set a comma-delimited string of the contact points (hosts) to connect to. Default is * {@code localhost}; see {@link #DEFAULT_CONTACT_POINTS}. * * @param contactPoints * the contact points used by the new cluster. */ public void setContactPoints(String contactPoints) { this.contactPoints = contactPoints; } /** * Set the port for the contact points. Default is {@code 9042}, see {@link #DEFAULT_PORT}. * * @param port * the port used by the new cluster. */ public void setPort(String port) { this.port = port; } /** * Set the {@link CompressionType}. Default is uncompressed. * * @param compressionType * the {@link CompressionType} used by the new cluster. */ public void setCompressionType(@Nullable CompressionType compressionType) { this.compressionType = compressionType; } /** * Set the {@link PoolingOptions} to configure the connection pooling behavior. * * @param poolingOptions * the {@link PoolingOptions} used by the new cluster. */ public void setPoolingOptions(@Nullable PoolingOptions poolingOptions) { this.poolingOptions = poolingOptions; } /** * Set the {@link ProtocolVersion}. * * @param protocolVersion * the {@link ProtocolVersion} used by the new cluster. * @since 1.4 */ public void setProtocolVersion(@Nullable ProtocolVersion protocolVersion) { this.protocolVersion = protocolVersion; } /** * Set the {@link SocketOptions} containing low-level socket options. * * @param socketOptions * the {@link SocketOptions} used by the new cluster. */ public void setSocketOptions(@Nullable SocketOptions socketOptions) { this.socketOptions = socketOptions; } /** * Set the {@link QueryOptions} to tune to defaults for individual queries. * * @param queryOptions * the {@link QueryOptions} used by the new cluster. */ public void setQueryOptions(@Nullable QueryOptions queryOptions) { this.queryOptions = queryOptions; } /** * Set the {@link AuthProvider}. Default is unauthenticated. * * @param authProvider * the {@link AuthProvider} used by the new cluster. */ public void setAuthProvider(@Nullable AuthProvider authProvider) { this.authProvider = authProvider; } /** * Set the {@link NettyOptions} used by a client to customize the driver's underlying Netty * layer. 
* * @param nettyOptions * the {@link NettyOptions} used by the new cluster. * @since 1.5 */ public void setNettyOptions(NettyOptions nettyOptions) { this.nettyOptions = nettyOptions; } /** * Set the {@link LoadBalancingPolicy} that decides which Cassandra hosts to contact for each * new query. * * @param loadBalancingPolicy * the {@link LoadBalancingPolicy} used by the new cluster. */ public void setLoadBalancingPolicy(@Nullable LoadBalancingPolicy loadBalancingPolicy) { this.loadBalancingPolicy = loadBalancingPolicy; } /** * Set the {@link ReconnectionPolicy} that decides how often the reconnection to a dead node is * attempted. * * @param reconnectionPolicy * the {@link ReconnectionPolicy} used by the new cluster. */ public void setReconnectionPolicy(@Nullable ReconnectionPolicy reconnectionPolicy) { this.reconnectionPolicy = reconnectionPolicy; } /** * Set the {@link RetryPolicy} that defines a default behavior to adopt when a request fails. * * @param retryPolicy * the {@link RetryPolicy} used by the new cluster. */ public void setRetryPolicy(@Nullable RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; } /** * Set whether metrics are enabled. Default is {@literal true}, see * {@link #DEFAULT_METRICS_ENABLED}. */ public void setMetricsEnabled(boolean metricsEnabled) { this.metricsEnabled = metricsEnabled; } /** * @return the {@link List} of {@link KeyspaceActions}. */ public List<KeyspaceActions> getKeyspaceActions() { return Collections.unmodifiableList(keyspaceActions); } /** * Set a {@link List} of {@link KeyspaceActions} to be executed on initialization. Keyspace * actions may contain create and drop specifications. * * @param keyspaceActions * the {@link List} of {@link KeyspaceActions}. */ public void setKeyspaceActions(List<KeyspaceActions> keyspaceActions) { this.keyspaceActions = new ArrayList<>(keyspaceActions); } /** * Set a {@link List} of {@link CreateKeyspaceSpecification create keyspace specifications} that * are executed when this factory is {@link #afterPropertiesSet() initialized}. * {@link CreateKeyspaceSpecification Create keyspace specifications} are executed on a system * session with no keyspace set, before executing {@link #setStartupScripts(List)}. * * @param specifications * the {@link List} of {@link CreateKeyspaceSpecification create keyspace * specifications}. */ public void setKeyspaceCreations(List<CreateKeyspaceSpecification> specifications) { this.keyspaceCreations = new ArrayList<>(specifications); } /** * @return {@link List} of {@link CreateKeyspaceSpecification create keyspace specifications}. */ public List<CreateKeyspaceSpecification> getKeyspaceCreations() { return Collections.unmodifiableList(keyspaceCreations); } /** * Set a {@link List} of {@link DropKeyspaceSpecification drop keyspace specifications} that are * executed when this factory is {@link #destroy() destroyed}. {@link DropKeyspaceSpecification * Drop keyspace specifications} are executed on a system session with no keyspace set, before * executing {@link #setShutdownScripts(List)}. * * @param specifications * the {@link List} of {@link DropKeyspaceSpecification drop keyspace * specifications}. */ public void setKeyspaceDrops(List<DropKeyspaceSpecification> specifications) { this.keyspaceDrops = new ArrayList<>(specifications); } /** * @return the {@link List} of {@link DropKeyspaceSpecification drop keyspace specifications}. 
*/ public List<DropKeyspaceSpecification> getKeyspaceDrops() { return Collections.unmodifiableList(keyspaceDrops); } /** * Set a {@link List} of raw {@link String CQL statements} that are executed when this factory * is {@link #afterPropertiesSet() initialized}. Scripts are executed on a system session with * no keyspace set, after executing {@link #setKeyspaceCreations(List)}. * * @param scripts * the scripts to execute on startup */ public void setStartupScripts(List<String> scripts) { this.startupScripts = new ArrayList<>(scripts); } /** * @return the startup scripts */ public List<String> getStartupScripts() { return Collections.unmodifiableList(startupScripts); } /** * Set a {@link List} of raw {@link String CQL statements} that are executed when this factory * is {@link #destroy() destroyed}. {@link DropKeyspaceSpecification Drop keyspace * specifications} are executed on a system session with no keyspace set, after executing * {@link #setKeyspaceDrops(List)}. * * @param scripts * the scripts to execute on shutdown */ public void setShutdownScripts(List<String> scripts) { this.shutdownScripts = new ArrayList<>(scripts); } /** * @return the shutdown scripts */ public List<String> getShutdownScripts() { return Collections.unmodifiableList(shutdownScripts); } /** * @param keyspaceSpecifications * The {@link KeyspaceActionSpecification} to set. */ public void setKeyspaceSpecifications(Set<KeyspaceActionSpecification> keyspaceSpecifications) { this.keyspaceSpecifications = new LinkedHashSet<>(keyspaceSpecifications); } /** * @return the {@link KeyspaceActionSpecification} associated with this factory. */ public Set<KeyspaceActionSpecification> getKeyspaceSpecifications() { return Collections.unmodifiableSet(keyspaceSpecifications); } /** * Set the username to use with {@link com.datastax.driver.core.PlainTextAuthProvider}. * * @param username * The username to set. */ public void setUsername(String username) { this.username = username; } /** * Set the password to use with {@link com.datastax.driver.core.PlainTextAuthProvider}. * * @param password * The password to set. */ public void setPassword(String password) { this.password = password; } /** * Set whether to use JMX reporting. Default is {@literal true}, see * {@link #DEFAULT_JMX_REPORTING_ENABLED}. * * @param jmxReportingEnabled * The jmxReportingEnabled to set. */ public void setJmxReportingEnabled(boolean jmxReportingEnabled) { this.jmxReportingEnabled = jmxReportingEnabled; } /** * Set whether to use SSL. Default is plain, see {@link #DEFAULT_SSL_ENABLED}. * * @param sslEnabled * The sslEnabled to set. */ public void setSslEnabled(boolean sslEnabled) { this.sslEnabled = sslEnabled; } /** * @param sslOptions * The sslOptions to set. */ public void setSslOptions(SSLOptions sslOptions) { this.sslOptions = sslOptions; } /** * @param hostStateListener * The hostStateListener to set. */ public void setHostStateListener(Host.StateListener hostStateListener) { this.hostStateListener = hostStateListener; } /** * @param latencyTracker * The latencyTracker to set. */ public void setLatencyTracker(LatencyTracker latencyTracker) { this.latencyTracker = latencyTracker; } /** * Configures the address translator used by the new cluster to translate IP addresses received * from Cassandra nodes into locally query-able addresses. * * @param addressTranslator * {@link AddressTranslator} used by the new cluster.
* @see com.datastax.driver.core.Cluster.Builder#withAddressTranslator(AddressTranslator) * @see com.datastax.driver.core.policies.AddressTranslator * @since 1.5 */ public void setAddressTranslator(@Nullable AddressTranslator addressTranslator) { this.addressTranslator = addressTranslator; } /** * Sets the {@link ClusterBuilderConfigurer} used to apply additional configuration logic to the * {@link com.datastax.driver.core.Cluster.Builder}. {@link ClusterBuilderConfigurer} is invoked * after all provided options are configured. The factory will {@link Builder#build()} the * {@link Cluster} after applying {@link ClusterBuilderConfigurer}. * * @param clusterBuilderConfigurer * {@link ClusterBuilderConfigurer} used to configure the * {@link com.datastax.driver.core.Cluster.Builder}. * @see org.springframework.data.cassandra.config.ClusterBuilderConfigurer */ public void setClusterBuilderConfigurer(@Nullable ClusterBuilderConfigurer clusterBuilderConfigurer) { this.clusterBuilderConfigurer = clusterBuilderConfigurer; } /** * An optional name for the cluster instance. This name appears in JMX metrics. Defaults to the * bean name. * * @param clusterName * optional name for the cluster. * @see com.datastax.driver.core.Cluster.Builder#withClusterName(String) * @since 1.5 */ public void setClusterName(@Nullable String clusterName) { this.clusterName = clusterName; } /** * Sets the maximum time to wait for schema agreement before returning from a DDL query. The * timeout is used to wait for all currently up hosts in the cluster to agree on the schema. * * @param seconds * max schema agreement wait in seconds. * @see com.datastax.driver.core.Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int) * @since 1.5 */ public void setMaxSchemaAgreementWaitSeconds(int seconds) { this.maxSchemaAgreementWaitSeconds = seconds; } /** * Configures the speculative execution policy to use for the new cluster. * * @param speculativeExecutionPolicy * {@link SpeculativeExecutionPolicy} to use with the new cluster. * @see com.datastax.driver.core.Cluster.Builder#withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy) * @see com.datastax.driver.core.policies.SpeculativeExecutionPolicy * @since 1.5 */ public void setSpeculativeExecutionPolicy(@Nullable SpeculativeExecutionPolicy speculativeExecutionPolicy) { this.speculativeExecutionPolicy = speculativeExecutionPolicy; } /** * Configures the generator that will produce the client-side timestamp sent with each query. * * @param timestampGenerator * {@link TimestampGenerator} used to produce a client-side timestamp sent with each * query. * @see com.datastax.driver.core.Cluster.Builder#withTimestampGenerator(TimestampGenerator) * @see com.datastax.driver.core.TimestampGenerator * @since 1.5 */ public void setTimestampGenerator(@Nullable TimestampGenerator timestampGenerator) { this.timestampGenerator = timestampGenerator; } private static Compression convertCompressionType(CompressionType type) { switch (type) { case NONE: return Compression.NONE; case SNAPPY: return Compression.SNAPPY; case LZ4: return Compression.LZ4; default: throw new IllegalArgumentException(String.format("Unknown compression type [%s]", type)); } } }
vmware/mangle
mangle-services/src/main/java/com/vmware/mangle/services/cassandra/CassandraClusterFactoryBean.java
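The factory bean's javadoc shows XML configuration; the same options can also be set programmatically from a Spring @Configuration class. A sketch under the assumption that the bean is wired by hand and that spring-data-cassandra's CreateKeyspaceSpecification factory methods are available; hosts, ports, credentials and the keyspace name are placeholders:

import java.util.Collections;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.cassandra.core.cql.keyspace.CreateKeyspaceSpecification;

@Configuration
class CassandraClusterConfig {
  @Bean
  public CassandraClusterFactoryBean cluster() {
    CassandraClusterFactoryBean factory = new CassandraClusterFactoryBean();
    factory.setContactPoints("10.0.0.1,10.0.0.2"); // comma-delimited hosts
    factory.setPort("9042,9042");                  // paired per host; missing entries fall back to DEFAULT_PORT
    factory.setUsername("cassandra");
    factory.setPassword("changeme");
    // Keyspace created on afterPropertiesSet(), before any startup scripts run.
    factory.setKeyspaceCreations(Collections.singletonList(
        CreateKeyspaceSpecification.createKeyspace("demo_ks").ifNotExists()));
    return factory;
  }
}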
213,901
/* * Copyright 2016 Needham Software LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jesterj.ingest.persistence; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.cql.PreparedStatement; import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metrics.Metrics; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Map; import java.util.Optional; import java.util.SplittableRandom; import java.util.concurrent.Callable; import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; /** * A class to globalize the cluster and session objects, while providing query caches on a per-instance basis. * These objects are lightweight and can be created freely. */ public class CassandraSupport { public static final SplittableRandom rootRand = new SplittableRandom(); public static final ThreadLocal<SplittableRandom> antiCollision = ThreadLocal.withInitial(rootRand::split); private static final Map<String, Future<PreparedStatement>> preparedQueries = new ConcurrentHashMap<>(); public static NonClosableSession NON_CLOSABLE_SESSION; /** * Add a query to the list of prepared queries maintained by this instance. Queries may be added before * Cassandra is booted, but will not become available until after the Cassandra boot cycle completes. * Attempts to add a query with the same name more than once will be ignored. Queries are cached globally. * * @param name A name with which to retrieve the prepared statement instance * @param statement A string to be prepared as a CQL statement. * @return a future that will complete after cassandra has completed its boot cycle and the statement has been prepared; if the name was already registered, the previously registered future. */ public Future<PreparedStatement> addStatement(String name, String statement) { synchronized (preparedQueries) { if (!preparedQueries.containsKey(name)) { Future<PreparedStatement> result = Cassandra.whenBooted(() -> getSession().prepare(statement)); preparedQueries.put(name, result); return result; } } return preparedQueries.get(name); } /** * Returns a cassandra session wrapped to protect it from being closed. * * @return a <code>NonClosableSession</code> object. */ public CqlSession getSession() { if (NON_CLOSABLE_SESSION == null && Cassandra.getListenAddress() != null) { NON_CLOSABLE_SESSION = new NonClosableSession(); } if (NON_CLOSABLE_SESSION == null) { System.out.println("WARNING: returning null session!!"); } return NON_CLOSABLE_SESSION; } /** * Retrieve a prepared statement added via {@link #addStatement(String, String)}.
This method will block until * cassandra has finished booting, a session has been created and the statement has been prepared. This method * may return null if no statement with that name has been prepared previously. * * @param qName the name of the statement to retrieve * @return the prepared statement ready for use, or null if no statement was registered under that name. */ public PreparedStatement getPreparedQuery(String qName) { try { Future<PreparedStatement> preparedStatementFuture = preparedQueries.get(qName); if (preparedStatementFuture == null) { return null; // nothing registered under this name } return preparedStatementFuture.get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } /** * Retrieve a prepared statement added via {@link #addStatement(String, String)}. This method will block until * cassandra has finished booting, a session has been created and the statement has been prepared. If the statement * was not previously prepared, this method will first prepare it and then return the prepared statement. * * @param qName the name of the statement to retrieve or prepare if not already prepared * @param q the query to prepare (if not already prepared). * @return the prepared statement ready for use. */ public PreparedStatement getPreparedQuery(String qName, String q) { try { Future<PreparedStatement> preparedStatementFuture = preparedQueries.get(qName); if (preparedStatementFuture != null) { return preparedStatementFuture.get(); } else { addStatement(qName, q); return preparedQueries.get(qName).get(); } } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } public Future<Object> whenBooted(Callable<Object> makeTables) { return Cassandra.whenBooted(makeTables); } private static class SessionHolder { private static final Session INSTANCE; static { Session instance = null; try { instance = CqlSession.builder() .addContactPoint(Cassandra.getSocketAddress()) .withLocalDatacenter("datacenter1") .withAuthCredentials("cassandra", JJCassandraDaemon.getPwDefault()) .build(); } catch (Throwable e) { e.printStackTrace(); } finally { INSTANCE = instance; } } } public static class NonClosableSession implements CqlSession { private final Session sessionRef = SessionHolder.INSTANCE; @Override public CompletionStage<Void> closeAsync() { throw new UnsupportedOperationException("Do not close the sessions handed out from CassandraSupport"); } @NonNull @Override public CompletionStage<Void> forceCloseAsync() { throw new UnsupportedOperationException("Do not close the sessions handed out from CassandraSupport"); } @Override public void close() { throw new UnsupportedOperationException("Do not close the sessions handed out from CassandraSupport"); } @NonNull @Override public CompletionStage<Void> closeFuture() { throw new UnsupportedOperationException("Do not close the sessions handed out from CassandraSupport"); } @Override public boolean isClosed() { return sessionRef.isClosed(); } // Only to be called when shutting down cassandra entirely. // This would only ever be done on JVM shutdown.
public void deactivate() { System.out.println("CLOSING CASSANDRA:"); Thread.dumpStack(); sessionRef.close(); } @NonNull @Override public String getName() { return sessionRef.getName(); } @NonNull @Override public Metadata getMetadata() { return sessionRef.getMetadata(); } @Override public boolean isSchemaMetadataEnabled() { return sessionRef.isSchemaMetadataEnabled(); } @NonNull @Override public CompletionStage<Metadata> setSchemaMetadataEnabled(@Nullable Boolean newValue) { return sessionRef.setSchemaMetadataEnabled(newValue); } @NonNull @Override public CompletionStage<Metadata> refreshSchemaAsync() { return sessionRef.refreshSchemaAsync(); } @NonNull @Override public CompletionStage<Boolean> checkSchemaAgreementAsync() { return sessionRef.checkSchemaAgreementAsync(); } @NonNull @Override public DriverContext getContext() { return sessionRef.getContext(); } @NonNull @Override public Optional<CqlIdentifier> getKeyspace() { return sessionRef.getKeyspace(); } @NonNull @Override public Optional<Metrics> getMetrics() { return sessionRef.getMetrics(); } @Nullable @Override public <RequestT extends Request, ResultT> ResultT execute(@NonNull RequestT request, @NonNull GenericType<ResultT> resultType) { return sessionRef.execute(request,resultType); } } }
nsoft/jesterj
code/ingest/src/main/java/org/jesterj/ingest/persistence/CassandraSupport.java
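The intended call pattern for CassandraSupport, as its javadoc describes it: register statements by name (possibly before Cassandra boots), then fetch the cached PreparedStatement when executing. A sketch; the statement text, names and key are illustrative, not part of jesterj:

import com.datastax.oss.driver.api.core.cql.PreparedStatement;

class CassandraSupportUsageSketch {
  void example() {
    CassandraSupport support = new CassandraSupport();
    // Safe to call before boot: preparation is deferred behind the returned Future.
    support.addStatement("findDoc", "SELECT v FROM demo_ks.docs WHERE k = ?");
    // Blocks until boot and preparation have completed.
    PreparedStatement stmt = support.getPreparedQuery("findDoc");
    support.getSession().execute(stmt.bind("doc-42"));
  }
}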
213,902
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.session; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metrics.Metrics; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Optional; import java.util.concurrent.CompletionStage; import net.jcip.annotations.ThreadSafe; /** * Utility class to wrap a session. * * <p>This will typically be used to mix in a convenience interface from a 3rd-party extension: * * <pre>{@code * class ReactiveSessionWrapper extends SessionWrapper implements ReactiveSession { * public ReactiveSessionWrapper(Session delegate) { * super(delegate); * } * } * }</pre> */ @ThreadSafe public class SessionWrapper implements Session { private final Session delegate; public SessionWrapper(@NonNull Session delegate) { this.delegate = delegate; } @NonNull public Session getDelegate() { return delegate; } @NonNull @Override public String getName() { return delegate.getName(); } @NonNull @Override public Metadata getMetadata() { return delegate.getMetadata(); } @Override public boolean isSchemaMetadataEnabled() { return delegate.isSchemaMetadataEnabled(); } @NonNull @Override public CompletionStage<Metadata> setSchemaMetadataEnabled(@Nullable Boolean newValue) { return delegate.setSchemaMetadataEnabled(newValue); } @NonNull @Override public CompletionStage<Metadata> refreshSchemaAsync() { return delegate.refreshSchemaAsync(); } @NonNull @Override public CompletionStage<Boolean> checkSchemaAgreementAsync() { return delegate.checkSchemaAgreementAsync(); } @NonNull @Override public DriverContext getContext() { return delegate.getContext(); } @NonNull @Override public Optional<CqlIdentifier> getKeyspace() { return delegate.getKeyspace(); } @NonNull @Override public Optional<Metrics> getMetrics() { return delegate.getMetrics(); } @Nullable @Override public <RequestT extends Request, ResultT> ResultT execute( @NonNull RequestT request, @NonNull GenericType<ResultT> resultType) { return delegate.execute(request, resultType); } @NonNull @Override public CompletionStage<Void> closeFuture() { return delegate.closeFuture(); } @NonNull @Override public CompletionStage<Void> closeAsync() { return delegate.closeAsync(); } @NonNull @Override public CompletionStage<Void> forceCloseAsync() { return delegate.forceCloseAsync(); } }
apache/cassandra-java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java
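Following the mixin pattern shown in SessionWrapper's javadoc, a subclass can layer a cross-cutting concern over the delegate by overriding only the methods it cares about. A hypothetical wrapper (not part of the driver) that counts requests passing through the generic execute entry point:

import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import com.datastax.oss.driver.internal.core.session.SessionWrapper;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.concurrent.atomic.AtomicLong;

class CountingSessionWrapper extends SessionWrapper {
  private final AtomicLong executions = new AtomicLong();

  CountingSessionWrapper(@NonNull Session delegate) {
    super(delegate);
  }

  @Nullable
  @Override
  public <RequestT extends Request, ResultT> ResultT execute(
      @NonNull RequestT request, @NonNull GenericType<ResultT> resultType) {
    executions.incrementAndGet(); // every request funnels through this generic entry point
    return super.execute(request, resultType);
  }

  long executionCount() {
    return executions.get();
  }
}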
213,903
/** * Copyright (c) 2008-2023, MOVES Institute, Naval Postgraduate School (NPS). All rights reserved. * This work is provided under a BSD-style open-source license, see project * <a href="https://savage.nps.edu/opendis7-java/license.html" target="_blank">license.html</a> and <a href="https://savage.nps.edu/opendis7-java/license.txt" target="_blank">license.txt</a> */ // header autogenerated using string template dis7javalicense.txt // autogenerated using string template entitytypecommon.txt package edu.nps.moves.dis7.entities.rus.platform.land; import edu.nps.moves.dis7.pdus.*; import edu.nps.moves.dis7.enumerations.*; /** * <p> Entity class <b><code>KS1285mmAAGun</code></b> collects multiple enumeration values together to uniquely define this entity. </p> * <p> <i>Usage:</i> create an instance of this class with <code>KS1285mmAAGun.createInstance()</code> or <code>new KS1285mmAAGun()</code>. </p> * <ul> * <li> Country: Russia (RUS) = <code>222</code>; </li> * <li> Entity kind: PlatformDomain = <code>LAND</code>; </li> * <li> Domain: Platform = <code>1</code>; </li> * <li> Category: Air Defense / Missile Defense Unit Equipment = <code>28</code>; </li> * <li> SubCategory: TowedVSHORADGunMissileSystem = <code>13</code>; </li> * <li> Specific: KS1285mmAAGun = <code>3</code>; </li> * <li> Entity type uid: 13130; </li> * <li> Online document reference: <a href="https://gitlab.nps.edu/Savage/NetworkedGraphicsMV3500/-/blob/master/specifications/README.md" target="_blank">SISO-REF-010-v33-DRAFT-20231217-d10 (2023-12-17)</a>. </li> * </ul> * <p> Full name: edu.nps.moves.dis7.source.generator.entityTypes.GenerateEntityTypes$SpecificElem@79d9214d. </p> * @see Country#RUSSIA_RUS * @see EntityKind#PLATFORM * @see Domain * @see PlatformDomain * @see Category * @see AirDefenseMissileDefenseUnitEquipment * @see SubCategory */ public final class KS1285mmAAGun extends EntityType { /** Default constructor */ public KS1285mmAAGun() { setCountry(Country.RUSSIA_RUS); setEntityKind(EntityKind.PLATFORM); setDomain(Domain.inst(PlatformDomain.LAND)); setCategory((byte)28); // uid 13034, Air Defense / Missile Defense Unit Equipment setSubCategory((byte)13); // uid 13127, Towed VSHORAD Gun/Missile System setSpecific((byte)3); // uid 13130, KS-12 85mm AA Gun } /** Create a new instance of this final (unmodifiable) class * @return copy of class for use as data */ public static KS1285mmAAGun createInstance() { return new KS1285mmAAGun(); } }
open-dis/opendis7-java
src-generated/edu/nps/moves/dis7/entities/rus/platform/land/KS1285mmAAGun.java
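A minimal usage sketch for the generated class above (the demo class name is hypothetical; it relies only on createInstance() as documented in the javadoc and on Object-level printing):

import edu.nps.moves.dis7.entities.rus.platform.land.KS1285mmAAGun;

public class KS1285mmAAGunDemo { // hypothetical demo class
    public static void main(String[] args) {
        // Entity type preconfigured as RUS / PLATFORM / LAND / 28 / 13 / 3
        KS1285mmAAGun gun = KS1285mmAAGun.createInstance();
        System.out.println(gun); // prints whatever EntityType's inherited formatting provides
    }
}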
213,904
package jasonteam;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

/**
 * This class provides a very simple foundation for agents. It will only connect once (no
 * automatic reconnection). It will authenticate itself and wait for any messages. You can send a
 * ping using "sendPing" whenever you like.
 *
 * @author silver
 */
public abstract class ClimaAgent {

    private Logger logger = Logger.getLogger(ClimaAgent.class.getName());

    private static class SocketClosedException extends Exception {}

    private int networkport;
    private String networkhost;
    private InetSocketAddress socketaddress;
    private Socket socket;
    private InputStream inputstream;
    private OutputStream outputstream;

    private String username;
    private String password;

    private DocumentBuilderFactory documentbuilderfactory;
    private TransformerFactory transformerfactory;

    public ClimaAgent() {
        networkhost = "localhost";
        networkport = 0;
        //socket = new Socket();
        documentbuilderfactory = DocumentBuilderFactory.newInstance();
        transformerfactory = TransformerFactory.newInstance();
    }

    public String getHost() {
        return networkhost;
    }

    public void setHost(String host) {
        this.networkhost = host;
    }

    public int getPort() {
        return networkport;
    }

    public void setPort(int port) {
        this.networkport = port;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password; // fixed: previously returned username by mistake
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public void start() {
        new Thread() {
            public void run() {
                agentThread();
            }
        }.start();
    }

    public void sendAuthentication(String username, String password) throws IOException {
        try {
            Document doc = documentbuilderfactory.newDocumentBuilder().newDocument();
            Element root = doc.createElement("message");
            root.setAttribute("type", "auth-request");
            doc.appendChild(root);
            Element auth = doc.createElement("authentication");
            auth.setAttribute("username", username);
            auth.setAttribute("password", password);
            root.appendChild(auth);
            transformerfactory.newTransformer().transform(new DOMSource(doc), new StreamResult(outputstream));
            outputstream.write(0);
        } catch (ParserConfigurationException e) {
            logger.log(Level.SEVERE, "unable to create new document for authentication.", e);
        } catch (TransformerConfigurationException e) {
            logger.log(Level.SEVERE, "unable to configure transformer", e);
        } catch (TransformerException e) {
            logger.log(Level.SEVERE, "unable to transform document", e);
        }
    }

    public boolean receiveAuthenticationResult() throws IOException {
        try {
            Document doc = receiveDocument();
            Element root = doc.getDocumentElement();
            if (root == null) return false;
            if (!root.getAttribute("type").equalsIgnoreCase("auth-response")) return false;
            NodeList nl = root.getChildNodes();
            Element authresult = null;
            for (int i = 0; i < nl.getLength(); i++) {
                Node n = nl.item(i);
                if (n.getNodeType() == Element.ELEMENT_NODE
                        && n.getNodeName().equalsIgnoreCase("authentication")) {
                    authresult = (Element) n;
                    break;
                }
            }
            // guard against a response without an authentication element
            if (authresult == null) return false;
            if (!authresult.getAttribute("result").equalsIgnoreCase("ok")) return false;
        } catch (SAXException e) {
            e.printStackTrace();
            return false;
        } catch (ParserConfigurationException e) {
            e.printStackTrace();
            return false;
        } catch (SocketClosedException e) {
            e.printStackTrace();
            return false;
        }
        return true;
    }

    public boolean doAuthentication(String username, String password) throws IOException {
        sendAuthentication(username, password);
        return receiveAuthenticationResult();
    }

    public byte[] receivePacket() throws IOException, SocketClosedException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        int read = inputstream.read();
        while (read != 0) {
            if (read == -1) {
                throw new SocketClosedException();
            }
            buffer.write(read);
            read = inputstream.read();
        }
        return buffer.toByteArray();
    }

    public Document receiveDocument()
            throws SAXException, IOException, ParserConfigurationException, SocketClosedException {
        byte[] raw = receivePacket();
        Document doc = documentbuilderfactory.newDocumentBuilder().parse(new ByteArrayInputStream(raw));
        /*
        try {
            if (logger.isLoggable(Level.FINE)) {
                ByteArrayOutputStream temp = new ByteArrayOutputStream();
                transformerfactory.newTransformer().transform(new DOMSource(doc), new StreamResult(temp));
                logger.fine("Received message:\n" + temp.toString());
            }
        } catch (Exception e) {}
        */
        return doc;
    }

    private boolean connect() {
        try {
            //socketaddress = new InetSocketAddress(networkhost, networkport);
            socket = new Socket(networkhost, networkport); //socket.connect(socketaddress);
            inputstream = socket.getInputStream();
            outputstream = socket.getOutputStream();
            if (doAuthentication(username, password)) {
                processLogIn();
                return true;
            } else {
                logger.log(Level.SEVERE, "authentication failed");
            }
        } catch (Exception e) {
            logger.log(Level.SEVERE, "Exception", e);
        }
        return false;
    }

    public void agentThread() {
        if (!connect()) return;
        while (true) {
            try {
                Document doc = receiveDocument();
                Element el_root = doc.getDocumentElement();
                if (el_root != null) {
                    if (el_root.getNodeName().equals("message")) {
                        processMessage(el_root);
                    } else {
                        logger.log(Level.SEVERE, "unknown document received");
                    }
                } else {
                    logger.log(Level.SEVERE, "no document element found");
                }
            } catch (SocketClosedException e) {
                logger.log(Level.SEVERE, "Socket was closed:" + e);
                if (!connect()) return;
            } catch (SocketException e) {
                logger.log(Level.SEVERE, "Socket exception:" + e);
                if (!connect()) return;
            } catch (Exception e) {
                logger.log(Level.SEVERE, "Exception", e);
            }
        }
    }

    public boolean processMessage(Element el_message) {
        String type = el_message.getAttribute("type");
        if (type.equals("requestaction") || type.equals("request-action")
                || type.equals("sim-start") || type.equals("sim-end")) {
            long deadline = 0;
            long currenttime = 0;
            try {
                currenttime = Long.parseLong(el_message.getAttribute("timestamp"));
            } catch (NumberFormatException e) {
                logger.log(Level.SEVERE, "number format invalid", e);
                return true;
            }
            // get perception
            Element el_perception = null;
            NodeList nl = el_message.getChildNodes();
            String infoelementname = "perception";
            if (type.equals("requestaction") || type.equals("request-action")) {
                infoelementname = "perception";
            } else if (type.equals("sim-start")) {
                infoelementname = "simulation";
            } else if (type.equals("sim-end")) {
                infoelementname = "sim-result";
            }
            for (int i = 0; i < nl.getLength(); i++) {
                Node n = nl.item(i);
                if (n.getNodeType() == Element.ELEMENT_NODE
                        && n.getNodeName().equalsIgnoreCase(infoelementname)) {
                    if (el_perception == null) {
                        el_perception = (Element) n;
                        break;
                    }
                }
            }
            if (type.equals("requestaction") || type.equals("request-action")) {
                try {
                    deadline = Long.parseLong(el_perception.getAttribute("deadline"));
                } catch (NumberFormatException e) {
                    logger.log(Level.SEVERE, "number format invalid", e);
                    return true;
                }
                processRequestAction(el_perception, currenttime, deadline);
            } else if (type.equals("sim-start")) {
                processSimulationStart(el_perception, currenttime);
            } else if (type.equals("sim-end")) {
                processSimulationEnd(el_perception, currenttime);
            }
        } else if (type.equals("pong")) {
            NodeList nl = el_message.getChildNodes();
            for (int i = 0; i < nl.getLength(); i++) {
                Node n = nl.item(i);
                if (n.getNodeType() == Element.ELEMENT_NODE
                        && n.getNodeName().equalsIgnoreCase("payload")) {
                    processPong(((Element) n).getAttribute("value"));
                    return true;
                }
            }
        }
        return true;
    }

    public abstract void processSimulationStart(Element perception, long currenttime);

    public abstract void processRequestAction(Element perception, long currenttime, long deadline);

    public abstract void processSimulationEnd(Element result, long currenttime);

    public void processPong(String pong) {
        logger.info("---#-#-#-#-#-#-- processPong(" + pong + ") --#-#-#-#-#-#---");
    }

    public void processLogIn() {
        logger.info("---#-#-#-#-#-#-- login --#-#-#-#-#-#---");
    }

    public void sendDocument(Document doc) throws IOException {
        try {
            /*
            if (logger.isLoggable(Level.FINE)) {
                ByteArrayOutputStream temp = new ByteArrayOutputStream();
                transformerfactory.newTransformer().transform(new DOMSource(doc), new StreamResult(temp));
                logger.fine("Sending:" + temp.toString());
            }
            */
            transformerfactory.newTransformer().transform(new DOMSource(doc), new StreamResult(outputstream));
            outputstream.write(0);
            outputstream.flush();
        } catch (TransformerConfigurationException e) {
            logger.log(Level.SEVERE, "transformer config error", e);
        } catch (TransformerException e) {
            logger.log(Level.SEVERE, "transformer error", e);
        }
    }

    public void sendPing(String ping) throws IOException {
        Document doc = null;
        try {
            doc = documentbuilderfactory.newDocumentBuilder().newDocument();
        } catch (ParserConfigurationException e) {
            logger.log(Level.SEVERE, "parser config error", e);
            return;
        }
        Element root = doc.createElement("message");
        doc.appendChild(root);
        root.setAttribute("type", "ping");
        Element payload = doc.createElement("payload");
        payload.setAttribute("value", ping);
        root.appendChild(payload);
        sendDocument(doc);
    }
}
jason-lang/apps
jason-team-2006/jasonteam/ClimaAgent.java
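Since ClimaAgent is abstract, a subclass must supply the three process* callbacks. A minimal sketch, assuming a CLIMA server is reachable at the hypothetical host, port, and credentials below (the class name is also hypothetical):

package jasonteam;

import org.w3c.dom.Element;

public class LoggingClimaAgent extends ClimaAgent {
    @Override
    public void processSimulationStart(Element perception, long currenttime) {
        System.out.println("simulation started at " + currenttime);
    }

    @Override
    public void processRequestAction(Element perception, long currenttime, long deadline) {
        System.out.println("action requested, deadline " + deadline);
    }

    @Override
    public void processSimulationEnd(Element result, long currenttime) {
        System.out.println("simulation ended at " + currenttime);
    }

    public static void main(String[] args) {
        LoggingClimaAgent agent = new LoggingClimaAgent();
        agent.setHost("localhost"); // assumes a server listening locally
        agent.setPort(12300);       // hypothetical port
        agent.setUsername("agent1");
        agent.setPassword("secret");
        agent.start();              // spawns the agent thread defined in ClimaAgent
    }
}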
213,905
/*
 * Copyright The OpenTelemetry Authors
 * SPDX-License-Identifier: Apache-2.0
 */

package io.opentelemetry.javaagent.instrumentation.cassandra.v4_0;

import static io.opentelemetry.javaagent.instrumentation.cassandra.v4_0.CassandraSingletons.instrumenter;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.DriverException;
import com.datastax.oss.driver.api.core.context.DriverContext;
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
import com.datastax.oss.driver.api.core.cql.PrepareRequest;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.metadata.Metadata;
import com.datastax.oss.driver.api.core.metrics.Metrics;
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import org.checkerframework.checker.nullness.qual.Nullable;

public class TracingCqlSession implements CqlSession {
  private final CqlSession session;

  public TracingCqlSession(CqlSession session) {
    this.session = session;
  }

  @Override
  public PreparedStatement prepare(SimpleStatement statement) {
    return session.prepare(statement);
  }

  @Override
  public PreparedStatement prepare(String query) {
    return session.prepare(query);
  }

  @Override
  public PreparedStatement prepare(PrepareRequest request) {
    return session.prepare(request);
  }

  @Override
  public CompletionStage<PreparedStatement> prepareAsync(SimpleStatement statement) {
    return session.prepareAsync(statement);
  }

  @Override
  public CompletionStage<PreparedStatement> prepareAsync(String query) {
    return session.prepareAsync(query);
  }

  @Override
  public CompletionStage<PreparedStatement> prepareAsync(PrepareRequest request) {
    return session.prepareAsync(request);
  }

  @Override
  public String getName() {
    return session.getName();
  }

  @Override
  public Metadata getMetadata() {
    return session.getMetadata();
  }

  @Override
  public boolean isSchemaMetadataEnabled() {
    return session.isSchemaMetadataEnabled();
  }

  @Override
  public CompletionStage<Metadata> setSchemaMetadataEnabled(@Nullable Boolean newValue) {
    return session.setSchemaMetadataEnabled(newValue);
  }

  @Override
  public CompletionStage<Metadata> refreshSchemaAsync() {
    return session.refreshSchemaAsync();
  }

  @Override
  public Metadata refreshSchema() {
    return session.refreshSchema();
  }

  @Override
  public CompletionStage<Boolean> checkSchemaAgreementAsync() {
    return session.checkSchemaAgreementAsync();
  }

  @Override
  public boolean checkSchemaAgreement() {
    return session.checkSchemaAgreement();
  }

  @Override
  public DriverContext getContext() {
    return session.getContext();
  }

  @Override
  public Optional<CqlIdentifier> getKeyspace() {
    return session.getKeyspace();
  }

  @Override
  public Optional<Metrics> getMetrics() {
    return session.getMetrics();
  }

  @Override
  public CompletionStage<Void> closeFuture() {
    return session.closeFuture();
  }

  @Override
  public boolean isClosed() {
    return session.isClosed();
  }

  @Override
  public CompletionStage<Void> closeAsync() {
    return session.closeAsync();
  }

  @Override
  public CompletionStage<Void> forceCloseAsync() {
    return session.forceCloseAsync();
  }

  @Override
  public void close() {
    session.close();
  }

  @Override
  @Nullable
  public <REQUEST extends Request, RESULT> RESULT execute(
      REQUEST request, GenericType<RESULT> resultType) {
    return session.execute(request, resultType);
  }

  @Override
  public ResultSet execute(String query) {
    CassandraRequest request = CassandraRequest.create(session, query);
    Context context = instrumenter().start(Context.current(), request);
    ResultSet resultSet;
    try (Scope ignored = context.makeCurrent()) {
      resultSet = session.execute(query);
    } catch (RuntimeException e) {
      instrumenter().end(context, request, getExecutionInfo(e), e);
      throw e;
    }
    instrumenter().end(context, request, resultSet.getExecutionInfo(), null);
    return resultSet;
  }

  @Override
  public ResultSet execute(Statement<?> statement) {
    String query = getQuery(statement);
    CassandraRequest request = CassandraRequest.create(session, query);
    Context context = instrumenter().start(Context.current(), request);
    ResultSet resultSet;
    try (Scope ignored = context.makeCurrent()) {
      resultSet = session.execute(statement);
    } catch (RuntimeException e) {
      instrumenter().end(context, request, getExecutionInfo(e), e);
      throw e;
    }
    instrumenter().end(context, request, resultSet.getExecutionInfo(), null);
    return resultSet;
  }

  @Override
  public CompletionStage<AsyncResultSet> executeAsync(Statement<?> statement) {
    String query = getQuery(statement);
    CassandraRequest request = CassandraRequest.create(session, query);
    Context context = instrumenter().start(Context.current(), request);
    try (Scope ignored = context.makeCurrent()) {
      CompletionStage<AsyncResultSet> stage = session.executeAsync(statement);
      return stage.whenComplete(
          (asyncResultSet, throwable) ->
              instrumenter()
                  .end(context, request, getExecutionInfo(asyncResultSet, throwable), throwable));
    }
  }

  @Override
  public CompletionStage<AsyncResultSet> executeAsync(String query) {
    CassandraRequest request = CassandraRequest.create(session, query);
    Context context = instrumenter().start(Context.current(), request);
    try (Scope ignored = context.makeCurrent()) {
      CompletionStage<AsyncResultSet> stage = session.executeAsync(query);
      return stage.whenComplete(
          (asyncResultSet, throwable) ->
              instrumenter()
                  .end(context, request, getExecutionInfo(asyncResultSet, throwable), throwable));
    }
  }

  private static String getQuery(Statement<?> statement) {
    String query = null;
    if (statement instanceof SimpleStatement) {
      query = ((SimpleStatement) statement).getQuery();
    } else if (statement instanceof BoundStatement) {
      query = ((BoundStatement) statement).getPreparedStatement().getQuery();
    }
    return query == null ? "" : query;
  }

  private static ExecutionInfo getExecutionInfo(
      @Nullable AsyncResultSet asyncResultSet, @Nullable Throwable throwable) {
    if (asyncResultSet != null) {
      return asyncResultSet.getExecutionInfo();
    } else {
      return getExecutionInfo(throwable);
    }
  }

  private static ExecutionInfo getExecutionInfo(@Nullable Throwable throwable) {
    if (throwable instanceof DriverException) {
      return ((DriverException) throwable).getExecutionInfo();
    } else if (throwable != null && throwable.getCause() instanceof DriverException) {
      // TODO (trask) find out if this is needed and if so add comment explaining
      return ((DriverException) throwable.getCause()).getExecutionInfo();
    } else {
      return null;
    }
  }
}
XiaoMi/mone
opentelemetry-java-instrumentation/instrumentation/cassandra/cassandra-4.0/javaagent/src/main/java/io/opentelemetry/javaagent/instrumentation/cassandra/v4_0/TracingCqlSession.java
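For illustration only: in practice the OpenTelemetry javaagent injects TracingCqlSession around sessions it instruments, so wrapping one by hand is unusual and assumes the agent's CassandraSingletons are initialized. A hedged sketch, assuming a locally reachable cluster (no contact points given, so the builder falls back to its defaults):

import com.datastax.oss.driver.api.core.CqlSession;
import io.opentelemetry.javaagent.instrumentation.cassandra.v4_0.TracingCqlSession;

public class TracingDemo { // hypothetical demo class
  public static void main(String[] args) {
    try (CqlSession raw = CqlSession.builder().build()) {
      CqlSession traced = new TracingCqlSession(raw);
      // a span is started and ended around this synchronous execute call
      traced.execute("SELECT release_version FROM system.local");
    }
  }
}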
213,906
package experiments;

import org.scify.jedai.datamodel.EntityProfile;
import org.scify.jedai.datamodel.IdDuplicates;

import java.util.*;

import minhash.LocalitySensitiveHashing;
import minhash.MinHash;
import minhash.Pair;
import minhash.Reader;
import minhash.ShinglingModel;
import minhash.Utilities;

public class schemaAgnostic {

    static int ITERATIONS = 10;

    public static void main(String[] args) {
        boolean[] preprocessed = {false, false, false, true, false, true, false, false, false, false};
        int[] bands = {4, 32, 16, 4, 32, 32, 16, 32, 16, 32};
        int[] buckets = {64, 8, 8, 128, 16, 8, 16, 16, 16, 8};
        int[] k = {2, 2, 2, 2, 2, 5, 2, 2, 2, 2};

        String[] mainDirs = {"/home/gap2/Documents/blockingNN/data/schemaAgnostic/",
            "/home/gap2/Documents/blockingNN/data/preprocessedSA/"};
        String[] datasetsD1 = {"restaurant1Profiles", "abtProfiles", "amazonProfiles", "dblpProfiles",
            "imdbProfilesNEW", "imdbProfilesNEW", "tmdbProfiles", "walmartProfiles", "dblpProfiles2", "imdbProfiles"};
        String[] datasetsD2 = {"restaurant2Profiles", "buyProfiles", "gpProfiles", "acmProfiles",
            "tmdbProfiles", "tvdbProfiles", "tvdbProfiles", "amazonProfiles2", "scholarProfiles", "dbpediaProfiles"};
        String[] groundtruthDirs = {"restaurantsIdDuplicates", "abtBuyIdDuplicates", "amazonGpIdDuplicates",
            "dblpAcmIdDuplicates", "imdbTmdbIdDuplicates", "imdbTvdbIdDuplicates", "tmdbTvdbIdDuplicates",
            "amazonWalmartIdDuplicates", "dblpScholarIdDuplicates", "moviesIdDuplicates"};

        for (int datasetId = 0; datasetId < groundtruthDirs.length; datasetId++) {
            // read source entities
            int dirId = preprocessed[datasetId] ? 1 : 0;
            String sourcePath = mainDirs[dirId] + datasetsD1[datasetId];
            List<EntityProfile> sourceEntities = Reader.readSerialized(sourcePath);
            System.out.println("Source Entities: " + sourceEntities.size());

            // read target entities
            String targetPath = mainDirs[dirId] + datasetsD2[datasetId];
            List<EntityProfile> targetEntities = Reader.readSerialized(targetPath);
            System.out.println("Target Entities: " + targetEntities.size());

            // read ground-truth file
            String groundTruthPath = mainDirs[dirId] + groundtruthDirs[datasetId];
            Set<IdDuplicates> gtDuplicates = Reader.readSerializedGT(groundTruthPath, sourceEntities, targetEntities);
            System.out.println("GT Duplicates Entities: " + gtDuplicates.size());
            System.out.println();

            double averageIndexingTime = 0;
            double averageQueryingTime = 0;
            double averageRecall = 0;
            double averagePrecision = 0;
            double averageCandidates = 0;
            for (int iteration = 0; iteration < ITERATIONS; iteration++) {
                long time1 = System.currentTimeMillis();

                List<String> sourceSTR = Utilities.entities2String(sourceEntities);
                ShinglingModel model = new ShinglingModel(sourceSTR, k[datasetId]);
                int[][] sourceVectorsInt = model.vectorization(sourceSTR);
                float[][] sVectors = new float[sourceVectorsInt.length][];
                for (int row = 0; row < sourceVectorsInt.length; row++) {
                    double[] tempArray = Arrays.stream(sourceVectorsInt[row]).asDoubleStream().toArray();
                    sVectors[row] = new float[tempArray.length];
                    for (int i = 0; i < tempArray.length; i++) {
                        sVectors[row][i] = (float) tempArray[i];
                    }
                }

                // initialize LSH
                LocalitySensitiveHashing lsh = new MinHash(sVectors, bands[datasetId], buckets[datasetId], model.getVectorSize());

                long time2 = System.currentTimeMillis();

                List<String> targetSTR = Utilities.entities2String(targetEntities);
                int[][] targetVectorsInt = model.vectorization(targetSTR);
                float[][] tVectors = new float[targetVectorsInt.length][];
                for (int row = 0; row < targetVectorsInt.length; row++) {
                    double[] tempArray = Arrays.stream(targetVectorsInt[row]).asDoubleStream().toArray();
                    tVectors[row] = new float[tempArray.length];
                    for (int i = 0; i < tempArray.length; i++) {
                        tVectors[row][i] = (float) tempArray[i];
                    }
                }

                // for each target entity, find its candidates (query)
                final List<Pair> candidatePairs = new ArrayList<>();
                for (int j = 0; j < targetEntities.size(); j++) {
                    float[] vector = tVectors[j];
                    Set<Integer> candidates = lsh.query(vector);
                    for (Integer c : candidates) {
                        candidatePairs.add(new Pair(j, c));
                    }
                }

                long time3 = System.currentTimeMillis();
                averageIndexingTime += time2 - time1;
                averageQueryingTime += time3 - time2;

                // true positives and total verifications, counted by searching the pairs
                // in the ground truth; this second pass over the queries is deliberately
                // placed outside the timed section above
                long tp_ = 0;
                long verifications_ = 0;
                for (int j = 0; j < targetEntities.size(); j++) {
                    float[] vector = tVectors[j];
                    Set<Integer> candidates = lsh.query(vector);
                    for (Integer c : candidates) {
                        IdDuplicates pair = new IdDuplicates(c, j);
                        if (gtDuplicates.contains(pair)) {
                            tp_ += 1;
                        }
                        verifications_ += 1;
                    }
                }

                float recall_ = (float) tp_ / (float) gtDuplicates.size();
                float precision_ = (float) tp_ / (float) verifications_;
                averageRecall += recall_;
                averagePrecision += precision_;
                averageCandidates += candidatePairs.size();
            }

            System.out.println("Average indexing time\t:\t" + averageIndexingTime / ITERATIONS);
            System.out.println("Average querying time\t:\t" + averageQueryingTime / ITERATIONS);
            System.out.println("Recall\t:\t" + averageRecall / ITERATIONS);
            System.out.println("Precision\t:\t" + averagePrecision / ITERATIONS);
            System.out.println("Candidates\t:\t" + averageCandidates / ITERATIONS);
        }
    }
}
gpapadis/ContinuousFilteringBenchmark
nnmethods/minhashLSH/src/experiments/schemaAgnostic.java
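The bands/buckets arrays above are the usual MinHash LSH banding knobs. As background (standard LSH analysis, not code from this repository): with b bands of r rows each, a pair with Jaccard similarity s collides in at least one band with probability 1 - (1 - s^r)^b. A self-contained sketch that evaluates this curve for one of the configurations above:

public class BandingCurve { // hypothetical helper, for intuition only
    public static void main(String[] args) {
        int b = 32, r = 4; // e.g. 32 bands of 4 rows = 128 minhash functions
        for (int i = 1; i <= 10; i++) {
            double s = i / 10.0;
            double p = 1 - Math.pow(1 - Math.pow(s, r), b);
            System.out.printf("s=%.1f -> P(candidate)=%.3f%n", s, p);
        }
    }
}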
213,907
package joins;

import utilities.Tokenizer;
import utilities.Pair;
import utilities.SimilarityFunction;
import gnu.trove.iterator.TIntIterator;
import gnu.trove.list.TIntList;
import gnu.trove.set.TIntSet;
import gnu.trove.set.hash.TIntHashSet;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.scify.jedai.datamodel.EntityProfile;
import org.scify.jedai.datamodel.IdDuplicates;
import org.scify.jedai.datareader.entityreader.EntitySerializationReader;
import org.scify.jedai.datareader.groundtruthreader.GtSerializationReader;
import utilities.RepresentationModel;

/**
 * @author Georgios
 */
public class SchemaAgnosticEJoin extends AbstractJoin {

    public static void main(String[] args) {
        String[] mainDirs = {"/home/gap2/Documents/blockingNN/data/schemaAgnostic/",
            "/home/gap2/Documents/blockingNN/data/preprocessedSA/"};
        boolean[] preprocessing = {true, true, true, false, true, false, true, true, true, false};
        float[] threshold = {0.82f, 0.26f, 0.08f, 0.58f, 0.16f, 0.34f, 0.49f, 0.28f, 0.35f, 0.15f};
        String[] datasetsD1 = {"restaurant1Profiles", "abtProfiles", "amazonProfiles", "dblpProfiles",
            "imdbProfilesNEW", "imdbProfilesNEW", "tmdbProfiles", "walmartProfiles", "dblpProfiles2", "imdbProfiles"};
        String[] datasetsD2 = {"restaurant2Profiles", "buyProfiles", "gpProfiles", "acmProfiles",
            "tmdbProfiles", "tvdbProfiles", "tvdbProfiles", "amazonProfiles2", "scholarProfiles", "dbpediaProfiles"};
        String[] groundtruthDirs = {"restaurantsIdDuplicates", "abtBuyIdDuplicates", "amazonGpIdDuplicates",
            "dblpAcmIdDuplicates", "imdbTmdbIdDuplicates", "imdbTvdbIdDuplicates", "tmdbTvdbIdDuplicates",
            "amazonWalmartIdDuplicates", "dblpScholarIdDuplicates", "moviesIdDuplicates"};
        SimilarityFunction[] simFunction = {SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM,
            SimilarityFunction.COSINE_SIM, SimilarityFunction.JACCARD_SIM, SimilarityFunction.COSINE_SIM,
            SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.JACCARD_SIM,
            SimilarityFunction.JACCARD_SIM, SimilarityFunction.COSINE_SIM};
        Tokenizer[] tokenizer = {Tokenizer.WHITESPACE, Tokenizer.CHARACTER_TRIGRAMS, Tokenizer.CHARACTER_FIVEGRAMS,
            Tokenizer.WHITESPACE, Tokenizer.CHARACTER_FIVEGRAMS_MULTISET, Tokenizer.CHARACTER_BIGRAMS,
            Tokenizer.WHITESPACE_MULTISET, Tokenizer.CHARACTER_TRIGRAMS_MULTISET,
            Tokenizer.CHARACTER_TRIGRAMS_MULTISET, Tokenizer.WHITESPACE};

        for (int datasetId = 0; datasetId < groundtruthDirs.length; datasetId++) {
            System.out.println("\n\nCurrent dataset\t:\t" + datasetId);

            // read source entities
            int dirId = preprocessing[datasetId] ? 1 : 0;
            String sourcePath = mainDirs[dirId] + datasetsD1[datasetId];
            EntitySerializationReader reader = new EntitySerializationReader(sourcePath);
            List<EntityProfile> sourceEntities = reader.getEntityProfiles();
            System.out.println("Source Entities: " + sourceEntities.size());

            // read target entities
            String targetPath = mainDirs[dirId] + datasetsD2[datasetId];
            reader = new EntitySerializationReader(targetPath);
            List<EntityProfile> targetEntities = reader.getEntityProfiles();
            System.out.println("Target Entities: " + targetEntities.size());

            // read ground-truth file
            String groundTruthPath = mainDirs[dirId] + groundtruthDirs[datasetId];
            GtSerializationReader gtReader = new GtSerializationReader(groundTruthPath);
            Set<IdDuplicates> gtDuplicates = gtReader.getDuplicatePairs(sourceEntities, targetEntities);
            System.out.println("GT Duplicates Entities: " + gtDuplicates.size());
            System.out.println();

            // first run
            int noOfEntities = sourceEntities.size();
            SOURCE_FREQUENCY = new int[noOfEntities];
            Map<String, TIntList> index = indexSource(tokenizer[datasetId], sourceEntities);

            int[] counters = new int[noOfEntities];
            int[] flags = new int[noOfEntities];
            for (int i = 0; i < noOfEntities; i++) {
                flags[i] = -1;
            }

            int targetId = 0;
            List<Pair> sims = new ArrayList<>(noOfEntities * targetEntities.size());
            for (EntityProfile e : targetEntities) {
                String query = RepresentationModel.getAttributeValue(e);
                Set<String> tokens = RepresentationModel.tokenizeEntity(query, tokenizer[datasetId]);

                final TIntSet candidates = new TIntHashSet();
                for (String token : tokens) {
                    final TIntList sourceEnts = index.get(token);
                    if (sourceEnts == null) {
                        continue;
                    }
                    for (TIntIterator tIterator = sourceEnts.iterator(); tIterator.hasNext();) {
                        int sourceId = tIterator.next();
                        candidates.add(sourceId);
                        if (flags[sourceId] != targetId) {
                            counters[sourceId] = 0;
                            flags[sourceId] = targetId;
                        }
                        counters[sourceId]++;
                    }
                }

                for (TIntIterator tIterator = candidates.iterator(); tIterator.hasNext();) {
                    int sourceId = tIterator.next();
                    float commonTokens = counters[sourceId];
                    float sim = 0;
                    switch (simFunction[datasetId]) {
                        case COSINE_SIM:
                            sim = commonTokens / (float) Math.sqrt(((float) SOURCE_FREQUENCY[sourceId]) * tokens.size());
                            break;
                        case DICE_SIM:
                            sim = 2 * commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size());
                            break;
                        case JACCARD_SIM:
                            sim = commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size() - commonTokens);
                            break;
                    }
                    if (threshold[datasetId] <= sim) {
                        sims.add(new Pair(sourceId, targetId));
                    }
                }
                targetId++;
            }

            double duplicates = 0;
            for (Pair jp : sims) {
                if (gtDuplicates.contains(new IdDuplicates(jp.getEntityId1(), jp.getEntityId2()))) {
                    duplicates++;
                }
            }
            System.out.println("Candidates\t:\t" + sims.size());
            System.out.println("Duplicates\t:\t" + duplicates);

            // run-time measurement
            double averageIndexingTime = 0;
            double averageQueryingTime = 0;
            for (int iteration = 0; iteration < ITERATIONS; iteration++) {
                long time1 = System.currentTimeMillis();

                noOfEntities = sourceEntities.size();
                SOURCE_FREQUENCY = new int[noOfEntities];
                index = indexSource(tokenizer[datasetId], sourceEntities);

                counters = new int[noOfEntities];
                flags = new int[noOfEntities];
                for (int i = 0; i < noOfEntities; i++) {
                    flags[i] = -1;
                }

                long time2 = System.currentTimeMillis();

                targetId = 0;
                sims = new ArrayList<>(noOfEntities * targetEntities.size());
                for (EntityProfile e : targetEntities) {
                    String query = RepresentationModel.getAttributeValue(e);
                    Set<String> tokens = RepresentationModel.tokenizeEntity(query, tokenizer[datasetId]);

                    final TIntSet candidates = new TIntHashSet();
                    for (String token : tokens) {
                        final TIntList sourceEnts = index.get(token);
                        if (sourceEnts == null) {
                            continue;
                        }
                        for (TIntIterator tIterator = sourceEnts.iterator(); tIterator.hasNext();) {
                            int sourceId = tIterator.next();
                            candidates.add(sourceId);
                            if (flags[sourceId] != targetId) {
                                counters[sourceId] = 0;
                                flags[sourceId] = targetId;
                            }
                            counters[sourceId]++;
                        }
                    }

                    for (TIntIterator tIterator = candidates.iterator(); tIterator.hasNext();) {
                        int sourceId = tIterator.next();
                        float commonTokens = counters[sourceId];
                        float sim = 0;
                        switch (simFunction[datasetId]) {
                            case COSINE_SIM:
                                sim = commonTokens / (float) Math.sqrt(((float) SOURCE_FREQUENCY[sourceId]) * tokens.size());
                                break;
                            case DICE_SIM:
                                sim = 2 * commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size());
                                break;
                            case JACCARD_SIM:
                                sim = commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size() - commonTokens);
                                break;
                        }
                        if (threshold[datasetId] <= sim) {
                            sims.add(new Pair(sourceId, targetId));
                        }
                    }
                    targetId++;
                }

                long time3 = System.currentTimeMillis();
                averageIndexingTime += time2 - time1;
                averageQueryingTime += time3 - time2;
            }
            System.out.println("Average indexing run-time\t:\t" + averageIndexingTime / ITERATIONS);
            System.out.println("Average querying run-time\t:\t" + averageQueryingTime / ITERATIONS);
        }
    }
}
gpapadis/ContinuousFilteringBenchmark
joins/src/joins/SchemaAgnosticEJoin.java
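The switch above derives every similarity from just three counts: the source record's token count (SOURCE_FREQUENCY), the query's token count, and their common-token count accumulated from the inverted index. Restated standalone (class name hypothetical, values illustrative):

public class TokenSimilarities {
    // c = common tokens, s = |source tokens|, t = |target tokens|
    static float cosine(float c, int s, int t)  { return c / (float) Math.sqrt((float) s * t); }
    static float dice(float c, int s, int t)    { return 2 * c / (s + t); }
    static float jaccard(float c, int s, int t) { return c / (s + t - c); }

    public static void main(String[] args) {
        // e.g. a source record with 8 tokens, a query with 6 tokens, 4 tokens in common
        System.out.println(cosine(4, 8, 6));  // ~0.577
        System.out.println(dice(4, 8, 6));    // ~0.571
        System.out.println(jaccard(4, 8, 6)); // 0.4
    }
}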
213,908
/* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Copyright (C) 2022 ScyllaDB * * Modified by ScyllaDB */ package com.datastax.driver.core; import static com.datastax.driver.core.SchemaElement.KEYSPACE; import com.datastax.driver.core.exceptions.BusyConnectionException; import com.datastax.driver.core.exceptions.ConnectionException; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.exceptions.ServerError; import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; import com.datastax.driver.core.utils.MoreFutures; import com.datastax.driver.core.utils.MoreObjects; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; class ControlConnection implements Connection.Owner { private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); private static final boolean EXTENDED_PEER_CHECK = SystemProperties.getBoolean("com.datastax.driver.EXTENDED_PEER_CHECK", true); private static final InetAddress bindAllAddress; static { try { bindAllAddress = InetAddress.getByAddress(new byte[4]); } catch (UnknownHostException e) { throw new RuntimeException(e); } } private static final String SELECT_PEERS = "SELECT * FROM system.peers"; private static final String SELECT_PEERS_V2 = "SELECT * FROM system.peers_v2"; private static final String SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"; private static final String SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version, host_id FROM system.peers"; private static final String SELECT_SCHEMA_LOCAL = "SELECT schema_version, host_id FROM system.local WHERE key='local'"; private static final VersionNumber _3_11 = VersionNumber.parse("3.11.0"); @VisibleForTesting final AtomicReference<Connection> connectionRef = new AtomicReference<Connection>(); private final Cluster.Manager cluster; private final AtomicReference<ListenableFuture<?>> reconnectionAttempt = new AtomicReference<ListenableFuture<?>>(); private volatile boolean isShutdown; // set to true initially, if ever fails will be set to false and 
peers table will be used // from here on out. private volatile boolean isPeersV2 = true; public ControlConnection(Cluster.Manager manager) { this.cluster = manager; } // Only for the initial connection. Does not schedule retries if it fails void connect() throws UnsupportedProtocolVersionException { if (isShutdown) return; List<Host> hosts = new ArrayList<Host>(cluster.metadata.getContactPoints()); // shuffle so that multiple clients with the same contact points don't all pick the same control // host Collections.shuffle(hosts); setNewConnection(reconnectInternal(hosts.iterator(), true)); } CloseFuture closeAsync() { // We don't have to be fancy here. We just set a flag so that we stop trying to reconnect (and // thus change the // connection used) and shutdown the current one. isShutdown = true; // Cancel any reconnection attempt in progress ListenableFuture<?> r = reconnectionAttempt.get(); if (r != null) r.cancel(false); Connection connection = connectionRef.get(); return connection == null ? CloseFuture.immediateFuture() : connection.closeAsync().force(); } Host connectedHost() { Connection current = connectionRef.get(); return (current == null) ? null : cluster.metadata.getHost(current.endPoint); } void triggerReconnect() { backgroundReconnect(0); } /** @param initialDelayMs if >=0, bypass the schedule and use this for the first call */ private void backgroundReconnect(long initialDelayMs) { if (isShutdown) return; // Abort if a reconnection is already in progress. This is not thread-safe: two threads might // race through this and both // schedule a reconnection; in that case AbstractReconnectionHandler knows how to deal with it // correctly. // But this cheap check can help us avoid creating the object unnecessarily. ListenableFuture<?> reconnection = reconnectionAttempt.get(); if (reconnection != null && !reconnection.isDone()) return; new AbstractReconnectionHandler( "Control connection", cluster.reconnectionExecutor, cluster.reconnectionPolicy().newSchedule(), reconnectionAttempt, initialDelayMs) { @Override protected Connection tryReconnect() throws ConnectionException { if (isShutdown) throw new ConnectionException(null, "Control connection was shut down"); try { return reconnectInternal(queryPlan(), false); } catch (NoHostAvailableException e) { throw new ConnectionException(null, e.getMessage()); } catch (UnsupportedProtocolVersionException e) { // reconnectInternal only propagate those if we've not decided on the protocol version // yet, // which should only happen on the initial connection and thus in connect() but never // here. 
throw new AssertionError(); } } @Override protected void onReconnection(Connection connection) { if (isShutdown) { connection.closeAsync().force(); return; } setNewConnection(connection); } @Override protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { if (isShutdown) return false; logger.error( "[Control connection] Cannot connect to any host, scheduling retry in {} milliseconds", nextDelayMs); return true; } @Override protected boolean onUnknownException(Exception e, long nextDelayMs) { if (isShutdown) return false; logger.error( String.format( "[Control connection] Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); return true; } }.start(); } private Iterator<Host> queryPlan() { return cluster.loadBalancingPolicy().newQueryPlan(null, Statement.DEFAULT); } private void signalError() { Connection connection = connectionRef.get(); if (connection != null) connection.closeAsync().force(); // If the error caused the host to go down, onDown might have already triggered a reconnect. // But backgroundReconnect knows how to deal with that. backgroundReconnect(0); } private void setNewConnection(Connection newConnection) { Host.statesLogger.debug("[Control connection] established to {}", newConnection.endPoint); newConnection.setOwner(this); Connection old = connectionRef.getAndSet(newConnection); if (old != null && !old.isClosed()) old.closeAsync().force(); } private Connection reconnectInternal(Iterator<Host> iter, boolean isInitialConnection) throws UnsupportedProtocolVersionException { Map<EndPoint, Throwable> errors = null; Host host = null; try { while (iter.hasNext()) { host = iter.next(); if (!host.convictionPolicy.canReconnectNow()) continue; try { return tryConnect(host, isInitialConnection); } catch (ConnectionException e) { errors = logError(host, e, errors, iter); if (isInitialConnection) { // Mark the host down right away so that we don't try it again during the initialization // process. // We don't call cluster.triggerOnDown because it does a bunch of other things we don't // want to do here (notify LBP, etc.) host.setDown(); } } catch (ExecutionException e) { errors = logError(host, e.getCause(), errors, iter); } catch (UnsupportedProtocolVersionException e) { // If it's the very first node we've connected to, rethrow the exception and // Cluster.init() will handle it. Otherwise, just mark this node in error. if (isInitialConnection) throw e; logger.debug("Ignoring host {}: {}", host, e.getMessage()); errors = logError(host, e, errors, iter); } catch (ClusterNameMismatchException e) { logger.debug("Ignoring host {}: {}", host, e.getMessage()); errors = logError(host, e, errors, iter); } } } catch (InterruptedException e) { // Sets interrupted status Thread.currentThread().interrupt(); // Indicates that all remaining hosts are skipped due to the interruption errors = logError(host, new DriverException("Connection thread interrupted"), errors, iter); while (iter.hasNext()) errors = logError( iter.next(), new DriverException("Connection thread interrupted"), errors, iter); } throw new NoHostAvailableException( errors == null ? 
Collections.<EndPoint, Throwable>emptyMap() : errors); } private static Map<EndPoint, Throwable> logError( Host host, Throwable exception, Map<EndPoint, Throwable> errors, Iterator<Host> iter) { if (errors == null) errors = new HashMap<EndPoint, Throwable>(); errors.put(host.getEndPoint(), exception); if (logger.isDebugEnabled()) { if (iter.hasNext()) { logger.debug( String.format("[Control connection] error on %s connection, trying next host", host), exception); } else { logger.debug( String.format("[Control connection] error on %s connection, no more host to try", host), exception); } } return errors; } private Connection tryConnect(Host host, boolean isInitialConnection) throws ConnectionException, ExecutionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { Connection connection = cluster.connectionFactory.open(host); String productType = connection.optionsQuery().get(); // If no protocol version was specified, set the default as soon as a connection succeeds (it's // needed to parse UDTs in refreshSchema) if (cluster.connectionFactory.protocolVersion == null) cluster.connectionFactory.protocolVersion = ProtocolVersion.DEFAULT; try { logger.trace("[Control connection] Registering for events"); List<ProtocolEvent.Type> evs = Arrays.asList( ProtocolEvent.Type.TOPOLOGY_CHANGE, ProtocolEvent.Type.STATUS_CHANGE, ProtocolEvent.Type.SCHEMA_CHANGE); connection.write(new Requests.Register(evs)); // We need to refresh the node list first so we know about the cassandra version of // the node we're connecting to. // This will create the token map for the first time, but it will be incomplete // due to the lack of keyspace information refreshNodeListAndTokenMap(connection, cluster, isInitialConnection, true); // refresh schema will also update the token map again, // this time with information about keyspaces logger.debug("[Control connection] Refreshing schema"); refreshSchema(connection, null, null, null, null, cluster); return connection; } catch (BusyConnectionException e) { connection.closeAsync().force(); throw new DriverInternalError("Newly created connection should not be busy"); } catch (InterruptedException e) { connection.closeAsync().force(); throw e; } catch (ConnectionException e) { connection.closeAsync().force(); throw e; } catch (ExecutionException e) { connection.closeAsync().force(); throw e; } catch (RuntimeException e) { connection.closeAsync().force(); throw e; } } public void refreshSchema( SchemaElement targetType, String targetKeyspace, String targetName, List<String> signature) throws InterruptedException { logger.debug( "[Control connection] Refreshing schema for {}{}", targetType == null ? "everything" : targetKeyspace, (targetType == KEYSPACE) ? "" : "." + targetName + " (" + targetType + ")"); try { Connection c = connectionRef.get(); // At startup, when we add the initial nodes, this will be null, which is ok if (c == null || c.isClosed()) return; refreshSchema(c, targetType, targetKeyspace, targetName, signature, cluster); } catch (ConnectionException e) { logger.debug( "[Control connection] Connection error while refreshing schema ({})", e.getMessage()); signalError(); } catch (ExecutionException e) { // If we're being shutdown during schema refresh, this can happen. That's fine so don't scare // the user. 
if (!isShutdown) logger.error("[Control connection] Unexpected error while refreshing schema", e); signalError(); } catch (BusyConnectionException e) { logger.debug("[Control connection] Connection is busy, reconnecting"); signalError(); } } static void refreshSchema( Connection connection, SchemaElement targetType, String targetKeyspace, String targetName, List<String> targetSignature, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { Host host = cluster.metadata.getHost(connection.endPoint); // Neither host, nor it's version should be null. But instead of dying if there is a race or // something, we can kind of try to infer // a Cassandra version from the protocol version (this is not full proof, we can have the // protocol 1 against C* 2.0+, but it's worth // a shot, and since we log in this case, it should be relatively easy to debug when if this // ever fail). VersionNumber cassandraVersion; if (host == null || host.getCassandraVersion() == null) { cassandraVersion = cluster.protocolVersion().minCassandraVersion(); logger.warn( "Cannot find Cassandra version for host {} to parse the schema, using {} based on protocol version in use. " + "If parsing the schema fails, this could be the cause", connection.endPoint, cassandraVersion); } else { cassandraVersion = host.getCassandraVersion(); } SchemaParser schemaParser; if (host == null) { schemaParser = SchemaParser.forVersion(cassandraVersion); } else { @SuppressWarnings("deprecation") VersionNumber dseVersion = host.getDseVersion(); // If using DSE, derive parser from DSE version. schemaParser = dseVersion == null ? SchemaParser.forVersion(cassandraVersion) : SchemaParser.forDseVersion(dseVersion); if (dseVersion != null && dseVersion.getMajor() == 6 && dseVersion.getMinor() < 8) { // DSE 6.0 and 6.7 report C* 4.0, but consider it C* 3.11 for schema parsing purposes cassandraVersion = _3_11; } } schemaParser.refresh( cluster.getCluster(), targetType, targetKeyspace, targetName, targetSignature, connection, cassandraVersion); } void refreshNodeListAndTokenMap() { Connection c = connectionRef.get(); // At startup, when we add the initial nodes, this will be null, which is ok if (c == null || c.isClosed()) return; try { refreshNodeListAndTokenMap(c, cluster, false, true); } catch (ConnectionException e) { logger.debug( "[Control connection] Connection error while refreshing node list and token map ({})", e.getMessage()); signalError(); } catch (ExecutionException e) { // If we're being shutdown during refresh, this can happen. That's fine so don't scare the // user. if (!isShutdown) logger.error( "[Control connection] Unexpected error while refreshing node list and token map", e); signalError(); } catch (BusyConnectionException e) { logger.debug("[Control connection] Connection is busy, reconnecting"); signalError(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); logger.debug( "[Control connection] Interrupted while refreshing node list and token map, skipping it."); } } private static EndPoint endPointForPeerHost( Row peersRow, EndPoint connectedEndPoint, Cluster.Manager cluster) { EndPoint endPoint = cluster.configuration.getPolicies().getEndPointFactory().create(peersRow); if (connectedEndPoint.equals(endPoint)) { // Some DSE versions were inserting a line for the local node in peers (with mostly null // values). This has been fixed, but if we detect that's the case, ignore it as it's not // really a big deal. 
logger.debug( "System.peers on node {} has a line for itself. " + "This is not normal but is a known problem of some DSE versions. " + "Ignoring the entry.", connectedEndPoint); return null; } return endPoint; } private Row fetchNodeInfo(Host host, Connection c) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { boolean isConnectedHost = c.endPoint.equals(host.getEndPoint()); if (isConnectedHost || host.getBroadcastSocketAddress() != null) { String query; if (isConnectedHost) { query = SELECT_LOCAL; } else { InetSocketAddress broadcastAddress = host.getBroadcastSocketAddress(); query = isPeersV2 ? SELECT_PEERS_V2 + " WHERE peer='" + broadcastAddress.getAddress().getHostAddress() + "' AND peer_port=" + broadcastAddress.getPort() : SELECT_PEERS + " WHERE peer='" + broadcastAddress.getAddress().getHostAddress() + "'"; } DefaultResultSetFuture future = new DefaultResultSetFuture(null, cluster.protocolVersion(), new Requests.Query(query)); c.write(future); Row row = future.get().one(); if (row != null) { return row; } else { InetSocketAddress address = host.getBroadcastSocketAddress(); // Don't include full address if port is 0. String addressToUse = address.getPort() != 0 ? address.toString() : address.getAddress().toString(); logger.debug( "Could not find peer with broadcast address {}, " + "falling back to a full system.peers scan to fetch info for {} " + "(this can happen if the broadcast address changed)", addressToUse, host); } } // We have to fetch the whole peers table and find the host we're looking for ListenableFuture<ResultSet> future = selectPeersFuture(c); for (Row row : future.get()) { UUID rowId = row.getUUID("host_id"); if (host.getHostId().equals(rowId)) { return row; } } return null; } /** @return whether we have enough information to bring the node back up */ boolean refreshNodeInfo(Host host) { Connection c = connectionRef.get(); // At startup, when we add the initial nodes, this will be null, which is ok if (c == null || c.isClosed()) return true; logger.debug("[Control connection] Refreshing node info on {}", host); try { Row row = fetchNodeInfo(host, c); if (row == null) { if (c.isDefunct()) { logger.debug("Control connection is down, could not refresh node info"); // Keep going with what we currently know about the node, otherwise we will ignore all // nodes // until the control connection is back up (which leads to a catch-22 if there is only // one) return true; } else { logger.warn( "No row found for host {} in {}'s peers system table. {} will be ignored.", host.getEndPoint(), c.endPoint, host.getEndPoint()); return false; } // Ignore hosts with a null rpc_address, as this is most likely a phantom row in // system.peers (JAVA-428). // Don't test this for the control host since we're already connected to it anyway, and we // read the info from system.local // which didn't have an rpc_address column (JAVA-546) until CASSANDRA-9436 } else if (!c.endPoint.equals(host.getEndPoint()) && !isValidPeer(row, true)) { return false; } updateInfo(host, row, cluster, false); return true; } catch (ConnectionException e) { logger.debug( "[Control connection] Connection error while refreshing node info ({})", e.getMessage()); signalError(); } catch (ExecutionException e) { // If we're being shutdown during refresh, this can happen. That's fine so don't scare the // user. 
if (!isShutdown) logger.debug("[Control connection] Unexpected error while refreshing node info", e); signalError(); } catch (BusyConnectionException e) { logger.debug("[Control connection] Connection is busy, reconnecting"); signalError(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); logger.debug("[Control connection] Interrupted while refreshing node info, skipping it."); } catch (Exception e) { logger.debug("[Control connection] Unexpected error while refreshing node info", e); signalError(); } // If we got an exception, always return true. Otherwise a faulty control connection would cause // reconnected hosts to be ignored permanently. return true; } // row can come either from the 'local' table or the 'peers' one private static void updateInfo( Host host, Row row, Cluster.Manager cluster, boolean isInitialConnection) { if (!row.isNull("data_center") || !row.isNull("rack")) updateLocationInfo( host, row.getString("data_center"), row.getString("rack"), isInitialConnection, cluster); String version = row.getString("release_version"); host.setVersion(version); // Before CASSANDRA-9436 local row did not contain any info about the host addresses. // After CASSANDRA-9436 (2.0.16, 2.1.6, 2.2.0 rc1) local row contains two new columns: // - broadcast_address // - rpc_address // After CASSANDRA-9603 (2.0.17, 2.1.8, 2.2.0 rc2) local row contains one more column: // - listen_address // After CASSANDRA-7544 (4.0) local row also contains: // - broadcast_port // - listen_port InetSocketAddress broadcastRpcAddress = null; if (row.getColumnDefinitions().contains("native_address")) { InetAddress nativeAddress = row.getInet("native_address"); int nativePort = row.getInt("native_port"); broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else if (row.getColumnDefinitions().contains("native_transport_address")) { // DSE 6.8 introduced native_transport_address and native_transport_port for the // listen address. Also included is native_transport_port_ssl (in case users // want to setup a different port for SSL and non-SSL conns). InetAddress nativeAddress = row.getInet("native_transport_address"); int nativePort = row.getInt("native_transport_port"); if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null && !row.isNull("native_transport_port_ssl")) { nativePort = row.getInt("native_transport_port_ssl"); } broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else if (row.getColumnDefinitions().contains("rpc_address")) { InetAddress rpcAddress = row.getInet("rpc_address"); broadcastRpcAddress = new InetSocketAddress(rpcAddress, cluster.connectionFactory.getPort()); } // Before CASSANDRA-9436, system.local doesn't have rpc_address, so this might be null. It's not // a big deal because we only use this for server events, and the control node doesn't receive // events for itself. host.setBroadcastRpcAddress(broadcastRpcAddress); InetSocketAddress broadcastSocketAddress = null; if (row.getColumnDefinitions().contains("peer")) { // system.peers int broadcastPort = row.getColumnDefinitions().contains("peer_port") ? row.getInt("peer_port") : 0; broadcastSocketAddress = new InetSocketAddress(row.getInet("peer"), broadcastPort); } else if (row.getColumnDefinitions().contains("broadcast_address")) { // system.local int broadcastPort = row.getColumnDefinitions().contains("broadcast_port") ? 
row.getInt("broadcast_port") : 0; broadcastSocketAddress = new InetSocketAddress(row.getInet("broadcast_address"), broadcastPort); } host.setBroadcastSocketAddress(broadcastSocketAddress); // in system.local only for C* versions >= 2.0.17, 2.1.8, 2.2.0 rc2, // not yet in system.peers as of C* 3.2 InetSocketAddress listenAddress = null; if (row.getColumnDefinitions().contains("listen_address")) { int listenPort = row.getColumnDefinitions().contains("listen_port") ? row.getInt("listen_port") : 0; listenAddress = new InetSocketAddress(row.getInet("listen_address"), listenPort); } host.setListenSocketAddress(listenAddress); if (row.getColumnDefinitions().contains("workload")) { String dseWorkload = row.getString("workload"); host.setDseWorkload(dseWorkload); } if (row.getColumnDefinitions().contains("graph")) { boolean isDseGraph = row.getBool("graph"); host.setDseGraphEnabled(isDseGraph); } if (row.getColumnDefinitions().contains("dse_version")) { String dseVersion = row.getString("dse_version"); host.setDseVersion(dseVersion); } host.setHostId(row.getUUID("host_id")); host.setSchemaVersion(row.getUUID("schema_version")); EndPoint endPoint = cluster.configuration.getPolicies().getEndPointFactory().create(row); if (endPoint != null) { host.setEndPoint(endPoint); } } private static void updateLocationInfo( Host host, String datacenter, String rack, boolean isInitialConnection, Cluster.Manager cluster) { if (MoreObjects.equal(host.getDatacenter(), datacenter) && MoreObjects.equal(host.getRack(), rack)) return; // If the dc/rack information changes for an existing node, we need to update the load balancing // policy. // For that, we remove and re-add the node against the policy. Not the most elegant, and assumes // that the policy will update correctly, but in practice this should work. if (!isInitialConnection) cluster.loadBalancingPolicy().onRemove(host); host.setLocationInfo(datacenter, rack); if (!isInitialConnection) cluster.loadBalancingPolicy().onAdd(host); } /** * Resolves peering information by doing the following: * * <ol> * <li>if <code>isPeersV2</code> is true, query the <code>system.peers_v2</code> table, * otherwise query <code>system.peers</code>. * <li>if <code>system.peers_v2</code> query fails, set <code>isPeersV2</code> to false and call * selectPeersFuture again. * </ol> * * @param connection connection to send request on. * @return result of peers query. */ private ListenableFuture<ResultSet> selectPeersFuture(final Connection connection) { if (isPeersV2) { DefaultResultSetFuture peersV2Future = new DefaultResultSetFuture( null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS_V2)); connection.write(peersV2Future); final SettableFuture<ResultSet> peersFuture = SettableFuture.create(); // if peers v2 query fails, query peers table instead. GuavaCompatibility.INSTANCE.addCallback( peersV2Future, new FutureCallback<ResultSet>() { @Override public void onSuccess(ResultSet result) { peersFuture.set(result); } @Override public void onFailure(Throwable t) { // Downgrade to system.peers if we get an invalid query error as this indicates the // peers_v2 table does not exist. // Also downgrade on server error with a specific error message (DSE 6.0.0 to 6.0.2 // with search enabled. 
if (t instanceof InvalidQueryException || (t instanceof ServerError && t.getMessage().contains("Unknown keyspace/cf pair (system.peers_v2)"))) { isPeersV2 = false; MoreFutures.propagateFuture(peersFuture, selectPeersFuture(connection)); } else { peersFuture.setException(t); } } }); return peersFuture; } else { DefaultResultSetFuture peersFuture = new DefaultResultSetFuture( null, cluster.protocolVersion(), new Requests.Query(SELECT_PEERS)); connection.write(peersFuture); return peersFuture; } } private void refreshNodeListAndTokenMap( final Connection connection, final Cluster.Manager cluster, boolean isInitialConnection, boolean logInvalidPeers) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { logger.debug("[Control connection] Refreshing node list and token map"); boolean metadataEnabled = cluster.configuration.getQueryOptions().isMetadataEnabled(); // Make sure we're up to date on nodes and tokens DefaultResultSetFuture localFuture = new DefaultResultSetFuture( null, cluster.protocolVersion(), new Requests.Query(SELECT_LOCAL)); ListenableFuture<ResultSet> peersFuture = selectPeersFuture(connection); connection.write(localFuture); String partitioner = null; Token.Factory factory = null; Map<Host, Set<Token>> tokenMap = new HashMap<Host, Set<Token>>(); // Update cluster name, DC and rack for the one node we are connected to Row localRow = localFuture.get().one(); if (localRow == null) { throw new IllegalStateException( String.format( "system.local is empty on %s, this should not happen", connection.endPoint)); } String clusterName = localRow.getString("cluster_name"); if (clusterName != null) cluster.metadata.clusterName = clusterName; partitioner = localRow.getString("partitioner"); if (partitioner != null) { cluster.metadata.partitioner = partitioner; factory = Token.getFactory(partitioner); } // During init, metadata.allHosts is still empty, the contact points are in // metadata.contactPoints. We need to copy them over, but we can only do it after having // called updateInfo, because we need to know the host id. // This is the same for peer hosts (see further down). Host controlHost = isInitialConnection ? cluster.metadata.getContactPoint(connection.endPoint) : cluster.metadata.getHost(connection.endPoint); // In theory host can't be null. However there is no point in risking a NPE in case we // have a race between a node removal and this. 
if (controlHost == null) { logger.debug( "Host in local system table ({}) unknown to us (ok if said host just got removed)", connection.endPoint); } else { updateInfo(controlHost, localRow, cluster, isInitialConnection); connection.endPoint = controlHost.getEndPoint(); if (metadataEnabled && factory != null) { Set<String> tokensStr = localRow.getSet("tokens", String.class); if (!tokensStr.isEmpty()) { Set<Token> tokens = toTokens(factory, tokensStr); tokenMap.put(controlHost, tokens); } } if (isInitialConnection) { cluster.metadata.addIfAbsent(controlHost); } } List<EndPoint> foundHosts = new ArrayList<EndPoint>(); List<String> dcs = new ArrayList<String>(); List<String> racks = new ArrayList<String>(); List<String> cassandraVersions = new ArrayList<String>(); List<InetSocketAddress> broadcastRpcAddresses = new ArrayList<InetSocketAddress>(); List<InetSocketAddress> broadcastAddresses = new ArrayList<InetSocketAddress>(); List<InetSocketAddress> listenAddresses = new ArrayList<InetSocketAddress>(); List<Set<Token>> allTokens = new ArrayList<Set<Token>>(); List<String> dseVersions = new ArrayList<String>(); List<Boolean> dseGraphEnabled = new ArrayList<Boolean>(); List<String> dseWorkloads = new ArrayList<String>(); List<UUID> hostIds = new ArrayList<UUID>(); List<UUID> schemaVersions = new ArrayList<UUID>(); for (Row row : peersFuture.get()) { if (!isValidPeer(row, logInvalidPeers)) continue; EndPoint endPoint = endPointForPeerHost(row, connection.endPoint, cluster); if (endPoint == null) { continue; } foundHosts.add(endPoint); dcs.add(row.getString("data_center")); racks.add(row.getString("rack")); cassandraVersions.add(row.getString("release_version")); InetSocketAddress broadcastRpcAddress; if (row.getColumnDefinitions().contains("native_address")) { InetAddress nativeAddress = row.getInet("native_address"); int nativePort = row.getInt("native_port"); broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else if (row.getColumnDefinitions().contains("native_transport_address")) { InetAddress nativeAddress = row.getInet("native_transport_address"); int nativePort = row.getInt("native_transport_port"); if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null && !row.isNull("native_transport_port_ssl")) { nativePort = row.getInt("native_transport_port_ssl"); } broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else { InetAddress rpcAddress = row.getInet("rpc_address"); broadcastRpcAddress = new InetSocketAddress(rpcAddress, cluster.connectionFactory.getPort()); } broadcastRpcAddresses.add(broadcastRpcAddress); int broadcastPort = row.getColumnDefinitions().contains("peer_port") ? row.getInt("peer_port") : 0; InetSocketAddress broadcastAddress = new InetSocketAddress(row.getInet("peer"), broadcastPort); broadcastAddresses.add(broadcastAddress); if (metadataEnabled && factory != null) { Set<String> tokensStr = row.getSet("tokens", String.class); Set<Token> tokens = null; if (!tokensStr.isEmpty()) { tokens = toTokens(factory, tokensStr); } allTokens.add(tokens); } if (row.getColumnDefinitions().contains("listen_address") && !row.isNull("listen_address")) { int listenPort = row.getColumnDefinitions().contains("listen_port") ? row.getInt("listen_port") : 0; InetSocketAddress listenAddress = new InetSocketAddress(row.getInet("listen_address"), listenPort); listenAddresses.add(listenAddress); } else { listenAddresses.add(null); } String dseWorkload = row.getColumnDefinitions().contains("workload") ? 
row.getString("workload") : null; dseWorkloads.add(dseWorkload); Boolean isDseGraph = row.getColumnDefinitions().contains("graph") ? row.getBool("graph") : null; dseGraphEnabled.add(isDseGraph); String dseVersion = row.getColumnDefinitions().contains("dse_version") ? row.getString("dse_version") : null; dseVersions.add(dseVersion); hostIds.add(row.getUUID("host_id")); schemaVersions.add(row.getUUID("schema_version")); } for (int i = 0; i < foundHosts.size(); i++) { Host peerHost = isInitialConnection ? cluster.metadata.getContactPoint(foundHosts.get(i)) : cluster.metadata.getHost(foundHosts.get(i)); boolean isNew = false; if (peerHost == null) { // We don't know that node, create the Host object but wait until we've set the known // info before signaling the addition. Host newHost = cluster.metadata.newHost(foundHosts.get(i)); newHost.setHostId(hostIds.get(i)); // we need an id to add to the metadata Host previous = cluster.metadata.addIfAbsent(newHost); if (previous == null) { peerHost = newHost; isNew = true; } else { peerHost = previous; isNew = false; } } if (dcs.get(i) != null || racks.get(i) != null) updateLocationInfo(peerHost, dcs.get(i), racks.get(i), isInitialConnection, cluster); if (cassandraVersions.get(i) != null) peerHost.setVersion(cassandraVersions.get(i)); if (broadcastRpcAddresses.get(i) != null) peerHost.setBroadcastRpcAddress(broadcastRpcAddresses.get(i)); if (broadcastAddresses.get(i) != null) peerHost.setBroadcastSocketAddress(broadcastAddresses.get(i)); if (listenAddresses.get(i) != null) peerHost.setListenSocketAddress(listenAddresses.get(i)); if (dseVersions.get(i) != null) peerHost.setDseVersion(dseVersions.get(i)); if (dseWorkloads.get(i) != null) peerHost.setDseWorkload(dseWorkloads.get(i)); if (dseGraphEnabled.get(i) != null) peerHost.setDseGraphEnabled(dseGraphEnabled.get(i)); peerHost.setHostId(hostIds.get(i)); if (schemaVersions.get(i) != null) { peerHost.setSchemaVersion(schemaVersions.get(i)); } if (metadataEnabled && factory != null && allTokens.get(i) != null) tokenMap.put(peerHost, allTokens.get(i)); if (!isNew && isInitialConnection) { // If we're at init and the node already existed, it means it was a contact point, so we // need to copy it over to the regular host list cluster.metadata.addIfAbsent(peerHost); } if (isNew && !isInitialConnection) { cluster.triggerOnAdd(peerHost); } } // Removes all those that seem to have been removed (since we lost the control connection) Set<EndPoint> foundHostsSet = new HashSet<EndPoint>(foundHosts); for (Host host : cluster.metadata.allHosts()) if (!host.getEndPoint().equals(connection.endPoint) && !foundHostsSet.contains(host.getEndPoint())) cluster.removeHost(host, isInitialConnection); if (metadataEnabled && factory != null && !tokenMap.isEmpty()) cluster.metadata.rebuildTokenMap(factory, tokenMap); } private static Set<Token> toTokens(Token.Factory factory, Set<String> tokensStr) { Set<Token> tokens = new LinkedHashSet<Token>(tokensStr.size()); for (String tokenStr : tokensStr) { tokens.add(factory.fromString(tokenStr)); } return tokens; } private boolean isValidPeer(Row peerRow, boolean logIfInvalid) { boolean isValid = peerRow.getColumnDefinitions().contains("host_id") && !peerRow.isNull("host_id"); if (isPeersV2) { isValid &= peerRow.getColumnDefinitions().contains("native_address") && peerRow.getColumnDefinitions().contains("native_port") && !peerRow.isNull("native_address") && !peerRow.isNull("native_port"); } else { isValid &= (peerRow.getColumnDefinitions().contains("rpc_address") && 
!peerRow.isNull("rpc_address")) || (peerRow.getColumnDefinitions().contains("native_transport_address") && peerRow.getColumnDefinitions().contains("native_transport_port") && !peerRow.isNull("native_transport_address") && !peerRow.isNull("native_transport_port")); } if (EXTENDED_PEER_CHECK) { isValid &= peerRow.getColumnDefinitions().contains("data_center") && !peerRow.isNull("data_center") && peerRow.getColumnDefinitions().contains("rack") && !peerRow.isNull("rack") && peerRow.getColumnDefinitions().contains("tokens") && !peerRow.isNull("tokens"); } if (!isValid && logIfInvalid) logger.warn( "Found invalid row in system.peers: {}. " + "This is likely a gossip or snitch issue, this host will be ignored.", formatInvalidPeer(peerRow)); return isValid; } // Custom formatting to avoid spamming the logs if 'tokens' is present and contains a gazillion // tokens private String formatInvalidPeer(Row peerRow) { StringBuilder sb = new StringBuilder("[peer=" + peerRow.getInet("peer")); if (isPeersV2) { formatMissingOrNullColumn(peerRow, "native_address", sb); formatMissingOrNullColumn(peerRow, "native_port", sb); } else { formatMissingOrNullColumn(peerRow, "native_transport_address", sb); formatMissingOrNullColumn(peerRow, "native_transport_port", sb); formatMissingOrNullColumn(peerRow, "native_transport_port_ssl", sb); formatMissingOrNullColumn(peerRow, "rpc_address", sb); } if (EXTENDED_PEER_CHECK) { formatMissingOrNullColumn(peerRow, "host_id", sb); formatMissingOrNullColumn(peerRow, "data_center", sb); formatMissingOrNullColumn(peerRow, "rack", sb); formatMissingOrNullColumn(peerRow, "tokens", sb); } sb.append("]"); return sb.toString(); } private static void formatMissingOrNullColumn(Row peerRow, String columnName, StringBuilder sb) { if (!peerRow.getColumnDefinitions().contains(columnName)) sb.append(", missing ").append(columnName); else if (peerRow.isNull(columnName)) sb.append(", ").append(columnName).append("=null"); } static boolean waitForSchemaAgreement(Connection connection, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { long start = System.nanoTime(); long elapsed = 0; int maxSchemaAgreementWaitSeconds = cluster.configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds(); while (elapsed < maxSchemaAgreementWaitSeconds * 1000) { if (checkSchemaAgreement(connection, cluster)) return true; // let's not flood the node too much Thread.sleep(200); elapsed = Cluster.timeSince(start, TimeUnit.MILLISECONDS); } return false; } private static boolean checkSchemaAgreement(Connection connection, Cluster.Manager cluster) throws InterruptedException, ExecutionException { DefaultResultSetFuture peersFuture = new DefaultResultSetFuture( null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_PEERS)); DefaultResultSetFuture localFuture = new DefaultResultSetFuture( null, cluster.protocolVersion(), new Requests.Query(SELECT_SCHEMA_LOCAL)); connection.write(peersFuture); connection.write(localFuture); Set<UUID> versions = new HashSet<UUID>(); Row localRow = localFuture.get().one(); if (localRow != null && !localRow.isNull("schema_version")) versions.add(localRow.getUUID("schema_version")); for (Row row : peersFuture.get()) { UUID hostId = row.getUUID("host_id"); if (row.isNull("schema_version")) continue; Host peer = cluster.metadata.getHost(hostId); if (peer != null && peer.isUp()) versions.add(row.getUUID("schema_version")); } logger.debug("Checking for schema agreement: versions are {}", versions); return 
versions.size() <= 1; } boolean checkSchemaAgreement() throws ConnectionException, BusyConnectionException, InterruptedException, ExecutionException { Connection connection = connectionRef.get(); return connection != null && !connection.isClosed() && checkSchemaAgreement(connection, cluster); } boolean isOpen() { Connection c = connectionRef.get(); return c != null && !c.isClosed(); } public void onUp(Host host) {} public void onAdd(Host host) {} public void onDown(Host host) { onHostGone(host); } public void onRemove(Host host) { onHostGone(host); } private void onHostGone(Host host) { Connection current = connectionRef.get(); if (current != null && current.endPoint.equals(host.getEndPoint())) { logger.debug( "[Control connection] {} is down/removed and it was the control host, triggering reconnect", current.endPoint); if (!current.isClosed()) current.closeAsync().force(); backgroundReconnect(0); } } @Override public void onConnectionDefunct(Connection connection) { if (connection == connectionRef.get()) backgroundReconnect(0); } }
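/*
 * Illustrative sketch, not part of the original source: how the control-connection logic
 * above surfaces through the driver's public API. Metadata.checkSchemaAgreement() funnels
 * into ControlConnection.checkSchemaAgreement() shown above; the contact point and the
 * keyspace/table names below are placeholders.
 *
 *   Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
 *   Session session = cluster.connect();
 *   session.execute("CREATE TABLE IF NOT EXISTS ks.t (k int PRIMARY KEY)");
 *   if (!cluster.getMetadata().checkSchemaAgreement()) {
 *     // schema changes are still propagating; not all live nodes report the same version yet
 *   }
 */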
scylladb/java-driver
driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
213,909
/* * Copyright (c) 2015-2021 by k3b. * * This file is part of AndroFotoFinder / #APhotoManager. * * This program is free software: you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/> */ package de.k3b.android.androFotoFinder.gallery.cursor; import android.annotation.TargetApi; import android.app.Activity; import android.app.Dialog; import android.app.Fragment; import android.app.LoaderManager; import android.content.ClipData; import android.content.ContentResolver; import android.content.Intent; import android.content.Loader; import android.database.Cursor; import android.net.Uri; import android.os.Build; import android.os.Bundle; import android.util.Log; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.Button; import android.widget.GridView; import android.widget.HorizontalScrollView; import android.widget.LinearLayout; import android.widget.ShareActionProvider; import android.widget.Toast; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import org.osmdroid.api.IGeoPoint; import java.io.File; import java.lang.ref.WeakReference; import java.util.ArrayList; import java.util.List; import de.k3b.LibGlobal; import de.k3b.android.androFotoFinder.AffUtils; import de.k3b.android.androFotoFinder.Common; import de.k3b.android.androFotoFinder.FotoGalleryActivity; import de.k3b.android.androFotoFinder.Global; import de.k3b.android.androFotoFinder.LockScreen; import de.k3b.android.androFotoFinder.OnGalleryInteractionListener; import de.k3b.android.androFotoFinder.PhotoAutoprocessingEditActivity; import de.k3b.android.androFotoFinder.PhotoPropertiesEditActivity; import de.k3b.android.androFotoFinder.R; import de.k3b.android.androFotoFinder.backup.BackupActivity; import de.k3b.android.androFotoFinder.directory.DirectoryGui; import de.k3b.android.androFotoFinder.directory.DirectoryPickerFragment; import de.k3b.android.androFotoFinder.imagedetail.ImageDetailActivityViewPager; import de.k3b.android.androFotoFinder.imagedetail.ImageDetailMetaDialogBuilder; import de.k3b.android.androFotoFinder.locationmap.GeoEditActivity; import de.k3b.android.androFotoFinder.locationmap.MapGeoPickerActivity; import de.k3b.android.androFotoFinder.queries.CursorLoaderWithException; import de.k3b.android.androFotoFinder.queries.FotoSql; import de.k3b.android.androFotoFinder.queries.FotoViewerParameter; import de.k3b.android.androFotoFinder.queries.Queryable; import de.k3b.android.androFotoFinder.tagDB.TagSql; import de.k3b.android.androFotoFinder.tagDB.TagTask; import de.k3b.android.androFotoFinder.tagDB.TagWorflow; import de.k3b.android.androFotoFinder.tagDB.TagsPickerFragment; import de.k3b.android.io.AndroidFileCommands; import de.k3b.android.util.OsUtils; import de.k3b.android.util.PhotoChangeNotifyer; import de.k3b.android.util.PhotoPropertiesMediaFilesScanner; import 
de.k3b.android.util.ResourceUtils; import de.k3b.android.widget.AboutDialogPreference; import de.k3b.android.widget.ActivityWithCallContext; import de.k3b.android.widget.Dialogs; import de.k3b.android.widget.FilePermissionActivity; import de.k3b.android.widget.UpdateTask; import de.k3b.database.QueryParameter; import de.k3b.geo.api.GeoPointDto; import de.k3b.geo.api.IGeoPointInfo; import de.k3b.geo.io.GeoUri; import de.k3b.io.Directory; import de.k3b.io.GalleryFilterParameter; import de.k3b.io.IDirectory; import de.k3b.io.IGalleryFilter; import de.k3b.io.ListUtils; import de.k3b.io.PhotoAutoprocessingDto; import de.k3b.io.StringUtils; import de.k3b.io.VISIBILITY; import de.k3b.io.collections.SelectedFiles; import de.k3b.io.collections.SelectedItemIds; import de.k3b.io.filefacade.FileFacade; import de.k3b.io.filefacade.IFile; import de.k3b.tagDB.Tag; /** * A {@link Fragment} to show ImageGallery content based on ContentProvider-Cursor. * Activities that contain this fragment must implement the * {@link OnGalleryInteractionListener} interface * to handle interaction events. * Use the {@link GalleryCursorFragment#newInstance} factory method to * create an instance of this fragment. * * States view-locked <=> view <=> view-multiselect * pick-single, pick-multible, pick-locked * * Menu: * if (picker-mode) menu_gallery_pick + menu_gallery_non_multiselect * if (locked) menu_locked * if (isMultiSelectionActive()) menu_gallery_multiselect_mode_all + menu_image_commands * if (view-non-select) menu_gallery_non_selected_only + menu_gallery_non_multiselect */ public class GalleryCursorFragment extends Fragment implements Queryable, DirectoryGui, Common, TagsPickerFragment.ITagsPicker, PhotoChangeNotifyer.PhotoChangedListener { private static final String INSTANCE_STATE_LAST_VISIBLE_POSITION = "lastVisiblePosition"; private static final String INSTANCE_STATE_SELECTED_ITEM_IDS = "selectedItems"; private static final String INSTANCE_STATE_OLD_TITLE = "oldTitle"; private static final String INSTANCE_STATE_SEL_ONLY = "selectedOnly"; private static final String INSTANCE_STATE_LOADER_ID = "loaderID"; private static final int MODE_VIEW_PICKER_NONE = 0; private static final int MODE_VIEW_PICK_SINGLE = 1; private static final int MODE_VIEW_PICK_MULTIBLE = 2; private static final boolean SUPPORT_MULTISELECTION = (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN); private static int nextLoaderID = 100; private int loaderID = -1; private boolean locked = false; // if != Global.locked : must update menu private HorizontalScrollView mParentPathBarScroller; private LinearLayout mParentPathBar; // private HorizontalScrollView mChildPathBarScroller; private LinearLayout mChildPathBar; // for debugging private static int id = 1; private String mDebugPrefix; private GridView mGalleryView; private ShareActionProvider mShareActionProvider; long mUpdateId = FotoSql.getMediaDBApi().getCurrentUpdateId(); private GalleryCursorAdapterFromArray mAdapter = null; private OnGalleryInteractionListener mGalleryListener; private QueryParameter mGalleryContentQuery; private DirectoryPickerFragment.OnDirectoryInteractionListener mDirectoryListener; private int mLastVisiblePosition = -1; private TagUpdateTask mTagWorflow = null; // multi selection support private final SelectedItemIds mSelectedItemIds = new SelectedItemIds(); private String mOldAppTitleBeforeMultiselectionStarts = null; private boolean mShowSelectedOnly = false; private boolean isMultiSelectionMenuVisible = false; private final AndroidFileCommands mFileCommands = 
new LocalFileCommands();
    private MenuItem mShareOnlyToggle;
    private MenuItem mMenuRemoveAllSelected = null;

    /* false: prevent showing error message again */
    private boolean mNoShareError = true;

    /** true pick geo; false pick image */
    private boolean mModePickGeoElsePickImaage = false;

    /** one of the MODE_VIEW_PICKER_XXXX */
    private int mMode = MODE_VIEW_PICKER_NONE;

    /** not null while mDestDirPicker is open */
    private MoveOrCopyDestDirPicker mDestDirPicker = null;

    /** not null while tag picker is open */
    private WeakReference<TagsPickerFragment> mTagPickerDialog = null;

    /** not null while background task is active */
    private static UpdateTask exifUpdate = null;

    /**************** construction ******************/

    /**
     * Use this factory method to create a new instance of
     * this fragment using the provided parameters.
     *
     * @return A new instance of fragment GalleryCursorFragment.
     */
    // TODO: Rename and change types and number of parameters
    public static GalleryCursorFragment newInstance() {
        GalleryCursorFragment fragment = new GalleryCursorFragment();
        Bundle args = new Bundle();
        fragment.setArguments(args);
        return fragment;
    }

    public SelectedItemIds getSelectedItemIds() {
        return mSelectedItemIds;
    }

    public void onNotifyPhotoChanged() {
        requeryIfDataHasChanged();
    }

    /** incremented every time a new cursor/query is generated */
    private int mRequeryInstanceCount = 0;

    protected LocalCursorLoader mCurorLoader = null;

    private void requeryIfDataHasChanged() {
        if (FotoSql.getMediaDBApi().mustRequery(mUpdateId)) {
            requery("requeryIfDataHasChanged");
        }
    }

    private void requery(String why) {
        mUpdateId = FotoSql.getMediaDBApi().getCurrentUpdateId();
        if (Global.debugEnabled) {
            Log.i(Global.LOG_CONTEXT, mDebugPrefix + why + " requery\n" +
                    ((mGalleryContentQuery != null) ? mGalleryContentQuery.toSqlString() : null));
        }
        if (mGalleryContentQuery != null) {
            // query has been initialized
            if (mCurorLoader == null) {
                mCurorLoader = new LocalCursorLoader();
                getLoaderManager().initLoader(loaderID, null, mCurorLoader);
            } else {
                getLoaderManager().restartLoader(loaderID, null, this.mCurorLoader);
            }
        }
    }

    private boolean cmdMoveOrCopyWithDestDirPicker(final boolean move, final String lastCopyToPath,
                                                   final SelectedFiles fotos, final CharSequence title) {
        final FilePermissionActivity activity = (FilePermissionActivity) getActivity();

        if (fotos != null) {
            File missingRoot = activity.getMissingRootDirFileOrNull(
                    "cmdMoveOrCopyWithDestDirPicker", fotos.getFiles());
            if (missingRoot != null) {
                // ask for needed permissions
                activity.requestRootUriDialog(missingRoot, title,
                        new FilePermissionActivity.IOnDirectoryPermissionGrantedHandler() {
                            @Override
                            public void afterGrant(FilePermissionActivity activity) {
                                // does not work in fragment
                                mFileCommands.setContext(activity);
                                if (FileFacade.debugLogSAFFacade) {
                                    Log.i(FileFacade.LOG_TAG, this.getClass().getSimpleName()
                                            + ": afterGrant " + activity + "-" + this);
                                }
                                cmdMoveOrCopyWithDestDirPicker(move, lastCopyToPath, fotos, title);
                            }
                        });
                return false;
            }
        }

        if (AndroidFileCommands.canProcessFile(this.getActivity(), false)) {
            PhotoChangeNotifyer.setPhotoChangedListener(this);
            mDestDirPicker = MoveOrCopyDestDirPicker.newInstance(move, fotos);

            mDestDirPicker.defineDirectoryNavigation(OsUtils.getRootOSDirectory(null),
                    (move) ?
FotoSql.QUERY_TYPE_GROUP_MOVE : FotoSql.QUERY_TYPE_GROUP_COPY, lastCopyToPath); if (!LockScreen.isLocked(this.getActivity())) { mDestDirPicker.setContextMenuId(R.menu.menu_context_pick_osdir); } mDestDirPicker.setBaseQuery(getCurrentQuery()); mDestDirPicker.show(activity.getFragmentManager(), "osdir"); } return false; } public GalleryCursorFragment() { mDebugPrefix = "GalleryCursorFragment#" + (id++) + " "; Global.debugMemory(mDebugPrefix, "ctor"); // Required empty public constructor if (Global.debugEnabled) { Log.i(Global.LOG_CONTEXT, mDebugPrefix + "()"); } } /**************** live-cycle ******************/ @Override public void onCreate(Bundle savedInstanceState) { Global.debugMemory(mDebugPrefix, "onCreate"); setHasOptionsMenu(true); this.mShareActionProvider = new ShareActionProvider(this.getActivity()); if (savedInstanceState != null) { this.mLastVisiblePosition = savedInstanceState.getInt(INSTANCE_STATE_LAST_VISIBLE_POSITION, this.mLastVisiblePosition); this.loaderID = savedInstanceState.getInt(INSTANCE_STATE_LOADER_ID, this.loaderID); String oldIds = mSelectedItemIds.toString(); mSelectedItemIds.clear(); mSelectedItemIds.parse(savedInstanceState.getString(INSTANCE_STATE_SELECTED_ITEM_IDS, oldIds)); this.mOldAppTitleBeforeMultiselectionStarts = savedInstanceState.getString(INSTANCE_STATE_OLD_TITLE, this.mOldAppTitleBeforeMultiselectionStarts); this.mShowSelectedOnly = savedInstanceState.getBoolean(INSTANCE_STATE_SEL_ONLY, this.mShowSelectedOnly); if (isMultiSelectionActive()) { mMustReplaceMenue = true; getActivity().invalidateOptionsMenu(); } } else { // first creation of new instance loaderID = nextLoaderID++; } if (!mDebugPrefix.contains("@")) { mDebugPrefix += "@" + loaderID + " "; } super.onCreate(savedInstanceState); } @Override public void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); mLastVisiblePosition = mGalleryView.getLastVisiblePosition(); outState.putInt(INSTANCE_STATE_LAST_VISIBLE_POSITION, mLastVisiblePosition); outState.putInt(INSTANCE_STATE_LOADER_ID, loaderID); outState.putString(INSTANCE_STATE_SELECTED_ITEM_IDS, this.mSelectedItemIds.toString()); outState.putString(INSTANCE_STATE_OLD_TITLE, this.mOldAppTitleBeforeMultiselectionStarts); outState.putBoolean(INSTANCE_STATE_SEL_ONLY, this.mShowSelectedOnly); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { Global.debugMemory(mDebugPrefix, "onCreateView"); // Inflate the layout for this fragment View result = inflater.inflate(R.layout.fragment_gallery, container, false); mGalleryView = (GridView) result.findViewById(R.id.gridView); Activity parent = this.getActivity(); // mAdapter = new GalleryCursorAdapter(parent, mSelectedItems, mDebugPrefix); Intent intent = (parent == null) ? null : parent.getIntent(); if (Global.debugEnabled && (intent != null)){ Log.d(Global.LOG_CONTEXT, mDebugPrefix + "onCreateView " + intent.toUri(Intent.URI_INTENT_SCHEME)); } String action = (intent != null) ? intent.getAction() : null; if ((action != null) && ((Intent.ACTION_PICK.compareTo(action) == 0) || (Intent.ACTION_GET_CONTENT.compareTo(action) == 0))) { this.mMode = (intent.getBooleanExtra(Intent.EXTRA_ALLOW_MULTIPLE,false)) ? MODE_VIEW_PICK_MULTIBLE : MODE_VIEW_PICK_SINGLE; mMustReplaceMenue = true; String schema = intent.getScheme(); mModePickGeoElsePickImaage = ((schema != null) && ("geo".compareTo(schema) == 0)); } String path = (intent == null) ? 
null : intent.getStringExtra(AffUtils.EXTRA_SELECTED_ITEM_PATHS); String filterValue = ((intent != null) && (path == null)) ? intent.getStringExtra(EXTRA_FILTER) : null; IGalleryFilter filter = (filterValue != null) ? GalleryFilterParameter.parse(filterValue, new GalleryFilterParameter()) : null; if (filter != null) { path = filter.getPath(); } mAdapter = new GalleryCursorAdapterFromArray(parent, mSelectedItemIds, mDebugPrefix, FileFacade.convert(mDebugPrefix, path)); mGalleryView.setAdapter(mAdapter); mGalleryView.setLongClickable(true); mGalleryView.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() { @Override public boolean onItemLongClick(AdapterView<?> parent, View v, int position, long id) { return onGalleryLongImageClick((GalleryCursorAdapter.GridCellViewHolder) v.getTag(), position); } }); mShareActionProvider.setOnShareTargetSelectedListener(new ShareActionProvider.OnShareTargetSelectedListener() { @Override public boolean onShareTargetSelected(ShareActionProvider source, Intent intent) { if (Global.clearSelectionAfterCommand) { multiSelectionCancel(); } return false; } }); mGalleryView.setOnItemClickListener(new AdapterView.OnItemClickListener() { public void onItemClick(AdapterView<?> parent, View v, int position, long id) { onGalleryImageClick((GalleryCursorAdapter.GridCellViewHolder) v.getTag(), position); } }); this.mParentPathBar = (LinearLayout) result.findViewById(R.id.parent_owner); this.mParentPathBarScroller = (HorizontalScrollView) result.findViewById(R.id.parent_scroller); this.mChildPathBar = (LinearLayout) result.findViewById(R.id.child_owner); // this.mChildPathBarScroller = (HorizontalScrollView) result.findViewById(R.id.child_scroller); reloadDirGuiIfAvailable("onCreateView"); if (!mAdapter.isInArrayMode()) { fixMediaDatabase(); } requery("onCreateView"); updateExifUpdateTask(this.getActivity()); return result; } @Override public void onAttach(Activity activity) { Global.debugMemory(mDebugPrefix, "onAttach"); super.onAttach(activity); mFileCommands.setContext(activity); mFileCommands.setLogFilePath(mFileCommands.getDefaultLogFile()); updateExifUpdateTask(activity); if (Global.debugEnabledMemory) { Log.d(Global.LOG_CONTEXT, mDebugPrefix + " - onAttach cmd (" + MoveOrCopyDestDirPicker.sFileCommands + ") => (" + mFileCommands + ")"); } MoveOrCopyDestDirPicker.sFileCommands = mFileCommands; try { mGalleryListener = (OnGalleryInteractionListener) activity; } catch (ClassCastException e) { throw new ClassCastException(activity.toString() + " must implement OnGalleryInteractionListener"); } try { mDirectoryListener = (DirectoryPickerFragment.OnDirectoryInteractionListener) activity; } catch (ClassCastException e) { throw new ClassCastException(activity.toString() + " must implement DirectoryPickerFragment.OnDirectoryInteractionListener"); } } @Override public void onResume() { Global.debugMemory(mDebugPrefix, "onResume"); super.onResume(); // this may destroy an other instance of gallery(fragment) final boolean locked = LockScreen.isLocked(this.getActivity()); if (this.locked != locked) { this.locked = locked; mMustReplaceMenue = true; getActivity().invalidateOptionsMenu(); } if (Global.debugEnabledMemory) { Log.d(Global.LOG_CONTEXT, mDebugPrefix + " - onResume cmd (" + MoveOrCopyDestDirPicker.sFileCommands + ") => (" + mFileCommands + ")"); } // workaround fragment lifecycle is newFragment.attach oldFragment.detach. 
// this makes sure that the visible fragment has commands
        MoveOrCopyDestDirPicker.sFileCommands = mFileCommands;

        requeryIfDataHasChanged();
    }

    /**
     * Call back from sub-activities.<br/>
     * Process Change StartTime (longpress start), Select StopTime before stop
     * (longpress stop) or filter change for detailReport
     */
    @Override
    public void onActivityResult(final int requestCode, final int resultCode, final Intent intent) {
        super.onActivityResult(requestCode, resultCode, intent);
        if (mDestDirPicker != null) mDestDirPicker.onActivityResult(requestCode, resultCode, intent);

        final boolean locked = LockScreen.isLocked(this.getActivity());
        if (this.locked != locked) {
            this.locked = locked;
            mMustReplaceMenue = true;
            getActivity().invalidateOptionsMenu();
        }

        if (resultCode == Activity.RESULT_OK) {
            switch (requestCode) {
                case R.id.cmd_rename_multible:
                    onRenameMultible(PhotoAutoprocessingEditActivity.getAutoprocessingData(intent),
                            AffUtils.getSelectedFiles(intent));
                    break;
            }
        }
    }

    @Override
    public void onDetach() {
        updateExifUpdateTask(null);
        Global.debugMemory(mDebugPrefix, "onDetach");
        super.onDetach();
        mGalleryListener = null;
        mDirectoryListener = null;
        mFileCommands.setContext(null);

        // kill this instance only if no other instance is active
        if (MoveOrCopyDestDirPicker.sFileCommands == mFileCommands) {
            if (Global.debugEnabledMemory) {
                Log.d(Global.LOG_CONTEXT, mDebugPrefix + " - onDetach cmd ("
                        + MoveOrCopyDestDirPicker.sFileCommands + ") => (null) ");
            }
            MoveOrCopyDestDirPicker.sFileCommands = null;
        } else if (Global.debugEnabledMemory) {
            Log.d(Global.LOG_CONTEXT, mDebugPrefix + " - onDetach cmd [ignore] ("
                    + MoveOrCopyDestDirPicker.sFileCommands + ")");
        }
    }

    @Override
    public void onPause() {
        super.onPause();
        destroyLoaderIfFinishing("onPause");
    }

    @Override
    public void onDestroy() {
        updateExifUpdateTask(null);
        Global.debugMemory(mDebugPrefix, "before onDestroy");
        mDestDirPicker = null;
        destroyLoaderIfFinishing("onDestroy");
        mFileCommands.closeLogFile();
        mFileCommands.closeAll();
        mGalleryContentQuery = null;
        mAdapter = null;
        System.gc();
        Global.debugMemory(mDebugPrefix, "after onDestroy");
        // RefWatcher refWatcher = AndroFotoFinderApp.getRefWatcher(getActivity());
        // refWatcher.watch(this);
        super.onDestroy();
    }

    private void destroyLoaderIfFinishing(String context) {
        if ((getActivity() != null) && (getActivity().isFinishing())) {
            destroyLoader(context);
        }
    }

    //!!! TODO destroyLoader for performance reasons while scanner is active to remove
    private void destroyLoader(String context) {
        if (loaderID != -1) {
            getLoaderManager().destroyLoader(loaderID);
            if ((Global.debugEnabled) && (mCurorLoader != null)) {
                Log.d(Global.LOG_CONTEXT, mDebugPrefix + context
                        + " - releasing mCurorLoader" + mCurorLoader.getDebugContext());
            }
            mCurorLoader = null;
            if (loaderID == (nextLoaderID - 1)) nextLoaderID--;
            loaderID = -1;
        }
    }

    /**
     * interface Queryable: Owning activity tells fragment to change its content:
     * Initiates a database requery in the background
     */
    @Override
    public void requery(Activity context, QueryParameter parameters, String why) {
        this.mGalleryContentQuery = parameters;
        FotoSql.setWhereVisibility(this.mGalleryContentQuery, VISIBILITY.DEFAULT);
        requery(why);
    }

    private QueryParameter getCurrentQuery() {
        return getCurrentQuery(mGalleryContentQuery);
    }

    private QueryParameter getCurrentQuery(QueryParameter rootQuery) {
        QueryParameter selFilter = new QueryParameter(rootQuery);
        if (mShowSelectedOnly) {
            FotoSql.setWhereSelectionPks(selFilter, mSelectedItemIds);
        }
        selFilter.replaceFrom(FotoSql.SQL_TABLE_EXTERNAL_CONTENT_URI_FILE.toString());
        return selFilter;
    }

    @Override
    public String toString() {
        return mDebugPrefix + this.mAdapter;
    }

    /* --********************** local helper ****************************************** - */

    /** an Image in the FotoGallery was clicked */
    private void onGalleryImageClick(final GalleryCursorAdapter.GridCellViewHolder holder, int position) {
        if ((!multiSelectionHandleClick(holder)) && (mGalleryListener != null)) {
            if (holder.filter != null) {
                QueryParameter subGalleryQuery = new QueryParameter(FotoSql.queryDetail);
                subGalleryQuery.addWhere(holder.filter);
                onOpenChildGallery("Image#filter", subGalleryQuery);
                return;
            }
            long imageID = holder.imageID;
            mGalleryListener.onGalleryImageClick(imageID, getUri(imageID), position);
        }
    }

    private Uri getUri(long imageID) {
        return mAdapter.getUri(imageID);
    }

    private void onOpenChildGallery(String debugContext, QueryParameter subGalleryQuery) {
        if (Global.debugEnabledSql) {
            Log.i(Global.LOG_CONTEXT, "Exec child gallery " + debugContext
                    + "\n\t" + subGalleryQuery.toSqlString());
        }
        FotoGalleryActivity.showActivity("[8]" + debugContext, getActivity(), subGalleryQuery, 0);
    }

    /****************** path navigation *************************/

    private IDirectory mDirectoryRoot = null;
    private int mDirQueryID = 0;
    private String mCurrentPath = null;

    /** Defines Directory Navigation */
    @Override
    public void defineDirectoryNavigation(IDirectory root, int dirTypId, String initialAbsolutePath) {
        mDirectoryRoot = root;
        mDirQueryID = dirTypId;
        navigateTo(initialAbsolutePath);
    }

    /** Set current selection to absolutePath */
    @Override
    public void navigateTo(String absolutePath) {
        if (Global.debugEnabled) {
            Log.i(Global.LOG_CONTEXT, mDebugPrefix + " navigateTo : " + absolutePath);
        }
        mCurrentPath = absolutePath;
        reloadDirGuiIfAvailable("navigateTo " + absolutePath);
        // requeryGallery(); done by owning activity
    }

    /** path/directory was clicked */
    private final View.OnClickListener onPathButtonClickListener = new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            onPathButtonClick((IDirectory) v.getTag());
        }
    };

    private Button createPathButton(IDirectory currentDir) {
        Button result = new Button(getActivity());
        result.setTag(currentDir);
        result.setText(getDirectoryDisplayText(null, currentDir,
                (FotoViewerParameter.includeSubItems) ? Directory.OPT_SUB_ITEM : Directory.OPT_ITEM));
        result.setOnClickListener(onPathButtonClickListener);
        return result;
    }

    private void reloadDirGuiIfAvailable(String why) {
        if ((mDirectoryRoot != null) && (mCurrentPath != null) && (mParentPathBar != null)) {
            if (Global.debugEnabled) {
                Log.i(Global.LOG_CONTEXT, mDebugPrefix + " reloadDirGuiIfAvailable : " + why);
            }

            mParentPathBar.removeAllViews();
            mChildPathBar.removeAllViews();

            IDirectory selectedChild = mDirectoryRoot.find(mCurrentPath);
            if (selectedChild == null) selectedChild = mDirectoryRoot;

            Button first = null;
            IDirectory current = selectedChild;
            while (current.getParent() != null) {
                Button button = createPathButton(current);
                // add parent to the left of child
                // gui order root/../child.parent/child
                mParentPathBar.addView(button, 0);
                if (first == null) first = button;
                current = current.getParent();
            }

            // scroll to the right where the deepest child is
            if (first != null) mParentPathBarScroller.requestChildFocus(mParentPathBar, first);

            IDirectory[] children = selectedChild.getChildDirs();
            if (children != null) {
                for (IDirectory child : children) {
                    Button button = createPathButton(child);
                    mChildPathBar.addView(button);
                }
            }
        }
    }

    /** path/directory was clicked */
    private void onPathButtonClick(IDirectory newSelection) {
        if ((mDirectoryListener != null) && (newSelection != null)) {
            mCurrentPath = newSelection.getAbsolute();
            mDirectoryListener.onDirectoryPick(mCurrentPath, this.mDirQueryID);
        }
    }

    /** get tree display text */
    private static String getDirectoryDisplayText(String prefix, IDirectory directory, int options) {
        StringBuilder result = new StringBuilder();
        if (prefix != null) result.append(prefix);
        result.append(directory.getRelPath()).append(" ");
        Directory.appendCount(result, directory, options);
        return result.toString();
    }

    /********************** Multi selection support ***********************************************************/

    private boolean mMustReplaceMenue = false;

    /** starts multiselection */
    private boolean onGalleryLongImageClick(final GalleryCursorAdapter.GridCellViewHolder holder, int position) {
        if (!LockScreen.isLocked(this.getActivity())) {
            if (!isMultiSelectionActive()) {
                startMultiSelectionMode();

                mSelectedItemIds.add(holder.imageID);
                holder.icon.setVisibility(View.VISIBLE);
                multiSelectionUpdateActionbar("Start multisel");
            } else {
                // in gallery mode long click is view image
                ImageDetailActivityViewPager.showActivity("[9]", this.getActivity(),
                        getUri(holder.imageID), position, getCurrentQuery(),
                        ImageDetailActivityViewPager.ACTIVITY_ID);
            }
            return true;
        }
        return false; // no multi-selection in lock mode
    }

    private void startMultiSelectionMode() {
        // multi selection not active yet: start multi selection
        if (mOldAppTitleBeforeMultiselectionStarts == null) {
            mOldAppTitleBeforeMultiselectionStarts = getActivity().getTitle().toString();
        }
        mMustReplaceMenue = true;
        mShowSelectedOnly = false;
        getActivity().invalidateOptionsMenu();
    }

    /**
     * Different menu modes
     *
     * normal name searchbar-icon folder map (tags) (filter) menu
     * R.menu.menu_gallery_non_selected_only R.menu.menu_gallery_non_multiselect
     *
     * selected selected cancel selected-only share (save) menu
     * (Filter not available; this.isMultiSelectionActive();
     * R.menu.menu_gallery_multiselect_mode_all R.menu.menu_image_commands
     *
     * locked name lock folder map menu
     * (no multiselection, no base-filters)
     * (this.locked; R.menu.menu_locked)
     *
     * searchbar bar cancel-search-bar (folder) (map) (tags) (filter) menu
     *
     * picker-locked
     * R.menu.menu_gallery_pick R.menu.menu_locked
     *
picker-non-locked selected ok cancel filter settings * * R.menu.menu_gallery_pick R.menu.menu_gallery_non_multiselect * * (xxxx) with "IFROOM" (in wide screen only) * searchbarmode is like "normal" where there is no "IFROOM" on no-searchbar items * * @param menu */ @Override public void onPrepareOptionsMenu(Menu menu) { this.isMultiSelectionMenuVisible = false; super.onPrepareOptionsMenu(menu); final boolean locked = LockScreen.isLocked(this.getActivity()); if (mMustReplaceMenue || (locked != this.locked)) { MenuInflater inflater = getActivity().getMenuInflater(); this.locked = locked; mMustReplaceMenue = false; menu.clear(); mMenuRemoveAllSelected = null; if (mMode == MODE_VIEW_PICKER_NONE) { // if (locked) { // view-locked if (isMultiSelectionActive()) { clearSelections(); } inflater.inflate(R.menu.menu_locked, menu); LockScreen.removeDangerousCommandsFromMenu(menu); AboutDialogPreference.onPrepareOptionsMenu(getActivity(), menu); } else if (this.isMultiSelectionActive()) { // view-multiselect inflater.inflate(R.menu.menu_gallery_multiselect_mode_all, menu); this.isMultiSelectionMenuVisible = true; if (Global.allowRenameMultible) { inflater.inflate(R.menu.menu_gallery_multiselect_rename, menu); } mShareOnlyToggle = menu.findItem(R.id.cmd_selected_only); if (mShowSelectedOnly && (mShareOnlyToggle != null)) { mShareOnlyToggle.setIcon(android.R.drawable.checkbox_on_background); mShareOnlyToggle.setChecked(true); } MenuItem shareItem = menu.findItem(R.id.menu_item_share); shareItem.setActionProvider(mShareActionProvider); // multiSelectionUpdateShareIntent(); inflater.inflate(R.menu.menu_image_commands, menu); multiSelectionUpdateShareIntent(); Global.fixMenu(getActivity(), menu); } else { // view-non-select inflater.inflate(R.menu.menu_gallery_non_selected_only, menu); inflater.inflate(R.menu.menu_gallery_non_multiselect, menu); Global.fixMenu(getActivity(), menu); } } else { // picker mode inflater.inflate(R.menu.menu_gallery_pick, menu); if (!locked) { inflater.inflate(R.menu.menu_gallery_non_multiselect, menu); } else { // pick-locked LockScreen.removeDangerousCommandsFromMenu(menu); } AboutDialogPreference.onPrepareOptionsMenu(getActivity(), menu); } mMenuRemoveAllSelected = menu.findItem(R.id.cmd_selection_remove_all); } updateSelectionCount(); } protected void updateSelectionCount() { if (mMenuRemoveAllSelected != null) { mMenuRemoveAllSelected.setVisible(isMultiSelectionActive()); } } @Override public boolean onOptionsItemSelected(MenuItem menuItem) { if (LockScreen.onOptionsItemSelected(this.getActivity(), menuItem)) { this.mMustReplaceMenue = true; multiSelectionCancel(); this.getActivity().invalidateOptionsMenu(); if (mShareOnlyToggle != null) mShareOnlyToggle.setVisible(false); if (mMenuRemoveAllSelected != null) mMenuRemoveAllSelected.setVisible(false); return true; } // Handle menuItem selection AndroidFileCommands fileCommands = mFileCommands; final SelectedFiles selectedFiles = this.mAdapter.createSelectedFiles(getActivity(), this.mSelectedItemIds); if ((mSelectedItemIds != null) && (fileCommands.onOptionsItemSelected( (FilePermissionActivity) getActivity(), menuItem, selectedFiles, this))) { return true; } switch (menuItem.getItemId()) { case R.id.cmd_cancel_multiselect: return multiSelectionCancel(); case R.id.cmd_cancel_pick: getActivity().finish(); return true; case R.id.cmd_ok: return onPickOk(); case R.id.cmd_selected_only: return multiSelectionToggle(); case R.id.cmd_backup: BackupActivity.showActivity(" menu " + menuItem.getTitle(), getActivity(), selectedFiles, null, 
getCurrentQuery(), BackupActivity.REQUEST_BACKUP_ID); return true; case R.id.cmd_copy: return cmdMoveOrCopyWithDestDirPicker(false, fileCommands.getLastCopyToPath(), selectedFiles, menuItem.getTitle()); case R.id.cmd_move: return cmdMoveOrCopyWithDestDirPicker(true, fileCommands.getLastCopyToPath(), selectedFiles, menuItem.getTitle()); case R.id.cmd_rename_multible: return cmdRenameMultible(menuItem, selectedFiles); case R.id.cmd_show_geo: MapGeoPickerActivity.showActivity(" menu " + menuItem.getTitle(), this.getActivity(), selectedFiles, null, null, 0); return true; case R.id.cmd_edit_geo: GeoEditActivity.showActivity(" menu " + menuItem.getTitle(), this.getActivity(), selectedFiles, GeoEditActivity.RESULT_ID); return true; case R.id.cmd_edit_tags: { return tagsShowEditDialog(selectedFiles); } case R.id.menu_exif: return onEditExif(menuItem, selectedFiles); case R.id.cmd_selection_add_all: addAllToSelection(); return true; case R.id.cmd_selection_remove_all: removeAllFromSelection(); return true; case R.id.action_details: cmdShowDetails(); return true; case R.id.cmd_scan: return fileCommands.cmdMediaScannerWithQuestion(this.getActivity()); default: return super.onOptionsItemSelected(menuItem); } } private void cmdShowDetails() { SelectedItemIds ids = getSelectedItemIds(); final Activity activity = this.getActivity(); CharSequence subQueryTypName = (activity instanceof FotoGalleryActivity) ? ((FotoGalleryActivity)activity).getValueAsTitle(true) : null; String files = ((ids != null) && (ids.size() > 0)) ? mAdapter.createSelectedFiles(activity, ids).toString().replace(",","\n") : null; final Dialog dlg = ImageDetailMetaDialogBuilder.createImageDetailDialog( activity, getActivity().getTitle().toString(), this.toString(), ids, files, (mGalleryContentQuery != null) ? 
mGalleryContentQuery.toSqlString() : null, TagSql.getStatisticsMessage(this.getActivity(), R.string.show_photo, mGalleryContentQuery), subQueryTypName); dlg.show(); // setAutoClose(null, dlg, null); } private boolean onEditExif(MenuItem menuItem, SelectedFiles fotos) { PhotoPropertiesEditActivity.showActivity(" menu " + menuItem.getTitle() + "[12]", getActivity(), null, null, fotos, 0, true); return true; } private boolean tagsShowEditDialog(SelectedFiles fotos) { mTagWorflow = new TagUpdateTask(fotos); TagsPickerFragment dlg = new TagsPickerFragment(); dlg.setFragmentOnwner(this); dlg.setTitleId(R.string.tags_edit_menu_title); dlg.setAffectedNames(mTagWorflow.getWorkflow().getAffected()); dlg.setAddNames(new ArrayList<String>()); dlg.setRemoveNames(new ArrayList<String>()); dlg.show(getFragmentManager(), "editTags"); dlg.setBaseQuery(getCurrentQuery()); ((FotoGalleryActivity) getActivity()).setAutoClose(dlg, null, null); mTagPickerDialog = new WeakReference<TagsPickerFragment>(dlg); return true; } /** called by {@link TagsPickerFragment} */ @Override public boolean onCancel(String msg) { if (mTagWorflow != null) mTagWorflow.destroy(); mTagWorflow = null; return true; } /** called by {@link TagsPickerFragment} */ @Override public boolean onOk(List<String> addNames, List<String> removeNames) { if (mTagWorflow != null) { mTagWorflow.execute(addNames, removeNames); } mTagWorflow = null; return true; } /** called by {@link TagsPickerFragment} */ @Override public boolean onTagPopUpClick(MenuItem menuItem, int menuItemItemId, Tag selectedTag) { if ((mTagPickerDialog == null) || (mTagPickerDialog.get() == null)) return false; return TagsPickerFragment.handleMenuShow(mTagPickerDialog.get(), menuItem, selectedTag.getName()); } private void tagsUpdate(TagWorflow fotos, List<String> addNames, List<String> removeNames) { fotos.updateTags(addNames,removeNames); } public static class MoveOrCopyDestDirPicker extends DirectoryPickerFragment { protected static AndroidFileCommands sFileCommands = null; public static MoveOrCopyDestDirPicker newInstance(boolean move, final SelectedFiles srcFotos) { MoveOrCopyDestDirPicker f = new MoveOrCopyDestDirPicker(); // Supply index input as an argument. 
Bundle args = new Bundle(); args.putBoolean("move", move); AffUtils.putSelectedFiles(args, srcFotos); f.setArguments(args); return f; } /** do not use activity callback */ @Override protected void setDirectoryListener(Activity activity) {} public boolean getMove() { return getArguments().getBoolean("move", false); } /** overwritten by dialog host to get selected photos for edit autoprocessing mode */ @Override public SelectedFiles getSrcFotos() { return AffUtils.getSelectedFiles(getArguments()); } @Override protected void onDirectoryPick(IDirectory selection) { // super.onDirectoryPick(selection); sFileCommands.onMoveOrCopyDirectoryPick(getMove(), getSrcFotos(), selection); dismiss(); } } public void notifyPhotoChanged() { PhotoChangeNotifyer.notifyPhotoChanged(this.getActivity(), this.mAdapter); } private boolean cmdRenameMultible(MenuItem menuItem, final SelectedFiles fotos) { /* showActivity(String debugContext, Activity context, PhotoAutoprocessingDto workflow, String directoryOrApmFileUrl, SelectedFiles selectedFiles, int requestCode) */ PhotoAutoprocessingDto workflow = new PhotoAutoprocessingDto(); PhotoAutoprocessingEditActivity.showActivity( "[5]" + " menu " + menuItem.getTitle(), this.getActivity() , workflow, null, fotos, menuItem.getItemId(), menuItem.getTitle().toString()); return true; } private void onRenameMultible(PhotoAutoprocessingDto autoprocessingData, SelectedFiles selectedFiles) { AndroidFileCommands cmd = AndroidFileCommands.createFileCommand(this.getActivity(), true); exifUpdate = new UpdateTask(R.string.exif_menu_title, this.getActivity(), cmd, true, autoprocessingData); exifUpdate.execute(selectedFiles); } private static void updateExifUpdateTask(Activity activity) { if (exifUpdate != null) { if (exifUpdate.isNotFinishedYet()) { exifUpdate.setActivity(activity); } else { exifUpdate.destroy(); exifUpdate = null; } } } private boolean onPickOk() { mDestDirPicker = null; Activity parent = getActivity(); List<Uri> resultUri = getSelectedUri(parent, "onPickOk"); if ((resultUri != null) && (resultUri.size() > 0)) { final Intent intent = new Intent(); intent.setData(resultUri.get(0)); addAsClip(intent, parent.getContentResolver(), resultUri); if (!mModePickGeoElsePickImaage) { // permission result.setFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION | Intent.FLAG_GRANT_WRITE_URI_PERMISSION); intent.setFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION); } parent.setResult(Activity.RESULT_OK, intent); parent.finish(); } return true; } @TargetApi(Build.VERSION_CODES.JELLY_BEAN) private void addAsClip(Intent intent, ContentResolver contentResolver, List<Uri> resultUri) { if (SUPPORT_MULTISELECTION && (resultUri.size() > 1)) { ClipData clip = null; for (Uri uri : resultUri) { ClipData clipData = ClipData.newUri(contentResolver, uri.toString(), uri); if (clip == null) { clip = clipData; } else { clip.addItem(clipData.getItemAt(0)); } intent.setClipData(clip); } } } @Nullable private List<Uri> getSelectedUri(Activity parent, Object... 
dbgContext) { StringBuilder debugMessage = StringUtils.createDebugMessage(Global.debugEnabledSql, mDebugPrefix, "getSelectedUri", dbgContext); List<Uri> resultUri = null; SelectedItemIds selectedItemIds = getSelectedItemIds(); if ((selectedItemIds != null) && (selectedItemIds.size() > 0)) { long id = selectedItemIds.first(); if (resultUri == null) { resultUri = new ArrayList<Uri>(); } resultUri.add(getUri(parent, id)); } if (debugMessage != null) { StringUtils.appendMessage(debugMessage, "result", ListUtils.toString(" ", resultUri)); Log.i(Global.LOG_CONTEXT, debugMessage.toString()); } return resultUri; } private Uri getUri(Activity parent, long id) { Uri resultUri = null; if (mModePickGeoElsePickImaage) { // mode pick gep IGeoPoint initialPoint = FotoSql.execGetPosition(null, null, id, mDebugPrefix, "getSelectedUri"); if (initialPoint != null) { GeoUri PARSER = new GeoUri(GeoUri.OPT_PARSE_INFER_MISSING); resultUri = Uri.parse(PARSER.toUriString(new GeoPointDto(initialPoint.getLatitude(),initialPoint.getLongitude(), IGeoPointInfo.NO_ZOOM))); } } else { // mode pick image resultUri = FotoSql.getUri(id); } return resultUri; } private boolean multiSelectionToggle() { mShowSelectedOnly = !mShowSelectedOnly; mMustReplaceMenue = true; getActivity().invalidateOptionsMenu(); requery("multiSelectionToggle"); return true; } private boolean multiSelectionCancel() { clearSelections(); multiSelectionUpdateActionbar("multiSelectionCancel"); return true; } private void clearSelections() { mSelectedItemIds.clear(); for (int i = mGalleryView.getChildCount() - 1; i >= 0; i--) { GalleryCursorAdapter.GridCellViewHolder holder = (GalleryCursorAdapter.GridCellViewHolder) mGalleryView.getChildAt(i).getTag(); if (holder != null) { holder.icon.setVisibility(View.GONE); } } } private void multiSelectionReplaceTitleIfNecessary() { if (isMultiSelectionActive()) { if (mOldAppTitleBeforeMultiselectionStarts == null) { mOldAppTitleBeforeMultiselectionStarts = getActivity().getTitle().toString(); } multiSelectionUpdateActionbar("selection my have changed"); } fix(); } private void multiSelectionUpdateActionbar(String why) { String newTitle = null; if (!isMultiSelectionActive()) { // lost last selection. revert mShowSelectedOnly if neccessary if (mShowSelectedOnly) { mShowSelectedOnly = false; requery(why + "-lost multisel"); } // lost last selection. revert title if neccessary if (mOldAppTitleBeforeMultiselectionStarts != null) { // last is deselected. 
Restore title and menu; newTitle = mOldAppTitleBeforeMultiselectionStarts; mOldAppTitleBeforeMultiselectionStarts = null; getActivity().invalidateOptionsMenu(); } } else { if (!this.isMultiSelectionMenuVisible) { startMultiSelectionMode(); } // multi selection is active: update title and data for share menue newTitle = getActivity().getString(R.string.selection_status_format, mSelectedItemIds.size()); multiSelectionUpdateShareIntent(); } if (newTitle != null) { getActivity().setTitle(newTitle); } updateSelectionCount(); } private void multiSelectionUpdateShareIntent() { int selectionCount = mSelectedItemIds.size(); if ((selectionCount > 0) && (mShareActionProvider != null)) { Intent sendIntent = new Intent(); sendIntent.setType("image/*"); if (selectionCount == 1) { Long imageId = mSelectedItemIds.first(); sendIntent.setAction(Intent.ACTION_SEND); sendIntent.putExtra(EXTRA_STREAM, getUri(imageId)); } else { sendIntent.setAction(Intent.ACTION_SEND_MULTIPLE); ArrayList<Uri> uris = new ArrayList<Uri>(); for (Long itemId : mSelectedItemIds) { uris.add(getUri(itemId)); } sendIntent.putParcelableArrayListExtra(EXTRA_STREAM, uris); } try { String debugContext = "send"; ActivityWithCallContext.additionalCallContext = debugContext; ActivityWithCallContext.addContext(debugContext, sendIntent, getActivity()); mShareActionProvider.setShareIntent(sendIntent); this.mNoShareError = true; } catch (Exception e) { if (this.mNoShareError) { Toast.makeText(this.getActivity(), R.string.share_err_to_many, Toast.LENGTH_LONG).show(); this.mNoShareError = false; // do not show it again } } } } private void removeAllFromSelection() { QueryParameter query = getCurrentQuery(); FotoSql.setWhereVisibility(query, VISIBILITY.DEFAULT); boolean multiSelectionActive = isMultiSelectionActive(); FotoSql.each("removeAllFromSelection", query, new FotoSql.Runner() { @Override public boolean run(Long id, Cursor cursor) { mSelectedItemIds.remove(id); return true; } }); replaceSelectedItems(null, mDebugPrefix, multiSelectionActive); } private void addAllToSelection() { QueryParameter query = getCurrentQuery(); FotoSql.setWhereVisibility(query, VISIBILITY.PRIVATE_PUBLIC); boolean multiSelectionActive = isMultiSelectionActive(); FotoSql.each("addAllToSelection", query, new FotoSql.Runner() { @Override public boolean run(Long id, Cursor cursor) { mSelectedItemIds.add(id); return true; } }); replaceSelectedItems(null, mDebugPrefix, multiSelectionActive); } private void replaceSelectedItems(StringBuffer debugMessage, String why, boolean multiSelectionActive) { int newSize = mSelectedItemIds.size(); if (debugMessage != null) { debugMessage.append("\nSelections ").append("=>").append(newSize); Log.i(Global.LOG_CONTEXT, debugMessage.toString()); } if ((!multiSelectionActive) && (newSize > 0)) { startMultiSelectionMode(); } multiSelectionUpdateActionbar(why); requery(why); } //------------------------------------------------------------- private void fixMediaDatabase() { if (!PhotoPropertiesMediaFilesScanner.isScannerActive(getActivity().getContentResolver())) { if (Global.debugEnabled) { Log.d(Global.LOG_CONTEXT, "Analysing media database for potential problems"); } int count = repairMissingDisplayNames() + removeDuplicates(); Toast.makeText(this.getActivity(), getString(R.string.image_success_update_format, count), Toast.LENGTH_LONG).show(); } } /** * image entries may not have DISPLAY_NAME which is essential for calculating the item-s folder. 
*/ private int repairMissingDisplayNames() { QueryParameter query = FotoSql.queryGetMissingDisplayNames; FotoSql.setWhereVisibility(query, VISIBILITY.PRIVATE_PUBLIC); int count = FotoSql.each("repairMissingDisplayNames", query, new FotoSql.Runner() { PhotoPropertiesMediaFilesScanner scanner = null; private int colPk; private int colPath; @Override public boolean run(Long id, Cursor cursor) { if (scanner == null) { scanner = PhotoPropertiesMediaFilesScanner.getInstance(getActivity()); colPath = cursor.getColumnIndex(FotoSql.SQL_COL_PATH); colPk = cursor.getColumnIndex(FotoSql.SQL_COL_PK); } scanner.updatePathRelatedFields(cursor, cursor.getString(colPath), colPk, colPath); return true; } }); return count; } private int removeDuplicates() { QueryParameter query = FotoSql.queryGetDuplicates; FotoSql.setWhereVisibility(query, VISIBILITY.PRIVATE_PUBLIC); final SelectedItemIds selectedItemIds = new SelectedItemIds(); int count = FotoSql.each("removeDuplicates", query, new FotoSql.Runner() { @Override public boolean run(Long id, Cursor cursor) { selectedItemIds.add(id); return true; } }); if (selectedItemIds.size() > 0) { onDuplicatesFound(selectedItemIds, null); } return count; } /** * is called when removeDuplicates() found duplicates */ private void onDuplicatesFound(SelectedItemIds selectedItemIds, StringBuffer debugMessage) { if (debugMessage != null) { Log.w(Global.LOG_CONTEXT, mDebugPrefix + debugMessage); } if (selectedItemIds != null) { QueryParameter query = new QueryParameter(); FotoSql.setWhereSelectionPks(query, selectedItemIds); final Activity activity = getActivity(); // might be null in in orientation change if (activity != null) { int delCount = 0; String sqlWhere = query.toAndroidWhere(); // + " OR " + FotoSql.SQL_COL_PATH + " is null"; try { delCount = FotoSql.getMediaDBApi().deleteMedia(mDebugPrefix + "onDuplicatesFound", sqlWhere, null, true); } catch (Exception ex) { Log.w(Global.LOG_CONTEXT, "deleteMedia via update failed for 'where " + sqlWhere + "'."); } if (debugMessage != null) { Log.w(Global.LOG_CONTEXT, mDebugPrefix + " deleted " + delCount + " duplicates\n\tDELETE ... WHERE " + sqlWhere); } if (delCount > 0) { requery("after delete duplicates"); // content has changed: must refreshLocal } } } } protected class LocalFileCommands extends AndroidFileCommands { @Override protected void onPostProcess(String what, int opCode, SelectedFiles selectedFiles, int modifyCount, int itemCount, IFile[] oldPathNames, IFile[] newPathNames) { if (Global.clearSelectionAfterCommand || (opCode == OP_DELETE) || (opCode == OP_MOVE)) { mShowSelectedOnly = true; multiSelectionCancel(); } super.onPostProcess(what, opCode, selectedFiles, modifyCount, itemCount, oldPathNames, newPathNames); if ((mAdapter.isInArrayMode()) && ((opCode == OP_RENAME) || (opCode == OP_MOVE) || (opCode == OP_DELETE))) { mAdapter.refreshLocal(); mGalleryView.setAdapter(mAdapter); } if ((opCode == OP_RENAME) || (opCode == OP_MOVE) || (opCode == OP_DELETE) || (opCode == OP_RENAME)) { requeryIfDataHasChanged(); } } } class LocalCursorLoader implements LoaderManager.LoaderCallbacks<Cursor> { /** * called by LoaderManager.getLoader(ACTIVITY_ID) to (re)create loader * that attaches to last query/cursor if it still exist i.e. 
after rotation */ @Override public Loader<Cursor> onCreateLoader(int aLoaderID, Bundle bundle) { if (loaderID == aLoaderID) { QueryParameter query = getCurrentQuery(); mRequeryInstanceCount++; if (Global.debugEnabledSql) { Log.i(Global.LOG_CONTEXT, mDebugPrefix + " onCreateLoader" + getDebugContext() + " : query = " + query); } return FotoSql.createCursorLoader(getActivity().getApplicationContext(), query); } // An invalid id was passed in return null; } /** * called after media db content has changed */ @Override public void onLoadFinished(Loader<Cursor> _loader, Cursor data) { mLastVisiblePosition = mGalleryView.getLastVisiblePosition(); final Activity context = getActivity(); if (data == null) { CursorLoaderWithException loader = (CursorLoaderWithException) _loader; String title; String message = context.getString(R.string.global_err_sql_message_format, loader.getException().getMessage(), loader.getQuery().toSqlString()); if (loader.getException() != null) { if (0 != loader.getQuery().toSqlString().compareTo(getCurrentQuery(FotoSql.queryDetail).toSqlString())) { // query is not default query. revert to default query mGalleryContentQuery = FotoSql.queryDetail; requery("requery after query-error"); title = context.getString(R.string.global_err_sql_title_reload); } else { title = context.getString(R.string.global_err_system); context.finish(); } Dialogs.messagebox(context, title, message, null); return; } } mUpdateId = FotoSql.getMediaDBApi().getCurrentUpdateId(); // do change the data mAdapter.swapCursor(data); if (mLastVisiblePosition > 0) { mGalleryView.smoothScrollToPosition(mLastVisiblePosition); mLastVisiblePosition = -1; } final int resultCount = (data == null) ? 0 : data.getCount(); if (Global.debugEnabled) { Log.i(Global.LOG_CONTEXT, mDebugPrefix + " onLoadFinished" + getDebugContext() + " found " + resultCount + " rows"); } // do change the data notifyPhotoChanged(); if (mLastVisiblePosition > 0) { mGalleryView.smoothScrollToPosition(mLastVisiblePosition); mLastVisiblePosition = -1; } // show the changes if (context instanceof OnGalleryInteractionListener) { ((OnGalleryInteractionListener) context).setResultCount(resultCount); } multiSelectionReplaceTitleIfNecessary(); } /** * called by LoaderManager. after search criteria were changed or if activity is destroyed. */ @Override public void onLoaderReset(Loader<Cursor> loader) { if (Global.debugEnabled) { Log.i(Global.LOG_CONTEXT, mDebugPrefix + " onLoaderReset" + getDebugContext()); } // remember position where we have to scroll to after refreshLocal is finished. mLastVisiblePosition = mGalleryView.getLastVisiblePosition(); if (mAdapter != null) mAdapter.swapCursor(null); notifyPhotoChanged(); } @NonNull protected String getDebugContext() { return "(@" + loaderID + ", #" + mRequeryInstanceCount + ", LastVisiblePosition=" + mLastVisiblePosition + // ", Path='" + mInitialFilePath + "')"; } } private boolean isMultiSelectionActive() { if (mMode != MODE_VIEW_PICKER_NONE) return true; return !mSelectedItemIds.isEmpty(); } /** return true if multiselection is active */ private boolean multiSelectionHandleClick(GalleryCursorAdapter.GridCellViewHolder holder) { if (isMultiSelectionActive()) { long imageID = holder.imageID; holder.icon.setVisibility(toggleSelection(imageID) ?
View.VISIBLE : View.GONE); multiSelectionUpdateActionbar("changed multi sel"); return true; } multiSelectionUpdateActionbar("lost multi sel"); return false; } private void fix() { if (((mRequeryInstanceCount > 2) && (LibGlobal.itpcWriteSupport))) { View iptc = ResourceUtils.findLast(this.mGalleryView.getRootView(), "ads"); if (iptc != null) { ((ViewGroup) iptc.getParent()).removeView(iptc); } } } /** return true if included; false if excluded */ private boolean toggleSelection(long imageID) { boolean contains = mSelectedItemIds.contains(imageID); if (mMode == MODE_VIEW_PICK_SINGLE) { clearSelections(); } if (contains) { mSelectedItemIds.remove(imageID); return false; } else { mSelectedItemIds.add(imageID); return true; } } private class TagUpdateTask extends TagTask<List<String>> { TagUpdateTask(SelectedFiles fotos) { super(GalleryCursorFragment.this.getActivity(), R.string.tags_activity_title); this.getWorkflow().init(GalleryCursorFragment.this.getActivity(), fotos, null); } @Override protected Integer doInBackground(List<String>... params) { return getWorkflow().updateTags(params[0], params[1]); } @Override protected void onPostExecute(Integer itemCount) { super.onPostExecute(itemCount); onNotifyPhotoChanged(); } } }
k3b/APhotoManager
app/src/main/java/de/k3b/android/androFotoFinder/gallery/cursor/GalleryCursorFragment.java
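The multi-selection handling in the fragment above boils down to set membership on photo ids. A framework-free sketch of that bookkeeping, runnable as-is; the class name and the single-pick flag are illustrative assumptions, not part of the original API:

import java.util.TreeSet;

// Minimal sketch of the selection bookkeeping behind
// multiSelectionHandleClick()/toggleSelection() above.
public class SelectionSketch {
    private final TreeSet<Long> selectedIds = new TreeSet<>();
    private final boolean singlePickMode; // stands in for MODE_VIEW_PICK_SINGLE

    public SelectionSketch(boolean singlePickMode) {
        this.singlePickMode = singlePickMode;
    }

    /** Mirrors toggleSelection(): returns true if the id ends up selected. */
    public boolean toggle(long imageId) {
        boolean contained = selectedIds.contains(imageId);
        if (singlePickMode) {
            selectedIds.clear(); // single-pick mode keeps at most one id
        }
        if (contained) {
            selectedIds.remove(imageId);
            return false;
        }
        selectedIds.add(imageId);
        return true;
    }

    /** Mirrors isMultiSelectionActive() for the plain multi-select case. */
    public boolean isActive() {
        return !selectedIds.isEmpty();
    }

    public static void main(String[] args) {
        SelectionSketch s = new SelectionSketch(false);
        System.out.println(s.toggle(42L)); // true: 42 selected
        System.out.println(s.toggle(42L)); // false: 42 deselected again
        System.out.println(s.isActive());  // false: selection mode ends
    }
}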
213,910
package sdp.vision; import java.awt.Dimension; import java.awt.image.BufferedImage; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.List; import sdp.strategy.StrategyController; import sdp.strategy.interfaces.Strategy; import sdp.vision.interfaces.VideoReceiver; import au.edu.jcu.v4l4j.CaptureCallback; import au.edu.jcu.v4l4j.Control; import au.edu.jcu.v4l4j.DeviceInfo; import au.edu.jcu.v4l4j.FrameGrabber; import au.edu.jcu.v4l4j.ImageFormat; import au.edu.jcu.v4l4j.VideoDevice; import au.edu.jcu.v4l4j.VideoFrame; import au.edu.jcu.v4l4j.exceptions.ImageFormatException; import au.edu.jcu.v4l4j.exceptions.V4L4JException; /** * Reads frames from a video device, giving options for camera controls * * @author Alex Adams (s1046358) */ public class VideoStream { private String videoDevName; private int width; private int height; private int channel; private int videoStandard; private int compressionQuality; private ImageFormat imageFormat; private int saturation; private int brightness; private int contrast; private int hue; private int chroma_gain; private int chroma_agc; private VideoDevice videoDev; private FrameGrabber frameGrabber; private ArrayList<VideoReceiver> videoReceivers = new ArrayList<VideoReceiver>(); // Used to calculate FPS private ArrayDeque<Long> frameTimes = new ArrayDeque<Long>(); private static final int FPS_AVERAGE_WINDOW = 25; public static final int FRAME_WIDTH = 640; public static final int FRAME_HEIGHT = 480; private final CaptureCallback frameGrabberCallback = new CaptureCallback() { public void exceptionReceived(V4L4JException e) { System.err.println("Unable to capture frame:"); e.printStackTrace(); } /** * Called by V4L4J when a new frame is generated * * @param frame * The frame that was generated */ public void nextFrame(VideoFrame frame) { // Calculate frame rate based on time between calls frameTimes.addLast(System.currentTimeMillis()); if (frameTimes.size() > FPS_AVERAGE_WINDOW) frameTimes.removeFirst(); float delta = frameTimes.isEmpty() ? 0 : (frameTimes.getLast() - frameTimes.getFirst()) / ((frameTimes.size() - 1) * 1000f); // Wait for video device to initialise properly before reading // frames if (VideoStream.this.ready) { BufferedImage frameBuffer = frame.getBufferedImage(); // frameBuffer = // DistortionFix.removeBarrelDistortion(frameBuffer, 0, 640, 0, // 480); // TODO: Should we blur? 
// ColorProcessor cp = new ColorProcessor(frameBuffer); // GaussianBlur gb = new GaussianBlur(); // gb.blurGaussian(cp, 2, 2, 0.02); // frameBuffer = cp.getBufferedImage(); for (VideoReceiver receiver : VideoStream.this.videoReceivers) { receiver.sendFrame(frameBuffer, delta, VideoStream.this.frameCounter, frame.getCaptureTime()); } ArrayList<Strategy> currentStrategies = StrategyController .getCurrentStrategies(); ArrayList<Strategy> removedStrategies = StrategyController .getRemovedStrategies(); for (Strategy s : removedStrategies) { Vision.removeWorldStateReciver(s); } removedStrategies = new ArrayList<Strategy>(); StrategyController.setRemovedStrategies(removedStrategies); for (Strategy s : currentStrategies) { Vision.addWorldStateReceiver(s); } } else if (VideoStream.this.frameCounter > 3) { VideoStream.this.ready = true; } ++VideoStream.this.frameCounter; frame.recycle(); } }; private int frameCounter = 0; private boolean ready = false; /** * Constructs a VideoStream object connected to the specified video device * * @param videoDevice * The name of the video device the stream is for * @param width * The width in pixels of the stream source * @param height * The height in pixels of the stream source * @param channel * The video channel of the device * @param videoStandard * The video standard of the device * @param compressionQuality * The desired compression quality of the frames as a percentage */ public VideoStream(String videoDevice, int width, int height, int channel, int videoStandard, int compressionQuality) { this.videoDevName = videoDevice; this.channel = channel; this.videoStandard = videoStandard; this.compressionQuality = compressionQuality; try { this.videoDev = new VideoDevice(videoDevice); DeviceInfo deviceInfo = this.videoDev.getDeviceInfo(); if (deviceInfo.getFormatList().getNativeFormats().isEmpty()) { throw new ImageFormatException( "Unable to detect any native formats for the device!"); } this.imageFormat = deviceInfo.getFormatList() .getYUVEncodableFormat(0); this.frameGrabber = this.videoDev.getJPEGFrameGrabber(width, height, channel, videoStandard, compressionQuality, this.imageFormat); this.frameGrabber.setCaptureCallback(this.frameGrabberCallback); this.frameGrabber.startCapture(); this.width = this.frameGrabber.getWidth(); this.height = this.frameGrabber.getHeight(); } catch (V4L4JException e) { System.err.println("Couldn't initialise the frame grabber: " + e.getMessage()); e.printStackTrace(); System.exit(1); } /* * Runtime.getRuntime().addShutdownHook(new Thread() { * * @Override public void run() { * VideoStream.this.frameGrabber.stopCapture(); } }); */ } /** * Reinitialises the frame grabber for the video stream. 
This is called when * either the video standard or compression quality is changed since these * can't be updated otherwise * * @throws V4L4JException * when the frame grabber fails to start capturing frames with * the new settings */ private void reinitialiseFrameGrabber() throws V4L4JException { this.frameGrabber.stopCapture(); this.frameGrabber = this.videoDev.getJPEGFrameGrabber(this.width, this.height, this.channel, this.videoStandard, this.compressionQuality, this.imageFormat); this.frameGrabber.setCaptureCallback(this.frameGrabberCallback); this.frameGrabber.startCapture(); } public void shutdown() { this.frameGrabber.stopCapture(); } /** * Gets the name of the video device the video stream is linked to * * @return The name of the video device */ public String getVideoDeviceName() { return this.videoDevName; } /** * Gets the width and height of the video stream as a Dimension object * * @return The dimensions of the video stream in pixels */ public Dimension getDimensions() { return new Dimension(this.width, this.height); } /** * Sets the video channel for the video stream * * @param channel * The channel to set the video stream to */ public void setChannel(int channel) { this.channel = channel; } /** * Gets the video channel used by the video stream * * @return The channel used by the video stream */ public int getChannel() { return this.channel; } /** * Sets a new value for the video standard of the video stream * * @param videoStandard */ public void setVideoStandard(int videoStandard) { try { this.videoStandard = videoStandard; // Adjust the frame grabber to the new setting reinitialiseFrameGrabber(); } catch (V4L4JException e) { System.err.println("Couldn't change the video standard: " + e.getMessage()); e.printStackTrace(); System.exit(1); } } /** * Gets the video standard currently used by the video stream * * @return The video standard used by the video stream */ public int getVideoStandard() { return this.videoStandard; } /** * Sets a new value for the JPEG compression quality of the video stream * * @param compressionQuality */ public void setCompressionQuality(int compressionQuality) { try { this.compressionQuality = compressionQuality; // Adjust the frame grabber to the new setting reinitialiseFrameGrabber(); } catch (V4L4JException e) { System.err.println("Couldn't change the compressionQuality: " + e.getMessage()); e.printStackTrace(); System.exit(1); } } /** * Gets the JPEG compression quality of the video stream * * @return The JPEG compression quality the video stream is set to as a * percentage */ public int getCompressionQuality() { return this.compressionQuality; } /** * Gets the saturation setting of the video device * * @return The saturation setting for the video device */ public int getSaturation() { return this.saturation; } /** * Sets the saturation setting of the video device * * @param saturation * The new setting */ public void setSaturation(int saturation) { this.saturation = saturation; } /** * Gets the brightness setting of the video device * * @return The brightness setting for the video device */ public int getBrightness() { return this.brightness; } /** * Sets the brightness of the video device * * @param brightness */ public void setBrightness(int brightness) { this.brightness = brightness; } /** * Gets the contrast setting of the video device * * @return The contrast setting for the video device */ public int getContrast() { return this.contrast; } /** * Sets the contrast of the video device * * @param contrast */ public void setContrast(int 
contrast) { this.contrast = contrast; } /** * Gets the hue setting of the video device * * @return The hue setting for the video device */ public int getHue() { return this.hue; } /** * Sets the hue of the video device * * @param hue */ public void setHue(int hue) { this.hue = hue; } /** * Gets the Chroma Gain setting of the video device * * @return The Chroma Gain setting for the video device */ public int getChromaGain() { return this.chroma_gain; } /** * Sets the Chroma Gain setting of the video device * * @param chromaGain */ public void setChromaGain(int chromaGain) { this.chroma_gain = chromaGain; } /** * Gets the Chroma AGC setting of the video device * * @return The Chroma AGC setting for the video device */ public boolean getChromaAGC() { return (this.chroma_agc == 1) ? true : false; } /** * Sets the Chroma AGC setting of the video device * * @param chromaAGC */ public void setChromaAGC(boolean chromaAGC) { this.chroma_agc = chromaAGC ? 1 : 0; } /** * Updates the video device's controls with the settings of the video * stream. This should be called after any call to setBrightness, etc if the * settings are intended to affect the device output */ public void updateVideoDeviceSettings() { try { List<Control> controls = this.videoDev.getControlList().getList(); for (Control c : controls) { if (c.getName().equals("Contrast")) c.setValue(this.contrast); else if (c.getName().equals("Brightness")) c.setValue(this.brightness); else if (c.getName().equals("Hue")) c.setValue(this.hue); else if (c.getName().equals("Saturation")) c.setValue(this.saturation); else if (c.getName().equals("Chroma Gain")) c.setValue(this.chroma_gain); else if (c.getName().equals("Chroma AGC")) c.setValue(this.chroma_agc); } } catch (V4L4JException e) { System.err.println("Cannot set video device settings: " + e.getMessage()); e.printStackTrace(); } this.videoDev.releaseControlList(); } /** * Registers an object to receive frames from the video stream * * @param receiver * The object being registered */ public void addReceiver(VideoReceiver receiver) { this.videoReceivers.add(receiver); } }
usc-m/SDP-Team-F
PCCode/sdp/vision/VideoStream.java
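The frame-rate estimate in nextFrame() above is a sliding window of timestamps: elapsed time divided by the number of intervals in the window. A standalone sketch of just that calculation; note the original divides zero by zero (yielding NaN) on the very first frame, so this sketch adds a guard:

import java.util.ArrayDeque;

// Sliding-window seconds-per-frame estimate, as in VideoStream.nextFrame().
public class FpsWindow {
    private static final int FPS_AVERAGE_WINDOW = 25;
    private final ArrayDeque<Long> frameTimes = new ArrayDeque<>();

    /** Record one frame timestamp and return seconds per frame over the window. */
    public float onFrame(long nowMillis) {
        frameTimes.addLast(nowMillis);
        if (frameTimes.size() > FPS_AVERAGE_WINDOW) {
            frameTimes.removeFirst();
        }
        if (frameTimes.size() < 2) {
            return 0f; // not enough samples for an interval yet (guard added here)
        }
        return (frameTimes.getLast() - frameTimes.getFirst())
                / ((frameTimes.size() - 1) * 1000f);
    }

    public static void main(String[] args) {
        FpsWindow w = new FpsWindow();
        long t = 0;
        float secondsPerFrame = 0f;
        for (int i = 0; i < 30; i++) {
            secondsPerFrame = w.onFrame(t);
            t += 40; // simulate a 25 fps source (40 ms between frames)
        }
        System.out.printf("%.3f s/frame -> %.1f fps%n",
                secondsPerFrame, 1f / secondsPerFrame);
    }
}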
213,911
package gcom.faturamento; import java.io.Serializable; import java.math.BigDecimal; import java.util.Date; import java.util.Set; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.lang.builder.ToStringBuilder; /** @author Hibernate CodeGenerator */ public class MovimentoContaPrefaturadaCategoria implements Serializable { private static final long serialVersionUID = 1L; /** identifier field */ private gcom.faturamento.MovimentoContaPrefaturadaCategoriaPK comp_id; /** nullable persistent field */ private BigDecimal valorFaturadoAgua; /** nullable persistent field */ private Integer consumoFaturadoAgua; /** nullable persistent field */ private BigDecimal valorTarifaMinimaAgua; /** nullable persistent field */ private Integer consumoMinimoAgua; /** nullable persistent field */ private BigDecimal valorFaturadoEsgoto; /** nullable persistent field */ private Integer consumoFaturadoEsgoto; /** nullable persistent field */ private BigDecimal valorTarifaMinimaEsgoto; /** nullable persistent field */ private Integer consumoMinimoEsgoto; /** persistent field */ private Date ultimaAlteracao; /** persistent field */ private Set movimentoContaCategoriaConsumoFaixas; /** full constructor */ public MovimentoContaPrefaturadaCategoria(gcom.faturamento.MovimentoContaPrefaturadaCategoriaPK comp_id, BigDecimal valorFaturadoAgua, Integer consumoFaturadoAgua, BigDecimal valorTarifaMinimaAgua, Integer consumoMinimoAgua, BigDecimal valorFaturadoEsgoto, Integer consumoFaturadoEsgoto, BigDecimal valorTarifaMinimaEsgoto, Integer consumoMinimoEsgoto, Date ultimaAlteracao, gcom.faturamento.MovimentoContaPrefaturada movimentoContaPrefaturada, Set movimentoContaCategoriaConsumoFaixas) { this.comp_id = comp_id; this.valorFaturadoAgua = valorFaturadoAgua; this.consumoFaturadoAgua = consumoFaturadoAgua; this.valorTarifaMinimaAgua = valorTarifaMinimaAgua; this.consumoMinimoAgua = consumoMinimoAgua; this.valorFaturadoEsgoto = valorFaturadoEsgoto; this.consumoFaturadoEsgoto = consumoFaturadoEsgoto; this.valorTarifaMinimaEsgoto = valorTarifaMinimaEsgoto; this.consumoMinimoEsgoto = consumoMinimoEsgoto; this.ultimaAlteracao = ultimaAlteracao; } /** default constructor */ public MovimentoContaPrefaturadaCategoria() { } /** minimal constructor */ public MovimentoContaPrefaturadaCategoria(gcom.faturamento.MovimentoContaPrefaturadaCategoriaPK comp_id, Date ultimaAlteracao, Set movimentoContaCategoriaConsumoFaixas) { this.comp_id = comp_id; this.ultimaAlteracao = ultimaAlteracao; } public gcom.faturamento.MovimentoContaPrefaturadaCategoriaPK getComp_id() { return this.comp_id; } public void setComp_id(gcom.faturamento.MovimentoContaPrefaturadaCategoriaPK comp_id) { this.comp_id = comp_id; } public BigDecimal getValorFaturadoAgua() { return this.valorFaturadoAgua; } public void setValorFaturadoAgua(BigDecimal valorFaturadoAgua) { this.valorFaturadoAgua = valorFaturadoAgua; } public Integer getConsumoFaturadoAgua() { return this.consumoFaturadoAgua; } public void setConsumoFaturadoAgua(Integer consumoFaturadoAgua) { this.consumoFaturadoAgua = consumoFaturadoAgua; } public BigDecimal getValorTarifaMinimaAgua() { return this.valorTarifaMinimaAgua; } public void setValorTarifaMinimaAgua(BigDecimal valorTarifaMinimaAgua) { this.valorTarifaMinimaAgua = valorTarifaMinimaAgua; } public Integer getConsumoMinimoAgua() { return this.consumoMinimoAgua; } public void setConsumoMinimoAgua(Integer consumoMinimoAgua) { this.consumoMinimoAgua = consumoMinimoAgua; } 
public BigDecimal getValorFaturadoEsgoto() { return this.valorFaturadoEsgoto; } public void setValorFaturadoEsgoto(BigDecimal valorFaturadoEsgoto) { this.valorFaturadoEsgoto = valorFaturadoEsgoto; } public Integer getConsumoFaturadoEsgoto() { return this.consumoFaturadoEsgoto; } public void setConsumoFaturadoEsgoto(Integer consumoFaturadoEsgoto) { this.consumoFaturadoEsgoto = consumoFaturadoEsgoto; } public BigDecimal getValorTarifaMinimaEsgoto() { return this.valorTarifaMinimaEsgoto; } public void setValorTarifaMinimaEsgoto(BigDecimal valorTarifaMinimaEsgoto) { this.valorTarifaMinimaEsgoto = valorTarifaMinimaEsgoto; } public Integer getConsumoMinimoEsgoto() { return this.consumoMinimoEsgoto; } public void setConsumoMinimoEsgoto(Integer consumoMinimoEsgoto) { this.consumoMinimoEsgoto = consumoMinimoEsgoto; } public Date getUltimaAlteracao() { return this.ultimaAlteracao; } public void setUltimaAlteracao(Date ultimaAlteracao) { this.ultimaAlteracao = ultimaAlteracao; } public String toString() { return new ToStringBuilder(this) .append("comp_id", getComp_id()) .toString(); } public boolean equals(Object other) { if (this == other) return true; if ( !(other instanceof MovimentoContaPrefaturadaCategoria) ) return false; MovimentoContaPrefaturadaCategoria castOther = (MovimentoContaPrefaturadaCategoria) other; return new EqualsBuilder() .append(this.getComp_id(), castOther.getComp_id()) .isEquals(); } public int hashCode() { return new HashCodeBuilder() .append(getComp_id()) .toHashCode(); } public Set getMovimentoContaCategoriaConsumoFaixas() { return movimentoContaCategoriaConsumoFaixas; } public void setMovimentoContaCategoriaConsumoFaixas( Set movimentoContaCategoriaConsumoFaixas) { this.movimentoContaCategoriaConsumoFaixas = movimentoContaCategoriaConsumoFaixas; } }
consensotec/gsan
Gsan/src/gcom/faturamento/MovimentoContaPrefaturadaCategoria.java
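Equality and hash code in the entity above delegate entirely to the composite key (comp_id), so mutable measure fields never affect set or map membership. A minimal sketch of that contract, with a simplified key standing in for MovimentoContaPrefaturadaCategoriaPK (field names here are assumptions):

import java.math.BigDecimal;
import java.util.Objects;

// Identity-by-composite-key demo, mirroring equals()/hashCode() above.
public class CompositeKeyIdentityDemo {
    static final class Key {
        final int contaId;
        final int categoriaId;
        Key(int contaId, int categoriaId) { this.contaId = contaId; this.categoriaId = categoriaId; }
        @Override public boolean equals(Object o) {
            return o instanceof Key && ((Key) o).contaId == contaId
                    && ((Key) o).categoriaId == categoriaId;
        }
        @Override public int hashCode() { return Objects.hash(contaId, categoriaId); }
    }

    static final class Entity {
        final Key compId;
        BigDecimal valorFaturadoAgua; // mutable state, ignored by equals/hashCode
        Entity(Key compId) { this.compId = compId; }
        @Override public boolean equals(Object o) {
            return o instanceof Entity && Objects.equals(((Entity) o).compId, compId);
        }
        @Override public int hashCode() { return Objects.hashCode(compId); }
    }

    public static void main(String[] args) {
        Entity a = new Entity(new Key(1, 2));
        Entity b = new Entity(new Key(1, 2));
        b.valorFaturadoAgua = BigDecimal.TEN; // differs in state...
        System.out.println(a.equals(b));      // ...but still true: same comp_id
    }
}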
213,912
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package com.amazonaws.c3r.config; import com.amazonaws.c3r.exception.C3rIllegalArgumentException; import com.amazonaws.c3r.internal.Limits; import com.amazonaws.c3r.internal.Validatable; import lombok.EqualsAndHashCode; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; /** * Description of how columns of data in a CSV cleartext file map to the values in a CSV ciphertext file. */ @EqualsAndHashCode public abstract class TableSchema implements Validatable { /** * Whether the data source has header values specified. * * <p> * Of note, this does need to be a {@code Boolean} and not a {@code boolean}. Since the latter has a default value of false, * it causes different error messages to be returned between {@code PositionalTableSchema} and {@code MappedTableSchema} when * the object isn't initialized properly from a JSON file. Different exception types are thrown from different points in the * code with {@code boolean} is used so {@code Boolean} provides a better user experience. */ private Boolean headerRow; /** * Specifications for output columns. * * @return Descriptions for how each output column should be created */ public abstract List<ColumnSchema> getColumns(); /** * If an input file does not contain column headers, this function will return position-based column headers that * can be used in their place. * * @return Positional names to use for columns in an input file if applicable, else {@code null} */ public abstract List<ColumnHeader> getPositionalColumnHeaders(); /** * Determines if there's a need to run through the source file in order to ensure configuration constraints. * <p> * allowDuplicates set to false would require knowing if any data appears more than once to ensure the * restriction is met. * </p> * * @return {@code true} If there are any settings that require preprocessing */ public boolean requiresPreprocessing() { return getColumns().stream().anyMatch(ColumnSchema::requiresPreprocessing); } /** * Check schema for valid configuration state. 
* <ul> * <li>There must be at least one column</li> * <li>There can't be more than the number of allowed columns in the output</li> * <li>Each target header name can only be used once</li> * </ul> * * @throws C3rIllegalArgumentException If one of the rules is violated */ @Override public void validate() { // Make sure we actually have a schema if (headerRow == null && getColumns() == null) { throw new C3rIllegalArgumentException("Schema was not initialized."); } // Check that headerRow is valid if (headerRow == null) { throw new C3rIllegalArgumentException("Schema must specify whether or not data has a header row."); } // Validate column information now that schema is complete final var columns = getColumns(); if (columns == null || columns.isEmpty()) { throw new C3rIllegalArgumentException("At least one data column must be provided in the config file."); } if (columns.size() > Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX) { throw new C3rIllegalArgumentException( "An encrypted table can have at most " + Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX + " columns" + " but this schema specifies " + getColumns().size() + "."); } // Verify we have no duplicate target column headers // NOTE: target column headers must have already been normalized when checking for duplicates here // to ensure we don't get different column headers that end up being the same post-normalization. final Set<ColumnHeader> duplicateTargets = getColumns().stream() .collect(Collectors.groupingBy(ColumnSchema::getTargetHeader)).entrySet() .stream().filter(e -> e.getValue().size() > 1) .map(Map.Entry::getKey) .collect(Collectors.toSet()); if (!duplicateTargets.isEmpty()) { final String duplicates = duplicateTargets.stream().map(ColumnHeader::toString) .collect(Collectors.joining(", ")); throw new C3rIllegalArgumentException("Target header name can only be used once. Duplicates found: " + duplicates); } } /** * The set of all column headers named in the schema (i.e., source and target). * If a source column name is used more than once or is reused as a target it will only be here once by definition of a set. * * @return Set of column names used in this schema */ public Set<ColumnHeader> getSourceAndTargetHeaders() { return getColumns().stream() .flatMap(c -> Stream.of(c.getSourceHeader(), c.getTargetHeader())) .collect(Collectors.toSet()); } /** * Set whether the table schema has a header row. * * @param hasHeaderRow {@code true} if the data has a header row */ protected void setHeaderRowFlag(final boolean hasHeaderRow) { headerRow = hasHeaderRow; } /** * Get whether the table schema has a header row. * * @return {@code true} if the data has a header row */ public Boolean getHeaderRowFlag() { return headerRow; } /** * Verifies that settings are consistent.
* - If the clean room doesn't allow cleartext columns, verify none are in the schema * * @param schema The TableSchema to validate * @param settings The ClientSettings to validate the TableSchema against * @throws C3rIllegalArgumentException If any of the rules are violated */ public static void validateSchemaAgainstClientSettings(final TableSchema schema, final ClientSettings settings) { if (!settings.isAllowCleartext()) { final Map<ColumnType, List<ColumnSchema>> typeMap = schema.getColumns().stream() .collect(Collectors.groupingBy(ColumnSchema::getType)); if (typeMap.containsKey(ColumnType.CLEARTEXT)) { final String targetColumns = typeMap.get(ColumnType.CLEARTEXT).stream() .map(column -> column.getTargetHeader().toString()) .collect(Collectors.joining("`, `")); throw new C3rIllegalArgumentException( "Cleartext columns found in the schema, but allowCleartext is false. Target " + "column names: [`" + targetColumns + "`]"); } } } }
aws/c3r
c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/TableSchema.java
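The duplicate-target-header check in validate() above is a small stream idiom: group values by themselves, then keep the groups with more than one member. A standalone sketch with plain strings standing in for ColumnHeader:

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

// Duplicate detection via groupingBy, as used in TableSchema.validate().
public class DuplicateHeaderCheck {
    static Set<String> duplicates(List<String> targetHeaders) {
        return targetHeaders.stream()
                .collect(Collectors.groupingBy(Function.identity()))
                .entrySet().stream()
                .filter(e -> e.getValue().size() > 1)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        Set<String> dups = duplicates(List.of("name", "email", "name", "ssn"));
        // Mirrors the C3rIllegalArgumentException message in validate() above.
        System.out.println("Target header name can only be used once. Duplicates found: "
                + String.join(", ", dups));
    }
}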
213,913
/** * Copyright (c) 2008-2023, MOVES Institute, Naval Postgraduate School (NPS). All rights reserved. * This work is provided under a BSD-style open-source license, see project * <a href="https://savage.nps.edu/opendis7-java/license.html" target="_blank">license.html</a> and <a href="https://savage.nps.edu/opendis7-java/license.txt" target="_blank">license.txt</a> */ // header autogenerated using string template dis7javalicense.txt // autogenerated using string template entitytypecommon.txt package edu.nps.moves.dis7.entities.chn.platform.land; import edu.nps.moves.dis7.pdus.*; import edu.nps.moves.dis7.enumerations.*; /** * <p> Entity class <b><code>Type74Twin37mmAAGun</code></b> collects multiple enumeration values together to uniquely define this entity. </p> * <p> <i>Usage:</i> create an instance of this class with <code>Type74Twin37mmAAGun.createInstance()</code> or <code>new Type74Twin37mmAAGun()</code>. </p> * <ul> * <li> Country: China, People's Republic of (CHN) = <code>45</code>; </li> * <li> Entity kind: PlatformDomain = <code>LAND</code>; </li> * <li> Domain: Platform = <code>1</code>; </li> * <li> Category: Air Defense/Missile Defense Unit Equipment = <code>28</code>; </li> * <li> SubCategory: TowedVSHORADGunMissileSystem = <code>5</code>; </li> * <li> Specific: NORINCO37mmAAGuns = <code>2</code>; </li> * <li> Entity type uid: 16963; </li> * <li> Online document reference: <a href="https://gitlab.nps.edu/Savage/NetworkedGraphicsMV3500/-/blob/master/specifications/README.md" target="_blank">SISO-REF-010-v33-DRAFT-20231217-d10 (2023-12-17)</a>. </li> * </ul> * <p> Full name: edu.nps.moves.dis7.source.generator.entityTypes.GenerateEntityTypes$ExtraElem@5ccddd20. </p> * @see Country#CHINA_PEOPLES_REPUBLIC_OF_CHN * @see EntityKind#PLATFORM * @see Domain * @see PlatformDomain * @see Category * @see AirDefenseMissileDefenseUnitEquipment * @see SubCategory */ public final class Type74Twin37mmAAGun extends EntityType { /** Default constructor */ public Type74Twin37mmAAGun() { setCountry(Country.CHINA_PEOPLES_REPUBLIC_OF_CHN); setEntityKind(EntityKind.PLATFORM); setDomain(Domain.inst(PlatformDomain.LAND)); setCategory((byte)28); // uid 16937, Air Defense/Missile Defense Unit Equipment setSubCategory((byte)5); // uid 16958, Towed VSHORAD Gun/Missile System setSpecific((byte)2); // uid 16960, NORINCO 37mm AA Guns setExtra((byte)3); // uid 16963, Type 74 Twin 37mm AA Gun } /** Create a new instance of this final (unmodifiable) class * @return copy of class for use as data */ public static Type74Twin37mmAAGun createInstance() { return new Type74Twin37mmAAGun(); } }
open-dis/opendis7-java
src-generated/edu/nps/moves/dis7/entities/chn/platform/land/Type74Twin37mmAAGun.java
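The generated class above pins one point in the SISO entity-type space. A library-free sketch of that tuple, using only the values set in the constructor above (Platform = 1, Land = 1, China = 45, category 28, subcategory 5, specific 2, extra 3); the record name is illustrative:

// Library-free stand-in for the entity-type tuple encoded by
// Type74Twin37mmAAGun's constructor.
public class EntityTypeTupleDemo {
    record EntityTypeTuple(int kind, int domain, int country,
                           int category, int subCategory, int specific, int extra) {}

    public static void main(String[] args) {
        // Platform (1), Land (1), China (45), AD/MD unit equipment (28),
        // towed VSHORAD gun/missile system (5), NORINCO 37mm AA guns (2),
        // Type 74 twin 37mm AA gun (3).
        EntityTypeTuple type74 = new EntityTypeTuple(1, 1, 45, 28, 5, 2, 3);
        System.out.println(type74);
    }
}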
213,914
package joins; import utilities.Tokenizer; import utilities.Pair; import utilities.SimilarityFunction; import gnu.trove.iterator.TIntIterator; import gnu.trove.list.TIntList; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.PriorityQueue; import java.util.Set; import org.scify.jedai.datamodel.EntityProfile; import org.scify.jedai.datamodel.IdDuplicates; import org.scify.jedai.datareader.entityreader.EntitySerializationReader; import org.scify.jedai.datareader.groundtruthreader.GtSerializationReader; import utilities.RepresentationModel; /** * * @author Georgios */ public class TopKSchemaAgnosticJoin extends AbstractJoin { public static void main(String[] args) { int[] K = {1, 4, 26, 1, 1, 1, 1, 2, 1, 5}; boolean[] reversed = {true, false, true, false, false, false, false, true, true, true}; boolean[] preprocessing = {true, true, true, false, false, false, false, true, false, false}; String[] mainDirs = {"/home/gap2/Documents/blockingNN/data/schemaAgnostic/", "/home/gap2/Documents/blockingNN/data/preprocessedSA/" }; String[] datasetsD1 = {"restaurant1Profiles", "abtProfiles", "amazonProfiles", "dblpProfiles", "imdbProfilesNEW", "imdbProfilesNEW", "tmdbProfiles", "walmartProfiles", "dblpProfiles2", "imdbProfiles"}; String[] datasetsD2 = {"restaurant2Profiles", "buyProfiles", "gpProfiles", "acmProfiles", "tmdbProfiles", "tvdbProfiles", "tvdbProfiles", "amazonProfiles2", "scholarProfiles", "dbpediaProfiles"}; String[] groundtruthDirs = {"restaurantsIdDuplicates", "abtBuyIdDuplicates", "amazonGpIdDuplicates", "dblpAcmIdDuplicates", "imdbTmdbIdDuplicates", "imdbTvdbIdDuplicates", "tmdbTvdbIdDuplicates", "amazonWalmartIdDuplicates", "dblpScholarIdDuplicates", "moviesIdDuplicates"}; String[] datasetsD1Rv = {"restaurant2Profiles", "buyProfiles", "gpProfiles", "acmProfiles", "tmdbProfiles", "tvdbProfiles", "tvdbProfiles", "amazonProfiles2", "scholarProfiles", "dbpediaProfiles"}; String[] datasetsD2Rv = {"restaurant1Profiles", "abtProfiles", "amazonProfiles", "dblpProfiles", "imdbProfilesNEW", "imdbProfilesNEW", "tmdbProfiles", "walmartProfiles", "dblpProfiles2", "imdbProfiles"}; SimilarityFunction[] simFunction = {SimilarityFunction.DICE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM, SimilarityFunction.COSINE_SIM}; Tokenizer[] tokenizer = {Tokenizer.CHARACTER_FOURGRAMS_MULTISET, Tokenizer.CHARACTER_TRIGRAMS_MULTISET, Tokenizer.CHARACTER_FIVEGRAMS_MULTISET, Tokenizer.CHARACTER_BIGRAMS_MULTISET, Tokenizer.CHARACTER_FIVEGRAMS, Tokenizer.CHARACTER_FIVEGRAMS, Tokenizer.CHARACTER_FIVEGRAMS, Tokenizer.CHARACTER_FOURGRAMS_MULTISET, Tokenizer.CHARACTER_FOURGRAMS, Tokenizer.CHARACTER_FOURGRAMS}; for (int datasetId = 9; datasetId < groundtruthDirs.length; datasetId++) { System.out.println("\n\nCurrent dataset\t:\t" + datasetId); // read source entities int dirId = preprocessing[datasetId] ? 1 : 0; String sPath = reversed[datasetId] ? datasetsD1Rv[datasetId] : datasetsD1[datasetId]; String sourcePath = mainDirs[dirId] + sPath; EntitySerializationReader reader = new EntitySerializationReader(sourcePath); List<EntityProfile> sourceEntities = reader.getEntityProfiles(); System.out.println("Source Entities: " + sourceEntities.size()); // read target entities String tPath = reversed[datasetId] ? 
datasetsD2Rv[datasetId] : datasetsD2[datasetId]; String targetPath = mainDirs[dirId] + tPath; reader = new EntitySerializationReader(targetPath); List<EntityProfile> targetEntities = reader.getEntityProfiles(); System.out.println("Target Entities: " + targetEntities.size()); // read ground-truth file String groundTruthPath = mainDirs[dirId] + groundtruthDirs[datasetId]; GtSerializationReader gtReader = new GtSerializationReader(groundTruthPath); Set<IdDuplicates> gtDuplicates = gtReader.getDuplicatePairs(sourceEntities, targetEntities); System.out.println("GT Duplicates Entities: " + gtDuplicates.size()); System.out.println(); double averageIndexingTime = 0; double averageQueryingTime = 0; for (int iteration = 0; iteration < ITERATIONS; iteration++) { long time1 = System.currentTimeMillis(); int noOfEntities = sourceEntities.size(); SOURCE_FREQUENCY = new int[noOfEntities]; final Map<String, TIntList> index = indexSource(tokenizer[datasetId], sourceEntities); int[] counters = new int[noOfEntities]; int[] flags = new int[noOfEntities]; for (int i = 0; i < noOfEntities; i++) { flags[i] = -1; } long time2 = System.currentTimeMillis(); int noOfTargetEntities = targetEntities.size(); final List<Pair> candidatePairs = new ArrayList<>(); for (int targetId = 0; targetId < noOfTargetEntities; targetId++) { final String query = RepresentationModel.getAttributeValue(targetEntities.get(targetId)); final Set<String> tokens = RepresentationModel.tokenizeEntity(query, tokenizer[datasetId]); final TIntSet candidates = new TIntHashSet(); for (String token : tokens) { final TIntList sourceEnts = index.get(token); if (sourceEnts == null) { continue; } for (TIntIterator tIterator = sourceEnts.iterator(); tIterator.hasNext();) { int sourceId = tIterator.next(); candidates.add(sourceId); if (flags[sourceId] != targetId) { counters[sourceId] = 0; flags[sourceId] = targetId; } counters[sourceId]++; } } if (candidates.isEmpty()) { continue; } float minimumWeight = 0; final PriorityQueue<Float> pq = new PriorityQueue<>(); for (TIntIterator tIterator = candidates.iterator(); tIterator.hasNext();) { int sourceId = tIterator.next(); float commonTokens = counters[sourceId]; float sim = 0; switch (simFunction[datasetId]) { case COSINE_SIM: sim = commonTokens / (float) Math.sqrt(((float) SOURCE_FREQUENCY[sourceId]) * tokens.size()); break; case DICE_SIM: sim = 2 * commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size()); break; case JACCARD_SIM: sim = commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size() - commonTokens); break; } if (minimumWeight < sim) { pq.add(sim); if (K[datasetId] < pq.size()) { minimumWeight = pq.poll(); } } } minimumWeight = pq.poll(); for (TIntIterator tIterator = candidates.iterator(); tIterator.hasNext();) { int sourceId = tIterator.next(); float commonTokens = counters[sourceId]; float sim = 0; switch (simFunction[datasetId]) { case COSINE_SIM: sim = commonTokens / (float) Math.sqrt(((float) SOURCE_FREQUENCY[sourceId]) * tokens.size()); break; case DICE_SIM: sim = 2 * commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size()); break; case JACCARD_SIM: sim = commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size() - commonTokens); break; } if (minimumWeight <= sim) { if (reversed[datasetId]) { candidatePairs.add(new Pair(targetId, sourceId)); } else { candidatePairs.add(new Pair(sourceId, targetId)); } } } } long time3 = System.currentTimeMillis(); averageIndexingTime += time2 - time1; averageQueryingTime += time3 - time2; if (iteration == 0) { // true positive long tp_ = 0; // total 
verifications long verifications_ = 0; for (Pair p : candidatePairs) { IdDuplicates pair = new IdDuplicates(p.getEntityId1(), p.getEntityId2()); if (gtDuplicates.contains(pair)) { tp_ += 1; } verifications_ += 1; } float recall_ = (float) tp_ / (float) gtDuplicates.size(); float precision_ = (float) tp_ / (float) verifications_; float f1_ = 2 * ((precision_ * recall_) / (precision_ + recall_)); System.out.println("Recall\t:\t" + recall_); System.out.println("Precision\t:\t" + precision_); System.out.println("F-Measure\t:\t" + f1_); } System.out.println("Candidates\t:\t" + candidatePairs.size()); } System.out.println("Average indexing run-time\t:\t" + averageIndexingTime / ITERATIONS); System.out.println("Average querying run-time\t:\t" + averageQueryingTime / ITERATIONS); } } }
gpapadis/ContinuousFilteringBenchmark
joins/src/joins/TopKSchemaAgnosticJoin.java
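The candidate pruning above keeps a running top-k threshold in a min-heap: the heap holds the k best similarities seen so far, and its head (minimumWeight) is the cut-off below which new candidates are discarded. A standalone sketch of that idiom; unlike the original, it guards the empty-heap case instead of unboxing a possible null from poll():

import java.util.List;
import java.util.PriorityQueue;

// Top-k thresholding with a min-heap, as in TopKSchemaAgnosticJoin.
public class TopKThreshold {
    /** Returns the k-th best score (or the smallest kept score if fewer than k). */
    static float kthBest(List<Float> sims, int k) {
        PriorityQueue<Float> pq = new PriorityQueue<>(); // min-heap
        float minimumWeight = 0f;
        for (float sim : sims) {
            if (minimumWeight < sim) {
                pq.add(sim);
                if (k < pq.size()) {
                    minimumWeight = pq.poll(); // evict the smallest of k+1
                }
            }
        }
        // Guard added here: the original polls unconditionally.
        return pq.isEmpty() ? 0f : pq.peek();
    }

    public static void main(String[] args) {
        List<Float> sims = List.of(0.2f, 0.9f, 0.5f, 0.7f, 0.1f, 0.8f);
        System.out.println(kthBest(sims, 3)); // 0.7: third-best similarity
    }
}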
213,915
/* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Copyright (C) 2019 ScyllaDB * * Modified by ScyllaDB */ package com.datastax.driver.core; import static com.datastax.driver.core.SchemaElement.KEYSPACE; import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.BusyConnectionException; import com.datastax.driver.core.exceptions.ConnectionException; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.exceptions.SyntaxError; import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; import com.datastax.driver.core.policies.AddressTranslator; import com.datastax.driver.core.policies.IdentityTranslator; import com.datastax.driver.core.policies.LatencyAwarePolicy; import com.datastax.driver.core.policies.LoadBalancingPolicy; import com.datastax.driver.core.policies.PagingOptimizingLoadBalancingPolicy; import com.datastax.driver.core.policies.Policies; import com.datastax.driver.core.policies.ReconnectionPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; import com.datastax.driver.core.utils.MoreFutures; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Functions; import com.google.common.base.Predicates; import com.google.common.base.Strings; import com.google.common.base.Throwables; import com.google.common.collect.HashMultimap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.MapMaker; import com.google.common.collect.SetMultimap; import com.google.common.collect.Sets; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.SettableFuture; import com.google.common.util.concurrent.Uninterruptibles; import java.io.Closeable; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URL; import java.net.UnknownHostException; import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.ResourceBundle; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import 
java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Information and known state of a Cassandra cluster. * * <p>This is the main entry point of the driver. A simple example of access to a Cassandra cluster * would be: * * <pre> * Cluster cluster = Cluster.builder().addContactPoint("192.168.0.1").build(); * Session session = cluster.connect("db1"); * * for (Row row : session.execute("SELECT * FROM table1")) * // do something ... * </pre> * * <p>A cluster object maintains a permanent connection to one of the cluster nodes which it uses * solely to maintain information on the state and current topology of the cluster. Using the * connection, the driver will discover all the nodes currently in the cluster as well as new nodes * joining the cluster subsequently. */ public class Cluster implements Closeable { private static final Logger logger = LoggerFactory.getLogger(Cluster.class); private static final ResourceBundle driverProperties = ResourceBundle.getBundle("com.datastax.driver.core.Driver"); static { logDriverVersion(); // Force initialization to fail fast if there is an issue detecting the version GuavaCompatibility.init(); } @VisibleForTesting static final int NEW_NODE_DELAY_SECONDS = SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); // Used in integration tests to force the driver to negotiate the protocol // version even if it was explicitly set. @VisibleForTesting static boolean shouldAlwaysNegotiateProtocolVersion = false; // Some per-JVM number that allows to generate unique cluster names when // multiple Cluster instance are created in the same JVM. private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); private static final int NOTIF_LOCK_TIMEOUT_SECONDS = SystemProperties.getInt("com.datastax.driver.NOTIF_LOCK_TIMEOUT_SECONDS", 60); final Manager manager; /** * Constructs a new Cluster instance. * * <p>This constructor is mainly exposed so Cluster can be sub-classed as a means to make * testing/mocking easier or to "intercept" its method call. Most users shouldn't extend this * class however and should prefer either using the {@link #builder} or calling {@link #buildFrom} * with a custom Initializer. * * @param name the name to use for the cluster (this is not the Cassandra cluster name, see {@link * #getClusterName}). * @param contactPoints the list of contact points to use for the new cluster. * @param configuration the configuration for the new cluster. */ protected Cluster(String name, List<EndPoint> contactPoints, Configuration configuration) { this(name, contactPoints, configuration, Collections.<Host.StateListener>emptySet()); } /** * Constructs a new Cluster instance. * * <p>This constructor is mainly exposed so Cluster can be sub-classed as a means to make * testing/mocking easier or to "intercept" its method call. Most users shouldn't extend this * class however and should prefer using the {@link #builder}. * * @param initializer the initializer to use. 
* @see #buildFrom */ protected Cluster(Initializer initializer) { this( initializer.getClusterName(), checkNotEmpty(initializer.getContactPoints()), initializer.getConfiguration(), initializer.getInitialListeners()); } private static List<EndPoint> checkNotEmpty(List<EndPoint> contactPoints) { if (contactPoints.isEmpty()) throw new IllegalArgumentException("Cannot build a cluster without contact points"); return contactPoints; } private Cluster( String name, List<EndPoint> contactPoints, Configuration configuration, Collection<Host.StateListener> listeners) { System.out.println("===== Using optimized driver!!! ====="); logger.info("===== Using optimized driver!!! ====="); this.manager = new Manager(name, contactPoints, configuration, listeners); } /** * Initialize this Cluster instance. * * <p>This method creates an initial connection to one of the contact points used to construct the * {@code Cluster} instance. That connection is then used to populate the cluster {@link * Metadata}. * * <p>Calling this method is optional in the sense that any call to one of the {@code connect} * methods of this object will automatically trigger a call to this method beforehand. It is thus * only useful to call this method if for some reason you want to populate the metadata (or test * that at least one contact point can be reached) without creating a first {@code Session}. * * <p>Please note that this method only creates one control connection for gathering cluster * metadata. In particular, it doesn't create any connection pools. Those are created when a new * {@code Session} is created through {@code connect}. * * <p>This method has no effect if the cluster is already initialized. * * @return this {@code Cluster} object. * @throws NoHostAvailableException if no host amongst the contact points can be reached. * @throws AuthenticationException if an authentication error occurs while contacting the initial * contact points. * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. */ public Cluster init() { register(new PagingOptimizingLatencyTracker()); this.manager.init(); return this; } /** * Build a new cluster based on the provided initializer. * * <p>Note that for building a cluster programmatically, Cluster.Builder provides a slightly less * verbose shortcut with {@link Builder#build}. * * <p>Also note that all the contact points provided by {@code initializer} must share the * same port. * * @param initializer the Cluster.Initializer to use * @return the newly created Cluster instance * @throws IllegalArgumentException if the list of contact points provided by {@code initializer} * is empty or if not all those contact points have the same port. */ public static Cluster buildFrom(Initializer initializer) { return new Cluster(initializer); } /** * Creates a new {@link Cluster.Builder} instance. * * <p>This is a convenience method for {@code new Cluster.Builder()}. * * @return the new cluster builder. */ public static Cluster.Builder builder() { return new Cluster.Builder(); } /** * Returns the current version of the driver. * * <p>This is intended for products that wrap or extend the driver, as a way to check * compatibility if end-users override the driver version in their application. * * @return the version.
*/ public static String getDriverVersion() { return driverProperties.getString("driver.version"); } /** * Logs the driver version to the console. * * <p>This method logs the version using the logger {@code com.datastax.driver.core} and level * {@code INFO}. */ public static void logDriverVersion() { Logger core = LoggerFactory.getLogger("com.datastax.driver.core"); core.info("DataStax Java driver {} for Apache Cassandra", getDriverVersion()); } /** * Creates a new session on this cluster but does not initialize it. * * <p>Because this method does not perform any initialization, it cannot fail. The initialization * of the session (the connection of the Session to the Cassandra nodes) will occur if either the * {@link Session#init} method is called explicitly, or whenever the returned session object is * used. * * <p>Once a session returned by this method gets initialized (see above), it will be set to no * keyspace. If you want to set such session to a keyspace, you will have to explicitly execute a * 'USE mykeyspace' query. * * <p>Note that if you do not particularly need to defer initialization, it is simpler to use one * of the {@code connect()} method of this class. * * @return a new, non-initialized session on this cluster. */ public Session newSession() { checkNotClosed(manager); return manager.newSession(); } /** * Creates a new session on this cluster and initialize it. * * <p>Note that this method will initialize the newly created session, trying to connect to the * Cassandra nodes before returning. If you only want to create a Session object without * initializing it right away, see {@link #newSession}. * * @return a new session on this cluster sets to no keyspace. * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has * not be called and this is the first connect call) and no host amongst the contact points * can be reached. * @throws AuthenticationException if an authentication error occurs while contacting the initial * contact points. * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. */ public Session connect() { try { return Uninterruptibles.getUninterruptibly(connectAsync()); } catch (ExecutionException e) { throw DriverThrowables.propagateCause(e); } } /** * Creates a new session on this cluster, initialize it and sets the keyspace to the provided one. * * <p>Note that this method will initialize the newly created session, trying to connect to the * Cassandra nodes before returning. If you only want to create a Session object without * initializing it right away, see {@link #newSession}. * * @param keyspace The name of the keyspace to use for the created {@code Session}. * @return a new session on this cluster sets to keyspace {@code keyspaceName}. * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has * not be called and this is the first connect call) and no host amongst the contact points * can be reached, or if no host can be contacted to set the {@code keyspace}. * @throws AuthenticationException if an authentication error occurs while contacting the initial * contact points. * @throws InvalidQueryException if the keyspace does not exist. * @throws IllegalStateException if the Cluster was closed prior to calling this method. 
This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. */ public Session connect(String keyspace) { try { return Uninterruptibles.getUninterruptibly(connectAsync(keyspace)); } catch (ExecutionException e) { throw DriverThrowables.propagateCause(e); } } /** * Creates a new session on this cluster and initializes it asynchronously. * * <p>This will also initialize the {@code Cluster} if needed; note that cluster initialization * happens synchronously on the thread that called this method. Therefore it is recommended to * initialize the cluster at application startup, and not rely on this method to do it. * * <p>Note that if a {@linkplain Configuration#getDefaultKeyspace() default keyspace} has been * configured for use with a DBaaS cluster, this method will attempt to set the session keyspace * to that keyspace, effectively behaving like {@link #connect(String)}. * * @return a future that will complete when the session is fully initialized. * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has * not been called and this is the first connect call) and no host amongst the contact points * can be reached. * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. * @see #connect() */ public ListenableFuture<Session> connectAsync() { String defaultKeyspace = getConfiguration().getDefaultKeyspace(); return connectAsync(defaultKeyspace); } /** * Creates a new session on this cluster, and initializes it to the given keyspace asynchronously. * * <p>This will also initialize the {@code Cluster} if needed; note that cluster initialization * happens synchronously on the thread that called this method. Therefore it is recommended to * initialize the cluster at application startup, and not rely on this method to do it. * * @param keyspace The name of the keyspace to use for the created {@code Session}. * @return a future that will complete when the session is fully initialized. * @throws NoHostAvailableException if the Cluster has not been initialized yet ({@link #init} has * not been called and this is the first connect call) and no host amongst the contact points * can be reached. * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. 
*/ public ListenableFuture<Session> connectAsync(final String keyspace) { checkNotClosed(manager); init(); final Session session = manager.newSession(); ListenableFuture<Session> sessionInitialized = session.initAsync(); if (keyspace == null) { return sessionInitialized; } else { final String useQuery = "USE " + keyspace; ListenableFuture<ResultSet> keyspaceSet = GuavaCompatibility.INSTANCE.transformAsync( sessionInitialized, new AsyncFunction<Session, ResultSet>() { @Override public ListenableFuture<ResultSet> apply(Session session) throws Exception { return session.executeAsync(useQuery); } }); ListenableFuture<ResultSet> withErrorHandling = GuavaCompatibility.INSTANCE.withFallback( keyspaceSet, new AsyncFunction<Throwable, ResultSet>() { @Override public ListenableFuture<ResultSet> apply(Throwable t) throws Exception { session.closeAsync(); if (t instanceof SyntaxError) { // Give a more explicit message, because it's probably caused by a bad keyspace // name SyntaxError e = (SyntaxError) t; t = new SyntaxError( e.getEndPoint(), String.format( "Error executing \"%s\" (%s). Check that your keyspace name is valid", useQuery, e.getMessage())); } throw Throwables.propagate(t); } }); return GuavaCompatibility.INSTANCE.transform(withErrorHandling, Functions.constant(session)); } } /** * The name of this cluster object. * * <p>Note that this is not the Cassandra cluster name, but rather a name assigned to this Cluster * object. Currently, that name is only used for one purpose: to distinguish exposed JMX metrics * when multiple Cluster instances live in the same JVM (which should be rare in the first place). * That name can be set at Cluster building time (through {@link Builder#withClusterName} for * instance) but will default to a name like {@code cluster1} where each Cluster instance in the * same JVM will have a different number. * * @return the name for this cluster instance. */ public String getClusterName() { return manager.clusterName; } /** * Returns read-only metadata on the connected cluster. * * <p>This includes the known nodes with their status as seen by the driver, as well as the schema * definitions. Since this return metadata on the connected cluster, this method may trigger the * creation of a connection if none has been established yet (neither {@code init()} nor {@code * connect()} has been called yet). * * @return the cluster metadata. * @throws NoHostAvailableException if the Cluster has not been initialized yet and no host * amongst the contact points can be reached. * @throws AuthenticationException if an authentication error occurs while contacting the initial * contact points. * @throws IllegalStateException if the Cluster was closed prior to calling this method. This can * occur either directly (through {@link #close()} or {@link #closeAsync()}), or as a result * of an error while initializing the Cluster. */ public Metadata getMetadata() { manager.init(); return manager.metadata; } /** * The cluster configuration. * * @return the cluster configuration. */ public Configuration getConfiguration() { return manager.configuration; } /** * The cluster metrics. * * @return the cluster metrics, or {@code null} if this cluster has not yet been {@link #init() * initialized}, or if metrics collection has been disabled (that is if {@link * Configuration#getMetricsOptions} returns {@code null}). */ public Metrics getMetrics() { checkNotClosed(manager); return manager.metrics; } /** * Registers the provided listener to be notified on hosts up/down/added/removed events. 
   *
   * <p>Registering the same listener multiple times is a no-op.
   *
   * <p>This method should be used to register additional listeners on an already-initialized
   * cluster. To add listeners to a cluster object prior to its initialization, use {@link
   * Builder#withInitialListeners(Collection)}. Calling this method on a non-initialized cluster
   * will result in the listener being {@link
   * com.datastax.driver.core.Host.StateListener#onRegister(Cluster) notified} twice of cluster
   * registration: once inside this method, and once at cluster initialization.
   *
   * @param listener the new {@link Host.StateListener} to register.
   * @return this {@code Cluster} object.
   */
  public Cluster register(Host.StateListener listener) {
    checkNotClosed(manager);
    boolean added = manager.listeners.add(listener);
    if (added) listener.onRegister(this);
    return this;
  }

  /**
   * Unregisters the provided listener from being notified on host events.
   *
   * <p>This method is a no-op if {@code listener} hasn't previously been registered against this
   * Cluster.
   *
   * @param listener the {@link Host.StateListener} to unregister.
   * @return this {@code Cluster} object.
   */
  public Cluster unregister(Host.StateListener listener) {
    checkNotClosed(manager);
    boolean removed = manager.listeners.remove(listener);
    if (removed) listener.onUnregister(this);
    return this;
  }

  /**
   * Registers the provided tracker to be updated with host read latencies.
   *
   * <p>Registering the same tracker multiple times is a no-op.
   *
   * <p>Beware that the registered tracker's {@link LatencyTracker#update(Host, Statement,
   * Exception, long) update} method will be called very frequently (at the end of every query to a
   * Cassandra host) and should thus not be costly.
   *
   * <p>The main use case for a {@link LatencyTracker} is to allow load balancing policies to
   * implement latency awareness. For example, {@link LatencyAwarePolicy} registers its own
   * internal {@code LatencyTracker} (automatically, you don't have to call this method directly).
   *
   * @param tracker the new {@link LatencyTracker} to register.
   * @return this {@code Cluster} object.
   */
  public Cluster register(LatencyTracker tracker) {
    checkNotClosed(manager);
    boolean added = manager.latencyTrackers.add(tracker);
    if (added) tracker.onRegister(this);
    return this;
  }

  /**
   * Unregisters the provided latency tracker from being updated with host read latencies.
   *
   * <p>This method is a no-op if {@code tracker} hasn't previously been registered against this
   * Cluster.
   *
   * @param tracker the {@link LatencyTracker} to unregister.
   * @return this {@code Cluster} object.
   */
  public Cluster unregister(LatencyTracker tracker) {
    checkNotClosed(manager);
    boolean removed = manager.latencyTrackers.remove(tracker);
    if (removed) tracker.onUnregister(this);
    return this;
  }

  /**
   * Registers the provided listener to be updated with schema change events.
   *
   * <p>Registering the same listener multiple times is a no-op.
   *
   * @param listener the new {@link SchemaChangeListener} to register.
   * @return this {@code Cluster} object.
   */
  public Cluster register(SchemaChangeListener listener) {
    checkNotClosed(manager);
    boolean added = manager.schemaChangeListeners.add(listener);
    if (added) listener.onRegister(this);
    return this;
  }

  /**
   * Unregisters the provided schema change listener from being updated with schema change events.
   *
   * <p>This method is a no-op if {@code listener} hasn't previously been registered against this
   * Cluster.
   *
   * @param listener the {@link SchemaChangeListener} to unregister.
   * @return this {@code Cluster} object.
   */
  public Cluster unregister(SchemaChangeListener listener) {
    checkNotClosed(manager);
    boolean removed = manager.schemaChangeListeners.remove(listener);
    if (removed) listener.onUnregister(this);
    return this;
  }

  /**
   * Initiates a shutdown of this cluster instance.
   *
   * <p>This method is asynchronous and returns a future on the completion of the shutdown process.
   * As soon as the cluster is shut down, no new requests will be accepted, but already submitted
   * queries are allowed to complete. This method closes all connections from all sessions and
   * reclaims all resources used by this Cluster instance.
   *
   * <p>If for some reason you wish to expedite this process, {@link CloseFuture#force} can be
   * called on the result future.
   *
   * <p>This method has no particular effect if the cluster was already closed (in which case the
   * returned future will return immediately).
   *
   * @return a future on the completion of the shutdown process.
   */
  public CloseFuture closeAsync() {
    return manager.close();
  }

  /**
   * Initiates a shutdown of this cluster instance and blocks until that shutdown completes.
   *
   * <p>This method is a shortcut for {@code closeAsync().get()}.
   */
  @Override
  public void close() {
    try {
      closeAsync().get();
    } catch (ExecutionException e) {
      throw DriverThrowables.propagateCause(e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Whether this Cluster instance has been closed.
   *
   * <p>Note that this method returns true as soon as one of the close methods ({@link #closeAsync}
   * or {@link #close}) has been called; it does not guarantee that the closing is done. If you want
   * to guarantee that the closing is done, you can call {@code close()} and wait until it returns
   * (or call the get method on {@code closeAsync()} with a very short timeout and check this
   * doesn't time out).
   *
   * @return {@code true} if this Cluster instance has been closed, {@code false} otherwise.
   */
  public boolean isClosed() {
    return manager.closeFuture.get() != null;
  }

  private static void checkNotClosed(Manager manager) {
    if (manager.errorDuringInit()) {
      throw new IllegalStateException(
          "Can't use this cluster instance because it encountered an error in its initialization",
          manager.getInitException());
    } else if (manager.isClosed()) {
      throw new IllegalStateException(
          "Can't use this cluster instance because it was previously closed");
    }
  }

  /**
   * Initializer for {@link Cluster} instances.
   *
   * <p>If you want to create a new {@code Cluster} instance programmatically, then it is advised to
   * use {@link Cluster.Builder} which can be obtained from the {@link Cluster#builder} method.
   *
   * <p>But it is also possible to implement a custom {@code Initializer} that retrieves
   * initialization information from a web service or from a configuration file.
   */
  public interface Initializer {

    /**
     * An optional name for the created cluster.
     *
     * <p>Such a name is optional (a default name will be created otherwise) and is currently only
     * used for JMX reporting of metrics. See {@link Cluster#getClusterName} for more information.
     *
     * @return the name for the created cluster or {@code null} to use an automatically generated
     *     name.
     */
    public String getClusterName();

    /**
     * Returns the initial Cassandra hosts to connect to.
     *
     * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} for more
     *     details on contact points.
     */
    public List<EndPoint> getContactPoints();

    /**
     * The configuration to use for the new cluster.
     *
     * <p>Note that some configuration can be modified after the cluster initialization but some
     * others cannot. In particular, the ones that cannot be changed afterwards include:
     *
     * <ul>
     *   <li>the port used to connect to Cassandra nodes (see {@link ProtocolOptions}).
     *   <li>the policies used (see {@link Policies}).
     *   <li>the authentication info provided (see {@link Configuration}).
     *   <li>whether metrics are enabled (see {@link Configuration}).
     * </ul>
     *
     * @return the configuration to use for the new cluster.
     */
    public Configuration getConfiguration();

    /**
     * Optional listeners to register against the newly created cluster.
     *
     * <p>Note that contrary to listeners registered post Cluster creation, the listeners returned
     * by this method will see {@link Host.StateListener#onAdd} events for the initial contact
     * points.
     *
     * @return a possibly empty collection of {@code Host.StateListener} to register against the
     *     newly created cluster.
     */
    public Collection<Host.StateListener> getInitialListeners();
  }

  /** Helper class to build {@link Cluster} instances. */
  public static class Builder implements Initializer {

    private String clusterName;
    private final List<InetSocketAddress> rawHostAndPortContactPoints =
        new ArrayList<InetSocketAddress>();
    private final List<InetAddress> rawHostContactPoints = new ArrayList<InetAddress>();
    private final List<EndPoint> contactPoints = new ArrayList<EndPoint>();
    private int port = ProtocolOptions.DEFAULT_PORT;
    private int maxSchemaAgreementWaitSeconds =
        ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS;
    private ProtocolVersion protocolVersion;
    private AuthProvider authProvider = AuthProvider.NONE;

    private final Policies.Builder policiesBuilder = Policies.builder();
    private final Configuration.Builder configurationBuilder = Configuration.builder();

    private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE;
    private SSLOptions sslOptions = null;
    private boolean metricsEnabled = true;
    private boolean jmxEnabled = true;
    private boolean allowBetaProtocolVersion = false;
    private boolean noCompact = false;
    private boolean useAdvancedShardAwareness = true;
    private boolean schemaQueriesPaged = true;
    private int localPortLow = ProtocolOptions.DEFAULT_LOCAL_PORT_LOW;
    private int localPortHigh = ProtocolOptions.DEFAULT_LOCAL_PORT_HIGH;

    private Collection<Host.StateListener> listeners;

    @Override
    public String getClusterName() {
      return clusterName;
    }

    @Override
    public List<EndPoint> getContactPoints() {
      // Use a set to remove duplicate endpoints
      Set<EndPoint> allContactPoints = new LinkedHashSet<EndPoint>(contactPoints);
      // If contact points were provided as InetAddress/InetSocketAddress, assume the default
      // endpoint factory is used.
      for (InetAddress address : rawHostContactPoints) {
        allContactPoints.add(new TranslatedAddressEndPoint(new InetSocketAddress(address, port)));
      }
      for (InetSocketAddress socketAddress : rawHostAndPortContactPoints) {
        allContactPoints.add(new TranslatedAddressEndPoint(socketAddress));
      }
      return new ArrayList<EndPoint>(allContactPoints);
    }

    /**
     * An optional name for the created cluster.
     *
     * <p>Note: this is not related to the Cassandra cluster name (though you are free to provide
     * the same name). See {@link Cluster#getClusterName} for details.
     *
     * <p>If you use this method and create more than one Cluster instance in the same JVM (which
     * should be avoided unless you need to connect to multiple Cassandra clusters), you should make
     * sure each Cluster instance gets a unique name or you may have a problem with JMX reporting.
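     *
     * <p>For illustration, a hedged sketch (the contact point and the name {@code "analytics-1"}
     * are placeholders, not recommendations):
     *
     * <pre>{@code
     * Cluster cluster = Cluster.builder()
     *     .addContactPoint("127.0.0.1")
     *     .withClusterName("analytics-1")
     *     .build();
     * }</pre>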
     *
     * @param name the cluster name to use for the created Cluster instance.
     * @return this Builder.
     */
    public Builder withClusterName(String name) {
      this.clusterName = name;
      return this;
    }

    /**
     * The port to use to connect to the Cassandra host.
     *
     * <p>If not set through this method, the default port (9042) will be used instead.
     *
     * @param port the port to set.
     * @return this Builder.
     */
    public Builder withPort(int port) {
      this.port = port;
      return this;
    }

    /**
     * Creates a cluster connection using the latest development protocol version, which is
     * currently in beta. Calling this method will result in the USE_BETA flag being set in all
     * outgoing messages, which allows the server to negotiate the supported protocol version even
     * if it is currently in beta.
     *
     * <p>This feature is only available starting with version {@link ProtocolVersion#V5 V5}.
     *
     * <p>Use with caution, and refer to the server and protocol documentation for the details on
     * the latest protocol version.
     *
     * @return this Builder.
     */
    public Builder allowBetaProtocolVersion() {
      if (protocolVersion != null)
        throw new IllegalArgumentException(
            "Can't use beta flag with initial protocol version of " + protocolVersion);
      this.allowBetaProtocolVersion = true;
      this.protocolVersion = ProtocolVersion.NEWEST_BETA;
      return this;
    }

    /**
     * Sets the maximum time to wait for schema agreement before returning from a DDL query.
     *
     * <p>If not set through this method, the default value (10 seconds) will be used.
     *
     * @param maxSchemaAgreementWaitSeconds the new value to set.
     * @return this Builder.
     * @throws IllegalArgumentException if the provided value is zero or less.
     */
    public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSeconds) {
      if (maxSchemaAgreementWaitSeconds <= 0)
        throw new IllegalArgumentException("Max schema agreement wait must be greater than zero");
      this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds;
      return this;
    }

    /**
     * The native protocol version to use.
     *
     * <p>The driver supports versions 1 to 5 of the native protocol. Higher versions of the
     * protocol have more features and should be preferred, but this also depends on the Cassandra
     * version:
     *
     * <p>
     *
     * <table>
     * <caption>Native protocol version to Cassandra version correspondence</caption>
     * <tr><th>Protocol version</th><th>Minimum Cassandra version</th></tr>
     * <tr><td>1</td><td>1.2</td></tr>
     * <tr><td>2</td><td>2.0</td></tr>
     * <tr><td>3</td><td>2.1</td></tr>
     * <tr><td>4</td><td>2.2</td></tr>
     * <tr><td>5</td><td>3.10</td></tr>
     * </table>
     *
     * <p>By default, the driver will "auto-detect" which protocol version it can use when
     * connecting to the first node. More precisely, it will try first with {@link
     * ProtocolVersion#DEFAULT}, and if not supported fall back to the highest version supported by
     * the first node it connects to. Please note that once the version is "auto-detected", it won't
     * change: if the first node the driver connects to is a Cassandra 1.2 node and auto-detection
     * is used (the default), then the native protocol version 1 will be used for the lifetime of
     * the Cluster instance.
     *
     * <p>By using {@link Builder#allowBetaProtocolVersion()}, it is possible to force the driver to
     * connect to a Cassandra node that supports the latest protocol beta version. Leaving this flag
     * out will let the client connect with the latest released version.
     *
     * <p>This method allows forcing the use of a particular protocol version. Forcing version 1 is
     * always fine since all Cassandra versions (at least all those supporting the native protocol
     * in the first place) so far support it.
     * However, please note that a number of features of the driver won't be available if that
     * version of the protocol is in use, including result set paging, {@link BatchStatement},
     * executing a non-prepared query with binary values ({@link Session#execute(String,
     * Object...)}), ... (those methods will throw an UnsupportedFeatureException). Using the
     * protocol version 1 should thus only be considered when using Cassandra 1.2, until nodes have
     * been upgraded to Cassandra 2.0.
     *
     * <p>If version 2 of the protocol is used, then Cassandra 1.2 nodes will be ignored (the driver
     * won't connect to them).
     *
     * <p>The default behavior (auto-detection) is fine in almost all cases, but you may want to
     * force a particular version if you have a Cassandra cluster with mixed 1.2/2.0 nodes (i.e.
     * during a Cassandra upgrade).
     *
     * @param version the native protocol version to use. {@code null} is also supported to trigger
     *     auto-detection (see above) but this is the default (so you don't have to call this method
     *     for that behavior).
     * @return this Builder.
     */
    public Builder withProtocolVersion(ProtocolVersion version) {
      if (allowBetaProtocolVersion)
        throw new IllegalStateException(
            "Can not set the version explicitly if `allowBetaProtocolVersion` was used.");
      if (version.compareTo(ProtocolVersion.NEWEST_SUPPORTED) > 0)
        throw new IllegalArgumentException(
            "Can not use "
                + version
                + " protocol version. "
                + "Newest supported protocol version is: "
                + ProtocolVersion.NEWEST_SUPPORTED
                + ". "
                + "For beta versions, use `allowBetaProtocolVersion` instead");
      this.protocolVersion = version;
      return this;
    }

    /**
     * Adds a contact point - or many if the given address resolves to multiple <code>InetAddress
     * </code>es (A records).
     *
     * <p>Contact points are addresses of Cassandra nodes that the driver uses to discover the
     * cluster topology. Only one contact point is required (the driver will retrieve the address of
     * the other nodes automatically), but it is usually a good idea to provide more than one
     * contact point, because if that single contact point is unavailable, the driver cannot
     * initialize itself correctly.
     *
     * <p>Note that by default (that is, unless you use the {@link #withLoadBalancingPolicy} method
     * of this builder), the first successfully contacted host will be used to define the local
     * data-center for the client. It follows that if you are running Cassandra in a multiple
     * data-center setting, it is a good idea to only provide contact points that are in the same
     * datacenter as the client, or to manually provide a load balancing policy that suits your
     * needs.
     *
     * <p>If the host name points to a DNS record with multiple A records, all InetAddresses
     * returned will be used. Make sure that all resulting <code>InetAddress</code>es point to the
     * same cluster and datacenter.
     *
     * @param address the address of the node(s) to connect to.
     * @return this Builder.
     * @throws IllegalArgumentException if the given {@code address} could not be resolved.
     * @throws SecurityException if a security manager is present and permission to resolve the host
     *     name is denied.
     */
    public Builder addContactPoint(String address) {
      // We explicitly check for nulls because InetAddress.getByName() will happily
      // accept it and use localhost (while a null here almost certainly means a user error,
      // not "connect to localhost")
      if (address == null) throw new NullPointerException();
      try {
        InetAddress[] allByName = InetAddress.getAllByName(address);
        Collections.addAll(this.rawHostContactPoints, allByName);
        return this;
      } catch (UnknownHostException e) {
        throw new IllegalArgumentException("Failed to add contact point: " + address, e);
      }
    }

    /**
     * Adds a contact point using the given connection information.
     *
     * <p>You only need this method if you use a custom connection mechanism and have configured a
     * custom {@link EndPointFactory}; otherwise, you can safely ignore it and use the higher level,
     * host-and-port-based variants such as {@link #addContactPoint(String)}.
     */
    public Builder addContactPoint(EndPoint contactPoint) {
      contactPoints.add(contactPoint);
      return this;
    }

    /**
     * Adds contact points.
     *
     * <p>See {@link Builder#addContactPoint} for more details on contact points.
     *
     * <p>Note that all contact points must be resolvable; if <em>any</em> of them cannot be
     * resolved, this method will fail.
     *
     * @param addresses addresses of the nodes to add as contact points.
     * @return this Builder.
     * @throws IllegalArgumentException if any of the given {@code addresses} could not be resolved.
     * @throws SecurityException if a security manager is present and permission to resolve the host
     *     name is denied.
     * @see Builder#addContactPoint
     */
    public Builder addContactPoints(String... addresses) {
      for (String address : addresses) addContactPoint(address);
      return this;
    }

    /**
     * Adds contact points.
     *
     * <p>See {@link Builder#addContactPoint} for more details on contact points.
     *
     * <p>Note that all contact points must be resolvable; if <em>any</em> of them cannot be
     * resolved, this method will fail.
     *
     * @param addresses addresses of the nodes to add as contact points.
     * @return this Builder.
     * @throws IllegalArgumentException if any of the given {@code addresses} could not be resolved.
     * @throws SecurityException if a security manager is present and permission to resolve the host
     *     name is denied.
     * @see Builder#addContactPoint
     */
    public Builder addContactPoints(InetAddress... addresses) {
      Collections.addAll(this.rawHostContactPoints, addresses);
      return this;
    }

    /**
     * Adds contact points.
     *
     * <p>See {@link Builder#addContactPoint} for more details on contact points.
     *
     * @param addresses addresses of the nodes to add as contact points.
     * @return this Builder.
     * @see Builder#addContactPoint
     */
    public Builder addContactPoints(Collection<InetAddress> addresses) {
      this.rawHostContactPoints.addAll(addresses);
      return this;
    }

    /**
     * Adds contact points.
     *
     * <p>See {@link Builder#addContactPoint} for more details on contact points. Contrary to other
     * {@code addContactPoints} methods, this method allows providing a different port for each
     * contact point. Since Cassandra nodes must always all listen on the same port, this is rarely
     * what you want and most users should prefer other {@code addContactPoints} methods to this
     * one. However, this can be useful if the Cassandra nodes are behind a router and are not
     * accessed directly.
     * Note that if you are in this situation (Cassandra nodes are behind a router, not directly
     * accessible), you almost surely want to provide a specific {@link AddressTranslator} (through
     * {@link #withAddressTranslator}) to translate actual Cassandra node addresses to the addresses
     * the driver should use, otherwise the driver will not be able to auto-detect new nodes (and
     * will generally not function optimally).
     *
     * @param addresses addresses of the nodes to add as contact points.
     * @return this Builder.
     * @see Builder#addContactPoint
     */
    public Builder addContactPointsWithPorts(InetSocketAddress... addresses) {
      Collections.addAll(this.rawHostAndPortContactPoints, addresses);
      return this;
    }

    /**
     * Adds contact points.
     *
     * <p>See {@link Builder#addContactPoint} for more details on contact points. Contrary to other
     * {@code addContactPoints} methods, this method allows providing a different port for each
     * contact point. Since Cassandra nodes must always all listen on the same port, this is rarely
     * what you want and most users should prefer other {@code addContactPoints} methods to this
     * one. However, this can be useful if the Cassandra nodes are behind a router and are not
     * accessed directly. Note that if you are in this situation (Cassandra nodes are behind a
     * router, not directly accessible), you almost surely want to provide a specific {@link
     * AddressTranslator} (through {@link #withAddressTranslator}) to translate actual Cassandra
     * node addresses to the addresses the driver should use, otherwise the driver will not be able
     * to auto-detect new nodes (and will generally not function optimally).
     *
     * @param addresses addresses of the nodes to add as contact points.
     * @return this Builder.
     * @see Builder#addContactPoint
     */
    public Builder addContactPointsWithPorts(Collection<InetSocketAddress> addresses) {
      this.rawHostAndPortContactPoints.addAll(addresses);
      return this;
    }

    /**
     * Configures the load balancing policy to use for the new cluster.
     *
     * <p>If no load balancing policy is set through this method, {@link
     * Policies#defaultLoadBalancingPolicy} will be used instead.
     *
     * @param policy the load balancing policy to use.
     * @return this Builder.
     */
    public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) {
      policiesBuilder.withLoadBalancingPolicy(policy);
      return this;
    }

    /**
     * Configures the reconnection policy to use for the new cluster.
     *
     * <p>If no reconnection policy is set through this method, {@link
     * Policies#DEFAULT_RECONNECTION_POLICY} will be used instead.
     *
     * @param policy the reconnection policy to use.
     * @return this Builder.
     */
    public Builder withReconnectionPolicy(ReconnectionPolicy policy) {
      policiesBuilder.withReconnectionPolicy(policy);
      return this;
    }

    /**
     * Configures the retry policy to use for the new cluster.
     *
     * <p>If no retry policy is set through this method, {@link Policies#DEFAULT_RETRY_POLICY} will
     * be used instead.
     *
     * @param policy the retry policy to use.
     * @return this Builder.
     */
    public Builder withRetryPolicy(RetryPolicy policy) {
      policiesBuilder.withRetryPolicy(policy);
      return this;
    }

    /**
     * Configures the address translator to use for the new cluster.
     *
     * <p>See {@link AddressTranslator} for more detail on address translation, but the default
     * translator, {@link IdentityTranslator}, should be correct in most cases. If unsure, stick to
     * the default.
     *
     * @param translator the translator to use.
     * @return this Builder.
     */
    public Builder withAddressTranslator(AddressTranslator translator) {
      policiesBuilder.withAddressTranslator(translator);
      return this;
    }

    /**
     * Configures the generator that will produce the client-side timestamp sent with each query.
     *
     * <p>This feature is only available with version {@link ProtocolVersion#V3 V3} or above of the
     * native protocol. With earlier versions, timestamps are always generated server-side, and
     * setting a generator through this method will have no effect.
     *
     * <p>If no generator is set through this method, the driver will default to client-side
     * timestamps by using {@link AtomicMonotonicTimestampGenerator}.
     *
     * @param timestampGenerator the generator to use.
     * @return this Builder.
     */
    public Builder withTimestampGenerator(TimestampGenerator timestampGenerator) {
      policiesBuilder.withTimestampGenerator(timestampGenerator);
      return this;
    }

    /**
     * Configures the speculative execution policy to use for the new cluster.
     *
     * <p>If no policy is set through this method, {@link
     * Policies#defaultSpeculativeExecutionPolicy()} will be used instead.
     *
     * @param policy the policy to use.
     * @return this Builder.
     */
    public Builder withSpeculativeExecutionPolicy(SpeculativeExecutionPolicy policy) {
      policiesBuilder.withSpeculativeExecutionPolicy(policy);
      return this;
    }

    /**
     * Configures the endpoint factory to use for the new cluster.
     *
     * <p>This is a low-level component for advanced scenarios where connecting to a node requires
     * more than its socket address. If you're simply using host+port, the default factory is
     * sufficient.
     */
    public Builder withEndPointFactory(EndPointFactory endPointFactory) {
      policiesBuilder.withEndPointFactory(endPointFactory);
      return this;
    }

    /**
     * Configures the {@link CodecRegistry} instance to use for the new cluster.
     *
     * <p>If no codec registry is set through this method, {@link CodecRegistry#DEFAULT_INSTANCE}
     * will be used instead.
     *
     * <p>Note that if two or more {@link Cluster} instances are configured to use the default codec
     * registry, they are going to share the same instance. In this case, care should be taken when
     * registering new codecs on it as any codec registered by one cluster would be immediately
     * available to others sharing the same default instance.
     *
     * @param codecRegistry the codec registry to use.
     * @return this Builder.
     */
    public Builder withCodecRegistry(CodecRegistry codecRegistry) {
      configurationBuilder.withCodecRegistry(codecRegistry);
      return this;
    }

    /**
     * Uses the provided credentials when connecting to Cassandra hosts.
     *
     * <p>This should be used if the Cassandra cluster has been configured to use the {@code
     * PasswordAuthenticator}. If the default {@code AllowAllAuthenticator} is used instead, using
     * this method has no effect.
     *
     * @param username the username to use to login to Cassandra hosts.
     * @param password the password corresponding to {@code username}.
     * @return this Builder.
     */
    public Builder withCredentials(String username, String password) {
      this.authProvider = new PlainTextAuthProvider(username, password);
      return this;
    }

    /**
     * Uses the specified AuthProvider when connecting to Cassandra hosts.
     *
     * <p>Use this method when a custom authentication scheme is in place. You shouldn't call both
     * this method and {@code withCredentials} on the same {@code Builder} instance as one will
     * supersede the other.
     *
     * @param authProvider the {@link AuthProvider} to use to login to Cassandra hosts.
     * @return this Builder.
     */
    public Builder withAuthProvider(AuthProvider authProvider) {
      this.authProvider = authProvider;
      return this;
    }

    /**
     * Sets the compression to use for the transport.
     *
     * @param compression the compression to set.
     * @return this Builder.
     * @see ProtocolOptions.Compression
     */
    public Builder withCompression(ProtocolOptions.Compression compression) {
      this.compression = compression;
      return this;
    }

    /**
     * Disables metrics collection for the created cluster (metrics are enabled by default
     * otherwise).
     *
     * @return this builder.
     */
    public Builder withoutMetrics() {
      this.metricsEnabled = false;
      return this;
    }

    /**
     * Enables the use of SSL for the created {@code Cluster}.
     *
     * <p>Calling this method will use the JDK-based implementation with the default options (see
     * {@link RemoteEndpointAwareJdkSSLOptions.Builder}). This is thus a shortcut for {@code
     * withSSL(RemoteEndpointAwareJdkSSLOptions.builder().build())}.
     *
     * <p>Note that if SSL is enabled, the driver will not connect to any Cassandra nodes that don't
     * have SSL enabled and it is strongly advised to enable SSL on every Cassandra node if you plan
     * on using SSL in the driver.
     *
     * @return this builder.
     */
    public Builder withSSL() {
      this.sslOptions = RemoteEndpointAwareJdkSSLOptions.builder().build();
      return this;
    }

    /**
     * Enables the use of SSL for the created {@code Cluster} using the provided options.
     *
     * @param sslOptions the SSL options to use.
     * @return this builder.
     */
    public Builder withSSL(SSLOptions sslOptions) {
      this.sslOptions = sslOptions;
      return this;
    }

    /**
     * Registers the provided listeners in the newly created cluster.
     *
     * <p>Note: repeated calls to this method will override the previous ones.
     *
     * @param listeners the listeners to register.
     * @return this builder.
     */
    public Builder withInitialListeners(Collection<Host.StateListener> listeners) {
      this.listeners = listeners;
      return this;
    }

    /**
     * Disables JMX reporting of the metrics.
     *
     * <p>JMX reporting is enabled by default (see {@link Metrics}) but can be disabled using this
     * option. If metrics are disabled, this is a no-op.
     *
     * @return this builder.
     */
    public Builder withoutJMXReporting() {
      this.jmxEnabled = false;
      return this;
    }

    /**
     * Sets the PoolingOptions to use for the newly created Cluster.
     *
     * <p>If no pooling options are set through this method, default pooling options will be used.
     *
     * @param options the pooling options to use.
     * @return this builder.
     */
    public Builder withPoolingOptions(PoolingOptions options) {
      configurationBuilder.withPoolingOptions(options);
      return this;
    }

    /**
     * Sets the SocketOptions to use for the newly created Cluster.
     *
     * <p>If no socket options are set through this method, default socket options will be used.
     *
     * @param options the socket options to use.
     * @return this builder.
     */
    public Builder withSocketOptions(SocketOptions options) {
      configurationBuilder.withSocketOptions(options);
      return this;
    }

    /**
     * Sets the QueryOptions to use for the newly created Cluster.
     *
     * <p>If no query options are set through this method, default query options will be used.
     *
     * @param options the query options to use.
     * @return this builder.
     */
    public Builder withQueryOptions(QueryOptions options) {
      configurationBuilder.withQueryOptions(options);
      return this;
    }

    /**
     * Sets the threading options to use for the newly created Cluster.
     *
     * <p>If no options are set through this method, a new instance of {@link ThreadingOptions} will
     * be used.
     *
     * @param options the options.
     * @return this builder.
     */
    public Builder withThreadingOptions(ThreadingOptions options) {
      configurationBuilder.withThreadingOptions(options);
      return this;
    }

    /**
     * Sets the {@link NettyOptions} to use for the newly created Cluster.
     *
     * <p>If no Netty options are set through this method, {@link NettyOptions#DEFAULT_INSTANCE}
     * will be used as a default value, which means that no customization will be applied.
     *
     * @param nettyOptions the {@link NettyOptions} to use.
     * @return this builder.
     */
    public Builder withNettyOptions(NettyOptions nettyOptions) {
      configurationBuilder.withNettyOptions(nettyOptions);
      return this;
    }

    /**
     * Enables the <code>NO_COMPACT</code> startup option.
     *
     * <p>When this option is supplied, <code>SELECT</code>, <code>UPDATE</code>,
     * <code>DELETE</code> and <code>BATCH</code> statements on <code>COMPACT STORAGE</code> tables
     * function in "compatibility" mode which allows seeing these tables as if they were "regular"
     * CQL tables.
     *
     * <p>This option only affects interactions with tables using <code>COMPACT STORAGE</code> and
     * is only supported by C* 4.0+ and DSE 6.0+.
     *
     * @return this builder.
     * @see <a href="https://issues.apache.org/jira/browse/CASSANDRA-10857">CASSANDRA-10857</a>
     */
    public Builder withNoCompact() {
      this.noCompact = true;
      return this;
    }

    public Builder withScyllaCloudConnectionConfig(File configurationFile) throws IOException {
      return withScyllaCloudConnectionConfig(configurationFile.toURI().toURL());
    }

    public Builder withScyllaCloudConnectionConfig(URL configurationUrl) throws IOException {
      return withScyllaCloudConnectionConfig(configurationUrl.openStream());
    }

    public Builder withScyllaCloudConnectionConfig(InputStream inputStream) throws IOException {
      return withScyllaCloudConnectionConfig(
          ScyllaCloudConnectionConfig.fromInputStream(inputStream));
    }

    protected Builder withScyllaCloudConnectionConfig(ScyllaCloudConnectionConfig config) {
      try {
        ScyllaCloudDatacenter currentDatacenter = config.getCurrentDatacenter();
        InetSocketAddress proxyAddress = currentDatacenter.getServer();
        Builder builder =
            withEndPointFactory(
                    new ScyllaCloudSniEndPointFactory(
                        proxyAddress, currentDatacenter.getNodeDomain()))
                .withSSL(
                    (config.getCurrentDatacenter().isInsecureSkipTlsVerify()
                        ? config.createBundle().getInsecureSSLOptions()
                        : config.createBundle().getSSLOptions()))
                .withAuthProvider(
                    new PlainTextAuthProvider(
                        config.getCurrentAuthInfo().getUsername(),
                        config.getCurrentAuthInfo().getPassword()))
                .withoutAdvancedShardAwareness();
        if (builder.rawHostContactPoints.size() > 0
            || builder.rawHostAndPortContactPoints.size() > 0
            || builder.contactPoints.size() > 0) {
          throw new IllegalStateException(
              "Can't use withScyllaCloudConnectionConfig if you've already called addContactPoint(s)");
        }
        builder.addContactPoint(new SniEndPoint(proxyAddress, currentDatacenter.getNodeDomain()));
        return builder;
      } catch (IOException e) {
        throw new IllegalStateException("Cannot construct cloud config", e);
      } catch (GeneralSecurityException e) {
        throw new IllegalStateException("Cannot construct cloud config", e);
      }
    }

    /**
     * Disables advanced shard awareness. By default, this driver chooses a local port while making
     * a connection to a node, to signal which shard it wants to connect to. This allows the driver
     * to establish the connection pool faster, especially when there are multiple clients
     * connecting concurrently. If this causes any issues, you can disable it using this method.
     * The most common issues are a NAT between the client and the node (which rewrites client port
     * numbers) and the shard-aware port (default: 19042) being blocked by a firewall.
     *
     * @return this builder.
     */
    public Builder withoutAdvancedShardAwareness() {
      this.useAdvancedShardAwareness = false;
      return this;
    }

    /**
     * Disables paging in schema queries. By default, queries that fetch the schema from the cluster
     * are paged; that causes the least impact on cluster latencies when a new client connects.
     * Turning off paging may result in faster driver initialisation at the expense of higher
     * cluster latencies.
     *
     * @return this builder.
     */
    public Builder withoutPagingInSchemaQueries() {
      this.schemaQueriesPaged = false;
      return this;
    }

    /**
     * Sets the local port range for use by advanced shard awareness. The driver will use ports from
     * this range as local ports when connecting to the cluster. If {@link
     * #withoutAdvancedShardAwareness()} was called, then setting this range does not affect
     * anything.
     *
     * @param low Lower bound of range, inclusive.
     * @param high Upper bound of range, inclusive.
     * @return this builder.
     */
    public Builder withLocalPortRange(int low, int high) {
      if (low < 1 || 65535 < low || high < 1 || 65535 < high) {
        throw new IllegalArgumentException("Port numbers must be between 1 and 65535");
      }
      if (high - low < 1000) {
        throw new IllegalArgumentException("Port range should be sufficiently large");
      }
      this.localPortLow = low;
      this.localPortHigh = high;
      return this;
    }

    /**
     * The configuration that will be used for the new cluster.
     *
     * <p>You <b>should not</b> modify this object directly because changes made to the returned
     * object may not be used by the cluster being built. Instead, you should use the other methods
     * of this {@code Builder}.
     *
     * @return the configuration to use for the new cluster.
     */
    @Override
    public Configuration getConfiguration() {
      ProtocolOptions protocolOptions =
          new ProtocolOptions(
                  port,
                  protocolVersion,
                  maxSchemaAgreementWaitSeconds,
                  sslOptions,
                  authProvider,
                  noCompact,
                  useAdvancedShardAwareness,
                  localPortLow,
                  localPortHigh)
              .setCompression(compression);
      MetricsOptions metricsOptions = new MetricsOptions(metricsEnabled, jmxEnabled);
      QueryOptions queryOptions = configurationBuilder.getQueryOptions();
      if (queryOptions == null) {
        queryOptions = new QueryOptions();
      }
      queryOptions.setSchemaQueriesPaged(schemaQueriesPaged);
      return configurationBuilder
          .withProtocolOptions(protocolOptions)
          .withMetricsOptions(metricsOptions)
          .withPolicies(policiesBuilder.build())
          .withQueryOptions(queryOptions)
          .build();
    }

    @Override
    public Collection<Host.StateListener> getInitialListeners() {
      return listeners == null ? Collections.<Host.StateListener>emptySet() : listeners;
    }

    /**
     * Builds the cluster with the configured set of initial contact points and policies.
     *
     * <p>This is a convenience method for {@code Cluster.buildFrom(this)}.
     *
     * @return the newly built Cluster instance.
     */
    public Cluster build() {
      return Cluster.buildFrom(this);
    }
  }

  static long timeSince(long startNanos, TimeUnit destUnit) {
    return destUnit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
  }

  private static String generateClusterName() {
    return "cluster" + CLUSTER_ID.incrementAndGet();
  }

  /**
   * The sessions and hosts managed by this Cluster instance.
   *
   * <p>Note: the reason we create a Manager object separate from Cluster is that Manager is not
   * publicly visible. For instance, we wouldn't want users to be able to call the {@link #onUp} and
   * {@link #onDown} methods.
   */
  class Manager implements Connection.DefaultResponseHandler {

    final String clusterName;
    private volatile boolean isInit;
    private volatile boolean isFullyInit;
    private Exception initException;

    // Initial contact points
    final List<EndPoint> contactPoints;
    final Set<SessionManager> sessions = new CopyOnWriteArraySet<SessionManager>();

    Metadata metadata;
    final Configuration configuration;
    Metrics metrics;

    Connection.Factory connectionFactory;
    ControlConnection controlConnection;

    final ConvictionPolicy.Factory convictionPolicyFactory =
        new ConvictionPolicy.DefaultConvictionPolicy.Factory();

    ListeningExecutorService executor;
    ListeningExecutorService blockingExecutor;
    ScheduledExecutorService reconnectionExecutor;
    ScheduledExecutorService scheduledTasksExecutor;

    BlockingQueue<Runnable> executorQueue;
    BlockingQueue<Runnable> blockingExecutorQueue;
    BlockingQueue<Runnable> reconnectionExecutorQueue;
    BlockingQueue<Runnable> scheduledTasksExecutorQueue;

    ConnectionReaper reaper;

    final AtomicReference<CloseFuture> closeFuture = new AtomicReference<CloseFuture>();

    // All the queries that have been prepared (we keep them so we can re-prepare them when a node
    // fails or a new one joins the cluster).
    // Note: we could move this down to the session level, but since prepared statements are global
    // to a node, this would yield a slightly less clear behavior.
    ConcurrentMap<MD5Digest, PreparedStatement> preparedQueries;

    final Set<Host.StateListener> listeners;
    final Set<LatencyTracker> latencyTrackers = new CopyOnWriteArraySet<LatencyTracker>();
    final Set<SchemaChangeListener> schemaChangeListeners =
        new CopyOnWriteArraySet<SchemaChangeListener>();

    EventDebouncer<NodeListRefreshRequest> nodeListRefreshRequestDebouncer;
    EventDebouncer<NodeRefreshRequest> nodeRefreshRequestDebouncer;
    EventDebouncer<SchemaRefreshRequest> schemaRefreshRequestDebouncer;

    private Manager(
        String clusterName,
        List<EndPoint> contactPoints,
        Configuration configuration,
        Collection<Host.StateListener> listeners) {
      this.clusterName = clusterName == null ? generateClusterName() : clusterName;
      if (configuration != null && configuration.getPolicies() != null) {
        Policies policies = configuration.getPolicies();
        this.configuration =
            Configuration.builder()
                .withPolicies(
                    Policies.builder()
                        .withLoadBalancingPolicy(
                            new PagingOptimizingLoadBalancingPolicy(
                                policies.getLoadBalancingPolicy()))
                        .withEndPointFactory(policies.getEndPointFactory())
                        .withReconnectionPolicy(policies.getReconnectionPolicy())
                        .withRetryPolicy(policies.getRetryPolicy())
                        .withAddressTranslator(policies.getAddressTranslator())
                        .withTimestampGenerator(policies.getTimestampGenerator())
                        .withSpeculativeExecutionPolicy(policies.getSpeculativeExecutionPolicy())
                        .build())
                .withProtocolOptions(configuration.getProtocolOptions())
                .withPoolingOptions(configuration.getPoolingOptions())
                .withSocketOptions(configuration.getSocketOptions())
                .withMetricsOptions(configuration.getMetricsOptions())
                .withQueryOptions(configuration.getQueryOptions())
                .withThreadingOptions(configuration.getThreadingOptions())
                .withNettyOptions(configuration.getNettyOptions())
                .withCodecRegistry(configuration.getCodecRegistry())
                .build();
      } else {
        this.configuration = configuration;
      }
      this.contactPoints = contactPoints;
      this.listeners = new CopyOnWriteArraySet<Host.StateListener>(listeners);
    }

    // Initialization is not too performance intensive and in practice there shouldn't be
    // contention on it, so synchronized is good enough.
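    // High-level sketch of init() (descriptive only): create the executors, the metadata, the
    // connection factory and the control connection; connect (negotiating the protocol version
    // if needed); initialize the policies and notify listeners; then start the event debouncers.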
    synchronized void init() {
      checkNotClosed(this);
      if (isInit) {
        return;
      }
      isInit = true;
      try {
        logger.debug("Starting new cluster with contact points " + contactPoints);

        this.configuration.register(this);

        ThreadingOptions threadingOptions = this.configuration.getThreadingOptions();

        // executor
        ExecutorService tmpExecutor = threadingOptions.createExecutor(clusterName);
        this.executorQueue =
            (tmpExecutor instanceof ThreadPoolExecutor)
                ? ((ThreadPoolExecutor) tmpExecutor).getQueue()
                : null;
        this.executor = MoreExecutors.listeningDecorator(tmpExecutor);

        // blocking executor
        ExecutorService tmpBlockingExecutor = threadingOptions.createBlockingExecutor(clusterName);
        this.blockingExecutorQueue =
            (tmpBlockingExecutor instanceof ThreadPoolExecutor)
                ? ((ThreadPoolExecutor) tmpBlockingExecutor).getQueue()
                : null;
        this.blockingExecutor = MoreExecutors.listeningDecorator(tmpBlockingExecutor);

        // reconnection executor
        this.reconnectionExecutor = threadingOptions.createReconnectionExecutor(clusterName);
        this.reconnectionExecutorQueue =
            (reconnectionExecutor instanceof ThreadPoolExecutor)
                ? ((ThreadPoolExecutor) reconnectionExecutor).getQueue()
                : null;

        // scheduled tasks executor
        this.scheduledTasksExecutor = threadingOptions.createScheduledTasksExecutor(clusterName);
        this.scheduledTasksExecutorQueue =
            (scheduledTasksExecutor instanceof ThreadPoolExecutor)
                ? ((ThreadPoolExecutor) scheduledTasksExecutor).getQueue()
                : null;

        this.reaper = new ConnectionReaper(threadingOptions.createReaperExecutor(clusterName));
        this.metadata = new Metadata(this);
        this.connectionFactory = new Connection.Factory(this, configuration);
        this.controlConnection = new ControlConnection(this);
        this.metrics = configuration.getMetricsOptions().isEnabled() ? new Metrics(this) : null;
        this.preparedQueries = new MapMaker().weakValues().makeMap();

        // create debouncers - at this stage, they are not running yet
        final QueryOptions queryOptions = configuration.getQueryOptions();
        this.nodeListRefreshRequestDebouncer =
            new EventDebouncer<NodeListRefreshRequest>(
                "Node list refresh",
                scheduledTasksExecutor,
                new NodeListRefreshRequestDeliveryCallback()) {

              @Override
              int maxPendingEvents() {
                return configuration.getQueryOptions().getMaxPendingRefreshNodeListRequests();
              }

              @Override
              long delayMs() {
                return configuration.getQueryOptions().getRefreshNodeListIntervalMillis();
              }
            };
        this.nodeRefreshRequestDebouncer =
            new EventDebouncer<NodeRefreshRequest>(
                "Node refresh", scheduledTasksExecutor, new NodeRefreshRequestDeliveryCallback()) {

              @Override
              int maxPendingEvents() {
                return configuration.getQueryOptions().getMaxPendingRefreshNodeRequests();
              }

              @Override
              long delayMs() {
                return configuration.getQueryOptions().getRefreshNodeIntervalMillis();
              }
            };
        this.schemaRefreshRequestDebouncer =
            new EventDebouncer<SchemaRefreshRequest>(
                "Schema refresh",
                scheduledTasksExecutor,
                new SchemaRefreshRequestDeliveryCallback()) {

              @Override
              int maxPendingEvents() {
                return configuration.getQueryOptions().getMaxPendingRefreshSchemaRequests();
              }

              @Override
              long delayMs() {
                return configuration.getQueryOptions().getRefreshSchemaIntervalMillis();
              }
            };

        this.scheduledTasksExecutor.scheduleWithFixedDelay(
            new CleanupIdleConnectionsTask(), 10, 10, TimeUnit.SECONDS);

        for (EndPoint contactPoint : contactPoints) {
          metadata.addContactPoint(contactPoint);
        }

        // Initialize the control connection:
        negotiateProtocolVersionAndConnect();

        // The control connection:
        // - marked contact points down if they couldn't be reached
        // - triggered an initial full refresh of metadata.allHosts.
        //   If any contact points weren't valid, they won't appear in it.
        Set<Host> downContactPointHosts = Sets.newHashSet();
        Set<Host> removedContactPointHosts = Sets.newHashSet();
        for (Host contactPoint : metadata.getContactPoints()) {
          if (!metadata.allHosts().contains(contactPoint)) {
            removedContactPointHosts.add(contactPoint);
          } else if (contactPoint.state == Host.State.DOWN) {
            downContactPointHosts.add(contactPoint);
          }
        }

        // Now that the control connection is ready, we have all the information we need about the
        // nodes (datacenter, rack...) to initialize the load balancing policy
        Set<Host> lbpContactPoints = Sets.newHashSet(metadata.getContactPoints());
        lbpContactPoints.removeAll(removedContactPointHosts);
        lbpContactPoints.removeAll(downContactPointHosts);
        loadBalancingPolicy().init(Cluster.this, lbpContactPoints);
        speculativeExecutionPolicy().init(Cluster.this);
        configuration.getPolicies().getRetryPolicy().init(Cluster.this);
        reconnectionPolicy().init(Cluster.this);
        configuration.getPolicies().getAddressTranslator().init(Cluster.this);
        for (LatencyTracker tracker : latencyTrackers) tracker.onRegister(Cluster.this);
        for (Host.StateListener listener : listeners) listener.onRegister(Cluster.this);

        for (Host host : removedContactPointHosts) {
          loadBalancingPolicy().onRemove(host);
          for (Host.StateListener listener : listeners) listener.onRemove(host);
        }

        for (Host host : downContactPointHosts) {
          loadBalancingPolicy().onDown(host);
          for (Host.StateListener listener : listeners) listener.onDown(host);
          startPeriodicReconnectionAttempt(host, true);
        }

        configuration.getPoolingOptions().setProtocolVersion(protocolVersion());

        for (Host host : metadata.allHosts()) {
          // If the host is down at this stage, it's a contact point that the control connection
          // failed to reach. Reconnection attempts are already scheduled, and the LBP and
          // listeners have been notified above.
          if (host.state == Host.State.DOWN) continue;

          // Otherwise, we want to do the equivalent of onAdd(). But since we know for sure that
          // no sessions or prepared statements exist at this point, we can skip some of the steps
          // (plus this avoids scheduling concurrent pool creations if a session is created right
          // after this method returns).
          logger.info("New Cassandra host {} added", host);

          if (!host.supports(connectionFactory.protocolVersion)) {
            logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion);
            continue;
          }

          if (!lbpContactPoints.contains(host)) loadBalancingPolicy().onAdd(host);

          host.setUp();

          for (Host.StateListener listener : listeners) listener.onAdd(host);
        }

        // start debouncers
        this.nodeListRefreshRequestDebouncer.start();
        this.schemaRefreshRequestDebouncer.start();
        this.nodeRefreshRequestDebouncer.start();

        isFullyInit = true;
      } catch (RuntimeException e) {
        initException = e;
        close();
        throw e;
      }
    }

    private void negotiateProtocolVersionAndConnect() {
      boolean shouldNegotiate =
          (configuration.getProtocolOptions().initialProtocolVersion == null
              || shouldAlwaysNegotiateProtocolVersion);
      while (true) {
        try {
          controlConnection.connect();
          return;
        } catch (UnsupportedProtocolVersionException e) {
          if (!shouldNegotiate) {
            throw e;
          }
          // Do not trust the version in the server's response, as C* behavior in case of protocol
          // negotiation is not properly documented, and varies over time (especially after
          // CASSANDRA-11464). Instead, always retry at attempted version - 1, if such a version
          // exists; and otherwise, stop and fail.
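          // Illustrative walk-through (hypothetical): if V4 was attempted and rejected, retry
          // with V3, then V2, and so on; when no lower version is left, rethrow the error.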
          ProtocolVersion attemptedVersion = e.getUnsupportedVersion();
          ProtocolVersion retryVersion = attemptedVersion.getLowerSupported();
          if (retryVersion == null) {
            throw e;
          }
          logger.info(
              "Cannot connect with protocol version {}, trying with {}",
              attemptedVersion,
              retryVersion);
          connectionFactory.protocolVersion = retryVersion;
        }
      }
    }

    ProtocolVersion protocolVersion() {
      return connectionFactory.protocolVersion;
    }

    Cluster getCluster() {
      return Cluster.this;
    }

    LoadBalancingPolicy loadBalancingPolicy() {
      return configuration.getPolicies().getLoadBalancingPolicy();
    }

    SpeculativeExecutionPolicy speculativeExecutionPolicy() {
      return configuration.getPolicies().getSpeculativeExecutionPolicy();
    }

    ReconnectionPolicy reconnectionPolicy() {
      return configuration.getPolicies().getReconnectionPolicy();
    }

    InetSocketAddress translateAddress(InetSocketAddress address) {
      InetSocketAddress translated =
          configuration.getPolicies().getAddressTranslator().translate(address);
      return translated == null ? address : translated;
    }

    InetSocketAddress translateAddress(InetAddress address) {
      InetSocketAddress sa = new InetSocketAddress(address, connectionFactory.getPort());
      return translateAddress(sa);
    }

    private Session newSession() {
      SessionManager session = new SessionManager(Cluster.this);
      sessions.add(session);
      return session;
    }

    boolean removeSession(Session session) {
      return sessions.remove(session);
    }

    void reportQuery(Host host, Statement statement, Exception exception, long latencyNanos) {
      for (LatencyTracker tracker : latencyTrackers) {
        try {
          tracker.update(host, statement, exception, latencyNanos);
        } catch (Exception e) {
          logger.error("Call to latency tracker failed", e);
        }
      }
    }

    ControlConnection getControlConnection() {
      return controlConnection;
    }

    List<EndPoint> getContactPoints() {
      return contactPoints;
    }

    boolean isClosed() {
      return closeFuture.get() != null;
    }

    boolean errorDuringInit() {
      return (isInit && initException != null);
    }

    Exception getInitException() {
      return initException;
    }

    private CloseFuture close() {
      CloseFuture future = closeFuture.get();
      if (future != null) return future;

      if (isInit) {
        logger.debug("Shutting down");

        // stop debouncers
        if (nodeListRefreshRequestDebouncer != null) {
          nodeListRefreshRequestDebouncer.stop();
        }
        if (nodeRefreshRequestDebouncer != null) {
          nodeRefreshRequestDebouncer.stop();
        }
        if (schemaRefreshRequestDebouncer != null) {
          schemaRefreshRequestDebouncer.stop();
        }

        // If we're shutting down, there is no point in waiting on scheduled reconnections, nor on
        // notifications delivery or blocking tasks so we use shutdownNow
        shutdownNow(reconnectionExecutor);
        shutdownNow(scheduledTasksExecutor);
        shutdownNow(blockingExecutor);

        // but for the worker executor, we want to let submitted tasks finish unless the shutdown
        // is forced.
        if (executor != null) {
          executor.shutdown();
        }

        // We also close the metrics
        if (metrics != null) metrics.shutdown();

        loadBalancingPolicy().close();
        speculativeExecutionPolicy().close();
        configuration.getPolicies().getRetryPolicy().close();
        reconnectionPolicy().close();
        configuration.getPolicies().getAddressTranslator().close();
        for (LatencyTracker tracker : latencyTrackers) tracker.onUnregister(Cluster.this);
        for (Host.StateListener listener : listeners) listener.onUnregister(Cluster.this);
        for (SchemaChangeListener listener : schemaChangeListeners)
          listener.onUnregister(Cluster.this);

        // Then we shut down all connections
        List<CloseFuture> futures = new ArrayList<CloseFuture>(sessions.size() + 1);
        if (controlConnection != null) {
          futures.add(controlConnection.closeAsync());
        }
        for (Session session : sessions) futures.add(session.closeAsync());
        future = new ClusterCloseFuture(futures);
        // The rest will happen asynchronously, when all connections are successfully closed
      } else {
        future = CloseFuture.immediateFuture();
      }

      return closeFuture.compareAndSet(null, future)
          ? future
          : closeFuture.get(); // We raced, it's ok, return the future that was actually set
    }

    private void shutdownNow(ExecutorService executor) {
      if (executor != null) {
        List<Runnable> pendingTasks = executor.shutdownNow();
        // If some tasks were submitted to this executor but not yet commenced, make sure the
        // corresponding futures complete
        for (Runnable pendingTask : pendingTasks) {
          if (pendingTask instanceof FutureTask<?>) ((FutureTask<?>) pendingTask).cancel(false);
        }
      }
    }

    void logUnsupportedVersionProtocol(Host host, ProtocolVersion version) {
      logger.warn(
          "Detected added or restarted Cassandra host {} but ignoring it since it does not support the version {} of the native "
              + "protocol which is currently in use. If you want to force the use of a particular version of the native protocol, use "
              + "Cluster.Builder#withProtocolVersion() when creating the Cluster instance.",
          host,
          version);
    }

    void logClusterNameMismatch(Host host, String expectedClusterName, String actualClusterName) {
      logger.warn(
          "Detected added or restarted Cassandra host {} but ignoring it since its cluster name '{}' does not match the one "
              + "currently known ({})",
          host,
          actualClusterName,
          expectedClusterName);
    }

    public ListenableFuture<?> triggerOnUp(final Host host) {
      if (!isClosed()) {
        return executor.submit(
            new ExceptionCatchingRunnable() {
              @Override
              public void runMayThrow() throws InterruptedException, ExecutionException {
                onUp(host, null);
              }
            });
      } else {
        return MoreFutures.VOID_SUCCESS;
      }
    }

    // Use triggerOnUp unless you're sure you want to run this on the current thread.
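    // Overview of onUp (descriptive only): take the host's notifications lock, cancel any
    // scheduled reconnection, optionally re-prepare known statements on the host, notify the
    // load balancing policy and control connection, renew the session pools, and only then mark
    // the host UP and notify the registered listeners.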
    private void onUp(final Host host, Connection reusedConnection)
        throws InterruptedException, ExecutionException {

      if (isClosed()) return;

      if (!host.supports(connectionFactory.protocolVersion)) {
        logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion);
        return;
      }

      try {
        boolean locked =
            host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
        if (!locked) {
          logger.warn(
              "Could not acquire notifications lock within {} seconds, ignoring UP notification for {}",
              NOTIF_LOCK_TIMEOUT_SECONDS,
              host);
          return;
        }
        try {
          // We don't want to use the public Host.isUp() as this would make us skip the rest for
          // suspected hosts
          if (host.state == Host.State.UP) return;

          Host.statesLogger.debug("[{}] marking host UP", host);

          // If there is a reconnection attempt scheduled for that node, cancel it
          Future<?> scheduledAttempt = host.reconnectionAttempt.getAndSet(null);
          if (scheduledAttempt != null) {
            logger.debug("Cancelling reconnection attempt since node is UP");
            scheduledAttempt.cancel(false);
          }

          try {
            if (getCluster().getConfiguration().getQueryOptions().isReprepareOnUp())
              reusedConnection = prepareAllQueries(host, reusedConnection);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            // Don't propagate because we don't want to prevent other listeners from running
          } catch (UnsupportedProtocolVersionException e) {
            logUnsupportedVersionProtocol(host, e.getUnsupportedVersion());
            return;
          } catch (ClusterNameMismatchException e) {
            logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName);
            return;
          }

          // Session#onUp() expects the load balancing policy to have been updated first, so that
          // Host distances are up to date. This means the policy could return the node before the
          // new pools have been created. This is harmless if there is no prior pool since
          // RequestHandler will ignore the node, but we do want to make sure there is no prior
          // pool so we don't query from a pool we will shut down right away.
          for (SessionManager s : sessions) s.removePool(host);
          loadBalancingPolicy().onUp(host);
          controlConnection.onUp(host);

          logger.trace("Adding/renewing host pools for newly UP host {}", host);

          List<ListenableFuture<Boolean>> futures = Lists.newArrayListWithCapacity(sessions.size());
          for (SessionManager s : sessions) futures.add(s.forceRenewPool(host, reusedConnection));

          try {
            // Only mark the node up once all sessions have re-added their pool (if the
            // load-balancing policy says it should), so that Host.isUp() doesn't return true
            // before we're reconnected to the node.
            List<Boolean> poolCreationResults = Futures.allAsList(futures).get();

            // If any of the pool creations failed, a connection failure will have been signaled,
            // which will trigger a reconnection to the node. So don't bother marking UP.
            if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) {
              logger.debug("Connection pool cannot be created, not marking {} UP", host);
              return;
            }

            host.setUp();

            for (Host.StateListener listener : listeners) listener.onUp(host);
          } catch (ExecutionException e) {
            Throwable t = e.getCause();
            // That future is not really supposed to throw unexpected exceptions
            if (!(t instanceof InterruptedException) && !(t instanceof CancellationException))
              logger.error(
                  "Unexpected error while marking node UP: while this shouldn't happen, this shouldn't be critical",
                  t);
          }

          // Now, check whether pools need to be created/removed following the addition.
          // We only do that now so that it's not called before we've set the node up.
for (SessionManager s : sessions) s.updateCreatedPools().get(); } finally { host.notificationsLock.unlock(); } } finally { if (reusedConnection != null && !reusedConnection.hasOwner()) reusedConnection.closeAsync(); } } public ListenableFuture<?> triggerOnDown(final Host host, boolean startReconnection) { return triggerOnDown(host, false, startReconnection); } public ListenableFuture<?> triggerOnDown( final Host host, final boolean isHostAddition, final boolean startReconnection) { if (!isClosed()) { return executor.submit( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { onDown(host, isHostAddition, startReconnection); } }); } else { return MoreFutures.VOID_SUCCESS; } } // Use triggerOnDown unless you're sure you want to run this on the current thread. private void onDown(final Host host, final boolean isHostAddition, boolean startReconnection) throws InterruptedException, ExecutionException { if (isClosed()) return; boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); if (!locked) { logger.warn( "Could not acquire notifications lock within {} seconds, ignoring DOWN notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); return; } try { // Note: we don't want to skip that method if !host.isUp() because we set isUp // late in onUp, and so we can rely on isUp if there is an error during onUp. // But if there is a reconnection attempt in progress already, then we know // we've already gone through that method since the last successful onUp(), so // we're good skipping it. if (host.reconnectionAttempt.get() != null) { logger.debug("Aborting onDown because a reconnection is running on DOWN host {}", host); return; } Host.statesLogger.debug("[{}] marking host DOWN", host); // Remember if we care about this node at all. We must call this before // we've signalled the load balancing policy, since most policy will always // IGNORE down nodes anyway. HostDistance distance = loadBalancingPolicy().distance(host); boolean wasUp = host.isUp(); host.setDown(); loadBalancingPolicy().onDown(host); controlConnection.onDown(host); for (SessionManager s : sessions) s.onDown(host); // Contrarily to other actions of that method, there is no reason to notify listeners // unless the host was UP at the beginning of this function since even if a onUp fail // mid-method, listeners won't have been notified of the UP. if (wasUp) { for (Host.StateListener listener : listeners) listener.onDown(host); } // Don't start a reconnection if we ignore the node anyway (JAVA-314) if (distance == HostDistance.IGNORED || !startReconnection) return; startPeriodicReconnectionAttempt(host, isHostAddition); } finally { host.notificationsLock.unlock(); } } void startPeriodicReconnectionAttempt(final Host host, final boolean isHostAddition) { new AbstractReconnectionHandler( host.toString(), reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt) { @Override protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { return connectionFactory.open(host); } @Override protected void onReconnection(Connection connection) { // Make sure we have up-to-date infos on that host before adding it (so we typically // catch that an upgraded node uses a new cassandra version). 
if (controlConnection.refreshNodeInfo(host)) { logger.debug("Successful reconnection to {}, setting host UP", host); try { if (isHostAddition) { onAdd(host, connection); submitNodeListRefresh(); } else onUp(host, connection); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (Exception e) { logger.error("Unexpected error while setting node up", e); } } else { logger.debug("Not enough info for {}, ignoring host", host); connection.closeAsync(); } } @Override protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { if (logger.isDebugEnabled()) logger.debug( "Failed reconnection to {} ({}), scheduling retry in {} milliseconds", host, e.getMessage(), nextDelayMs); return true; } @Override protected boolean onUnknownException(Exception e, long nextDelayMs) { logger.error( String.format( "Unknown error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); return true; } @Override protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { logger.error( String.format( "Authentication error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e); return true; } }.start(); } void startSingleReconnectionAttempt(final Host host) { if (isClosed() || host.isUp()) return; logger.debug("Scheduling one-time reconnection to {}", host); // Setting an initial delay of 0 to start immediately, and all the exception handlers return // false to prevent further attempts new AbstractReconnectionHandler( host.toString(), reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt, 0) { @Override protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { return connectionFactory.open(host); } @Override protected void onReconnection(Connection connection) { // Make sure we have up-to-date infos on that host before adding it (so we typically // catch that an upgraded node uses a new cassandra version). if (controlConnection.refreshNodeInfo(host)) { logger.debug("Successful reconnection to {}, setting host UP", host); try { onUp(host, connection); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (Exception e) { logger.error("Unexpected error while setting node up", e); } } else { logger.debug("Not enough info for {}, ignoring host", host); connection.closeAsync(); } } @Override protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { if (logger.isDebugEnabled()) logger.debug("Failed one-time reconnection to {} ({})", host, e.getMessage()); return false; } @Override protected boolean onUnknownException(Exception e, long nextDelayMs) { logger.error(String.format("Unknown error during one-time reconnection to %s", host), e); return false; } @Override protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { logger.error( String.format("Authentication error during one-time reconnection to %s", host), e); return false; } }.start(); } public ListenableFuture<?> triggerOnAdd(final Host host) { if (!isClosed()) { return executor.submit( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { onAdd(host, null); } }); } else { return MoreFutures.VOID_SUCCESS; } } // Use triggerOnAdd unless you're sure you want to run this on the current thread. 
private void onAdd(final Host host, Connection reusedConnection) throws InterruptedException, ExecutionException { if (isClosed()) return; if (!host.supports(connectionFactory.protocolVersion)) { logUnsupportedVersionProtocol(host, connectionFactory.protocolVersion); return; } try { boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); if (!locked) { logger.warn( "Could not acquire notifications lock within {} seconds, ignoring ADD notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); return; } try { Host.statesLogger.debug("[{}] adding host", host); // Adds to the load balancing first and foremost, as doing so might change the decision // it will make for distance() on that node (not likely but we leave that possibility). // This does mean the policy may start returning that node for query plan, but as long // as no pools have been created (below) this will be ignored by RequestHandler so it's // fine. loadBalancingPolicy().onAdd(host); // Next, if the host should be ignored, well, ignore it. if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) { // We still mark the node UP though as it should be (and notifiy the listeners). // We'll mark it down if we have a notification anyway and we've documented that // especially // for IGNORED hosts, the isUp() method was a best effort guess host.setUp(); for (Host.StateListener listener : listeners) listener.onAdd(host); return; } try { reusedConnection = prepareAllQueries(host, reusedConnection); } catch (InterruptedException e) { Thread.currentThread().interrupt(); // Don't propagate because we don't want to prevent other listener to run } catch (UnsupportedProtocolVersionException e) { logUnsupportedVersionProtocol(host, e.getUnsupportedVersion()); return; } catch (ClusterNameMismatchException e) { logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName); return; } controlConnection.onAdd(host); List<ListenableFuture<Boolean>> futures = Lists.newArrayListWithCapacity(sessions.size()); for (SessionManager s : sessions) futures.add(s.maybeAddPool(host, reusedConnection)); try { // Only mark the node up once all session have added their pool (if the load-balancing // policy says it should), so that Host.isUp() don't return true before we're // reconnected // to the node. List<Boolean> poolCreationResults = Futures.allAsList(futures).get(); // If any of the creation failed, they will have signaled a connection failure // which will trigger a reconnection to the node. So don't bother marking UP. if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) { logger.debug("Connection pool cannot be created, not marking {} UP", host); return; } host.setUp(); for (Host.StateListener listener : listeners) listener.onAdd(host); } catch (ExecutionException e) { Throwable t = e.getCause(); // That future is not really supposed to throw unexpected exceptions if (!(t instanceof InterruptedException) && !(t instanceof CancellationException)) logger.error( "Unexpected error while adding node: while this shouldn't happen, this shouldn't be critical", t); } // Now, check if there isn't pools to create/remove following the addition. // We do that now only so that it's not called before we've set the node up. 
for (SessionManager s : sessions) s.updateCreatedPools().get(); } finally { host.notificationsLock.unlock(); } } finally { if (reusedConnection != null && !reusedConnection.hasOwner()) reusedConnection.closeAsync(); } } public ListenableFuture<?> triggerOnRemove(final Host host) { if (!isClosed()) { return executor.submit( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { onRemove(host); } }); } else { return MoreFutures.VOID_SUCCESS; } } // Use triggerOnRemove unless you're sure you want to run this on the current thread. private void onRemove(Host host) throws InterruptedException, ExecutionException { if (isClosed()) return; boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS); if (!locked) { logger.warn( "Could not acquire notifications lock within {} seconds, ignoring REMOVE notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host); return; } try { host.setDown(); Host.statesLogger.debug("[{}] removing host", host); loadBalancingPolicy().onRemove(host); controlConnection.onRemove(host); for (SessionManager s : sessions) s.onRemove(host); for (Host.StateListener listener : listeners) listener.onRemove(host); } finally { host.notificationsLock.unlock(); } } public void signalHostDown(Host host, boolean isHostAddition) { // Don't mark the node down until we've fully initialized the controlConnection as this might // mess up with // the protocol detection if (!isFullyInit || isClosed()) return; triggerOnDown(host, isHostAddition, true); } public void removeHost(Host host, boolean isInitialConnection) { if (host == null) return; if (metadata.remove(host)) { if (isInitialConnection) { logger.warn( "You listed {} in your contact points, but it wasn't found in the control host's system.peers at startup", host); } else { logger.info("Cassandra host {} removed", host); triggerOnRemove(host); } } } public void ensurePoolsSizing() { if (protocolVersion().compareTo(ProtocolVersion.V3) >= 0) return; for (SessionManager session : sessions) { for (HostConnectionPool pool : session.pools.values()) pool.ensureCoreConnections(); } } public PreparedStatement addPrepared(PreparedStatement stmt) { PreparedStatement previous = preparedQueries.putIfAbsent(stmt.getPreparedId().boundValuesMetadata.id, stmt); if (previous != null) { logger.warn( "Re-preparing already prepared query is generally an anti-pattern and will likely affect performance. " + "Consider preparing the statement only once. Query='{}'", stmt.getQueryString()); // The one object in the cache will get GCed once it's not referenced by the client anymore // since we use a weak reference. // So we need to make sure that the instance we do return to the user is the one that is in // the cache. // However if the result metadata changed since the last PREPARE call, this also needs to be // updated. previous.getPreparedId().resultSetMetadata = stmt.getPreparedId().resultSetMetadata; return previous; } return stmt; } /** * @param reusedConnection an existing connection (from a reconnection attempt) that we want to * reuse to prepare the statements (might be null). * @return a connection that the rest of the initialization process can use (it will be made * part of a connection pool). Can be reusedConnection, or one that was open in the method. 
*/ private Connection prepareAllQueries(Host host, Connection reusedConnection) throws InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { if (preparedQueries.isEmpty()) return reusedConnection; logger.debug( "Preparing {} prepared queries on newly up node {}", preparedQueries.size(), host); Connection connection = null; try { connection = (reusedConnection == null) ? connectionFactory.open(host) : reusedConnection; // Furthermore, along with each prepared query we keep the current keyspace at the time of // preparation // as we need to make it is the same when we re-prepare on new/restarted nodes. Most query // will use the // same keyspace so keeping it each time is slightly wasteful, but this doesn't really // matter and is // simpler. Besides, we do avoid in prepareAllQueries to not set the current keyspace more // than needed. // We need to make sure we prepared every query with the right current keyspace, i.e. the // one originally // used for preparing it. However, since we are likely that all prepared query belong to // only a handful // of different keyspace (possibly only one), and to avoid setting the current keyspace more // than needed, // we first sort the query per keyspace. SetMultimap<String, String> perKeyspace = HashMultimap.create(); for (PreparedStatement ps : preparedQueries.values()) { // It's possible for a query to not have a current keyspace. But since null doesn't work // well as // map keys, we use the empty string instead (that is not a valid keyspace name). String keyspace = ps.getQueryKeyspace() == null ? "" : ps.getQueryKeyspace(); perKeyspace.put(keyspace, ps.getQueryString()); } for (String keyspace : perKeyspace.keySet()) { // Empty string mean no particular keyspace to set if (!keyspace.isEmpty()) connection.setKeyspace(keyspace); List<Connection.Future> futures = new ArrayList<Connection.Future>(preparedQueries.size()); for (String query : perKeyspace.get(keyspace)) { futures.add(connection.write(new Requests.Prepare(query))); } for (Connection.Future future : futures) { try { future.get(); } catch (ExecutionException e) { // This "might" happen if we drop a CF but haven't removed it's prepared queries // (which we don't do // currently). It's not a big deal however as if it's a more serious problem it'll // show up later when // the query is tried for execution. logger.debug("Unexpected error while preparing queries on new/newly up host", e); } } } return connection; } catch (ConnectionException e) { // Ignore, not a big deal if (connection != null) connection.closeAsync(); return null; } catch (AuthenticationException e) { // That's a bad news, but ignore at this point if (connection != null) connection.closeAsync(); return null; } catch (BusyConnectionException e) { // Ignore, not a big deal // In theory the problem is transient so the connection could be reused later, but if the // core pool size is 1 // it's better to close this one so that we start with a fresh connection. 
if (connection != null) connection.closeAsync(); return null; } } ListenableFuture<Void> submitSchemaRefresh( final SchemaElement targetType, final String targetKeyspace, final String targetName, final List<String> targetSignature) { SchemaRefreshRequest request = new SchemaRefreshRequest(targetType, targetKeyspace, targetName, targetSignature); logger.trace("Submitting schema refresh: {}", request); return schemaRefreshRequestDebouncer.eventReceived(request); } ListenableFuture<Void> submitNodeListRefresh() { logger.trace("Submitting node list and token map refresh"); return nodeListRefreshRequestDebouncer.eventReceived(new NodeListRefreshRequest()); } ListenableFuture<Void> submitNodeRefresh(InetSocketAddress address, HostEvent eventType) { NodeRefreshRequest request = new NodeRefreshRequest(address, eventType); logger.trace("Submitting node refresh: {}", request); return nodeRefreshRequestDebouncer.eventReceived(request); } // refresh the schema using the provided connection, and notice the future with the provided // resultset once done public void refreshSchemaAndSignal( final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement targetType, final String targetKeyspace, final String targetName, final List<String> targetSignature) { if (logger.isDebugEnabled()) logger.debug( "Refreshing schema for {}{}", targetType == null ? "everything" : targetKeyspace, (targetType == KEYSPACE) ? "" : "." + targetName + " (" + targetType + ")"); maybeRefreshSchemaAndSignal( connection, future, rs, targetType, targetKeyspace, targetName, targetSignature); } public void waitForSchemaAgreementAndSignal( final Connection connection, final DefaultResultSetFuture future, final ResultSet rs) { maybeRefreshSchemaAndSignal(connection, future, rs, null, null, null, null); } private void maybeRefreshSchemaAndSignal( final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final SchemaElement targetType, final String targetKeyspace, final String targetName, final List<String> targetSignature) { final boolean refreshSchema = (targetKeyspace != null); // if false, only wait for schema agreement executor.submit( new Runnable() { @Override public void run() { boolean schemaInAgreement = false; try { // Before refreshing the schema, wait for schema agreement so // that querying a table just after having created it don't fail. schemaInAgreement = ControlConnection.waitForSchemaAgreement(connection, Cluster.Manager.this); if (!schemaInAgreement) logger.warn( "No schema agreement from live replicas after {} s. 
The schema may not be up to date on some nodes.", configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds()); ListenableFuture<Void> schemaReady; if (refreshSchema) { schemaReady = submitSchemaRefresh(targetType, targetKeyspace, targetName, targetSignature); // JAVA-1120: skip debouncing delay and force immediate delivery if (!schemaReady.isDone()) schemaRefreshRequestDebouncer.scheduleImmediateDelivery(); } else { schemaReady = MoreFutures.VOID_SUCCESS; } final boolean finalSchemaInAgreement = schemaInAgreement; schemaReady.addListener( new Runnable() { @Override public void run() { rs.getExecutionInfo().setSchemaInAgreement(finalSchemaInAgreement); future.setResult(rs); } }, GuavaCompatibility.INSTANCE.sameThreadExecutor()); } catch (Exception e) { logger.warn("Error while waiting for schema agreement", e); // This is not fatal, complete the future anyway rs.getExecutionInfo().setSchemaInAgreement(schemaInAgreement); future.setResult(rs); } } }); } // Called when some message has been received but has been initiated from the server (streamId < // 0). // This is called on an I/O thread, so all blocking operation must be done on an executor. @Override public void handle(Message.Response response) { if (!(response instanceof Responses.Event)) { logger.error("Received an unexpected message from the server: {}", response); return; } final ProtocolEvent event = ((Responses.Event) response).event; logger.debug("Received event {}, scheduling delivery", response); switch (event.type) { case TOPOLOGY_CHANGE: ProtocolEvent.TopologyChange tpc = (ProtocolEvent.TopologyChange) event; Host.statesLogger.debug("[{}] received event {}", tpc.node, tpc.change); // Do NOT translate the address, it will be matched against Host.getBroadcastRpcAddress() // to find the target host. switch (tpc.change) { case REMOVED_NODE: submitNodeRefresh(tpc.node, HostEvent.REMOVED); break; default: // If a node was added, we don't have enough information to create a new Host (we are // missing it's ID) so trigger a full refresh submitNodeListRefresh(); break; } break; case STATUS_CHANGE: ProtocolEvent.StatusChange stc = (ProtocolEvent.StatusChange) event; Host.statesLogger.debug("[{}] received event {}", stc.node, stc.status); // Do NOT translate the address, it will be matched against Host.getBroadcastRpcAddress() // to find the target host. 
switch (stc.status) { case UP: submitNodeRefresh(stc.node, HostEvent.UP); break; case DOWN: submitNodeRefresh(stc.node, HostEvent.DOWN); break; } break; case SCHEMA_CHANGE: if (!configuration.getQueryOptions().isMetadataEnabled()) return; ProtocolEvent.SchemaChange scc = (ProtocolEvent.SchemaChange) event; switch (scc.change) { case CREATED: case UPDATED: submitSchemaRefresh( scc.targetType, scc.targetKeyspace, scc.targetName, scc.targetSignature); break; case DROPPED: if (scc.targetType == KEYSPACE) { final KeyspaceMetadata removedKeyspace = manager.metadata.removeKeyspace(scc.targetKeyspace); if (removedKeyspace != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnKeyspaceRemoved(removedKeyspace); } }); } } else { KeyspaceMetadata keyspace = manager.metadata.keyspaces.get(scc.targetKeyspace); if (keyspace == null) { logger.warn( "Received a DROPPED notification for {} {}.{}, but this keyspace is unknown in our metadata", scc.targetType, scc.targetKeyspace, scc.targetName); } else { switch (scc.targetType) { case TABLE: // we can't tell whether it's a table or a view, // but since two objects cannot have the same name, // try removing both final TableMetadata removedTable = keyspace.removeTable(scc.targetName); if (removedTable != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnTableRemoved(removedTable); } }); } else { final MaterializedViewMetadata removedView = keyspace.removeMaterializedView(scc.targetName); if (removedView != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnMaterializedViewRemoved(removedView); } }); } } break; case TYPE: final UserType removedType = keyspace.removeUserType(scc.targetName); if (removedType != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnUserTypeRemoved(removedType); } }); } break; case FUNCTION: final FunctionMetadata removedFunction = keyspace.removeFunction( Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); if (removedFunction != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnFunctionRemoved(removedFunction); } }); } break; case AGGREGATE: final AggregateMetadata removedAggregate = keyspace.removeAggregate( Metadata.fullFunctionName(scc.targetName, scc.targetSignature)); if (removedAggregate != null) { executor.submit( new Runnable() { @Override public void run() { manager.metadata.triggerOnAggregateRemoved(removedAggregate); } }); } break; } } } break; } break; } } void refreshConnectedHosts() { // Deal first with the control connection: if it's connected to a node that is not LOCAL, try // reconnecting (thus letting the loadBalancingPolicy pick a better node) Host ccHost = controlConnection.connectedHost(); if (ccHost == null || loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) controlConnection.triggerReconnect(); try { for (SessionManager s : sessions) Uninterruptibles.getUninterruptibly(s.updateCreatedPools()); } catch (ExecutionException e) { throw DriverThrowables.propagateCause(e); } } void refreshConnectedHost(Host host) { // Deal with the control connection if it was using this host Host ccHost = controlConnection.connectedHost(); if (ccHost == null || ccHost.equals(host) && loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) controlConnection.triggerReconnect(); for (SessionManager s : sessions) s.updateCreatedPools(host); } private class 
ClusterCloseFuture extends CloseFuture.Forwarding { ClusterCloseFuture(List<CloseFuture> futures) { super(futures); } @Override public CloseFuture force() { // The only ExecutorService we haven't forced yet is executor shutdownNow(executor); return super.force(); } @Override protected void onFuturesDone() { /* * When we reach this, all sessions should be shutdown. We've also started a shutdown * of the thread pools used by this object. Remains 2 things before marking the shutdown * as done: * 1) we need to wait for the completion of the shutdown of the Cluster threads pools. * 2) we need to shutdown the Connection.Factory, i.e. the executors used by Netty. * But at least for 2), we must not do it on the current thread because that could be * a netty worker, which we're going to shutdown. So creates some thread for that. */ (new Thread("Shutdown-checker") { @Override public void run() { // Just wait indefinitely on the the completion of the thread pools. Provided the // user // call force(), we'll never really block forever. try { if (reconnectionExecutor != null) { reconnectionExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); } if (scheduledTasksExecutor != null) { scheduledTasksExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); } if (executor != null) { executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); } if (blockingExecutor != null) { blockingExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); } // Some of the jobs on the executors can be doing query stuff, so close the // connectionFactory at the very last if (connectionFactory != null) { connectionFactory.shutdown(); } if (reaper != null) { reaper.shutdown(); } set(null); } catch (InterruptedException e) { Thread.currentThread().interrupt(); setException(e); } } }) .start(); } } private class CleanupIdleConnectionsTask implements Runnable { @Override public void run() { try { long now = System.currentTimeMillis(); for (SessionManager session : sessions) { session.cleanupIdleConnections(now); } } catch (Exception e) { logger.warn("Error while trashing idle connections", e); } } } private class SchemaRefreshRequest { private final SchemaElement targetType; private final String targetKeyspace; private final String targetName; private final List<String> targetSignature; public SchemaRefreshRequest( SchemaElement targetType, String targetKeyspace, String targetName, List<String> targetSignature) { this.targetType = targetType; this.targetKeyspace = Strings.emptyToNull(targetKeyspace); this.targetName = Strings.emptyToNull(targetName); this.targetSignature = targetSignature; } /** * Coalesce schema refresh requests. The algorithm is simple: if more than 2 keyspaces need * refresh, then refresh the entire schema; otherwise if more than 2 elements in the same * keyspace need refresh, then refresh the entire keyspace. * * @param that the other request to merge with the current one. 
* @return A coalesced request */ SchemaRefreshRequest coalesce(SchemaRefreshRequest that) { if (this.targetType == null || that.targetType == null) return new SchemaRefreshRequest(null, null, null, null); if (!this.targetKeyspace.equals(that.targetKeyspace)) return new SchemaRefreshRequest(null, null, null, null); if (this.targetName == null || that.targetName == null) return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); if (!this.targetName.equals(that.targetName)) return new SchemaRefreshRequest(KEYSPACE, targetKeyspace, null, null); return this; } @Override public String toString() { if (this.targetType == null) return "Refresh ALL"; if (this.targetName == null) return "Refresh keyspace " + targetKeyspace; return String.format("Refresh %s %s.%s", targetType, targetKeyspace, targetName); } } private class SchemaRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback<SchemaRefreshRequest> { @Override public ListenableFuture<?> deliver(final List<SchemaRefreshRequest> events) { return executor.submit( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { if (schemaRefreshRequestDebouncer.maxPendingEvents() > 1) { SchemaRefreshRequest coalesced = null; for (SchemaRefreshRequest request : events) { coalesced = coalesced == null ? request : coalesced.coalesce(request); } assert coalesced != null; logger.trace("Coalesced schema refresh request: {}", coalesced); controlConnection.refreshSchema( coalesced.targetType, coalesced.targetKeyspace, coalesced.targetName, coalesced.targetSignature); } else { for (SchemaRefreshRequest request : events) { logger.trace("Schema refresh request: {}", request); controlConnection.refreshSchema( request.targetType, request.targetKeyspace, request.targetName, request.targetSignature); } } } }); } } private class NodeRefreshRequest { private final InetSocketAddress address; private final HostEvent eventType; private NodeRefreshRequest(InetSocketAddress address, HostEvent eventType) { this.address = address; this.eventType = eventType; } @Override public String toString() { return address + " " + eventType; } } private class NodeRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback<NodeRefreshRequest> { @Override public ListenableFuture<?> deliver(List<NodeRefreshRequest> events) { Map<InetSocketAddress, HostEvent> hosts = new HashMap<InetSocketAddress, HostEvent>(); // only keep the last event for each host for (NodeRefreshRequest req : events) { hosts.put(req.address, req.eventType); } List<ListenableFuture<?>> futures = new ArrayList<ListenableFuture<?>>(hosts.size()); for (final Entry<InetSocketAddress, HostEvent> entry : hosts.entrySet()) { InetSocketAddress address = entry.getKey(); HostEvent eventType = entry.getValue(); switch (eventType) { case UP: Host upHost = metadata.getHost(address); if (upHost == null) { // We don't have enough information to create a new Host (we are missing it's ID) // so trigger a full node refresh submitNodeListRefresh(); } else { futures.add(schedule(hostUp(upHost))); } break; case DOWN: // Note that there is a slight risk we can receive the event late and thus // mark the host down even though we already had reconnected successfully. // But it is unlikely, and don't have too much consequence since we'll try // reconnecting // right away, so we favor the detection to make the Host.isUp method more reliable. 
Host downHost = metadata.getHost(address); if (downHost != null) { // Only process DOWN events if we have no active connections to the host . // Otherwise, we // wait for the connections to fail. This is to prevent against a bad control host // aggressively marking DOWN all of its peers. if (downHost.convictionPolicy.hasActiveConnections()) { logger.debug( "Ignoring down event on {} because it still has active connections", downHost); } else { futures.add(execute(hostDown(downHost))); } } break; case REMOVED: Host removedHost = metadata.getHost(address); if (removedHost != null) futures.add(execute(hostRemoved(removedHost))); break; } } return Futures.allAsList(futures); } private ListenableFuture<?> execute(ExceptionCatchingRunnable task) { return executor.submit(task); } private ListenableFuture<?> schedule(final ExceptionCatchingRunnable task) { // Cassandra tends to send notifications for new/up nodes a bit early (it is triggered once // gossip is up, but that is before the client-side server is up), so we add a delay // (otherwise the connection will likely fail and have to be retry which is wasteful). // This has been fixed by CASSANDRA-8236 and does not apply to protocol versions >= 4 // and C* versions >= 2.2.0 if (protocolVersion().compareTo(ProtocolVersion.V4) < 0) { final SettableFuture<?> future = SettableFuture.create(); scheduledTasksExecutor.schedule( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws Exception { ListenableFuture<?> f = execute(task); GuavaCompatibility.INSTANCE.addCallback( f, new FutureCallback<Object>() { @Override public void onSuccess(Object result) { future.set(null); } @Override public void onFailure(Throwable t) { future.setException(t); } }); } }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); return future; } else { return execute(task); } } private ExceptionCatchingRunnable hostUp(final Host host) { return new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws Exception { // Make sure we call controlConnection.refreshNodeInfo(host) // so that we have up-to-date infos on that host before recreating the pools (so we // typically catch that an upgraded node uses a new cassandra version). 
if (controlConnection.refreshNodeInfo(host)) { onUp(host, null); } else { logger.debug("Not enough info for {}, ignoring host", host); } } }; } private ExceptionCatchingRunnable hostDown(final Host host) { return new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws Exception { onDown(host, false, true); } }; } private ExceptionCatchingRunnable hostRemoved(final Host host) { return new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws Exception { if (metadata.remove(host)) { logger.info("Cassandra host {} removed", host); onRemove(host); submitNodeListRefresh(); } } }; } } private class NodeListRefreshRequest { @Override public String toString() { return "Refresh node list and token map"; } } private class NodeListRefreshRequestDeliveryCallback implements EventDebouncer.DeliveryCallback<NodeListRefreshRequest> { @Override public ListenableFuture<?> deliver(List<NodeListRefreshRequest> events) { // The number of received requests does not matter // as long as one request is made, refresh the entire node list return executor.submit( new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { controlConnection.refreshNodeListAndTokenMap(); } }); } } } private enum HostEvent { UP, DOWN, REMOVED } /** * Periodically ensures that closed connections are properly terminated once they have no more * pending requests. * * <p>This is normally done when the connection errors out, or when the last request is processed; * this class acts as a last-effort protection since unterminated connections can lead to * deadlocks. If it terminates a connection, this indicates a bug; warnings are logged so that * this can be reported. * * @see Connection#tryTerminate(boolean) */ static class ConnectionReaper { private static final int INTERVAL_MS = 15000; private final ScheduledExecutorService executor; @VisibleForTesting final Map<Connection, Long> connections = new ConcurrentHashMap<Connection, Long>(); private volatile boolean shutdown; private final Runnable reaperTask = new Runnable() { @Override public void run() { long now = System.currentTimeMillis(); Iterator<Entry<Connection, Long>> iterator = connections.entrySet().iterator(); while (iterator.hasNext()) { Entry<Connection, Long> entry = iterator.next(); Connection connection = entry.getKey(); Long terminateTime = entry.getValue(); if (terminateTime <= now) { boolean terminated = connection.tryTerminate(true); if (terminated) iterator.remove(); } } } }; ConnectionReaper(ScheduledExecutorService executor) { this.executor = executor; this.executor.scheduleWithFixedDelay( reaperTask, INTERVAL_MS, INTERVAL_MS, TimeUnit.MILLISECONDS); } void register(Connection connection, long terminateTime) { if (shutdown) { // This should not happen since the reaper is shut down after all sessions. logger.warn("Connection registered after reaper shutdown: {}", connection); connection.tryTerminate(true); } else { connections.put(connection, terminateTime); } } void shutdown() { shutdown = true; // Force shutdown to avoid waiting for the interval, and run the task manually one last time executor.shutdownNow(); reaperTask.run(); } } }
scylladb/java-driver
driver-core/src/main/java/com/datastax/driver/core/Cluster.java
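The onUp/onDown/onAdd/onRemove paths above fan every host state change out to the registered Host.StateListener instances (the `listeners` collection). As a minimal sketch of consuming those callbacks from application code, assuming the driver 3.x registration entry point Cluster.register(...) and the listener interface whose onUnregister/onUp/onDown calls appear in the source above (the class name and contact point are illustrative):

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;

// Hypothetical listener; logs host lifecycle events delivered by Cluster.Manager.
public class LoggingStateListener implements Host.StateListener {
    @Override public void onAdd(Host host)    { System.out.println("added: " + host); }
    @Override public void onUp(Host host)     { System.out.println("up: " + host); }
    @Override public void onDown(Host host)   { System.out.println("down: " + host); }
    @Override public void onRemove(Host host) { System.out.println("removed: " + host); }
    // Invoked from register()/close(), as in the listener loops shown above.
    @Override public void onRegister(Cluster cluster)   { }
    @Override public void onUnregister(Cluster cluster) { }

    public static void main(String[] args) {
        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        cluster.register(new LoggingStateListener()); // joins the listeners collection
        cluster.connect(); // state changes now reach the listener via the Manager
    }
}

Note that, as the source documents, onUp is only delivered once all sessions have re-created their pools, so Host.isUp() is reliable by the time the callback fires.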
213,916
package pc.vision;

import java.awt.Dimension;
import java.awt.image.BufferedImage;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;

import pc.strategy.StrategyController;
import pc.strategy.interfaces.Strategy;
import pc.vision.interfaces.VideoReceiver;
import au.edu.jcu.v4l4j.CaptureCallback;
import au.edu.jcu.v4l4j.Control;
import au.edu.jcu.v4l4j.DeviceInfo;
import au.edu.jcu.v4l4j.FrameGrabber;
import au.edu.jcu.v4l4j.ImageFormat;
import au.edu.jcu.v4l4j.VideoDevice;
import au.edu.jcu.v4l4j.VideoFrame;
import au.edu.jcu.v4l4j.exceptions.ImageFormatException;
import au.edu.jcu.v4l4j.exceptions.V4L4JException;

/**
 * Reads frames from a video device, giving options for camera controls
 *
 * @author Alex Adams (s1046358)
 */
public class VideoStream {
    private String videoDevName;
    private int width;
    private int height;
    private int channel;
    private int videoStandard;
    private int compressionQuality;
    private ImageFormat imageFormat;

    private int saturation;
    private int brightness;
    private int contrast;
    private int hue;
    private int chroma_gain;
    private int chroma_agc;

    private VideoDevice videoDev;
    private FrameGrabber frameGrabber;

    private ArrayList<VideoReceiver> videoReceivers = new ArrayList<VideoReceiver>();

    // Used to calculate FPS
    private ArrayDeque<Long> frameTimes = new ArrayDeque<Long>();
    private static final int FPS_AVERAGE_WINDOW = 25;

    public static final int FRAME_WIDTH = 640;
    public static final int FRAME_HEIGHT = 480;

    private final CaptureCallback frameGrabberCallback = new CaptureCallback() {
        public void exceptionReceived(V4L4JException e) {
            System.err.println("Unable to capture frame:");
            e.printStackTrace();
        }

        /**
         * Called by V4L4J when a new frame is generated
         *
         * @param frame
         *            The frame that was generated
         */
        public void nextFrame(VideoFrame frame) {
            // Calculate frame rate based on time between calls
            frameTimes.addLast(System.currentTimeMillis());
            if (frameTimes.size() > FPS_AVERAGE_WINDOW)
                frameTimes.removeFirst();
            float delta = frameTimes.isEmpty() ? 0
                    : (frameTimes.getLast() - frameTimes.getFirst())
                            / ((frameTimes.size() - 1) * 1000f);

            // Wait for the video device to initialise properly before reading
            // frames
            if (VideoStream.this.ready) {
                BufferedImage frameBuffer = frame.getBufferedImage();

                // frameBuffer =
                // DistortionFix.removeBarrelDistortion(frameBuffer, 0, 640, 0,
                // 480);

                // TODO: Should we blur?
                // ColorProcessor cp = new ColorProcessor(frameBuffer);
                // GaussianBlur gb = new GaussianBlur();
                // gb.blurGaussian(cp, 2, 2, 0.02);
                // frameBuffer = cp.getBufferedImage();

                for (VideoReceiver receiver : VideoStream.this.videoReceivers) {
                    receiver.sendFrame(frameBuffer, delta,
                            VideoStream.this.frameCounter,
                            frame.getCaptureTime());
                }

                ArrayList<Strategy> currentStrategies = StrategyController
                        .getCurrentStrategies();
                ArrayList<Strategy> removedStrategies = StrategyController
                        .getRemovedStrategies();
                for (Strategy s : removedStrategies) {
                    Vision.removeWorldStateReciver(s);
                }
                removedStrategies = new ArrayList<Strategy>();
                StrategyController.setRemovedStrategies(removedStrategies);
                for (Strategy s : currentStrategies) {
                    Vision.addWorldStateReceiver(s);
                }
            } else if (VideoStream.this.frameCounter > 3) {
                VideoStream.this.ready = true;
            }
            ++VideoStream.this.frameCounter;
            frame.recycle();
        }
    };

    private int frameCounter = 0;
    private boolean ready = false;

    /**
     * Constructs a VideoStream object connected to the specified video device
     *
     * @param videoDevice
     *            The name of the video device the stream is for
     * @param width
     *            The width in pixels of the stream source
     * @param height
     *            The height in pixels of the stream source
     * @param channel
     *            The video channel of the device
     * @param videoStandard
     *            The video standard of the device
     * @param compressionQuality
     *            The desired compression quality of the frames as a percentage
     */
    public VideoStream(String videoDevice, int width, int height, int channel,
            int videoStandard, int compressionQuality) {
        this.videoDevName = videoDevice;
        this.channel = channel;
        this.videoStandard = videoStandard;
        this.compressionQuality = compressionQuality;

        try {
            this.videoDev = new VideoDevice(videoDevice);
            DeviceInfo deviceInfo = this.videoDev.getDeviceInfo();
            if (deviceInfo.getFormatList().getNativeFormats().isEmpty()) {
                throw new ImageFormatException(
                        "Unable to detect any native formats for the device!");
            }
            this.imageFormat = deviceInfo.getFormatList()
                    .getYUVEncodableFormat(0);

            this.frameGrabber = this.videoDev.getJPEGFrameGrabber(width,
                    height, channel, videoStandard, compressionQuality,
                    this.imageFormat);
            this.frameGrabber.setCaptureCallback(this.frameGrabberCallback);
            this.frameGrabber.startCapture();

            this.width = this.frameGrabber.getWidth();
            this.height = this.frameGrabber.getHeight();
        } catch (V4L4JException e) {
            System.err.println("Couldn't initialise the frame grabber: "
                    + e.getMessage());
            e.printStackTrace();
            System.exit(1);
        }

        /*
         * Runtime.getRuntime().addShutdownHook(new Thread() {
         *
         * @Override public void run() {
         * VideoStream.this.frameGrabber.stopCapture(); } });
         */
    }

    /**
     * Reinitialises the frame grabber for the video stream. This is called
     * when either the video standard or compression quality is changed, since
     * these can't be updated otherwise.
     *
     * @throws V4L4JException
     *             when the frame grabber fails to start capturing frames with
     *             the new settings
     */
    private void reinitialiseFrameGrabber() throws V4L4JException {
        this.frameGrabber.stopCapture();
        this.frameGrabber = this.videoDev.getJPEGFrameGrabber(this.width,
                this.height, this.channel, this.videoStandard,
                this.compressionQuality, this.imageFormat);
        this.frameGrabber.setCaptureCallback(this.frameGrabberCallback);
        this.frameGrabber.startCapture();
    }

    public void shutdown() {
        this.frameGrabber.stopCapture();
    }

    /**
     * Gets the name of the video device the video stream is linked to
     *
     * @return The name of the video device
     */
    public String getVideoDeviceName() {
        return this.videoDevName;
    }

    /**
     * Gets the width and height of the video stream as a Dimension object
     *
     * @return The dimensions of the video stream in pixels
     */
    public Dimension getDimensions() {
        return new Dimension(this.width, this.height);
    }

    /**
     * Sets the video channel for the video stream
     *
     * @param channel
     *            The channel to set the video stream to
     */
    public void setChannel(int channel) {
        this.channel = channel;
    }

    /**
     * Gets the video channel used by the video stream
     *
     * @return The channel used by the video stream
     */
    public int getChannel() {
        return this.channel;
    }

    /**
     * Sets a new value for the video standard of the video stream
     *
     * @param videoStandard
     */
    public void setVideoStandard(int videoStandard) {
        try {
            this.videoStandard = videoStandard;
            // Adjust the frame grabber to the new setting
            reinitialiseFrameGrabber();
        } catch (V4L4JException e) {
            System.err.println("Couldn't change the video standard: "
                    + e.getMessage());
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Gets the video standard currently used by the video stream
     *
     * @return The video standard used by the video stream
     */
    public int getVideoStandard() {
        return this.videoStandard;
    }

    /**
     * Sets a new value for the JPEG compression quality of the video stream
     *
     * @param compressionQuality
     */
    public void setCompressionQuality(int compressionQuality) {
        try {
            this.compressionQuality = compressionQuality;
            // Adjust the frame grabber to the new setting
            reinitialiseFrameGrabber();
        } catch (V4L4JException e) {
            System.err.println("Couldn't change the compressionQuality: "
                    + e.getMessage());
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Gets the JPEG compression quality of the video stream
     *
     * @return The JPEG compression quality the video stream is set to as a
     *         percentage
     */
    public int getCompressionQuality() {
        return this.compressionQuality;
    }

    /**
     * Gets the saturation setting of the video device
     *
     * @return The saturation setting for the video device
     */
    public int getSaturation() {
        return this.saturation;
    }

    /**
     * Sets the saturation setting of the video device
     *
     * @param saturation
     *            The new setting
     */
    public void setSaturation(int saturation) {
        this.saturation = saturation;
    }

    /**
     * Gets the brightness setting of the video device
     *
     * @return The brightness setting for the video device
     */
    public int getBrightness() {
        return this.brightness;
    }

    /**
     * Sets the brightness of the video device
     *
     * @param brightness
     */
    public void setBrightness(int brightness) {
        this.brightness = brightness;
    }

    /**
     * Gets the contrast setting of the video device
     *
     * @return The contrast setting for the video device
     */
    public int getContrast() {
        return this.contrast;
    }

    /**
     * Sets the contrast of the video device
     *
     * @param contrast
     */
    public void setContrast(int contrast) {
        this.contrast = contrast;
    }

    /**
     * Gets the hue setting of the video device
     *
     * @return The hue setting for the video device
     */
    public int getHue() {
        return this.hue;
    }

    /**
     * Sets the hue of the video device
     *
     * @param hue
     */
    public void setHue(int hue) {
        this.hue = hue;
    }

    /**
     * Gets the Chroma Gain setting of the video device
     *
     * @return The Chroma Gain setting for the video device
     */
    public int getChromaGain() {
        return this.chroma_gain;
    }

    /**
     * Sets the Chroma Gain setting of the video device
     *
     * @param chromaGain
     */
    public void setChromaGain(int chromaGain) {
        this.chroma_gain = chromaGain;
    }

    /**
     * Gets the Chroma AGC setting of the video device
     *
     * @return The Chroma AGC setting for the video device
     */
    public boolean getChromaAGC() {
        return this.chroma_agc == 1;
    }

    /**
     * Sets the Chroma AGC setting of the video device
     *
     * @param chromaAGC
     */
    public void setChromaAGC(boolean chromaAGC) {
        this.chroma_agc = chromaAGC ? 1 : 0;
    }

    /**
     * Updates the video device's controls with the settings of the video
     * stream. This should be called after any call to setBrightness, etc. if
     * the settings are intended to affect the device output
     */
    public void updateVideoDeviceSettings() {
        try {
            List<Control> controls = this.videoDev.getControlList().getList();
            for (Control c : controls) {
                if (c.getName().equals("Contrast"))
                    c.setValue(this.contrast);
                else if (c.getName().equals("Brightness"))
                    c.setValue(this.brightness);
                else if (c.getName().equals("Hue"))
                    c.setValue(this.hue);
                else if (c.getName().equals("Saturation"))
                    c.setValue(this.saturation);
                else if (c.getName().equals("Chroma Gain"))
                    c.setValue(this.chroma_gain);
                else if (c.getName().equals("Chroma AGC"))
                    c.setValue(this.chroma_agc);
            }
        } catch (V4L4JException e) {
            System.err.println("Cannot set video device settings: "
                    + e.getMessage());
            e.printStackTrace();
        }
        this.videoDev.releaseControlList();
    }

    /**
     * Registers an object to receive frames from the video stream
     *
     * @param receiver
     *            The object being registered
     */
    public void addReceiver(VideoReceiver receiver) {
        this.videoReceivers.add(receiver);
    }
}
PeterH139/SDP-Project-Group-9
PcProject/src/pc/vision/VideoStream.java
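nextFrame() above estimates the frame rate by keeping the last FPS_AVERAGE_WINDOW timestamps in a deque and dividing the elapsed time by the number of intervals between them. A self-contained sketch of just that sliding-window calculation (the class and method names are illustrative, not part of the project; it also guards the single-sample case, which the original only guards for an empty deque):

import java.util.ArrayDeque;

// Sliding-window frame-interval estimator, mirroring VideoStream.frameTimes.
public class FpsEstimator {
    private static final int WINDOW = 25; // matches FPS_AVERAGE_WINDOW above
    private final ArrayDeque<Long> frameTimes = new ArrayDeque<Long>();

    /** Record a frame; returns seconds per frame over the window, or 0 while priming. */
    public float onFrame(long nowMillis) {
        frameTimes.addLast(nowMillis);
        if (frameTimes.size() > WINDOW)
            frameTimes.removeFirst();
        if (frameTimes.size() < 2)
            return 0f; // not enough samples for an interval yet
        // Elapsed milliseconds divided by the number of frame intervals, converted to seconds.
        return (frameTimes.getLast() - frameTimes.getFirst())
                / ((frameTimes.size() - 1) * 1000f);
    }
}

The reciprocal of the returned value gives frames per second; the original code passes the interval itself as `delta` to each VideoReceiver.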
213,917
package trabalho2;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Vector;

import org.snmp4j.CommunityTarget;
import org.snmp4j.PDU;
import org.snmp4j.Snmp;
import org.snmp4j.TransportMapping;
import org.snmp4j.event.ResponseEvent;
import org.snmp4j.mp.SnmpConstants;
import org.snmp4j.smi.Integer32;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.OctetString;
import org.snmp4j.smi.UdpAddress;
import org.snmp4j.smi.VariableBinding;
import org.snmp4j.transport.DefaultUdpTransportMapping;

/**
 * @author Douglas, Patricia, Priscila, Tamires
 */
public class Conexao {

    private ArrayList<String> listaAgentes = new ArrayList<String>();
    private ArrayList<PontoGrafico> lista = new ArrayList<PontoGrafico>();
    private Grafico chart = new Grafico();
    private int anteriorIn = -1;
    private int anteriorOut = -1;
    private int atualIn = -1;
    private int atualOut = -1;
    // Initialised to -1 like the int counters above, so the first sample only
    // primes the state instead of plotting a bogus delta.
    private float anteriorY = -1;
    private float atualY;

    public String get(String ip, String comunidade, String OID) {
        String mensagem = "";
        try {
            TransportMapping transport = new DefaultUdpTransportMapping();
            transport.listen();

            CommunityTarget target = new CommunityTarget();
            target.setCommunity(new OctetString(comunidade));
            target.setVersion(SnmpConstants.version2c);
            target.setAddress(new UdpAddress(ip + "/161"));
            target.setRetries(2);
            target.setTimeout(1000);

            PDU pdu = new PDU();
            pdu.add(new VariableBinding(new OID(OID)));
            pdu.setType(PDU.GET);
            pdu.setRequestID(new Integer32(1));

            Snmp snmp = new Snmp(transport);
            ResponseEvent response = snmp.get(pdu, target);

            if (response != null) {
                PDU responsePDU = response.getResponse();
                if (responsePDU != null) {
                    int errorStatus = responsePDU.getErrorStatus();
                    if (errorStatus == PDU.noError) {
                        Vector<? extends VariableBinding> variableBindings =
                                responsePDU.getVariableBindings();
                        for (int i = 0; i < variableBindings.size(); i++) {
                            mensagem += variableBindings.get(i).toValueString();
                        }
                    } else {
                        String errorStatusText = responsePDU.getErrorStatusText();
                        int errorIndex = responsePDU.getErrorIndex();
                        mensagem = "Error: Request Failed\n";
                        mensagem += "Error Status = " + errorStatus + "\n ";
                        mensagem += "Error Index = " + errorIndex + "\n ";
                        mensagem += "Error Status Text = " + errorStatusText + "\n ";
                    }
                } else {
                    mensagem += "Error: Response PDU is null.\n";
                }
            } else {
                mensagem += "Error: Agent Timeout... \n";
            }
            snmp.close();
        } catch (IOException e) {
            mensagem += "Erro não pode ser encontrado o oid solicitado.\n";
        }
        return mensagem;
    }

    public void atualizaGrafico(String titulo, int inX, int inY, int outX, int outY) {
        if (anteriorIn != -1 && anteriorOut != -1) {
            atualIn = inY;
            atualOut = outY;
            lista.add(new PontoGrafico(inX, (atualIn - anteriorIn),
                    outX, (atualOut - anteriorOut)));
            anteriorIn = atualIn;
            anteriorOut = atualOut;
        } else {
            anteriorIn = inY;
            anteriorOut = outY;
        }
        chart.criaGrafico("Gráfico", titulo, lista);
    }

    public void atualizaGrafico(String titulo, int x, float y) {
        if (anteriorY != -1) {
            atualY = y;
            lista.add(new PontoGrafico(x, (atualY - anteriorY)));
            anteriorY = atualY;
        } else {
            anteriorY = y;
        }
        chart.criaGrafico2("Gráfico", titulo, lista);
    }

    public ArrayList<String> descoberta() {
        String ip;
        ArrayList<String> lista = new ArrayList<String>();
        int lastOctes = 150;
        while (lastOctes < 239) {
            ip = "10.32.143." + Integer.toString(lastOctes) + "/161";
            lista.add(ip);
            lastOctes++;
        }
        return lista;
    }

    public void agentes() {
        ArrayList<String> ips = descoberta();
        for (String ip : ips) {
            if (!get(ip, "public", ".1.3.6.1.2.1.4.3.0").equals("Error: Response PDU is null.\n")) {
                String novoIp = ip.split("/")[0];
                listaAgentes.add(novoIp);
                System.out.println(novoIp);
            }
        }
    }

    public ArrayList<String> getListaAgentes() {
        return listaAgentes;
    }

    public void chamaAgendador(String ip, String comunidade, String metrica, String indice, int tempo) {
        //chamaGet(ip, comunidade, metrica, indice, tempo);
        Agendador agendador = new Agendador(ip, comunidade, metrica, indice, tempo);
        agendador.agendamento();
    }
}
dtondin/T2_Redes
src/trabalho2/Conexao.java
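atualizaGrafico() above plots the difference between consecutive readings of a monotonically increasing SNMP counter (such as ifInOctets), using -1 as a "not yet primed" sentinel: the first sample only records the baseline, and each later sample plots the delta against the previous one. The same logic isolated as a tiny helper (names are illustrative; this sketch assumes the counter does not wrap between polls):

// Counter-delta helper mirroring the priming/delta pattern in Conexao.
public class CounterDelta {
    private long previous = -1; // -1 means "no baseline yet", as in Conexao

    /** Returns the delta since the previous sample, or -1 while priming. */
    public long update(long sample) {
        if (previous == -1) {
            previous = sample; // first reading only establishes the baseline
            return -1;
        }
        long delta = sample - previous;
        previous = sample;
        return delta;
    }
}

Dividing each delta by the polling interval (the `tempo` parameter passed to Agendador) would turn these raw byte deltas into a rate, which is what the chart effectively displays per tick.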
213,918
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */
package org.apache.cassandra.cql;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;

import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cli.CliUtils;
import org.apache.cassandra.db.CounterColumn;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.AsciiType;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.marshal.TypeParser;
import org.apache.cassandra.db.migration.*;
import org.apache.cassandra.dht.*;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.thrift.*;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.Pair;

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;

import org.antlr.runtime.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;

public class QueryProcessor
{
    public static final String CQL_VERSION = "2.0.0";

    private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);

    private static final long timeLimitForSchemaAgreement = 10 * 1000;

    public static final String DEFAULT_KEY_NAME = bufferToString(CFMetaData.DEFAULT_KEY_NAME);

    private static List<org.apache.cassandra.db.Row> getSlice(CFMetaData metadata, SelectStatement select, List<String> variables)
    throws InvalidRequestException, TimedOutException, UnavailableException
    {
        QueryPath queryPath = new QueryPath(select.getColumnFamily());
        List<ReadCommand> commands = new ArrayList<ReadCommand>();

        // ...of a list of column names
        if (!select.isColumnRange())
        {
            Collection<ByteBuffer> columnNames = getColumnNames(select, metadata, variables);
            validateColumnNames(columnNames);

            for (Term rawKey : select.getKeys())
            {
                ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);

                validateKey(key);
                commands.add(new SliceByNamesReadCommand(metadata.ksName, key, queryPath, columnNames));
            }
        }
        // ...a range (slice) of column names
        else
        {
            AbstractType<?> comparator = select.getComparator(metadata.ksName);
            ByteBuffer start = select.getColumnStart().getByteBuffer(comparator, variables);
            ByteBuffer finish = select.getColumnFinish().getByteBuffer(comparator, variables);

            for (Term rawKey : select.getKeys())
            {
                ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator(), variables);

                validateKey(key);
                validateSliceRange(metadata, start, finish, select.isColumnsReversed());
                commands.add(new SliceFromReadCommand(metadata.ksName,
                                                      key,
                                                      queryPath,
                                                      start,
                                                      finish,
                                                      select.isColumnsReversed(),
                                                      select.getColumnsLimit()));
            }
        }

        try
        {
            return StorageProxy.read(commands, select.getConsistencyLevel());
        }
        catch (TimeoutException e)
        {
            throw new TimedOutException();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
    }

    private static List<ByteBuffer> getColumnNames(SelectStatement select, CFMetaData metadata, List<String> variables)
    throws InvalidRequestException
    {
        String keyString = getKeyString(metadata);
        List<ByteBuffer> columnNames = new ArrayList<ByteBuffer>();
        for (Term column : select.getColumnNames())
        {
            // skip the key for the slice op; we'll add it to the resultset in extractThriftColumns
            if (!column.getText().equalsIgnoreCase(keyString))
                columnNames.add(column.getByteBuffer(metadata.comparator, variables));
        }
        return columnNames;
    }

    private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables)
    throws TimedOutException, UnavailableException, InvalidRequestException
    {
        List<org.apache.cassandra.db.Row> rows;
        IPartitioner<?> p = StorageService.getPartitioner();

        AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();

        ByteBuffer startKeyBytes = (select.getKeyStart() != null)
                                   ? select.getKeyStart().getByteBuffer(keyType, variables)
                                   : null;

        ByteBuffer finishKeyBytes = (select.getKeyFinish() != null)
                                    ? select.getKeyFinish().getByteBuffer(keyType, variables)
                                    : null;

        RowPosition startKey = RowPosition.forKey(startKeyBytes, p), finishKey = RowPosition.forKey(finishKeyBytes, p);
        if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p))
        {
            if (p instanceof RandomPartitioner)
                throw new InvalidRequestException("Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
            else
                throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
        }
        AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);

        // XXX: Our use of Thrift structs internally makes me Sad. :(
        SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
        validateSlicePredicate(metadata, thriftSlicePredicate);

        int limit = select.isKeyRange() && select.getKeyStart() != null
                  ? select.getNumRecords() + 1
                  : select.getNumRecords();

        try
        {
            rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName,
                                                                    select.getColumnFamily(),
                                                                    null,
                                                                    thriftSlicePredicate,
                                                                    bounds,
                                                                    limit),
                                              select.getConsistencyLevel());
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        catch (org.apache.cassandra.thrift.UnavailableException e)
        {
            throw new UnavailableException();
        }
        catch (TimeoutException e)
        {
            throw new TimedOutException();
        }

        // if start key was set and relation was "greater than"
        if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty())
        {
            if (rows.get(0).key.key.equals(startKeyBytes))
                rows.remove(0);
        }

        // if finish key was set and relation was "less than"
        if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty())
        {
            int lastIndex = rows.size() - 1;
            if (rows.get(lastIndex).key.key.equals(finishKeyBytes))
                rows.remove(lastIndex);
        }

        return rows.subList(0, select.getNumRecords() < rows.size() ? select.getNumRecords() : rows.size());
    }

    private static List<org.apache.cassandra.db.Row> getIndexedSlices(CFMetaData metadata, SelectStatement select, List<String> variables)
    throws TimedOutException, UnavailableException, InvalidRequestException
    {
        // XXX: Our use of Thrift structs internally (still) makes me Sad. :~(
        SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
        validateSlicePredicate(metadata, thriftSlicePredicate);

        List<IndexExpression> expressions = new ArrayList<IndexExpression>();
        for (Relation columnRelation : select.getColumnRelations())
        {
            // Left and right side of relational expression encoded according to comparator/validator.
            ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
            ByteBuffer value = columnRelation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);

            expressions.add(new IndexExpression(entity,
                                                IndexOperator.valueOf(columnRelation.operator().toString()),
                                                value));
        }

        AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();
        ByteBuffer startKey = (!select.isKeyRange()) ? (new Term()).getByteBuffer() : select.getKeyStart().getByteBuffer(keyType, variables);

        IndexClause thriftIndexClause = new IndexClause(expressions, startKey, select.getNumRecords());

        List<org.apache.cassandra.db.Row> rows;
        try
        {
            rows = StorageProxy.scan(metadata.ksName,
                                     select.getColumnFamily(),
                                     thriftIndexClause,
                                     thriftSlicePredicate,
                                     select.getConsistencyLevel());
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        catch (TimeoutException e)
        {
            throw new TimedOutException();
        }
        return rows;
    }

    private static void batchUpdate(ClientState clientState, List<UpdateStatement> updateStatements, ConsistencyLevel consistency, List<String> variables)
    throws InvalidRequestException, UnavailableException, TimedOutException
    {
        String globalKeyspace = clientState.getKeyspace();
        List<IMutation> rowMutations = new ArrayList<IMutation>();
        List<String> cfamsSeen = new ArrayList<String>();

        for (UpdateStatement update : updateStatements)
        {
            String keyspace = update.keyspace == null ? globalKeyspace : update.keyspace;

            // Avoid unnecessary authorizations.
            if (!(cfamsSeen.contains(update.getColumnFamily())))
            {
                clientState.hasColumnFamilyAccess(keyspace, update.getColumnFamily(), Permission.WRITE);
                cfamsSeen.add(update.getColumnFamily());
            }

            rowMutations.addAll(update.prepareRowMutations(keyspace, clientState, variables));
        }

        try
        {
            StorageProxy.mutate(rowMutations, consistency);
        }
        catch (org.apache.cassandra.thrift.UnavailableException e)
        {
            throw new UnavailableException();
        }
        catch (TimeoutException e)
        {
            throw new TimedOutException();
        }
    }

    private static SlicePredicate slicePredicateFromSelect(SelectStatement select, CFMetaData metadata, List<String> variables)
    throws InvalidRequestException
    {
        SlicePredicate thriftSlicePredicate = new SlicePredicate();

        if (select.isColumnRange() || select.getColumnNames().size() == 0)
        {
            SliceRange sliceRange = new SliceRange();
            sliceRange.start = select.getColumnStart().getByteBuffer(metadata.comparator, variables);
            sliceRange.finish = select.getColumnFinish().getByteBuffer(metadata.comparator, variables);
            sliceRange.reversed = select.isColumnsReversed();
            sliceRange.count = select.getColumnsLimit();
            thriftSlicePredicate.slice_range = sliceRange;
        }
        else
        {
            thriftSlicePredicate.column_names = getColumnNames(select, metadata, variables);
        }

        return thriftSlicePredicate;
    }

    /* Test for SELECT-specific taboos */
    private static void validateSelect(String keyspace, SelectStatement select, List<String> variables) throws InvalidRequestException
    {
        ThriftValidation.validateConsistencyLevel(keyspace, select.getConsistencyLevel(), RequestType.READ);

        // Finish key w/o start key (KEY < foo)
        if (!select.isKeyRange() && (select.getKeyFinish() != null))
            throw new InvalidRequestException("Key range clauses must include a start key (i.e. KEY > term)");

        // Key range and by-key(s) combined (KEY > foo AND KEY = bar)
        if (select.isKeyRange() && select.getKeys().size() > 0)
            throw new InvalidRequestException("You cannot combine key range and by-key clauses in a SELECT");

        // Start and finish keys, *and* column relations (KEY > foo AND KEY < bar and name1 = value1).
        if (select.isKeyRange() && (select.getKeyFinish() != null) && (select.getColumnRelations().size() > 0))
            throw new InvalidRequestException("You cannot combine key range and by-column clauses in a SELECT");

        // Can't use more than one KEY =
        if (!select.isMultiKey() && select.getKeys().size() > 1)
            throw new InvalidRequestException("You cannot use more than one KEY = in a SELECT");

        if (select.getColumnRelations().size() > 0)
        {
            AbstractType<?> comparator = select.getComparator(keyspace);
            Set<ByteBuffer> indexed = Table.open(keyspace).getColumnFamilyStore(select.getColumnFamily()).indexManager.getIndexedColumns();
            for (Relation relation : select.getColumnRelations())
            {
                if ((relation.operator() == RelationType.EQ) && indexed.contains(relation.getEntity().getByteBuffer(comparator, variables)))
                    return;
            }
            throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator");
        }
    }

    // Copypasta from o.a.c.thrift.CassandraDaemon
    private static void applyMigrationOnStage(final Migration m) throws SchemaDisagreementException, InvalidRequestException
    {
        Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
        {
            public Object call() throws Exception
            {
                m.apply();
                m.announce();
                return null;
            }
        });
        try
        {
            f.get();
        }
        catch (InterruptedException e)
        {
            throw new RuntimeException(e);
        }
        catch (ExecutionException e)
        {
            // this means call() threw an exception. deal with it directly.
if (e.getCause() != null) { InvalidRequestException ex = new InvalidRequestException(e.getCause().getMessage()); ex.initCause(e.getCause()); throw ex; } else { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } } validateSchemaIsSettled(); } public static void validateKey(ByteBuffer key) throws InvalidRequestException { if (key == null || key.remaining() == 0) { throw new InvalidRequestException("Key may not be empty"); } // check that key can be handled by FBUtilities.writeShortByteArray if (key.remaining() > FBUtilities.MAX_UNSIGNED_SHORT) { throw new InvalidRequestException("Key length of " + key.remaining() + " is longer than maximum of " + FBUtilities.MAX_UNSIGNED_SHORT); } } public static void validateKeyAlias(CFMetaData cfm, String key) throws InvalidRequestException { assert key.toUpperCase().equals(key); // should always be uppercased by caller String realKeyAlias = bufferToString(cfm.getKeyName()).toUpperCase(); if (!realKeyAlias.equals(key)) throw new InvalidRequestException(String.format("Expected key '%s' to be present in WHERE clause for '%s'", key, cfm.cfName)); } private static void validateColumnNames(Iterable<ByteBuffer> columns) throws InvalidRequestException { for (ByteBuffer name : columns) { if (name.remaining() > IColumn.MAX_NAME_LENGTH) throw new InvalidRequestException(String.format("column name is too long (%s > %s)", name.remaining(), IColumn.MAX_NAME_LENGTH)); if (name.remaining() == 0) throw new InvalidRequestException("zero-length column name"); } } public static void validateColumnName(ByteBuffer column) throws InvalidRequestException { validateColumnNames(Arrays.asList(column)); } public static void validateColumn(CFMetaData metadata, ByteBuffer name, ByteBuffer value) throws InvalidRequestException { validateColumnName(name); AbstractType<?> validator = metadata.getValueValidator(name); try { if (validator != null) validator.validate(value); } catch (MarshalException me) { throw new InvalidRequestException(String.format("Invalid column value for column (name=%s); %s", ByteBufferUtil.bytesToHex(name), me.getMessage())); } } private static void validateSlicePredicate(CFMetaData metadata, SlicePredicate predicate) throws InvalidRequestException { if (predicate.slice_range != null) validateSliceRange(metadata, predicate.slice_range); else validateColumnNames(predicate.column_names); } private static void validateSliceRange(CFMetaData metadata, SliceRange range) throws InvalidRequestException { validateSliceRange(metadata, range.start, range.finish, range.reversed); } private static void validateSliceRange(CFMetaData metadata, ByteBuffer start, ByteBuffer finish, boolean reversed) throws InvalidRequestException { AbstractType<?> comparator = metadata.getComparatorFor(null); Comparator<ByteBuffer> orderedComparator = reversed ? comparator.reverseComparator: comparator; if (start.remaining() > 0 && finish.remaining() > 0 && orderedComparator.compare(start, finish) > 0) throw new InvalidRequestException("range finish must come after start in traversal order"); } // Copypasta from CassandraServer (where it is private). 
private static void validateSchemaAgreement() throws SchemaDisagreementException { if (describeSchemaVersions().size() > 1) throw new SchemaDisagreementException(); } private static Map<String, List<String>> describeSchemaVersions() { // unreachable hosts don't count towards disagreement return Maps.filterKeys(StorageProxy.describeSchemaVersions(), Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE))); } public static CqlResult processStatement(CQLStatement statement,ClientState clientState, List<String> variables ) throws UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException { String keyspace = null; // Some statements won't have (or don't need) a keyspace (think USE, or CREATE). if (statement.type != StatementType.SELECT && StatementType.requiresKeyspace.contains(statement.type)) keyspace = clientState.getKeyspace(); CqlResult result = new CqlResult(); if (logger.isDebugEnabled()) logger.debug("CQL statement type: {}", statement.type.toString()); CFMetaData metadata; switch (statement.type) { case SELECT: SelectStatement select = (SelectStatement)statement.statement; final String oldKeyspace = clientState.getRawKeyspace(); if (select.isSetKeyspace()) { keyspace = CliUtils.unescapeSQLString(select.getKeyspace()); ThriftValidation.validateTable(keyspace); } else if (oldKeyspace == null) throw new InvalidRequestException("no keyspace has been specified"); else keyspace = oldKeyspace; clientState.hasColumnFamilyAccess(keyspace, select.getColumnFamily(), Permission.READ); metadata = validateColumnFamily(keyspace, select.getColumnFamily()); // need to do this in here because we need a CFMD.getKeyName() select.extractKeyAliasFromColumns(metadata); if (select.getKeys().size() > 0) validateKeyAlias(metadata, select.getKeyAlias()); validateSelect(keyspace, select, variables); List<org.apache.cassandra.db.Row> rows; // By-key if (!select.isKeyRange() && (select.getKeys().size() > 0)) { rows = getSlice(metadata, select, variables); } else { // Range query if ((select.getKeyFinish() != null) || (select.getColumnRelations().size() == 0)) { rows = multiRangeSlice(metadata, select, variables); } // Index scan else { rows = getIndexedSlices(metadata, select, variables); } } // count resultset is a single column named "count" result.type = CqlResultType.ROWS; if (select.isCountOperation()) { validateCountOperation(select); ByteBuffer countBytes = ByteBufferUtil.bytes("count"); result.schema = new CqlMetadata(Collections.<ByteBuffer, String>emptyMap(), Collections.<ByteBuffer, String>emptyMap(), "AsciiType", "LongType"); List<Column> columns = Collections.singletonList(new Column(countBytes).setValue(ByteBufferUtil.bytes((long) rows.size()))); result.rows = Collections.singletonList(new CqlRow(countBytes, columns)); return result; } // otherwise create resultset from query results result.schema = new CqlMetadata(new HashMap<ByteBuffer, String>(), new HashMap<ByteBuffer, String>(), TypeParser.getShortName(metadata.comparator), TypeParser.getShortName(metadata.getDefaultValidator())); List<CqlRow> cqlRows = new ArrayList<CqlRow>(); for (org.apache.cassandra.db.Row row : rows) { List<Column> thriftColumns = new ArrayList<Column>(); if (select.isColumnRange()) { if (select.isFullWildcard()) { // prepend key thriftColumns.add(new Column(metadata.getKeyName()).setValue(row.key.key).setTimestamp(-1)); result.schema.name_types.put(metadata.getKeyName(), TypeParser.getShortName(AsciiType.instance)); result.schema.value_types.put(metadata.getKeyName(), 
TypeParser.getShortName(metadata.getKeyValidator())); } // preserve comparator order if (row.cf != null) { for (IColumn c : row.cf.getSortedColumns()) { if (c.isMarkedForDelete()) continue; ColumnDefinition cd = metadata.getColumnDefinition(c.name()); if (cd != null) result.schema.value_types.put(c.name(), TypeParser.getShortName(cd.getValidator())); thriftColumns.add(thriftify(c)); } } } else { String keyString = getKeyString(metadata); // order columns in the order they were asked for for (Term term : select.getColumnNames()) { if (term.getText().equalsIgnoreCase(keyString)) { // preserve case of key as it was requested ByteBuffer requestedKey = ByteBufferUtil.bytes(term.getText()); thriftColumns.add(new Column(requestedKey).setValue(row.key.key).setTimestamp(-1)); result.schema.name_types.put(requestedKey, TypeParser.getShortName(AsciiType.instance)); result.schema.value_types.put(requestedKey, TypeParser.getShortName(metadata.getKeyValidator())); continue; } if (row.cf == null) continue; ByteBuffer name; try { name = term.getByteBuffer(metadata.comparator, variables); } catch (InvalidRequestException e) { throw new AssertionError(e); } ColumnDefinition cd = metadata.getColumnDefinition(name); if (cd != null) result.schema.value_types.put(name, TypeParser.getShortName(cd.getValidator())); IColumn c = row.cf.getColumn(name); if (c == null || c.isMarkedForDelete()) thriftColumns.add(new Column().setName(name)); else thriftColumns.add(thriftify(c)); } } // Create a new row, add the columns to it, and then add it to the list of rows CqlRow cqlRow = new CqlRow(); cqlRow.key = row.key.key; cqlRow.columns = thriftColumns; if (select.isColumnsReversed()) Collections.reverse(cqlRow.columns); cqlRows.add(cqlRow); } result.rows = cqlRows; return result; case INSERT: // insert uses UpdateStatement case UPDATE: UpdateStatement update = (UpdateStatement)statement.statement; ThriftValidation.validateConsistencyLevel(keyspace, update.getConsistencyLevel(), RequestType.WRITE); batchUpdate(clientState, Collections.singletonList(update), update.getConsistencyLevel(),variables); result.type = CqlResultType.VOID; return result; case BATCH: BatchStatement batch = (BatchStatement) statement.statement; ThriftValidation.validateConsistencyLevel(keyspace, batch.getConsistencyLevel(), RequestType.WRITE); if (batch.getTimeToLive() != 0) throw new InvalidRequestException("Global TTL on the BATCH statement is not supported."); for (AbstractModification up : batch.getStatements()) { if (up.isSetConsistencyLevel()) throw new InvalidRequestException( "Consistency level must be set on the BATCH, not individual statements"); if (batch.isSetTimestamp() && up.isSetTimestamp()) throw new InvalidRequestException( "Timestamp must be set either on BATCH or individual statements"); } try { StorageProxy.mutate(batch.getMutations(keyspace, clientState, variables), batch.getConsistencyLevel()); } catch (org.apache.cassandra.thrift.UnavailableException e) { throw new UnavailableException(); } catch (TimeoutException e) { throw new TimedOutException(); } result.type = CqlResultType.VOID; return result; case USE: clientState.setKeyspace(CliUtils.unescapeSQLString((String) statement.statement)); result.type = CqlResultType.VOID; return result; case TRUNCATE: Pair<String, String> columnFamily = (Pair<String, String>)statement.statement; keyspace = columnFamily.left == null ? 
clientState.getKeyspace() : columnFamily.left; validateColumnFamily(keyspace, columnFamily.right); clientState.hasColumnFamilyAccess(keyspace, columnFamily.right, Permission.WRITE); try { StorageProxy.truncateBlocking(keyspace, columnFamily.right); } catch (TimeoutException e) { throw (UnavailableException) new UnavailableException().initCause(e); } catch (IOException e) { throw (UnavailableException) new UnavailableException().initCause(e); } result.type = CqlResultType.VOID; return result; case DELETE: DeleteStatement delete = (DeleteStatement)statement.statement; keyspace = delete.keyspace == null ? clientState.getKeyspace() : delete.keyspace; try { StorageProxy.mutate(delete.prepareRowMutations(keyspace, clientState, variables), delete.getConsistencyLevel()); } catch (TimeoutException e) { throw new TimedOutException(); } result.type = CqlResultType.VOID; return result; case CREATE_KEYSPACE: CreateKeyspaceStatement create = (CreateKeyspaceStatement)statement.statement; create.validate(); clientState.hasKeyspaceSchemaAccess(Permission.WRITE); validateSchemaAgreement(); try { KsDef ksd = new KsDef(create.getName(), create.getStrategyClass(), Collections.<CfDef>emptyList()) .setStrategy_options(create.getStrategyOptions()); ThriftValidation.validateKsDef(ksd); ThriftValidation.validateKeyspaceNotYetExisting(create.getName()); applyMigrationOnStage(new AddKeyspace(KSMetaData.fromThrift(ksd))); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case CREATE_COLUMNFAMILY: CreateColumnFamilyStatement createCf = (CreateColumnFamilyStatement)statement.statement; clientState.hasColumnFamilySchemaAccess(Permission.WRITE); validateSchemaAgreement(); CFMetaData cfmd = createCf.getCFMetaData(keyspace,variables); ThriftValidation.validateCfDef(cfmd.toThrift(), null); try { applyMigrationOnStage(new AddColumnFamily(cfmd)); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case CREATE_INDEX: CreateIndexStatement createIdx = (CreateIndexStatement)statement.statement; clientState.hasColumnFamilySchemaAccess(Permission.WRITE); validateSchemaAgreement(); CFMetaData oldCfm = Schema.instance.getCFMetaData(keyspace, createIdx.getColumnFamily()); if (oldCfm == null) throw new InvalidRequestException("No such column family: " + createIdx.getColumnFamily()); boolean columnExists = false; ByteBuffer columnName = createIdx.getColumnName().getByteBuffer(); // mutating oldCfm directly would be bad, but mutating a Thrift copy is fine. This also // sets us up to use validateCfDef to check for index name collisions. 
CfDef cf_def = oldCfm.toThrift(); for (ColumnDef cd : cf_def.column_metadata) { if (cd.name.equals(columnName)) { if (cd.index_type != null) throw new InvalidRequestException("Index already exists"); if (logger.isDebugEnabled()) logger.debug("Updating column {} definition for index {}", oldCfm.comparator.getString(columnName), createIdx.getIndexName()); cd.setIndex_type(IndexType.KEYS); cd.setIndex_name(createIdx.getIndexName()); columnExists = true; break; } } if (!columnExists) throw new InvalidRequestException("No column definition found for column " + oldCfm.comparator.getString(columnName)); CFMetaData.addDefaultIndexNames(cf_def); ThriftValidation.validateCfDef(cf_def, oldCfm); try { org.apache.cassandra.db.migration.avro.CfDef result1; try { result1 = CFMetaData.fromThrift(cf_def).toAvro(); } catch (Exception e) { throw new RuntimeException(e); } applyMigrationOnStage(new UpdateColumnFamily(result1)); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case DROP_INDEX: DropIndexStatement dropIdx = (DropIndexStatement)statement.statement; clientState.hasColumnFamilySchemaAccess(Permission.WRITE); validateSchemaAgreement(); try { applyMigrationOnStage(dropIdx.generateMutation(clientState.getKeyspace())); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.toString()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case DROP_KEYSPACE: String deleteKeyspace = (String)statement.statement; clientState.hasKeyspaceSchemaAccess(Permission.WRITE); validateSchemaAgreement(); try { applyMigrationOnStage(new DropKeyspace(deleteKeyspace)); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case DROP_COLUMNFAMILY: String deleteColumnFamily = (String)statement.statement; clientState.hasColumnFamilySchemaAccess(Permission.WRITE); validateSchemaAgreement(); try { applyMigrationOnStage(new DropColumnFamily(keyspace, deleteColumnFamily)); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; case ALTER_TABLE: AlterTableStatement alterTable = (AlterTableStatement) statement.statement; validateColumnFamily(keyspace, alterTable.columnFamily); clientState.hasColumnFamilyAccess(alterTable.columnFamily, Permission.WRITE); validateSchemaAgreement(); try { applyMigrationOnStage(new UpdateColumnFamily(alterTable.getCfDef(keyspace))); } catch (ConfigurationException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } catch (IOException e) { InvalidRequestException ex = new InvalidRequestException(e.getMessage()); ex.initCause(e); throw ex; } result.type = CqlResultType.VOID; return result; 
} return null; // We should never get here. } public static CqlResult process(String queryString, ClientState clientState) throws RecognitionException, UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException { logger.trace("CQL QUERY: {}", queryString); return processStatement(getStatement(queryString), clientState, new ArrayList<String>()); } public static CqlPreparedResult prepare(String queryString, ClientState clientState) throws RecognitionException, InvalidRequestException { logger.trace("CQL QUERY: {}", queryString); CQLStatement statement = getStatement(queryString); int statementId = makeStatementId(queryString); logger.trace("Discovered "+ statement.boundTerms + " bound variables."); clientState.getPrepared().put(statementId, statement); logger.trace(String.format("Stored prepared statement #%d with %d bind markers", statementId, statement.boundTerms)); return new CqlPreparedResult(statementId, statement.boundTerms); } public static CqlResult processPrepared(CQLStatement statement, ClientState clientState, List<String> variables) throws UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException { // Check to see if there are any bound variables to verify if (!(variables.isEmpty() && (statement.boundTerms == 0))) { if (variables.size() != statement.boundTerms) throw new InvalidRequestException(String.format("there were %d markers(?) in CQL but %d bound variables", statement.boundTerms, variables.size())); // at this point there is a match in count between markers and variables that is non-zero if (logger.isTraceEnabled()) for (int i = 0; i < variables.size(); i++) logger.trace("[{}] '{}'", i+1, variables.get(i)); } return processStatement(statement, clientState, variables); } private static final int makeStatementId(String cql) { // use the hash of the string till something better is provided return cql.hashCode(); } private static Column thriftify(IColumn c) { ByteBuffer value = (c instanceof CounterColumn) ? ByteBufferUtil.bytes(CounterContext.instance().total(c.value())) : c.value(); return new Column(c.name()).setValue(value).setTimestamp(c.timestamp()); } private static String getKeyString(CFMetaData metadata) { String keyString; try { keyString = ByteBufferUtil.string(metadata.getKeyName()); } catch (CharacterCodingException e) { throw new AssertionError(e); } return keyString; } private static CQLStatement getStatement(String queryStr) throws InvalidRequestException, RecognitionException { // Lexer and parser CharStream stream = new ANTLRStringStream(queryStr); CqlLexer lexer = new CqlLexer(stream); TokenStream tokenStream = new CommonTokenStream(lexer); CqlParser parser = new CqlParser(tokenStream); // Parse the query string to a statement instance CQLStatement statement = parser.query(); // The lexer and parser queue up any errors they may have encountered // along the way, if necessary, we turn them into exceptions here. 
lexer.throwLastRecognitionError(); parser.throwLastRecognitionError(); return statement; } private static void validateSchemaIsSettled() throws SchemaDisagreementException { long limit = System.currentTimeMillis() + timeLimitForSchemaAgreement; outer: while (limit - System.currentTimeMillis() >= 0) { String currentVersionId = Schema.instance.getVersion().toString(); for (String version : describeSchemaVersions().keySet()) { if (!version.equals(currentVersionId)) continue outer; } // schemas agree return; } throw new SchemaDisagreementException(); } private static void validateCountOperation(SelectStatement select) throws InvalidRequestException { if (select.isWildcard()) return; // valid count(*) if (!select.isColumnRange()) { List<Term> columnNames = select.getColumnNames(); String firstColumn = columnNames.get(0).getText(); if (columnNames.size() == 1 && (firstColumn.equals("*") || firstColumn.equals("1"))) return; // valid count(*) || count(1) } throw new InvalidRequestException("Only COUNT(*) and COUNT(1) operations are currently supported."); } private static String bufferToString(ByteBuffer string) { try { return ByteBufferUtil.string(string); } catch (CharacterCodingException e) { throw new RuntimeException(e.getMessage(), e); } } }
pbailis/cassandra-pbs
src/java/org/apache/cassandra/cql/QueryProcessor.java
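A note on the file above: multiRangeSlice() fetches numRecords + 1 rows whenever a start key is given, then drops boundary rows that a strict relation (KEY > x or KEY < y) excludes, and finally trims the result to the requested count. The following plain-JDK sketch isolates that trimming step; class and parameter names are hypothetical and plain strings stand in for row keys.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch (not Cassandra code): mirrors the post-filtering that
// multiRangeSlice() applies to a key-range result. One extra row is fetched so
// that dropping an excluded boundary key still leaves 'limit' rows available.
public class RangeTrimSketch {
    static List<String> trim(List<String> rows, String startKey, boolean includeStart,
                             String finishKey, boolean includeFinish, int limit) {
        List<String> result = new ArrayList<>(rows);
        // if a start key was set and the relation was strictly "greater than",
        // the first fetched row may be the start key itself: drop it
        if (startKey != null && !includeStart && !result.isEmpty()
                && result.get(0).equals(startKey))
            result.remove(0);
        // symmetric handling for a strictly "less than" finish key
        int last = result.size() - 1;
        if (finishKey != null && !includeFinish && last >= 0
                && result.get(last).equals(finishKey))
            result.remove(last);
        // trim to the requested record count
        return result.subList(0, Math.min(limit, result.size()));
    }

    public static void main(String[] args) {
        List<String> fetched = Arrays.asList("a", "b", "c", "d"); // limit + 1 rows
        System.out.println(trim(fetched, "a", false, null, true, 3)); // [b, c, d]
    }
}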
213,919
// // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2017.01.28 at 02:11:12 PM CST // package org.mesa.xml.b2mml; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlSchemaType; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.bind.annotation.XmlType; import javax.xml.bind.annotation.XmlValue; import javax.xml.bind.annotation.adapters.CollapsedStringAdapter; import javax.xml.bind.annotation.adapters.NormalizedStringAdapter; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; /** * <p>Java class for AnyGenericValueType complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="AnyGenericValueType"> * &lt;simpleContent> * &lt;extension base="&lt;http://www.w3.org/2001/XMLSchema>string"> * &lt;attribute name="currencyID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="currencyCodeListVersionID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="encodingCode" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="format" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="characterSetCode" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="listID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="listAgencyID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="listAgencyName" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="listName" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="listVersionID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="languageID" type="{http://www.w3.org/2001/XMLSchema}language" /> * &lt;attribute name="languageLocaleID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="listURI" type="{http://www.w3.org/2001/XMLSchema}anyURI" /> * &lt;attribute name="listSchemaURI" type="{http://www.w3.org/2001/XMLSchema}anyURI" /> * &lt;attribute name="mimeCode" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="name" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="schemaID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="schemaName" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="schemaAgencyID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="schemaAgencyName" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="schemaVersionID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="schemaDataURI" type="{http://www.w3.org/2001/XMLSchema}anyURI" /> * &lt;attribute name="schemaURI" type="{http://www.w3.org/2001/XMLSchema}anyURI" /> * &lt;attribute name="unitCode" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="unitCodeListID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="unitCodeListAgencyID" 
type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="unitCodeListAgencyName" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="unitCodeListVersionID" type="{http://www.w3.org/2001/XMLSchema}normalizedString" /> * &lt;attribute name="filename" type="{http://www.w3.org/2001/XMLSchema}string" /> * &lt;attribute name="uri" type="{http://www.w3.org/2001/XMLSchema}anyURI" /> * &lt;/extension> * &lt;/simpleContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "AnyGenericValueType", propOrder = { "value" }) @XmlSeeAlso({ ValueStringType.class, QuantityStringType.class }) public class AnyGenericValueType { @XmlValue protected String value; @XmlAttribute(name = "currencyID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String currencyID; @XmlAttribute(name = "currencyCodeListVersionID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String currencyCodeListVersionID; @XmlAttribute(name = "encodingCode") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String encodingCode; @XmlAttribute(name = "format") protected String format; @XmlAttribute(name = "characterSetCode") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String characterSetCode; @XmlAttribute(name = "listID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String listID; @XmlAttribute(name = "listAgencyID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String listAgencyID; @XmlAttribute(name = "listAgencyName") protected String listAgencyName; @XmlAttribute(name = "listName") protected String listName; @XmlAttribute(name = "listVersionID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String listVersionID; @XmlAttribute(name = "languageID") @XmlJavaTypeAdapter(CollapsedStringAdapter.class) @XmlSchemaType(name = "language") protected String languageID; @XmlAttribute(name = "languageLocaleID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String languageLocaleID; @XmlAttribute(name = "listURI") @XmlSchemaType(name = "anyURI") protected String listURI; @XmlAttribute(name = "listSchemaURI") @XmlSchemaType(name = "anyURI") protected String listSchemaURI; @XmlAttribute(name = "mimeCode") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String mimeCode; @XmlAttribute(name = "name") protected String name; @XmlAttribute(name = "schemaID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String schemaID; @XmlAttribute(name = "schemaName") protected String schemaName; @XmlAttribute(name = "schemaAgencyID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String schemaAgencyID; @XmlAttribute(name = "schemaAgencyName") protected String schemaAgencyName; @XmlAttribute(name = "schemaVersionID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String schemaVersionID; @XmlAttribute(name = "schemaDataURI") @XmlSchemaType(name = "anyURI") protected String schemaDataURI; @XmlAttribute(name = "schemaURI") 
@XmlSchemaType(name = "anyURI") protected String schemaURI; @XmlAttribute(name = "unitCode") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String unitCode; @XmlAttribute(name = "unitCodeListID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String unitCodeListID; @XmlAttribute(name = "unitCodeListAgencyID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String unitCodeListAgencyID; @XmlAttribute(name = "unitCodeListAgencyName") protected String unitCodeListAgencyName; @XmlAttribute(name = "unitCodeListVersionID") @XmlJavaTypeAdapter(NormalizedStringAdapter.class) @XmlSchemaType(name = "normalizedString") protected String unitCodeListVersionID; @XmlAttribute(name = "filename") protected String filename; @XmlAttribute(name = "uri") @XmlSchemaType(name = "anyURI") protected String uri; /** * Gets the value of the value property. * * @return * possible object is * {@link String } * */ public String getValue() { return value; } /** * Sets the value of the value property. * * @param value * allowed object is * {@link String } * */ public void setValue(String value) { this.value = value; } /** * Gets the value of the currencyID property. * * @return * possible object is * {@link String } * */ public String getCurrencyID() { return currencyID; } /** * Sets the value of the currencyID property. * * @param value * allowed object is * {@link String } * */ public void setCurrencyID(String value) { this.currencyID = value; } /** * Gets the value of the currencyCodeListVersionID property. * * @return * possible object is * {@link String } * */ public String getCurrencyCodeListVersionID() { return currencyCodeListVersionID; } /** * Sets the value of the currencyCodeListVersionID property. * * @param value * allowed object is * {@link String } * */ public void setCurrencyCodeListVersionID(String value) { this.currencyCodeListVersionID = value; } /** * Gets the value of the encodingCode property. * * @return * possible object is * {@link String } * */ public String getEncodingCode() { return encodingCode; } /** * Sets the value of the encodingCode property. * * @param value * allowed object is * {@link String } * */ public void setEncodingCode(String value) { this.encodingCode = value; } /** * Gets the value of the format property. * * @return * possible object is * {@link String } * */ public String getFormat() { return format; } /** * Sets the value of the format property. * * @param value * allowed object is * {@link String } * */ public void setFormat(String value) { this.format = value; } /** * Gets the value of the characterSetCode property. * * @return * possible object is * {@link String } * */ public String getCharacterSetCode() { return characterSetCode; } /** * Sets the value of the characterSetCode property. * * @param value * allowed object is * {@link String } * */ public void setCharacterSetCode(String value) { this.characterSetCode = value; } /** * Gets the value of the listID property. * * @return * possible object is * {@link String } * */ public String getListID() { return listID; } /** * Sets the value of the listID property. * * @param value * allowed object is * {@link String } * */ public void setListID(String value) { this.listID = value; } /** * Gets the value of the listAgencyID property. 
* * @return * possible object is * {@link String } * */ public String getListAgencyID() { return listAgencyID; } /** * Sets the value of the listAgencyID property. * * @param value * allowed object is * {@link String } * */ public void setListAgencyID(String value) { this.listAgencyID = value; } /** * Gets the value of the listAgencyName property. * * @return * possible object is * {@link String } * */ public String getListAgencyName() { return listAgencyName; } /** * Sets the value of the listAgencyName property. * * @param value * allowed object is * {@link String } * */ public void setListAgencyName(String value) { this.listAgencyName = value; } /** * Gets the value of the listName property. * * @return * possible object is * {@link String } * */ public String getListName() { return listName; } /** * Sets the value of the listName property. * * @param value * allowed object is * {@link String } * */ public void setListName(String value) { this.listName = value; } /** * Gets the value of the listVersionID property. * * @return * possible object is * {@link String } * */ public String getListVersionID() { return listVersionID; } /** * Sets the value of the listVersionID property. * * @param value * allowed object is * {@link String } * */ public void setListVersionID(String value) { this.listVersionID = value; } /** * Gets the value of the languageID property. * * @return * possible object is * {@link String } * */ public String getLanguageID() { return languageID; } /** * Sets the value of the languageID property. * * @param value * allowed object is * {@link String } * */ public void setLanguageID(String value) { this.languageID = value; } /** * Gets the value of the languageLocaleID property. * * @return * possible object is * {@link String } * */ public String getLanguageLocaleID() { return languageLocaleID; } /** * Sets the value of the languageLocaleID property. * * @param value * allowed object is * {@link String } * */ public void setLanguageLocaleID(String value) { this.languageLocaleID = value; } /** * Gets the value of the listURI property. * * @return * possible object is * {@link String } * */ public String getListURI() { return listURI; } /** * Sets the value of the listURI property. * * @param value * allowed object is * {@link String } * */ public void setListURI(String value) { this.listURI = value; } /** * Gets the value of the listSchemaURI property. * * @return * possible object is * {@link String } * */ public String getListSchemaURI() { return listSchemaURI; } /** * Sets the value of the listSchemaURI property. * * @param value * allowed object is * {@link String } * */ public void setListSchemaURI(String value) { this.listSchemaURI = value; } /** * Gets the value of the mimeCode property. * * @return * possible object is * {@link String } * */ public String getMimeCode() { return mimeCode; } /** * Sets the value of the mimeCode property. * * @param value * allowed object is * {@link String } * */ public void setMimeCode(String value) { this.mimeCode = value; } /** * Gets the value of the name property. * * @return * possible object is * {@link String } * */ public String getName() { return name; } /** * Sets the value of the name property. * * @param value * allowed object is * {@link String } * */ public void setName(String value) { this.name = value; } /** * Gets the value of the schemaID property. * * @return * possible object is * {@link String } * */ public String getSchemaID() { return schemaID; } /** * Sets the value of the schemaID property. 
* * @param value * allowed object is * {@link String } * */ public void setSchemaID(String value) { this.schemaID = value; } /** * Gets the value of the schemaName property. * * @return * possible object is * {@link String } * */ public String getSchemaName() { return schemaName; } /** * Sets the value of the schemaName property. * * @param value * allowed object is * {@link String } * */ public void setSchemaName(String value) { this.schemaName = value; } /** * Gets the value of the schemaAgencyID property. * * @return * possible object is * {@link String } * */ public String getSchemaAgencyID() { return schemaAgencyID; } /** * Sets the value of the schemaAgencyID property. * * @param value * allowed object is * {@link String } * */ public void setSchemaAgencyID(String value) { this.schemaAgencyID = value; } /** * Gets the value of the schemaAgencyName property. * * @return * possible object is * {@link String } * */ public String getSchemaAgencyName() { return schemaAgencyName; } /** * Sets the value of the schemaAgencyName property. * * @param value * allowed object is * {@link String } * */ public void setSchemaAgencyName(String value) { this.schemaAgencyName = value; } /** * Gets the value of the schemaVersionID property. * * @return * possible object is * {@link String } * */ public String getSchemaVersionID() { return schemaVersionID; } /** * Sets the value of the schemaVersionID property. * * @param value * allowed object is * {@link String } * */ public void setSchemaVersionID(String value) { this.schemaVersionID = value; } /** * Gets the value of the schemaDataURI property. * * @return * possible object is * {@link String } * */ public String getSchemaDataURI() { return schemaDataURI; } /** * Sets the value of the schemaDataURI property. * * @param value * allowed object is * {@link String } * */ public void setSchemaDataURI(String value) { this.schemaDataURI = value; } /** * Gets the value of the schemaURI property. * * @return * possible object is * {@link String } * */ public String getSchemaURI() { return schemaURI; } /** * Sets the value of the schemaURI property. * * @param value * allowed object is * {@link String } * */ public void setSchemaURI(String value) { this.schemaURI = value; } /** * Gets the value of the unitCode property. * * @return * possible object is * {@link String } * */ public String getUnitCode() { return unitCode; } /** * Sets the value of the unitCode property. * * @param value * allowed object is * {@link String } * */ public void setUnitCode(String value) { this.unitCode = value; } /** * Gets the value of the unitCodeListID property. * * @return * possible object is * {@link String } * */ public String getUnitCodeListID() { return unitCodeListID; } /** * Sets the value of the unitCodeListID property. * * @param value * allowed object is * {@link String } * */ public void setUnitCodeListID(String value) { this.unitCodeListID = value; } /** * Gets the value of the unitCodeListAgencyID property. * * @return * possible object is * {@link String } * */ public String getUnitCodeListAgencyID() { return unitCodeListAgencyID; } /** * Sets the value of the unitCodeListAgencyID property. * * @param value * allowed object is * {@link String } * */ public void setUnitCodeListAgencyID(String value) { this.unitCodeListAgencyID = value; } /** * Gets the value of the unitCodeListAgencyName property. 
* * @return * possible object is * {@link String } * */ public String getUnitCodeListAgencyName() { return unitCodeListAgencyName; } /** * Sets the value of the unitCodeListAgencyName property. * * @param value * allowed object is * {@link String } * */ public void setUnitCodeListAgencyName(String value) { this.unitCodeListAgencyName = value; } /** * Gets the value of the unitCodeListVersionID property. * * @return * possible object is * {@link String } * */ public String getUnitCodeListVersionID() { return unitCodeListVersionID; } /** * Sets the value of the unitCodeListVersionID property. * * @param value * allowed object is * {@link String } * */ public void setUnitCodeListVersionID(String value) { this.unitCodeListVersionID = value; } /** * Gets the value of the filename property. * * @return * possible object is * {@link String } * */ public String getFilename() { return filename; } /** * Sets the value of the filename property. * * @param value * allowed object is * {@link String } * */ public void setFilename(String value) { this.filename = value; } /** * Gets the value of the uri property. * * @return * possible object is * {@link String } * */ public String getUri() { return uri; } /** * Sets the value of the uri property. * * @param value * allowed object is * {@link String } * */ public void setUri(String value) { this.uri = value; } }
jpdillingham/B2MML4j
src/org/mesa/xml/b2mml/AnyGenericValueType.java
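AnyGenericValueType above is JAXB simple content: a single string value decorated with metadata attributes (unit, currency, schema identifiers, and so on). Because the generated class carries no @XmlRootElement, marshalling it directly requires a JAXBElement wrapper. A minimal sketch follows, assuming the generated package is on the classpath; the element QName is purely illustrative and not taken from the B2MML schema.

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.Marshaller;
import javax.xml.namespace.QName;
import org.mesa.xml.b2mml.AnyGenericValueType;

// Sketch: marshal the attribute-decorated simple-content type to XML.
public class AnyGenericValueDemo {
    public static void main(String[] args) throws Exception {
        AnyGenericValueType v = new AnyGenericValueType();
        v.setValue("42.5");        // the simple content
        v.setUnitCode("kg");       // attributes decorate the value
        v.setFormat("decimal");

        JAXBContext ctx = JAXBContext.newInstance(AnyGenericValueType.class);
        Marshaller m = ctx.createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        // No @XmlRootElement on the generated class, so wrap it; "Value" is a
        // hypothetical element name chosen for this demo only.
        m.marshal(new JAXBElement<>(new QName("Value"), AnyGenericValueType.class, v),
                  System.out);
        // prints roughly: <Value unitCode="kg" format="decimal">42.5</Value>
    }
}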
213,920
/* * Copyright 2024 - 2024 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.ai.vectorstore; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.CqlSessionBuilder; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.api.querybuilder.BuildableQuery; import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; import com.datastax.oss.driver.api.querybuilder.schema.AlterTableAddColumn; import com.datastax.oss.driver.api.querybuilder.schema.AlterTableAddColumnEnd; import com.datastax.oss.driver.api.querybuilder.schema.CreateTable; import com.datastax.oss.driver.api.querybuilder.schema.CreateTableStart; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.lang.Nullable; import java.net.InetSocketAddress; import java.time.Duration; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.function.Function; import java.util.stream.Stream; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; /** * Configuration for the Cassandra vector store. * * All metadata columns configured to the store will be fetched and added to all queried * documents. * * To filter expression search against a metadata column configure it with * SchemaColumnTags.INDEXED * * The Cassandra Java Driver is configured via the application.conf resource found in the * classpath. 
See * https://github.com/apache/cassandra-java-driver/tree/4.x/manual/core/configuration * * @author Mick Semb Wever * @since 1.0.0 */ public class CassandraVectorStoreConfig implements AutoCloseable { public static final String DEFAULT_KEYSPACE_NAME = "springframework"; public static final String DEFAULT_TABLE_NAME = "ai_vector_store"; public static final String DEFAULT_ID_NAME = "id"; public static final String DEFAULT_INDEX_SUFFIX = "idx"; public static final String DEFAULT_CONTENT_COLUMN_NAME = "content"; public static final String DEFAULT_EMBEDDING_COLUMN_NAME = "embedding"; public static final int DEFAULT_ADD_CONCURRENCY = 16; private static final Logger logger = LoggerFactory.getLogger(CassandraVectorStoreConfig.class); record Schema(String keyspace, String table, List<SchemaColumn> partitionKeys, List<SchemaColumn> clusteringKeys, String content, String embedding, String index, Set<SchemaColumn> metadataColumns) { } public record SchemaColumn(String name, DataType type, SchemaColumnTags... tags) { public SchemaColumn(String name, DataType type) { this(name, type, new SchemaColumnTags[0]); } public GenericType<Object> javaType() { return CodecRegistry.DEFAULT.codecFor(type).getJavaType(); } public boolean indexed() { for (SchemaColumnTags t : tags) { if (SchemaColumnTags.INDEXED == t) { return true; } } return false; } } public enum SchemaColumnTags { INDEXED } /** * It is a requirement that an empty {@code List<Object>} returns an example formatted * id */ public interface DocumentIdTranslator extends Function<String, List<Object>> { } public interface PrimaryKeyTranslator extends Function<List<Object>, String> { } final CqlSession session; final Schema schema; final boolean disallowSchemaChanges; final boolean returnEmbeddings; final DocumentIdTranslator documentIdTranslator; final PrimaryKeyTranslator primaryKeyTranslator; final Executor executor; private final boolean closeSessionOnClose; private CassandraVectorStoreConfig(Builder builder) { this.session = null != builder.session ? builder.session : builder.sessionBuilder.build(); this.closeSessionOnClose = null == builder.session; this.schema = new Schema(builder.keyspace, builder.table, builder.partitionKeys, builder.clusteringKeys, builder.contentColumnName, builder.embeddingColumnName, builder.indexName, builder.metadataColumns); this.disallowSchemaChanges = builder.disallowSchemaCreation; this.returnEmbeddings = builder.returnEmbeddings; this.documentIdTranslator = builder.documentIdTranslator; this.primaryKeyTranslator = builder.primaryKeyTranslator; this.executor = Executors.newFixedThreadPool(builder.fixedThreadPoolExecutorSize); } public static Builder builder() { return new Builder(); } @Override public void close() throws Exception { if (this.closeSessionOnClose) { this.session.close(); } } SchemaColumn getPrimaryKeyColumn(int index) { return index < this.schema.partitionKeys().size() ? 
this.schema.partitionKeys().get(index) : this.schema.clusteringKeys().get(index - this.schema.partitionKeys().size()); } @VisibleForTesting void dropKeyspace() { Preconditions.checkState(this.schema.keyspace.startsWith("test_"), "Only test keyspaces can be dropped"); this.session.execute(SchemaBuilder.dropKeyspace(this.schema.keyspace).ifExists().build()); } public static class Builder { private CqlSession session = null; private CqlSessionBuilder sessionBuilder = null; private String keyspace = DEFAULT_KEYSPACE_NAME; private String table = DEFAULT_TABLE_NAME; private List<SchemaColumn> partitionKeys = List.of(new SchemaColumn(DEFAULT_ID_NAME, DataTypes.TEXT)); private List<SchemaColumn> clusteringKeys = List.of(); private String indexName = null; private String contentColumnName = DEFAULT_CONTENT_COLUMN_NAME; private String embeddingColumnName = DEFAULT_EMBEDDING_COLUMN_NAME; private Set<SchemaColumn> metadataColumns = new HashSet<>(); private boolean disallowSchemaCreation = false; private boolean returnEmbeddings = false; private int fixedThreadPoolExecutorSize = DEFAULT_ADD_CONCURRENCY; private DocumentIdTranslator documentIdTranslator = (String id) -> List.of(id); private PrimaryKeyTranslator primaryKeyTranslator = (List<Object> primaryKeyColumns) -> { if (primaryKeyColumns.isEmpty()) { return "test"; } Preconditions.checkArgument(1 == primaryKeyColumns.size()); return (String) primaryKeyColumns.get(0); }; private Builder() { } public Builder withCqlSession(CqlSession session) { Preconditions.checkState(null == this.sessionBuilder, "Cannot call withContactPoint(..) or withLocalDatacenter(..) and this method"); this.session = session; return this; } public Builder addContactPoint(InetSocketAddress contactPoint) { Preconditions.checkState(null == this.session, "Cannot call withCqlSession(..) and this method"); if (null == this.sessionBuilder) { this.sessionBuilder = new CqlSessionBuilder(); } this.sessionBuilder.addContactPoint(contactPoint); return this; } public Builder withLocalDatacenter(String localDC) { Preconditions.checkState(null == this.session, "Cannot call withCqlSession(..) and this method"); if (null == this.sessionBuilder) { this.sessionBuilder = new CqlSessionBuilder(); } this.sessionBuilder.withLocalDatacenter(localDC); return this; } public Builder withKeyspaceName(String keyspace) { this.keyspace = keyspace; return this; } public Builder withTableName(String table) { this.table = table; return this; } public Builder withPartitionKeys(List<SchemaColumn> partitionKeys) { this.partitionKeys = partitionKeys; return this; } public Builder withClusteringKeys(List<SchemaColumn> clusteringKeys) { this.clusteringKeys = clusteringKeys; return this; } /** * defaults (if null) to '&lt;table_name&gt;_&lt;embedding_column_name&gt;_idx' **/ @Nullable public Builder withIndexName(String name) { this.indexName = name; return this; } public Builder withContentColumnName(String name) { this.contentColumnName = name; return this; } public Builder withEmbeddingColumnName(String name) { this.embeddingColumnName = name; return this; } public Builder addMetadataColumns(SchemaColumn... 
columns) { Builder builder = this; for (SchemaColumn f : columns) { builder = builder.addMetadataColumn(f); } return builder; } public Builder addMetadataColumns(List<SchemaColumn> columns) { Builder builder = this; this.metadataColumns.addAll(columns); return builder; } public Builder addMetadataColumn(SchemaColumn column) { Preconditions.checkArgument( this.metadataColumns.stream().noneMatch((sc) -> sc.name().equals(column.name())), "A metadata column with name %s has already been added", column.name()); this.metadataColumns.add(column); return this; } public Builder disallowSchemaChanges() { this.disallowSchemaCreation = true; return this; } public Builder returnEmbeddings() { this.returnEmbeddings = true; return this; } /** * Executor to use when adding documents. The hotspot is the call to the * embeddingClient. For remote transformers you probably want a higher value to * utilize network. For local transformers you probably want a lower value to * avoid saturation. **/ public Builder withFixedThreadPoolExecutorSize(int threads) { Preconditions.checkArgument(0 < threads); this.fixedThreadPoolExecutorSize = threads; return this; } public Builder withDocumentIdTranslator(DocumentIdTranslator documentIdTranslator) { this.documentIdTranslator = documentIdTranslator; return this; } public Builder withPrimaryKeyTranslator(PrimaryKeyTranslator primaryKeyTranslator) { this.primaryKeyTranslator = primaryKeyTranslator; return this; } public CassandraVectorStoreConfig build() { if (null == this.indexName) { this.indexName = String.format("%s_%s_%s", this.table, this.embeddingColumnName, DEFAULT_INDEX_SUFFIX); } for (SchemaColumn metadata : this.metadataColumns) { Preconditions.checkArgument( !this.partitionKeys.stream().anyMatch((c) -> c.name().equals(metadata.name())), "metadataColumn %s cannot have same name as a partition key", metadata.name()); Preconditions.checkArgument( !this.clusteringKeys.stream().anyMatch((c) -> c.name().equals(metadata.name())), "metadataColumn %s cannot have same name as a clustering key", metadata.name()); Preconditions.checkArgument(!metadata.name().equals(this.contentColumnName), "metadataColumn %s cannot have same name as content column name", this.contentColumnName); Preconditions.checkArgument(!metadata.name().equals(this.embeddingColumnName), "metadataColumn %s cannot have same name as embedding column name", this.embeddingColumnName); } { int primaryKeyColumnsCount = this.partitionKeys.size() + this.clusteringKeys.size(); String exampleId = this.primaryKeyTranslator.apply(Collections.emptyList()); List<Object> testIdTranslation = this.documentIdTranslator.apply(exampleId); Preconditions.checkArgument(testIdTranslation.size() == primaryKeyColumnsCount, "documentIdTranslator results length %s doesn't match number of primary key columns %s", String.valueOf(testIdTranslation.size()), String.valueOf(primaryKeyColumnsCount)); Preconditions.checkArgument( exampleId.equals(this.primaryKeyTranslator.apply(this.documentIdTranslator.apply(exampleId))), "primaryKeyTranslator is not an inverse function to documentIdTranslator"); } return new CassandraVectorStoreConfig(this); } } void ensureSchemaExists(int vectorDimension) { if (!this.disallowSchemaChanges) { ensureKeyspaceExists(); ensureTableExists(vectorDimension); ensureTableColumnsExist(vectorDimension); ensureIndexesExists(); checkSchemaAgreement(); } else { checkSchemaValid(vectorDimension); } } private void checkSchemaAgreement() throws IllegalStateException { if (!this.session.checkSchemaAgreement()) { 
logger.warn("Waiting for cluster schema agreement, sleeping 10s…"); try { Thread.sleep(Duration.ofSeconds(10).toMillis()); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); throw new IllegalStateException(ex); } if (!this.session.checkSchemaAgreement()) { logger.error("no cluster schema agreement still, continuing, let's hope this works…"); } } } void checkSchemaValid(int vectorDimension) { Preconditions.checkState(this.session.getMetadata().getKeyspace(this.schema.keyspace).isPresent(), "keyspace %s does not exist", this.schema.keyspace); Preconditions.checkState(this.session.getMetadata() .getKeyspace(this.schema.keyspace) .get() .getTable(this.schema.table) .isPresent(), "table %s does not exist"); TableMetadata tableMetadata = this.session.getMetadata() .getKeyspace(this.schema.keyspace) .get() .getTable(this.schema.table) .get(); Preconditions.checkState(tableMetadata.getColumn(this.schema.content).isPresent(), "column %s does not exist", this.schema.content); Preconditions.checkState(tableMetadata.getColumn(this.schema.embedding).isPresent(), "column %s does not exist", this.schema.embedding); for (SchemaColumn m : this.schema.metadataColumns) { Optional<ColumnMetadata> column = tableMetadata.getColumn(m.name()); Preconditions.checkState(column.isPresent(), "column %s does not exist", m.name()); Preconditions.checkArgument(column.get().getType().equals(m.type()), "Mismatching type on metadata column %s of %s vs %s", m.name(), column.get().getType(), m.type()); if (m.indexed()) { Preconditions.checkState( tableMetadata.getIndexes().values().stream().anyMatch((i) -> i.getTarget().equals(m.name())), "index %s does not exist", m.name()); } } } private void ensureIndexesExists() { { SimpleStatement indexStmt = SchemaBuilder.createIndex(this.schema.index) .ifNotExists() .custom("StorageAttachedIndex") .onTable(this.schema.keyspace, this.schema.table) .andColumn(this.schema.embedding) .build(); logger.debug("Executing {}", indexStmt.getQuery()); this.session.execute(indexStmt); } Stream .concat(this.schema.partitionKeys.stream(), Stream.concat(this.schema.clusteringKeys.stream(), this.schema.metadataColumns.stream())) .filter((cs) -> cs.indexed()) .forEach((metadata) -> { SimpleStatement indexStmt = SchemaBuilder.createIndex(String.format("%s_idx", metadata.name())) .ifNotExists() .custom("StorageAttachedIndex") .onTable(this.schema.keyspace, this.schema.table) .andColumn(metadata.name()) .build(); logger.debug("Executing {}", indexStmt.getQuery()); this.session.execute(indexStmt); }); } private void ensureTableExists(int vectorDimension) { if (this.session.getMetadata().getKeyspace(this.schema.keyspace).get().getTable(this.schema.table).isEmpty()) { CreateTable createTable = null; CreateTableStart createTableStart = SchemaBuilder.createTable(this.schema.keyspace, this.schema.table) .ifNotExists(); for (SchemaColumn partitionKey : this.schema.partitionKeys) { createTable = (null != createTable ? 
createTable : createTableStart).withPartitionKey(partitionKey.name, partitionKey.type); } for (SchemaColumn clusteringKey : this.schema.clusteringKeys) { createTable = createTable.withClusteringColumn(clusteringKey.name, clusteringKey.type); } createTable = createTable.withColumn(this.schema.content, DataTypes.TEXT); for (SchemaColumn metadata : this.schema.metadataColumns) { createTable = createTable.withColumn(metadata.name(), metadata.type()); } // https://datastax-oss.atlassian.net/browse/JAVA-3118 // .withColumn(config.embedding, new DefaultVectorType(DataTypes.FLOAT, // vectorDimension)); StringBuilder tableStmt = new StringBuilder(createTable.asCql()); tableStmt.setLength(tableStmt.length() - 1); tableStmt.append(',') .append(this.schema.embedding) .append(" vector<float,") .append(vectorDimension) .append(">)"); logger.debug("Executing {}", tableStmt.toString()); this.session.execute(tableStmt.toString()); } } private void ensureTableColumnsExist(int vectorDimension) { TableMetadata tableMetadata = this.session.getMetadata() .getKeyspace(this.schema.keyspace) .get() .getTable(this.schema.table) .get(); Set<SchemaColumn> newColumns = new HashSet<>(); boolean addContent = tableMetadata.getColumn(this.schema.content).isEmpty(); boolean addEmbedding = tableMetadata.getColumn(this.schema.embedding).isEmpty(); for (SchemaColumn metadata : this.schema.metadataColumns) { Optional<ColumnMetadata> column = tableMetadata.getColumn(metadata.name()); if (column.isPresent()) { Preconditions.checkArgument(column.get().getType().equals(metadata.type()), "Cannot change type on metadata column %s from %s to %s", metadata.name(), column.get().getType(), metadata.type()); } else { newColumns.add(metadata); } } if (!newColumns.isEmpty() || addContent || addEmbedding) { AlterTableAddColumn alterTable = SchemaBuilder.alterTable(this.schema.keyspace, this.schema.table); for (SchemaColumn metadata : newColumns) { alterTable = alterTable.addColumn(metadata.name(), metadata.type()); } if (addContent) { alterTable = alterTable.addColumn(this.schema.content, DataTypes.TEXT); } if (addEmbedding) { // special case for embedding column, bc JAVA-3118, as above StringBuilder alterTableStmt = new StringBuilder(((BuildableQuery) alterTable).asCql()); if (newColumns.isEmpty() && !addContent) { alterTableStmt.append(" ADD ("); } else { alterTableStmt.setLength(alterTableStmt.length() - 1); alterTableStmt.append(','); } alterTableStmt.append(this.schema.embedding) .append(" vector<float,") .append(vectorDimension) .append(">)"); logger.debug("Executing {}", alterTableStmt.toString()); this.session.execute(alterTableStmt.toString()); } else { SimpleStatement stmt = ((AlterTableAddColumnEnd) alterTable).build(); logger.debug("Executing {}", stmt.getQuery()); this.session.execute(stmt); } } } private void ensureKeyspaceExists() { if (this.session.getMetadata().getKeyspace(this.schema.keyspace).isEmpty()) { SimpleStatement keyspaceStmt = SchemaBuilder.createKeyspace(this.schema.keyspace) .ifNotExists() .withSimpleStrategy(1) .build(); logger.debug("Executing {}", keyspaceStmt.getQuery()); this.session.execute(keyspaceStmt); } } }
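// ---------------------------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file): how the Builder above is
// typically assembled. It assumes the public static builder() factory that pairs with the
// private Builder constructor; the keyspace and table names are hypothetical placeholders.
// ---------------------------------------------------------------------------------------------
final class CassandraVectorStoreConfigUsageSketch {

    static CassandraVectorStoreConfig sketch(CqlSession session) {
        return CassandraVectorStoreConfig.builder()
                .withCqlSession(session)           // reuse an application-managed session
                .withKeyspaceName("my_keyspace")   // hypothetical keyspace
                .withTableName("my_vectors")       // hypothetical table
                .disallowSchemaChanges()           // opt out of DDL when the schema is managed externally
                .build();
    }

}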
spring-projects/spring-ai
vector-stores/spring-ai-cassandra-store/src/main/java/org/springframework/ai/vectorstore/CassandraVectorStoreConfig.java
213,921
package scalabilityAnalysis; import utilities.Tokenizer; import utilities.Pair; import gnu.trove.iterator.TIntIterator; import gnu.trove.list.TIntList; import gnu.trove.list.array.TIntArrayList; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import joins.AbstractJoin; import org.scify.jedai.datamodel.EntityProfile; import org.scify.jedai.datamodel.IdDuplicates; import org.scify.jedai.datareader.entityreader.EntitySerializationReader; import org.scify.jedai.datareader.groundtruthreader.GtSerializationReader; import utilities.RepresentationModel; /** * * @author Georgios */ public class SchemaAgnosticEJoin extends AbstractJoin { public static void main(String[] args) { String mainDir = "/home/data/syntheticData/"; String[] datasets = {"10K", "50K", "100K", "200K", "300K", "1M", "2M"}; for (int datasetId = 0; datasetId < datasets.length; datasetId++) { System.out.println("\n\nCurrent dataset\t:\t" + datasets[datasetId]); // read source entities String sourcePath = mainDir + datasets[datasetId] + "profiles"; EntitySerializationReader reader = new EntitySerializationReader(sourcePath); List<EntityProfile> sourceEntities = reader.getEntityProfiles(); System.out.println("Source Entities: " + sourceEntities.size()); // read ground-truth file String groundTruthPath = mainDir + datasets[datasetId] + "IdDuplicates"; GtSerializationReader gtReader = new GtSerializationReader(groundTruthPath); Set<IdDuplicates> gtDuplicates = gtReader.getDuplicatePairs(sourceEntities); System.out.println("GT Duplicate Pairs: " + gtDuplicates.size()); System.out.println(); float threshold = 0.44f; Tokenizer tokenizer = Tokenizer.CHARACTER_BIGRAMS; // SimilarityFunction simFunction = SimilarityFunction.JACCARD_SIM; for (int iterations = 0; iterations < ITERATIONS; iterations++) { long time1 = System.currentTimeMillis(); // first run int noOfEntities = sourceEntities.size(); SOURCE_FREQUENCY = new int[noOfEntities]; final Map<String, TIntList> index = new HashMap<>(); int[] counters = new int[noOfEntities]; int[] flags = new int[noOfEntities]; for (int i = 0; i < noOfEntities; i++) { flags[i] = -1; } final List<Pair> sims = new ArrayList<>(noOfEntities * 1000); for (int id = 0; id < noOfEntities; id++) { final String query = RepresentationModel.getAttributeValue(sourceEntities.get(id)); final Set<String> tokens = RepresentationModel.tokenizeEntity(query, tokenizer); final TIntSet candidates = new TIntHashSet(); for (String token : tokens) { final TIntList sourceEnts = index.get(token); if (sourceEnts == null) { continue; } for (TIntIterator tIterator = sourceEnts.iterator(); tIterator.hasNext();) { int sourceId = tIterator.next(); candidates.add(sourceId); if (flags[sourceId] != id) { counters[sourceId] = 0; flags[sourceId] = id; } counters[sourceId]++; } } for (String token : tokens) { TIntList ids = index.get(token); if (ids == null) { ids = new TIntArrayList(); index.put(token, ids); } ids.add(id); } SOURCE_FREQUENCY[id] = tokens.size(); if (candidates.isEmpty()) { continue; } for (TIntIterator tIterator = candidates.iterator(); tIterator.hasNext();) { int sourceId = tIterator.next(); float commonTokens = counters[sourceId]; float sim = commonTokens / (SOURCE_FREQUENCY[sourceId] + tokens.size() - commonTokens); if (threshold <= sim) { sims.add(new Pair(id, sourceId)); } } } long time2 = System.currentTimeMillis(); System.out.println("Run-time\t:\t" + (time2 - time1)); double 
duplicates = 0; for (Pair jp : sims) { if (gtDuplicates.contains(new IdDuplicates(jp.getEntityId1(), jp.getEntityId2())) || gtDuplicates.contains(new IdDuplicates(jp.getEntityId2(), jp.getEntityId1()))) { duplicates++; } } double recall_ = duplicates / gtDuplicates.size(); double precision_ = duplicates / sims.size(); double f1_ = 2 * ((precision_ * recall_) / (precision_ + recall_)); System.out.println("Recall\t:\t" + recall_); System.out.println("Precision\t:\t" + precision_); System.out.println("F-Measure\t:\t" + f1_); System.out.println("Candidates\t:\t" + sims.size()); } } } }
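// Editor's illustrative sketch (not part of the original file): the similarity computed above is
// the Jaccard coefficient over token sets, sim = |A ∩ B| / (|A| + |B| - |A ∩ B|), maintained
// incrementally through the inverted index. A self-contained check with two tiny bigram sets:
class JaccardSketch {

    public static void main(String[] args) {
        // character bigrams of "hello" and "help" (hypothetical example values)
        java.util.Set<String> a = new java.util.HashSet<>(java.util.Arrays.asList("he", "el", "ll", "lo"));
        java.util.Set<String> b = new java.util.HashSet<>(java.util.Arrays.asList("he", "el", "lp"));
        java.util.Set<String> common = new java.util.HashSet<>(a);
        common.retainAll(b); // |A ∩ B| = 2
        float sim = (float) common.size() / (a.size() + b.size() - common.size());
        System.out.println(sim); // 2 / (4 + 3 - 2) = 0.4, below the 0.44 threshold used above
    }

}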
gpapadis/ContinuousFilteringBenchmark
joins/src/scalabilityAnalysis/SchemaAgnosticEJoin.java
213,922
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.metadata; import com.datastax.oss.driver.api.core.AsyncAutoCloseable; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import java.net.InetSocketAddress; import java.util.Optional; import java.util.concurrent.CompletionStage; /** * Monitors the state of the Cassandra cluster. * * <p>It can either push {@link TopologyEvent topology events} to the rest of the driver (to do * that, retrieve the {@link EventBus} from the {@link InternalDriverContext}), or receive requests * to refresh data about the nodes. * * <p>The default implementation uses the control connection: {@code TOPOLOGY_CHANGE} and {@code * STATUS_CHANGE} events on the connection are converted into {@code TopologyEvent}s, and node * refreshes are done with queries to system tables. If you prefer to rely on an external monitoring * tool, this can be completely overridden. */ public interface TopologyMonitor extends AsyncAutoCloseable { /** * Triggers the initialization of the monitor. * * <p>The completion of the future returned by this method marks the point when the driver * considers itself "connected" to the cluster, and proceeds with the rest of the initialization: * refreshing the list of nodes and the metadata, opening connection pools, etc. By then, the * topology monitor should be ready to accept calls to its other methods; in particular, {@link * #refreshNodeList()} will be called shortly after the completion of the future, to load the * initial list of nodes to connect to. * * <p>If {@code advanced.reconnect-on-init = true} in the configuration, this method is * responsible for handling reconnection. That is, if the initial attempt to "connect" to the * cluster fails, it must schedule reattempts, and only complete the returned future when * connection eventually succeeds. If the user cancels the returned future, then the reconnection * attempts should stop. * * <p>If this method is called multiple times, it should trigger initialization only once, and * return the same future on subsequent invocations. */ CompletionStage<Void> init(); /** * The future returned by {@link #init()}. * * <p>Note that this method may be called before {@link #init()}; at that stage, the future should * already exist, but be incomplete. */ CompletionStage<Void> initFuture(); /** * Invoked when the driver needs to refresh the information about an existing node. This is called * when a node that was down comes back up. 
* * <p>This will be invoked directly from a driver's internal thread; if the refresh involves * blocking I/O or heavy computations, it should be scheduled on a separate thread. * * @param node the node to refresh. * @return a future that completes with the information. If the monitor can't fulfill the request * at this time, it should reply with {@link Optional#empty()}, and the driver will carry on * with its current information. */ CompletionStage<Optional<NodeInfo>> refreshNode(Node node); /** * Invoked when the driver needs to get information about a newly discovered node. * * <p>This will be invoked directly from a driver's internal thread; if the refresh involves * blocking I/O or heavy computations, it should be scheduled on a separate thread. * * @param broadcastRpcAddress the node's broadcast RPC address. * @return a future that completes with the information. If the monitor doesn't know any node with * this address, it should reply with {@link Optional#empty()}; the new node will be ignored. * @see Node#getBroadcastRpcAddress() */ CompletionStage<Optional<NodeInfo>> getNewNodeInfo(InetSocketAddress broadcastRpcAddress); /** * Invoked when the driver needs to refresh information about all the nodes. * * <p>This will be invoked directly from a driver's internal thread; if the refresh involves * blocking I/O or heavy computations, it should be scheduled on a separate thread. * * <p>The driver calls this at initialization, and uses the result to initialize the {@link * LoadBalancingPolicy}; successful initialization of the {@link Session} object depends on that * initial call succeeding. * * @return a future that completes with the information. We assume that the full node list will * always be returned in a single message (no paging). */ CompletionStage<Iterable<NodeInfo>> refreshNodeList(); /** * Checks whether the nodes in the cluster agree on a common schema version. * * <p>This should typically be implemented with a few retries and a timeout, as the schema can * take a while to replicate across nodes. */ CompletionStage<Boolean> checkSchemaAgreement(); }
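// Editor's illustrative sketch (not part of the original file): the retry-with-timeout shape
// that checkSchemaAgreement() implementations typically follow. queryNodeSchemaVersions() is a
// hypothetical stand-in for the real system-table queries; it should return the distinct schema
// versions reported by the reachable nodes.
abstract class SchemaAgreementRetrySketch {

    abstract java.util.Set<java.util.UUID> queryNodeSchemaVersions(); // hypothetical helper

    boolean checkSchemaAgreementBlocking(java.time.Duration timeout, java.time.Duration interval)
            throws InterruptedException {
        long deadlineNanos = System.nanoTime() + timeout.toNanos();
        while (System.nanoTime() < deadlineNanos) {
            // agreement means every reachable node reports the same (single) schema version
            if (queryNodeSchemaVersions().size() <= 1) {
                return true;
            }
            Thread.sleep(interval.toMillis()); // schema can take a while to replicate
        }
        return false;
    }

}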
apache/cassandra-java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java
213,923
package sdp.vision.gui; import javax.swing.BoxLayout; import javax.swing.JCheckBox; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JSlider; import javax.swing.event.ChangeEvent; import javax.swing.event.ChangeListener; import sdp.vision.VideoStream; import java.awt.event.ActionListener; import java.awt.event.ActionEvent; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.Scanner; /** * A GUI panel for adjusting video device settings * * @author Alex Adams (s1046358) */ @SuppressWarnings("serial") class CameraSettingsPanel extends JPanel { // Values loaded when the settings file is missing private final static int DEFAULT_BRIGHTNESS = 128; private final static int DEFAULT_CONTRAST = 64; private final static int DEFAULT_SATURATION = 64; private final static int DEFAULT_HUE = 0; private final static int DEFAULT_CHROMA_GAIN = 0; private final static boolean DEFAULT_CHROMA_AGC = true; private final VideoStream vStream; private final int brightnessMin = 0; private final int brightnessMax = 255; private final JPanel brightnessPanel = new JPanel(); private final JLabel brightnessLabel = new JLabel("Brightness:"); private final JSlider brightnessSlider = new JSlider(this.brightnessMin, this.brightnessMax + 1); /** * A ChangeListener to update the video stream's brightness setting when the * brightness slider is adjusted */ private class BrightnessChangeListener implements ChangeListener { @Override public void stateChanged(ChangeEvent e) { CameraSettingsPanel.this.vStream.setBrightness(Math.min(CameraSettingsPanel.this.brightnessMax, CameraSettingsPanel.this.brightnessSlider.getValue())); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } private final int contrastMin = 0; private final int contrastMax = 127; private final JPanel contrastPanel = new JPanel(); private final JLabel contrastLabel = new JLabel("Contrast:"); private JSlider contrastSlider = new JSlider(this.contrastMin, this.contrastMax + 1); /** * A ChangeListener to update the video stream's contrast setting when the * contrast slider is adjusted */ private class ContrastChangeListener implements ChangeListener { @Override public void stateChanged(ChangeEvent e) { CameraSettingsPanel.this.vStream.setContrast(Math.min(CameraSettingsPanel.this.contrastMax, CameraSettingsPanel.this.contrastSlider.getValue())); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } private final int saturationMin = 0; private final int saturationMax = 127; private final JPanel saturationPanel = new JPanel(); private final JLabel saturationLabel = new JLabel("Saturation:"); private JSlider saturationSlider = new JSlider(this.saturationMin, this.saturationMax + 1); /** * A ChangeListener to update the video stream's saturation setting when the * saturation slider is adjusted */ private class SaturationChangeListener implements ChangeListener { @Override public void stateChanged(ChangeEvent e) { CameraSettingsPanel.this.vStream.setSaturation(Math.min(CameraSettingsPanel.this.saturationMax, CameraSettingsPanel.this.saturationSlider.getValue())); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } private final int hueMin = -128; private final int hueMax = 127; private final JPanel huePanel = new JPanel(); private final JLabel hueLabel = new JLabel("Hue:"); private JSlider hueSlider = new JSlider(this.hueMin, this.hueMax + 1); /** * A ChangeListener to update the video stream's hue setting when the hue * slider is adjusted */ private class 
HueChangeListener implements ChangeListener { @Override public void stateChanged(ChangeEvent e) { CameraSettingsPanel.this.vStream.setHue(Math.min(CameraSettingsPanel.this.hueMax, CameraSettingsPanel.this.hueSlider.getValue())); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } private final int chromaGainMin = 0; private final int chromaGainMax = 127; private final JPanel chromaGainPanel = new JPanel(); private final JLabel chromaGainLabel = new JLabel("Chroma Gain:"); private final JSlider chromaGainSlider = new JSlider(this.chromaGainMin, this.chromaGainMax + 1); /** * A ChangeListener to update the video stream's chroma gain setting when * the chroma gain slider is adjusted */ private class ChromaGainChangeListener implements ChangeListener { @Override public void stateChanged(ChangeEvent e) { CameraSettingsPanel.this.vStream.setChromaGain(Math.min(CameraSettingsPanel.this.chromaGainMax, CameraSettingsPanel.this.chromaGainSlider.getValue())); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } private final JPanel chromaAGCPanel = new JPanel(); private final JCheckBox chromaAGCCheckBox = new JCheckBox("Chroma AGC"); /** * An ActionListener to update the video stream's chroma AGC setting when * the chroma AGC checkbox is activated either by mouse or keyboard */ private class ChromaAGCActionListener implements ActionListener { @Override public void actionPerformed(ActionEvent e) { if (CameraSettingsPanel.this.chromaAGCCheckBox.isSelected()) CameraSettingsPanel.this.vStream.setChromaAGC(true); else CameraSettingsPanel.this.vStream.setChromaAGC(false); CameraSettingsPanel.this.vStream.updateVideoDeviceSettings(); } } public CameraSettingsPanel(final VideoStream vStream, String settingsFile) { super(); this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS)); this.vStream = vStream; initialiseSlider(this.brightnessSlider, 16, 64); this.brightnessSlider.addChangeListener(new BrightnessChangeListener()); this.brightnessPanel.add(this.brightnessLabel); this.brightnessPanel.add(this.brightnessSlider); this.add(this.brightnessPanel); initialiseSlider(this.contrastSlider, 8, 32); this.contrastSlider.addChangeListener(new ContrastChangeListener()); this.contrastPanel.add(this.contrastLabel); this.contrastPanel.add(this.contrastSlider); this.add(this.contrastPanel); initialiseSlider(this.saturationSlider, 8, 32); this.saturationSlider.addChangeListener(new SaturationChangeListener()); this.saturationPanel.add(this.saturationLabel); this.saturationPanel.add(this.saturationSlider); this.add(this.saturationPanel); initialiseSlider(this.hueSlider, 16, 64); this.hueSlider.addChangeListener(new HueChangeListener()); this.huePanel.add(this.hueLabel); this.huePanel.add(this.hueSlider); this.add(this.huePanel); initialiseSlider(this.chromaGainSlider, 8, 32); this.chromaGainSlider.addChangeListener(new ChromaGainChangeListener()); this.chromaGainPanel.add(this.chromaGainLabel); this.chromaGainPanel.add(this.chromaGainSlider); this.add(this.chromaGainPanel); this.chromaAGCCheckBox.addActionListener(new ChromaAGCActionListener()); this.chromaAGCPanel.add(this.chromaAGCCheckBox); this.add(this.chromaAGCPanel); loadSettings(settingsFile); } /** * Sets up initial settings for one of the sliders * * @param slider * The slider to set up * @param minorTick * The value difference between the smaller ticks on the slider * @param majorTick * The value difference between the larger ticks on the slider */ private static void initialiseSlider(JSlider slider, int minorTick, int majorTick) { 
slider.setOrientation(JSlider.HORIZONTAL); slider.setMinorTickSpacing(minorTick); slider.setMajorTickSpacing(majorTick); slider.setPaintTicks(true); slider.setPaintLabels(true); } /** * Saves the video device settings to a file in the specified location * * @param fileName * where to save the file */ public void saveSettings(String fileName) { try { FileWriter file = new FileWriter(new File(fileName)); file.write(String.valueOf(this.vStream.getBrightness()) + "\n"); file.write(String.valueOf(this.vStream.getContrast()) + "\n"); file.write(String.valueOf(this.vStream.getSaturation()) + "\n"); file.write(String.valueOf(this.vStream.getHue()) + "\n"); file.write(String.valueOf(this.vStream.getChromaGain()) + "\n"); file.write(String.valueOf(this.vStream.getChromaAGC()) + "\n"); file.close(); } catch (IOException e) { System.err .println("Error writing camera settings file " + fileName); System.err.println(e.getMessage()); e.printStackTrace(); } } /** * Loads video device settings from the specified file and updates the GUI * and VideoStream. It is assumed the file is well formed * * @param fileName */ public void loadSettings(String fileName) { Scanner reader; try { reader = new Scanner(new File(fileName)); assert (reader != null); int data = 0; data = reader.nextInt(); this.brightnessSlider.setValue(data); this.vStream.setBrightness(data); data = reader.nextInt(); this.contrastSlider.setValue(data); this.vStream.setContrast(data); data = reader.nextInt(); this.saturationSlider.setValue(data); this.vStream.setSaturation(data); data = reader.nextInt(); this.hueSlider.setValue(data); this.vStream.setHue(data); data = reader.nextInt(); this.chromaGainSlider.setValue(data); this.vStream.setChromaGain(data); boolean chromaAGC = reader.nextBoolean(); this.chromaAGCCheckBox.setSelected(chromaAGC); this.vStream.setChromaAGC(chromaAGC); reader.close(); } catch (Exception e) { System.err.println("Cannot load camera settings file " + fileName); System.err.println(e.getMessage()); loadDefaultSettings(); return; } this.vStream.updateVideoDeviceSettings(); } /** * Loads default video device settings in the event loadSettings fails */ private void loadDefaultSettings() { this.brightnessSlider.setValue(DEFAULT_BRIGHTNESS); this.vStream.setBrightness(DEFAULT_BRIGHTNESS); this.contrastSlider.setValue(DEFAULT_CONTRAST); this.vStream.setContrast(DEFAULT_CONTRAST); this.saturationSlider.setValue(DEFAULT_SATURATION); this.vStream.setSaturation(DEFAULT_SATURATION); this.hueSlider.setValue(DEFAULT_HUE); this.vStream.setHue(DEFAULT_HUE); this.chromaGainSlider.setValue(DEFAULT_CHROMA_GAIN); this.vStream.setChromaGain(DEFAULT_CHROMA_GAIN); this.chromaAGCCheckBox.setSelected(DEFAULT_CHROMA_AGC); this.vStream.setChromaAGC(DEFAULT_CHROMA_AGC); this.vStream.updateVideoDeviceSettings(); } }
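// Editor's note (not part of the original file): saveSettings/loadSettings above use a plain
// six-line text format, one value per line, in this exact order. For the default values the
// file would read:
//
//   128     (brightness)
//   64      (contrast)
//   64      (saturation)
//   0       (hue)
//   0       (chroma gain)
//   true    (chroma AGC)
//
// Scanner.nextInt()/nextBoolean() skip surrounding whitespace, so spacing is not significant,
// but the value order must match the order written by saveSettings.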
usc-m/SDP-Team-F
PCCode/sdp/vision/gui/CameraSettingsPanel.java
213,924
package sample; import javafx.animation.*; import javafx.application.Platform; import javafx.beans.value.ChangeListener; import javafx.beans.value.ObservableValue; import javafx.collections.FXCollections; import javafx.collections.ObservableList; import javafx.embed.swing.SwingFXUtils; import javafx.event.EventHandler; import javafx.fxml.FXML; import javafx.fxml.Initializable; import javafx.scene.Node; import javafx.scene.control.Alert; import javafx.scene.control.DialogEvent; import javafx.scene.control.Label; import javafx.scene.layout.AnchorPane; import javafx.scene.paint.Color; import javafx.scene.paint.ImagePattern; import javafx.scene.shape.LineTo; import javafx.scene.shape.MoveTo; import javafx.scene.shape.Path; import javafx.util.Duration; import java.awt.image.BufferedImage; import java.io.File; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.ResourceBundle; class NewResizeChangeWidth implements ChangeListener<Number> { double ratio = 1; private AnchorPane panel; private ObservableList<Tile> tileList = FXCollections.observableArrayList(); public NewResizeChangeWidth(AnchorPane pane, ObservableList<Tile> tileList) { this.panel = pane; this.tileList = tileList; } @Override public void changed(ObservableValue<? extends Number> observableValue, Number number, Number t1) { if (number.intValue() != 0 && t1.doubleValue() / number.doubleValue() != ratio) { this.ratio = t1.doubleValue() / number.doubleValue(); System.out.println(number + " " + t1); panel.getChildren().removeAll(tileList); for (Tile tile : tileList) { BufferedImage a = ImageSpliter.update(tile.getPart(), ratio); tile.setPart(a); tile.setFill(new ImagePattern(SwingFXUtils.toFXImage(tile.getPart(), null))); } panel.getChildren().addAll(tileList); } } } public class Game implements Initializable { private String path = "Tapety"; private int countimaage = 0; private ArrayList<String> files = new ArrayList<String>(); private Tile first = null; private Tile second = null; private int wtiles = Options.getWtiles(); private int height = Options.getHeight(); private int width = Options.getWidth(); private int htiles = Options.getHtiles(); private boolean isAnimationFinished = true; private boolean won = false; private Time time; private Timeline timeline; private int timeLimit = Options.getTimelimit(); private int movess = 0; private boolean isStarted = false; private ObservableList<Tile> tileList = FXCollections.observableArrayList(); @FXML private AnchorPane panel; @FXML private Label moves; @FXML private Label mytime; @Override public void initialize(URL location, ResourceBundle resources) { listf(path); File imgFile = new File(randomImage()); files.clear(); ImageSpliter cutter = new ImageSpliter(imgFile, width, height, wtiles, htiles); tileList = cutter.spliter(); shuffle(); isStarted = true; panel.getChildren().addAll(tileList); for (Tile tile : tileList) { tile.setOnMouseClicked(event -> { if (isAnimationFinished && isStarted) { first = (Tile) event.getSource(); if (isNeightbour() != -1 && isAnimationFinished) { second = tileList.get(isNeightbour()); playAnimation(); } } }); tile.setOnMouseExited(event -> { if (tile != first) tile.setStrokeWidth(0); }); tile.setOnMouseMoved(event -> { if (tile != first && (first == null || first != null && isNeightbour() != -1)) { tile.setStrokeWidth(3); tile.setStroke(Color.ORANGERED); } }); // panel.heightProperty().addListener(new NewResizeChangeWidth(panel,tileList)); } time = new Time(); timeline = new Timeline(new KeyFrame( 
Duration.millis(100), event -> { if (time.getMinutes() == timeLimit) { timeline.stop(); isStarted = false; Platform.runLater(() -> { timeline.stop(); if (first != null) { first.setStrokeWidth(0); first = null; } second = null; Alert alert = new Alert(Alert.AlertType.INFORMATION); alert.setTitle("You lose!"); alert.setHeaderText("Time is up!"); alert.setContentText("Better luck next time."); alert.setOnCloseRequest(new EventHandler<DialogEvent>() { @Override public void handle(DialogEvent event) { panel.getParent().getScene().getWindow().hide(); } }); alert.showAndWait(); }); } time.updateTime(); mytime.setText("Time: " + time.getTimeString()); } )); timeline.setCycleCount(Animation.INDEFINITE); time.setZero(); timeline.play(); } public int isNeightbour() { int indexone = tileList.indexOf(first); int indextwo = indexone + wtiles; int indexthree = indexone - wtiles; int indexfour = indexone + 1; int indexfive = indexone - 1; int[] arr = {indextwo, indexthree, indexfour, indexfive}; for (int i = 0; i < arr.length; i++) { if (arr[i] >= 0 && arr[i] < tileList.size() && tileList.get(arr[i]).getNum() == -1) { if (arr[i] == indexfour && indexfour % wtiles == 0) { return -1; } else if (arr[i] == indexfive && indexfive % wtiles == wtiles - 1) { return -1; } else { return arr[i]; } } } return -1; } private void swapPuzzles() { int indexFirst = tileList.indexOf(first); int indexSecond = tileList.indexOf(second); double firstX = first.getLayoutX(); double firstY = first.getLayoutY(); first.setLayoutX(second.getLayoutX()); first.setLayoutY(second.getLayoutY()); second.setLayoutX(firstX); second.setLayoutY(firstY); Collections.swap(tileList, indexFirst, indexSecond); movess++; moves.setText("Moves: " + movess); first = null; second = null; if (checkWin()) { isStarted = false; Result a = new Result(movess, new Timee((int) time.getMinutes(), (int) time.getSeconds(), (int) time.getMillis())); int ish = Highscore.isHigh(a); if (ish != -1) { Highscore.saveScores(); Platform.runLater(() -> { timeline.stop(); Alert alert = new Alert(Alert.AlertType.INFORMATION); alert.setTitle("Winner"); alert.setHeaderText("New HIGHSCORE number " + (ish + 1)); alert.setContentText("Your time is: " + time.getTimeString() + "\nMoves: " + movess); alert.setOnCloseRequest(new EventHandler<DialogEvent>() { @Override public void handle(DialogEvent event) { panel.getParent().getScene().getWindow().hide(); } }); alert.showAndWait(); }); } else { Platform.runLater(() -> { timeline.stop(); Alert alert = new Alert(Alert.AlertType.INFORMATION); alert.setTitle("Winner"); alert.setHeaderText("You win!"); alert.setContentText("Your time is: " + time.getTimeString() + "\nMoves: " + movess); alert.setOnCloseRequest(new EventHandler<DialogEvent>() { @Override public void handle(DialogEvent event) { panel.getParent().getScene().getWindow().hide(); } }); alert.showAndWait(); }); } } } public void shuffle() { Collections.shuffle(tileList); for (int i = 0; i < htiles; i++) for (int j = 0; j < wtiles; j++) { Tile tile = tileList.get(i * wtiles + j); tile.setLayoutX(j * tile.getWidth() + j * 5); tile.setLayoutY(i * tile.getHeight() + i * 5); } } public void listf(String directoryName) { File directory = new File(directoryName); File[] fList = directory.listFiles(); if (fList != null) for (File file : fList) { if (file.isFile()) { countimaage++; files.add(file.getAbsolutePath()); } else if (file.isDirectory()) { listf(file.getAbsolutePath()); } } } public String randomImage() { String randompath = files.get((int) (Math.random() * countimaage)); return 
randompath; } private void playAnimation() { isAnimationFinished = false; first.toFront(); second.toFront(); first.setStrokeWidth(5); first.setStroke(Color.GOLD); second.setStrokeWidth(5); second.setStroke(Color.GOLD); PathTransition ptr = getPathTransition(first, second); PathTransition ptr2 = getPathTransition(second, first); ParallelTransition pt = new ParallelTransition(ptr, ptr2); pt.setOnFinished(event -> { first.setStrokeWidth(0); second.setStrokeWidth(0); first.setTranslateX(0); first.setTranslateY(0); second.setTranslateX(0); second.setTranslateY(0); swapPuzzles(); isAnimationFinished = true; }); pt.play(); } private boolean checkWin() { for (Tile tile : tileList) { // the blank tile has num == -1; every numbered tile must sit at its own index if (tile.getNum() != -1 && tile.getNum() != tileList.indexOf(tile)) { return false; } } return true; } private PathTransition getPathTransition(Tile first, Tile second) { PathTransition ptr = new PathTransition(); Path path = new Path(); path.getElements().clear(); path.getElements().add(new MoveToAbs(first)); path.getElements().add(new LineToAbs(first, second.getLayoutX(), second.getLayoutY())); ptr.setPath(path); ptr.setNode(first); return ptr; } public static class MoveToAbs extends MoveTo { MoveToAbs(Node node) { super(node.getLayoutBounds().getWidth() / 2, node.getLayoutBounds().getHeight() / 2); } } public static class LineToAbs extends LineTo { LineToAbs(Node node, double x, double y) { super(x - node.getLayoutX() + node.getLayoutBounds().getWidth() / 2, y - node.getLayoutY() + node.getLayoutBounds().getHeight() / 2); } } }
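// Editor's note (not part of the original file): isNeightbour() treats the 1D tileList as a
// grid that is wtiles columns wide. For wtiles = 4, a click at index 5 yields the candidates
// 5 + 4 = 9 (below), 5 - 4 = 1 (above), 5 + 1 = 6 (right) and 5 - 1 = 4 (left). The modulo
// guards reject horizontal wrap-arounds: for index 7 the "right" candidate is 8, but
// 8 % 4 == 0 marks it as the first column of the next row, so it is discarded; symmetrically,
// for index 4 the "left" candidate is 3, and 3 % 4 == wtiles - 1 marks it as the last column
// of the previous row.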
Zacer559/PJATK
GUI/GUI-Project-2-PUZZLE-GAME/src/sample/Game.java
213,926
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.DataInputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.util.concurrent.RateLimiter; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.JMXEnabledScheduledThreadPoolExecutor; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.config.ColumnDefinition; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.rows.*; import org.apache.cassandra.db.partitions.*; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.db.marshal.UUIDType; import org.apache.cassandra.dht.Token; import org.apache.cassandra.exceptions.WriteTimeoutException; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.NIODataInputStream; import org.apache.cassandra.metrics.HintedHandoffMetrics; import org.apache.cassandra.net.MessageOut; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.*; import org.apache.cassandra.utils.*; import org.apache.cassandra.utils.concurrent.OpOrder; import org.cliffc.high_scale_lib.NonBlockingHashSet; /** * The hint schema looks like this: * * CREATE TABLE hints ( * target_id uuid, * hint_id timeuuid, * message_version int, * mutation blob, * PRIMARY KEY (target_id, hint_id, message_version) * ) WITH COMPACT STORAGE; * * Thus, for each node in the cluster we treat its uuid as the partition key; each hint is a logical row * (physical composite column) containing the mutation to replay and associated metadata. * * When FailureDetector signals that a node that was down is back up, we page through * the hinted mutations and send them over one at a time, waiting for * hinted_handoff_throttle_delay in between each. * * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. 
*/ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=HintedHandoffManager"; public static final HintedHandOffManager instance = new HintedHandOffManager(); private static final Logger logger = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int MAX_SIMULTANEOUSLY_REPLAYED_HINTS = 128; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. public final HintedHandoffMetrics metrics = new HintedHandoffMetrics(); private volatile boolean hintedHandOffPaused = false; static final int maxHintTTL = Integer.parseInt(System.getProperty("cassandra.maxHintTTL", String.valueOf(Integer.MAX_VALUE))); private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<>(); private final JMXEnabledScheduledThreadPoolExecutor executor = new JMXEnabledScheduledThreadPoolExecutor( DatabaseDescriptor.getMaxHintsThread(), new NamedThreadFactory("HintedHandoff", Thread.MIN_PRIORITY), "internal"); private final ColumnFamilyStore hintStore = Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.HINTS); private static final ColumnDefinition hintColumn = SystemKeyspace.Hints.compactValueColumn(); /** * Returns a mutation representing a Hint to be sent to <code>targetId</code> * as soon as it becomes available again. */ public Mutation hintFor(Mutation mutation, long now, int ttl, UUID targetId) { assert ttl > 0; InetAddress endpoint = StorageService.instance.getTokenMetadata().getEndpointForHostId(targetId); // during tests we may not have a matching endpoint, but this would be unexpected in real clusters if (endpoint != null) metrics.incrCreatedHints(endpoint); else logger.warn("Unable to find matching endpoint for target {} when storing a hint", targetId); UUID hintId = UUIDGen.getTimeUUID(); // serialize the hint with id and version as a composite column name PartitionUpdate upd = new PartitionUpdate(SystemKeyspace.Hints, StorageService.getPartitioner().decorateKey(UUIDType.instance.decompose(targetId)), PartitionColumns.of(hintColumn), 1); Row.Writer writer = upd.writer(); Rows.writeClustering(SystemKeyspace.Hints.comparator.make(hintId, MessagingService.current_version), writer); ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, Mutation.serializer, MessagingService.current_version)); writer.writeCell(hintColumn, false, value, SimpleLivenessInfo.forUpdate(now, ttl, FBUtilities.nowInSeconds(), SystemKeyspace.Hints), null); writer.endOfRow(); return new Mutation(upd); } /* * determine the TTL for the hint Mutation * this is set at the smallest GCGraceSeconds for any of the CFs in the RM * this ensures that deletes aren't "undone" by delivery of an old hint */ public static int calculateHintTTL(Mutation mutation) { int ttl = maxHintTTL; for (PartitionUpdate upd : mutation.getPartitionUpdates()) ttl = Math.min(ttl, upd.metadata().getGcGraceSeconds()); return ttl; } public void start() { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); } catch (Exception e) { throw new RuntimeException(e); } logger.debug("Created HHOM instance, registered MBean."); Runnable runnable = new Runnable() { public void run() { scheduleAllDeliveries(); metrics.log(); } }; executor.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES); } private static void deleteHint(ByteBuffer tokenBytes, Clustering clustering, long timestamp) { DecoratedKey dk = 
StorageService.getPartitioner().decorateKey(tokenBytes); PartitionUpdate upd = new PartitionUpdate(SystemKeyspace.Hints, dk, PartitionColumns.of(hintColumn), 1); Row.Writer writer = upd.writer(); Rows.writeClustering(clustering, writer); Cells.writeTombstone(writer, hintColumn, timestamp, FBUtilities.nowInSeconds()); new Mutation(upd).applyUnsafe(); // don't bother with commitlog since we're going to flush as soon as we're done with delivery } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint); } catch (UnknownHostException e) { logger.warn("Unable to find {}, not a hostname or ipaddr of a node", ipOrHostname); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint) { if (!StorageService.instance.getTokenMetadata().isMember(endpoint)) return; UUID hostId = StorageService.instance.getTokenMetadata().getHostId(endpoint); DecoratedKey dk = StorageService.getPartitioner().decorateKey(ByteBuffer.wrap(UUIDGen.decompose(hostId))); final Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(SystemKeyspace.Hints, dk, System.currentTimeMillis(), FBUtilities.nowInSeconds())); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger.info("Deleting any stored hints for {}", endpoint); mutation.apply(); hintStore.forceBlockingFlush(); compact(); } catch (Exception e) { JVMStabilityInspector.inspectThrowable(e); logger.warn("Could not delete hints for {}: {}", endpoint, e); } } }; executor.submit(runnable); } public void truncateAllHints() throws ExecutionException, InterruptedException { Runnable runnable = new Runnable() { public void run() { try { logger.info("Truncating all stored hints."); Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.HINTS).truncateBlocking(); } catch (Exception e) { logger.warn("Could not truncate all hints.", e); } } }; executor.submit(runnable).get(); } @VisibleForTesting protected synchronized void compact() { ArrayList<Descriptor> descriptors = new ArrayList<>(); for (SSTable sstable : hintStore.getTracker().getUncompacting()) descriptors.add(sstable.descriptor); if (descriptors.isEmpty()) return; try { CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000)).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint) != null && gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Didn't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. 
// here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) while (gossiper.getEndpointStateForEndpoint(endpoint) != null && !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); logger.debug("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) { if (hintStore.isEmpty()) return; // nothing to do, don't confuse users by logging a no-op handoff // check if hints delivery has been paused if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); return; } logger.debug("Checking remote({}) schema before delivering hints", endpoint); try { waitForSchemaAgreement(endpoint); } catch (TimeoutException e) { return; } if (!FailureDetector.instance.isAlive(endpoint)) { logger.debug("Endpoint {} died before hint delivery, aborting", endpoint); return; } doDeliverHintsToEndpoint(endpoint); // Flush all the tombstones to disk hintStore.forceBlockingFlush(); } private boolean checkDelivered(InetAddress endpoint, List<WriteResponseHandler<Mutation>> handlers, AtomicInteger rowsReplayed) { for (WriteResponseHandler<Mutation> handler : handlers) { try { handler.get(); } catch (WriteTimeoutException e) { logger.info("Failed replaying hints to {}; aborting ({} delivered), error : {}", endpoint, rowsReplayed, e.getMessage()); return false; } } return true; } /* * 1. Get the key of the endpoint we need to handoff * 2. For each column, deserialize the mutation and send it to the endpoint * 3. Delete the column if the write was successful * 4. Force a flush */ private void doDeliverHintsToEndpoint(InetAddress endpoint) { // find the hints for the node using its token. UUID hostId = Gossiper.instance.getHostId(endpoint); logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint); final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes); final AtomicInteger rowsReplayed = new AtomicInteger(0); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? 
Double.MAX_VALUE : throttleInKB * 1024); int nowInSec = FBUtilities.nowInSeconds(); try (OpOrder.Group op = hintStore.readOrdering.start(); RowIterator iter = UnfilteredRowIterators.filter(SinglePartitionReadCommand.fullPartitionRead(SystemKeyspace.Hints, nowInSec, epkey).queryMemtableAndDisk(hintStore, op), nowInSec)) { List<WriteResponseHandler<Mutation>> responseHandlers = Lists.newArrayList(); while (iter.hasNext()) { // check if node is still alive and we should continue delivery process if (!FailureDetector.instance.isAlive(endpoint)) { logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed); return; } // check if hints delivery has been paused during the process if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); return; } // Wait regularly on the endpoint acknowledgment. If we timeout on it, the endpoint is probably dead so stop delivery if (responseHandlers.size() > MAX_SIMULTANEOUSLY_REPLAYED_HINTS && !checkDelivered(endpoint, responseHandlers, rowsReplayed)) return; final Row hint = iter.next(); int version = Int32Type.instance.compose(hint.clustering().get(1)); Cell cell = hint.getCell(hintColumn); final long timestamp = cell.livenessInfo().timestamp(); DataInputPlus in = new NIODataInputStream(cell.value(), true); Mutation mutation; try { mutation = Mutation.serializer.deserialize(in, version); } catch (UnknownColumnFamilyException e) { logger.debug("Skipping delivery of hint for deleted table", e); deleteHint(hostIdBytes, hint.clustering(), timestamp); continue; } catch (IOException e) { throw new AssertionError(e); } for (UUID cfId : mutation.getColumnFamilyIds()) { if (timestamp <= SystemKeyspace.getTruncatedAt(cfId)) { logger.debug("Skipping delivery of hint for truncated table {}", cfId); mutation = mutation.without(cfId); } } if (mutation.isEmpty()) { deleteHint(hostIdBytes, hint.clustering(), timestamp); continue; } MessageOut<Mutation> message = mutation.createMessage(); rateLimiter.acquire(message.serializedSize(MessagingService.current_version)); Runnable callback = new Runnable() { public void run() { rowsReplayed.incrementAndGet(); deleteHint(hostIdBytes, hint.clustering(), timestamp); } }; WriteResponseHandler<Mutation> responseHandler = new WriteResponseHandler<>(endpoint, WriteType.SIMPLE, callback); MessagingService.instance().sendRR(message, endpoint, responseHandler, false); responseHandlers.add(responseHandler); } // Wait on the last handlers if (checkDelivered(endpoint, responseHandlers, rowsReplayed)) logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint); } } /** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { logger.debug("Started scheduleAllDeliveries"); // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints // to deliver to). 
compact(); ReadCommand cmd = new PartitionRangeReadCommand(hintStore.metadata, FBUtilities.nowInSeconds(), ColumnFilter.all(hintStore.metadata), RowFilter.NONE, DataLimits.cqlLimits(Integer.MAX_VALUE, 1), DataRange.allData(StorageService.getPartitioner())); try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iter = cmd.executeLocally(orderGroup)) { while (iter.hasNext()) { try (UnfilteredRowIterator partition = iter.next()) { UUID hostId = UUIDGen.getUUID(partition.partitionKey().getKey()); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target, false); } } } logger.debug("Finished scheduleAllDeliveries"); } /* * This method is used to deliver hints to a particular endpoint. * When we learn that some endpoint is back up we deliver the data * to him via an event driven mechanism. */ public void scheduleHintDelivery(final InetAddress to, final boolean precompact) { // We should not deliver hints to the same host in 2 different threads if (!queuedDeliveries.add(to)) return; logger.debug("Scheduling delivery of Hints to {}", to); executor.execute(new Runnable() { public void run() { try { // If it's an individual node hint replay (triggered by Gossip or via JMX), and not the global scheduled replay // (every 10 minutes), force a major compaction to get rid of the tombstones and expired hints. if (precompact) compact(); deliverHintsToEndpoint(to); } finally { queuedDeliveries.remove(to); } } }); } public void scheduleHintDelivery(String to) throws UnknownHostException { scheduleHintDelivery(InetAddress.getByName(to), true); } public void pauseHintsDelivery(boolean b) { hintedHandOffPaused = b; } public List<String> listEndpointsPendingHints() { Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory(); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<>(); ReadCommand cmd = PartitionRangeReadCommand.allDataRead(SystemKeyspace.Hints, FBUtilities.nowInSeconds()); try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iter = cmd.executeLocally(orderGroup)) { while (iter.hasNext()) { try (UnfilteredRowIterator partition = iter.next()) { // We don't delete by range on the hints table, so we don't have to worry about the // iterator returning only range tombstone marker if (partition.hasNext()) result.addFirst(tokenFactory.toString(partition.partitionKey().getToken())); } } } return result; } }
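// Editor's note (not part of the original file): the rate limit in doDeliverHintsToEndpoint()
// divides the configured hinted_handoff_throttle_in_kb by the number of *other* nodes in the
// cluster (CASSANDRA-5272). For example, with a 1024 KB/s setting in a 9-node cluster:
//
//   throttleInKB = 1024 / (9 - 1) = 128                    // per-endpoint share
//   rateLimiter  = RateLimiter.create(128 * 1024)          // permits are bytes per second
//
// A configured value of 0 disables throttling (the limiter is created with Double.MAX_VALUE).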
chaordic/cassandra
src/java/org/apache/cassandra/db/HintedHandOffManager.java
213,928
/* Copyright (C) 2008 Human Media Interaction - University of Twente * * This file is part of The Virtual Storyteller. * * The Virtual Storyteller is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The Virtual Storyteller is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with The Virtual Storyteller. If not, see <http://www.gnu.org/licenses/>. * */ package vs.knowledge; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.logging.Level; import java.util.logging.Logger; import org.jpl7.Atom; import org.jpl7.Compound; import org.jpl7.JPLException; import org.jpl7.Query; import org.jpl7.Term; import org.jpl7.Util; import org.jpl7.Variable; import vs.communication.GoalSchema; import vs.communication.Operator; import vs.communication.RDFtriple; import vs.debug.LogFactory; import vs.poplanner.PlanLink; import vs.poplanner.PlanOrdering; import vs.poplanner.PlanStep; /** * Contains all methods that have to call Prolog for their functionality. Do _NOT_ use any call to Prolog * anywhere else than here. The idea is that this class provides _the_ interface to Prolog. * * @author swartjes */ public class PrologKB { // Modules protected static final String rdf_db = "rdf_db:"; protected static final String schema_management = ""; // "schema_management:"; protected static final String thread_management = ""; // "thread_management:"; protected static final String goal_management = ""; // "goal_manager:"; protected static final String reactive_layer = ""; // "reactive_layer:"; protected static final String planner = "planner:"; protected static final String knowledgebase = ""; // "knowledgebase:"; protected static final String narrator = ""; // "narrator:"; protected static final String narrative_inspiration = ""; // "narrative_inspiration:"; protected static final String characterAgent = ""; // "basicCharacterAgent:"; protected static final String worldAgent = ""; // "basicCharacterAgent:"; // TODO: Below predicates should all be made protected. Other classes should not be able to reference them. 
// ------------------------- // General prolog predicates // ------------------------- protected static final String consult = "consult"; public static final String member = "member"; protected static final String rdf_load = PrologKB.rdf_db + "rdf_load"; public static final String length = "length"; // ----------------------- // Character agent module // ----------------------- protected static final String setAgentID = characterAgent + "setAgentID"; protected static final String hasAction = characterAgent + "hasAction"; protected static final String canDo = characterAgent + "canDo"; protected static final String goal_intention = characterAgent + "goal_intention"; protected static final String schema_enablement = characterAgent + "schema_enablement"; protected static final String goal_motivation = characterAgent + "goal_motivation"; protected static final String possible_goal_after_plan = characterAgent + "possible_goal_after_plan"; // ----------------------- // World agent module // ----------------------- protected static final String check_schema_facts = worldAgent + "check_schema_facts"; protected static final String operator_effect = worldAgent + "operator_effect"; // ------------------------ // Schema management module // ------------------------ protected static final String applyOperatorEffects = schema_management + "apply_operator_effects"; protected static final String getSchemaPreconditions = schema_management + "schema_preconditions"; protected static final String getSchemaKind = schema_management + "schema_kind"; protected static final String getOperatorType = schema_management + "schema_type"; protected static final String getOperatorAgens = schema_management + "schema_agens"; protected static final String getOperatorPatiens = schema_management + "schema_patiens"; protected static final String getOperatorTarget = schema_management + "schema_target"; protected static final String getOperatorOpponent = schema_management + "schema_opponent"; protected static final String getOperatorInstrument = schema_management + "schema_instrument"; protected static final String getOperatorDuration = schema_management + "operator_duration"; protected static final String getGoalUrgency = schema_management + "goal_urgency"; protected static final String getFramingScopeAll = schema_management + "framing_scope_all"; protected static final String getFramingScopePersonal = schema_management + "framing_scope_personal"; protected static final String getFramingScopeHidden = schema_management + "framing_scope_hidden"; protected static final String validateSchema = schema_management + "validate_schema"; protected static final String validateGoalFailure = schema_management + "validate_goal_failure_conditions"; // --------------------- // Knowledge base module // --------------------- protected static final String rdfAssert = knowledgebase + "rdfAssert"; protected static final String rdfRetract = knowledgebase + "rdfRetract"; protected static final String query = knowledgebase + "query"; protected static final String first = knowledgebase + "first"; protected static final String second = knowledgebase + "second"; protected static final String getSubject = knowledgebase + "getSubject"; protected static final String getPredicate = knowledgebase + "getPredicate"; protected static final String getObject = knowledgebase + "getObject"; // ---------------------------- // Narrative inspiration module // ---------------------------- public static final String getSuggestion = narrative_inspiration + "getSuggestion"; 
public static final String getSuggestionName = narrative_inspiration + "getSuggestionName"; public static final String getSuggestionIndividual = narrative_inspiration + "getSuggestionIndividual"; public static final String getSuggestionType = narrative_inspiration + "getSuggestionType"; public static final String getSuggestionCausers = narrative_inspiration + "getSuggestionCausers"; public static final String getSuggestionBody = narrative_inspiration + "getSuggestionBody"; public static final String nodeClass = narrative_inspiration + "nodeClass"; public static final String causalityClass = narrative_inspiration + "causalityClass"; public static final String fabulaNode = narrative_inspiration + "fabulaNode"; public static final String fabulaCause = narrative_inspiration + "fabulaCause"; public static final String getFabulaCharacter = narrative_inspiration + "getFabulaCharacter"; public static final String getFabulaContents = narrative_inspiration + "getFabulaContents"; public static final String getFabulaContentTruth = narrative_inspiration + "getFabulaContentTruth"; public static final String createValidatedEvent = narrative_inspiration + "createValidatedEvent"; public static final String createValidatedAction = narrative_inspiration + "createValidatedAction"; // ----------------------------- // Partial order planning module // ----------------------------- protected static final String plan = planner + "plan"; protected static final String adaptPlan = planner + "adapt_plan"; protected static final String invalidatesPlan = planner + "invalidates_plan"; protected static final String invalidPlan = planner + "invalid_plan"; protected static final String executableOperator = planner + "executableOperator"; protected static final String executableFramingOperator = planner + "executableImprovisation"; protected static final String executableInferenceOperator = planner + "executableInference"; protected static final String executableEvent = planner + "executableEvent"; protected static final String finishedPlan = planner + "finished_plan"; protected static final String planStep = planner + "planStep"; protected static final String planOrdering = planner + "planOrdering"; protected static final String planLink = planner + "planLink"; protected static final String planLinkFrom = planner + "planLinkFrom"; protected static final String planLinkTo = planner + "planLinkTo"; protected static final String planLinkCond = planner + "planLinkCond"; // ------------------------- // Episode management module // ------------------------- protected static String possibleThread = thread_management + "possible_thread"; // protected static String startEpisode = episode_management + "start_episode"; protected static String necessaryCharacter = thread_management + "necessary_character"; // protected static String castedCharacter = episode_management + "casted_character"; protected static String threadGoal = thread_management + "thread_goal"; protected static String threadResolveGoal = thread_management + "thread_resolve_goal"; protected static String threadSetting = thread_management + "thread_setting"; protected static String condition_to_triples = thread_management + "condition_to_triples"; // ---------------------- // Goal management module // ---------------------- protected static final String possible_goal = goal_management + "possible_goal"; protected static final String adopt_goal = goal_management + "adopt_goal"; protected static final String drop_goal = goal_management + "drop_goal"; protected static final 
String adopt_justifiable_goal = goal_management + "adopt_justifiable_goal"; protected static final String adopted_goal = goal_management + "adopted_goal"; protected static final String adopted_justifiable_goal = goal_management + "adopted_justifiable_goal"; protected static final String suggest_goal = goal_management + "suggest_goal"; protected static final String suggested_goal = goal_management + "suggested_goal"; // --------------------- // Reactive layer module // --------------------- protected static final String select_action_reactively = reactive_layer + "select_action"; // ------------------------- // Narrator module // ------------------------- protected static String narrate = narrator + "narrate"; protected static PrologKB M_KNOWLEDGEMANAGER = null; public static String addQuotes(String input) { StringBuilder sb = new StringBuilder().append('\'').append(input) .append('\''); return sb.toString(); //"'" + input + "'"; } /** * Substring from '#' * @param input a String containing a '#' * @return the string after the first '#' */ public static String fromNrSign(String input) { return input.substring(input.indexOf("#") + 1); } public static PrologKB getInstance() { if (M_KNOWLEDGEMANAGER == null) { M_KNOWLEDGEMANAGER = new PrologKB(); } return M_KNOWLEDGEMANAGER; } public static String listToProlog(List<RDFtriple> l) { boolean notFirst = false; if (l == null) return "[]"; StringBuilder sb = new StringBuilder(); sb.append('['); for (RDFtriple t: l) { if (notFirst) { sb.append(','); } sb.append(tripleToProlog(t)); notFirst = true; } sb.append(']'); return sb.toString(); } /** * Remove the single quotes from a Prolog value * @param input the string with potential quotes * @return a string with quotes removed */ public static String removeQuotes(String input) { return input.replace('\'', ' ').trim(); } /** * Turns an RDF triple Java object back into a Prolog fact * @param t the RDF triple in Java * @return a Prolog string representing the triple */ public static String tripleToProlog(RDFtriple t) { StringBuilder sb = new StringBuilder(); if (t == null) return ""; Atom subj = new Atom(t.getSubject()); Atom pred = new Atom(t.getPredicate()); Atom obj = new Atom(t.getObject()); sb.append("(").append(subj).append(",").append( pred).append(",").append(obj).append( ")"); return sb.toString(); } protected final Logger logger; /** * Singleton pattern's private constructor */ private PrologKB() { // Initialize logger logger = LogFactory.getLogger(this); } /** * Tell Prolog to adopt a goal. This makes it no longer eligible for goal selection * * @param goal the Prolog string representing the goal to adopt */ public boolean adoptGoal(String goal) { return call(PrologKB.adopt_goal, goal); } /** * Tell Prolog to drop a goal. * * @param goal the Prolog string representing the goal to drop */ public boolean dropGoal(String goal) { return call(PrologKB.drop_goal, goal); } public boolean isAdoptedGoal(String goal) { return call(PrologKB.adopted_goal, goal); } public boolean isAdoptedJustifiableGoal(String goal) { return call(PrologKB.adopted_justifiable_goal, goal); } public boolean suggestGoal(String goal) { return call(PrologKB.suggest_goal, goal); } public boolean isSuggestedGoal(String goal) { return call(PrologKB.suggested_goal, goal); } /** * Tell Prolog to adopt a justifiable goal. 
This makes it no longer eligible for goal selection
     *
     * @param goal the Prolog string representing the goal to justify
     */
    public boolean adoptJustifiableGoal(String goal) {
        return call(PrologKB.adopt_justifiable_goal, goal);
    }

    /**
     * Applies operator effects
     *
     * @param schema the operator schema to apply the effects of
     * @return whether successful
     */
    public boolean applyOperatorEffects(String schema) {
        if (call(PrologKB.applyOperatorEffects, schema)) {
            return true;
        } else {
            if (logger.isLoggable(Level.WARNING)) {
                logger.warning("Applying operator effects failed.\nSchema: " + schema);
            }
            return false;
        }
    }

    /* See interface */
    public boolean ask(String query) {
        Query q = new Query(query);
        return q.hasSolution();
    }

    /* See interface */
    public boolean call(String prologCommand, String input) {
        Query query = new Query(prologCommand + "(" + input + ").");
        return query.hasSolution();
    }

    /**
     * Determine which actions given character can pursue
     * @param character the URI of the character
     * @return a vector of actions
     */
    public Vector<String> canDo(String character, String operator) {
        return getPrologSingleVariableList(PrologKB.canDo, PrologKB.addQuotes(character) + "," + operator);
    }

    /**
     * Checks whether given schema's preconditions hold, ignoring fabula
     *
     * @param schema the schema to validate, as prolog string
     * @return whether the schema's preconditions (that are not fabula preconditions) are true
     */
    public boolean checkSchemaFacts(String schema) {
        StringBuilder queryString = new StringBuilder();
        queryString.append(PrologKB.check_schema_facts).append('(').append(schema).append(").");
        return ask(queryString.toString());
    }

    public Vector<RDFtriple> conditionToTripleList(String condition) {
        Vector<RDFtriple> triples = new Vector<RDFtriple>();

        // Build query
        StringBuilder sb = new StringBuilder();
        sb.append(condition_to_triples).append("(");
        sb.append(condition);
        sb.append(",T,S,P,O)");
        Query q = new Query(sb.toString());

        // Retrieve results
        for (Map<String, Term> binding : q.allSolutions()) {
            String t = binding.get("T").toString();
            String s = binding.get("S").toString();
            String p = binding.get("P").toString();
            String o = binding.get("O").toString();

            RDFtriple trip = new RDFtriple();
            trip.setSubject(removeQuotes(s));
            trip.setPredicate(removeQuotes(p));
            trip.setObject(removeQuotes(o));
            trip.setTruth(Boolean.parseBoolean(t));
            triples.add(trip);
        }
        q.close();
        return triples;
    }

    /**
     * consult expects forward slashes (/) in file names
     **/
    public boolean consult(String filename) {
        Query query = new Query(PrologKB.consult + "('" + filename + "').");
        return query.hasSolution();
    }

    /**
     * Returns the events in the plan that are executable, i.e. do not depend on the execution of other steps (in terms of causal links)
     *
     * @param plan a prolog string representation of a plan
     * @return a collection of prolog strings representing the schemas of steps in the plan
     */
    public Vector<String> executableEvents(String plan) {
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Getting executable events for plan.");
        }
        return PrologKB.getInstance().getPrologSingleVariableList(PrologKB.executableEvent, plan);
    }

    /**
     * Returns the improvisations in the plan that are executable, i.e.
do not depend on the execution of other steps (in terms of causal links) * * @param plan a prolog string representation of a plan * @return a collection of prolog strings representing the schemas of steps in the plan */ public Vector<String> executableFramingOperators(String plan) { if (logger.isLoggable(Level.FINE)) { logger.fine("Getting executable framing operators for plan."); } return PrologKB.getInstance().getPrologSingleVariableList( PrologKB.executableFramingOperator, plan); } /** * Returns the inferences in the plan that are executable, i.e. do not depend on the execution of other steps (in terms of causal links) * * @param plan a prolog string representation of a plan * @return a collection of prolog strings representing the schemas of steps in the plan */ public Vector<String> executableInferenceOperators(String plan) { logger.fine("Getting executable inference operators for plan."); return PrologKB.getInstance().getPrologSingleVariableList( PrologKB.executableInferenceOperator, plan); } /** * Returns the operators in the plan that are executable, i.e. do not depend on the execution of other steps (in terms of causal links) * * @param plan a prolog string representation of a plan * @return a collection of prolog strings representing the schemas of steps in the plan */ public Vector<String> executableOperators(String plan) { if (logger.isLoggable(Level.FINE)) { logger.fine("Getting executable operators for plan."); } return PrologKB.getInstance().getPrologSingleVariableList( PrologKB.executableOperator, plan); } /** * Determines whether given plan is "finished", i.e., there are no more steps that can be executed * @param plan Prolog string representing the plan * @return whether plan is finished */ public boolean finishedPlan(String plan) { if (logger.isLoggable(Level.FINE)) { logger.fine("Seeing if plan is finished"); } if (plan == null) { return false; } return call(finishedPlan, plan); } /** * Determines whether plan is still valid in given context, i.e., replanning is not needed. This is a speedup for the planner. * @param plan a Prolog string representing the plan * @return whether the plan is "invalid" */ public boolean invalidPlan(String plan) { if (logger.isLoggable(Level.FINE)) { logger.fine("Seeing if current plan is invalid"); } if (plan == null) { return true; } return call(invalidPlan, plan); } /** * Get first element of tuple * @param tuple a string representing the tuple * @return first element of the tuple */ public String first(String tuple) { return getPrologSingleVariable(PrologKB.first, tuple); } /** * Given a query and a variable occurring in this query, get all resulting binding from the answers */ public Vector<String> getAllResults(Query q, Variable v) { Vector<String> returnList = new Vector<String>(); for (Map<String, Term> binding: q.allSolutions()) { Term t = (Term) binding.get(v.toString()); if (t != null) { returnList.add(t.toString()); } } q.close(); return returnList; } /*--------------------------------- * METHODS * -------------------------------- */ /** * This method returns a number of Strings that represent the URIs of Individuals of fabula elements, that * enable the given schema. For instance, if #belief_23 and #belief_25 make the preconditions of #goal_22 true, * this method returns <#belief_23, #belief_25>. 
* @param gs the goal schema under investigation * @return a set of individuals */ public Set<String> getEnablingFabulaElements(GoalSchema gs) { return getEnablingFabulaElements(gs.getPrologDescription()); } public Set<String> getEnablingFabulaElements(Operator op) { return getEnablingFabulaElements(op.getPrologDescription()); } protected Set<String> getEnablingFabulaElements(String schema) { StringBuilder sb = new StringBuilder(); sb.append(schema_enablement).append("("); sb.append(schema); sb.append(",Individual)"); Query q = new Query(sb.toString()); // Retrieve results Set<String> returnList = new HashSet<String>(); for (Map<String, Term> binding: q.allSolutions()) { String ind = binding.get("Individual").toString(); returnList.add(removeQuotes(ind)); } q.close(); return returnList; } public String getGoalPossibleAfterPlan(String character, String goal) { Atom charc = new Atom(character); return getPrologSingleVariable(PrologKB.possible_goal_after_plan, charc + "," + goal); } public Map<String,String> getGoalsPossibleAfterPlan(String character) { Map<String,String> goalPlanMap = new HashMap<String,String>(); Variable G = new Variable("Goal"); Variable P = new Variable("Plan"); Atom charc = new Atom(character); // TODO use new Atom and new Term and new Query things StringBuilder sb = new StringBuilder(); sb.append(PrologKB.possible_goal_after_plan).append('(').append(charc).append(", ").append( G.toString()).append(", ").append(P.toString()).append(")."); Query q = new Query(sb.toString()); //prologCommand + "(" + input + ", " + X.toString() + ")."); if (logger.isLoggable(Level.FINE)) { logger.fine("Querying prolog with: " + q.toString()); } //Map<String, Term>[] answers = q.allSolutions(); while (q.hasMoreElements()) { Map<String, Term> binding = (Map<String, Term>) q.nextElement(); Term goal = (Term) binding.get(G.toString()); Term plan = (Term) binding.get(P.toString()); if (goal != null && plan != null) { goalPlanMap.put(goal.toString(), plan.toString()); } } q.close(); if (logger.isLoggable(Level.FINE)) { logger.fine("Prolog returned: " + goalPlanMap); } return goalPlanMap; } //------------------------- // Character agent module //------------------------- public float getGoalUrgency(String schema) { String urg = getQNPF(PrologKB.getGoalUrgency, schema); return Float.parseFloat(urg); } /** * This method returns a number of Strings that represent the URIs of Individuals of fabula elements, that * motivate the given schema. For instance, if #goal_23 and #goal_25 make the preconditions of #goal_22 true, * this method returns <#goal_23, #goal_25>. 
* @param gs the Goal schema under investigation
     * @return the set of Individuals as described
     */
    public Set<String> getMotivatingFabulaElements(GoalSchema gs) {
        StringBuilder sb = new StringBuilder();
        sb.append(goal_motivation).append("(");
        sb.append(gs.getPrologDescription());
        sb.append(",Individual)");
        Query q = new Query(sb.toString());

        // Retrieve results
        Set<String> returnList = new HashSet<String>();
        for (Map<String, Term> binding : q.allSolutions()) {
            String ind = binding.get("Individual").toString();
            returnList.add(removeQuotes(ind));
        }
        q.close();
        return returnList;
    }

    /**
     * Given a query and a variable occurring in this query, get one resulting binding from the answers
     */
    public String getOneResult(Query q, Variable v) {
        Map<String, Term> binding = q.oneSolution();
        q.close();
        if (binding != null) {
            Term t = (Term) binding.get(v.toString());
            if (t != null) {
                return t.toString();
            }
        }
        return null;
    }

    /**
     * Retrieve effects of operator schema; assumption is that effects can be applied
     *
     * @param schema the schema to retrieve the effects of, in prolog string
     * @return a vector of the retrieved triples
     */
    public Vector<RDFtriple> getOperatorEffects(String schema) {
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Retrieving operator effects for schema.");
        }

        Vector<RDFtriple> effects = new Vector<RDFtriple>();

        // Build query
        StringBuilder sb = new StringBuilder();
        sb.append(operator_effect).append("(");
        sb.append(schema);
        sb.append(",T,S,P,O)");
        Query q = new Query(sb.toString());

        // Retrieve results
        for (Map<String, Term> binding : q.allSolutions()) {
            String t = binding.get("T").toString();
            String s = binding.get("S").toString();
            String p = binding.get("P").toString();
            String o = binding.get("O").toString();

            RDFtriple trip = new RDFtriple();
            trip.setSubject(removeQuotes(s));
            trip.setPredicate(removeQuotes(p));
            trip.setObject(removeQuotes(o));
            trip.setTruth(Boolean.parseBoolean(t));
            effects.add(trip);
        }
        q.close();
        return effects;
    }

    /**
     * Returns a collection of the plan links of given plan
     * @param plan a prolog string representation of a plan
     * @return a collection of plan links
     */
    public Vector<PlanLink> getPlanLinks(String plan) {
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Retrieving all Links from Plan.");
        }
        Vector<PlanLink> planLinks = new Vector<PlanLink>();
        Vector<String> links = PrologKB.getInstance().getPrologSingleVariableList(PrologKB.planLink, plan);
        for (String link : links) {
            String from = PrologKB.getInstance().getPrologSingleVariable(PrologKB.planLinkFrom, link);
            String to = PrologKB.getInstance().getPrologSingleVariable(PrologKB.planLinkTo, link);
            String cond = PrologKB.getInstance().getPrologSingleVariable(PrologKB.planLinkCond, link);
            //RDFtriple posTriple = PrologKB.getInstance().prologToTriple(pos, true);
            //RDFtriple negTriple = PrologKB.getInstance().prologToTriple(neg, false);
            PlanLink nwLink = new PlanLink(from, to, cond);
            planLinks.add(nwLink);
        }
        return planLinks;
    }

    /**
     * Returns a collection of the plan orderings of given plan
     * @param plan a prolog string representation of a plan
     * @return a collection of plan orderings
     */
    public Vector<PlanOrdering> getPlanOrderings(String plan) {
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Retrieving all orderings from plan.");
        }
        Vector<String> orderings = PrologKB.getInstance().getPrologSingleVariableList(PrologKB.planOrdering, plan);
        Vector<PlanOrdering> planOrderings = new Vector<PlanOrdering>();
        for (String ordering : orderings) {
String v1 = PrologKB.getInstance().first(ordering);
            String v2 = PrologKB.getInstance().second(ordering);
            String v1_name = PrologKB.getInstance().first(v1);
            String v2_name = PrologKB.getInstance().first(v2);
            PlanOrdering nwOrdering = new PlanOrdering(v1_name, v2_name);
            planOrderings.add(nwOrdering);
        }
        return planOrderings;
    }

    public Vector<PlanStep> getPlanSteps(String plan) {
        Vector<PlanStep> planSteps = new Vector<PlanStep>();
        Vector<String> steps = PrologKB.getInstance().getPrologSingleVariableList(PrologKB.planStep, plan);
        for (String step : steps) {
            String name = PrologKB.getInstance().first(step);
            String operator = PrologKB.getInstance().second(step);
            String type = PrologKB.getInstance().getSchemaType(operator);
            String clss = PrologKB.getInstance().getSchemaClass(operator);
            PlanStep nwStep = new PlanStep(name, operator, type, clss);
            planSteps.add(nwStep);
        }
        return planSteps;
    }

    /**
     * Returns a vector containing all goals that are possible to pursue for given character. In BDI terms, these are
     * the "desires" of the agent. Prolog is responsible for establishing this set of desires; Java is responsible for
     * selecting what to pursue. No choices are made yet as to a consistent set of goals that the agent is actually
     * pursuing (in BDI terms, the "goals" of the agent).
     *
     * @param character the URI of the character agent for which to retrieve possible goals
     * @return a list of possible goals
     */
    public Vector<String> getPossibleGoals(String character) {
        return getPrologSingleVariableList(PrologKB.possible_goal, PrologKB.addQuotes(character));
    }

    /**
     * Returns the actions that given character selects reactively
     * @param character the URI of the character
     * @return a collection of reactively selected actions as Prolog strings
     */
    public Vector<String> selectReactiveActions(String character) {
        return getPrologSingleVariableList(PrologKB.select_action_reactively, PrologKB.addQuotes(character));
    }

    /**
     * Builds the following structure
     * <i>prologCommand</i> ( <i>input</i> , GPLVar )
     * @deprecated - use getOneResult() in future
     */
    public String getPrologSingleVariable(String prologCommand, String input) {
        Variable X = new Variable("GPLVar");
        // TODO use new Atom and new Term and new Query things
        StringBuilder sb = new StringBuilder();
        sb.append(prologCommand).append('(').append(input).append(", ").append(X.toString()).append(").");
        Query q = new Query(sb.toString()); //prologCommand + "(" + input + ", " + X.toString() + ").");
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Querying prolog with: " + q.toString());
        }

        String answer = "";
        if (q.hasMoreElements()) {
            Map<String, Term> binding = (Map<String, Term>) q.nextElement();
            Term t = (Term) binding.get(X.toString());
            if (t != null) {
                answer = t.toString();
            }
        }
        q.close();

        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Prolog returned: " + answer);
        }
        return answer;
    }

    //-------------------------
    // Knowledge base module
    //-------------------------

    @Deprecated
    public Vector<String> getPrologSingleVariableList(String prologCommand) {
        Variable X = new Variable("GPLVar");
        // TODO use new Atom and new Term and new Query things
        Query q = new Query(prologCommand + "(" + X.toString() + ").");
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Querying prolog with: " + q.toString());
        }

        //Map<String, Term>[] answers = q.allSolutions();
        Vector<String> returnList = new Vector<String>();
        while (q.hasMoreElements()) {
            Map<String, Term> binding = (Map<String, Term>) q.nextElement();
            Term t = (Term) binding.get(X.toString());
            if (t != null) {
returnList.add(t.toString()); } } q.close(); if (logger.isLoggable(Level.FINE)) { logger.fine("Prolog returned: " + returnList); } return returnList; } /** * @deprecated - use getAllResults() in future */ public Vector<String> getPrologSingleVariableList(String prologCommand, String input) { Variable X = new Variable("GPLVar"); // TODO use new Atom and new Term and new Query things StringBuilder sb = new StringBuilder(); sb.append(prologCommand).append('(').append(input).append(", ").append( X.toString()).append(")."); Query q = new Query(sb.toString()); //prologCommand + "(" + input + ", " + X.toString() + ")."); logger.fine("Querying prolog with: " + q.toString()); //Map<String, Term>[] answers = q.allSolutions(); Vector<String> returnList = new Vector<String>(); while (q.hasMoreElements()) { Map<String, Term> binding = (Map<String, Term>) q.nextElement(); Term t = (Term) binding.get(X.toString()); if (t != null) { returnList.add(t.toString()); } } q.close(); if (logger.isLoggable(Level.FINE)) { logger.fine("Prolog returned: " + returnList); } return returnList; } //--------------------------- // Schema management module //--------------------------- // Returns the answer that Prolog returns without quotes and if the answer // is "none" or "" the result is set to null public String getQNPF(String command, String action) { String output = removeQuotes( getPrologSingleVariable(command, action)); if (output != null) { if (output.contentEquals("none") || output.contentEquals("")) { output = null; } } return output; } /** * Returns the URI of the agens of given schema (if it has any) * @param schema the schema as prolog string * @return the agens of the schema */ public String getSchemaAgens(String schema) { return getQNPF(PrologKB.getOperatorAgens, schema); } /** * Returns the class of given schema (e.g. action, goal, event, etc) * * TODO: make in prolog using =.. 
*
     * @param schema the schema as Prolog string
     * @return the class of the schema
     */
    public String getSchemaClass(String schema) {
        return getQNPF(PrologKB.getSchemaKind, schema);
    }

    /**
     * Returns the URI of the instrument of given schema (if it has any)
     * @param schema the schema as prolog string
     * @return the instrument of the schema
     */
    public String getSchemaInstrument(String schema) {
        return getQNPF(PrologKB.getOperatorInstrument, schema);
    }

    /**
     * Returns the URI of the opponent of given schema (if it has any)
     * @param schema the schema as prolog string
     * @return the opponent of the schema
     */
    public String getSchemaOpponent(String schema) {
        return getQNPF(PrologKB.getOperatorOpponent, schema);
    }

    public int getOperatorDuration(String operator) {
        return Integer.parseInt(getQNPF(PrologKB.getOperatorDuration, operator));
    }

    /**
     * Returns the URI of the patiens of given schema (if it has any)
     * @param schema the schema as prolog string
     * @return the patiens of the schema
     */
    public String getSchemaPatiens(String schema) {
        return getQNPF(PrologKB.getOperatorPatiens, schema);
    }

    /**
     * Retrieves preconditions of given schema
     * @param schema the schema to retrieve the preconditions of
     * @return Prolog string representing the preconditions
     */
    public String getSchemaPreconditions(String schema) {
        return getPrologSingleVariable(PrologKB.getSchemaPreconditions, schema);
    }

    /**
     * Returns the URI of the target of given schema (if it has any)
     * @param schema the schema as prolog string
     * @return the target of the schema
     */
    public String getSchemaTarget(String schema) {
        return getQNPF(PrologKB.getOperatorTarget, schema);
    }

    /**
     * Returns the type of given schema (its corresponding URI in the ontology)
     * @param schema the schema as Prolog string
     * @return the type of the schema
     */
    public String getSchemaType(String schema) {
        return getQNPF(PrologKB.getOperatorType, schema);
    }

    public boolean isFramingScopeAll(String schema) {
        boolean sa = call(getFramingScopeAll, schema);
        if (sa) {
            if (logger.isLoggable(Level.FINE)) {
                logger.fine("Given schema has scope ALL");
            }
        }
        return sa;
    }

    public boolean isFramingScopePersonal(String schema) {
        boolean sp = call(getFramingScopePersonal, schema);
        if (sp) {
            if (logger.isLoggable(Level.FINE)) {
                logger.fine("Given schema has scope PERSONAL");
            }
        }
        return sp;
    }

    public boolean isFramingScopeHidden(String schema) {
        boolean sh = call(getFramingScopeHidden, schema);
        if (sh) {
            if (logger.isLoggable(Level.FINE)) {
                logger.fine("Given schema has scope HIDDEN");
            }
        }
        return sh;
    }

    public String getUntruePreconditionsOfSchema(String schema) {
        return getPrologSingleVariable(PrologKB.validateSchema, schema);
    }

    /**
     * Checks whether given goal's failure conditions are true
     * @param schema the goal schema
     * @return whether failure conditions of goal schema are true
     */
    public boolean goalFailureConditionsTrue(String schema) {
        return call(validateGoalFailure, schema);
    }

    public String goalIntention(String goalSchema) {
        return getPrologSingleVariable(PrologKB.goal_intention, goalSchema);
    }

    /**
     * Determine which actions given character has available
     * @param character the URI of the character
     * @return a vector of actions
     */
    public Vector<String> hasAction(String character) {
        return getPrologSingleVariableList(PrologKB.hasAction, PrologKB.addQuotes(character));
    }

    /**
     * Load RDF/OWL knowledge
     * @param file filename of the knowledge base
     * @return whether loading succeeded
     */
    public boolean loadKB(String file) {
        // TODO: translate to stringbuilder (JPL has no support for modules so this does not work)
Compound c = new Compound(PrologKB.rdf_load, new Term[] {new Atom(file)}); Query query = new Query(c); return query.hasSolution(); } /** * Make a narration of given prolog string * @param prologString prolog string representing a narratable structure. * @return narration of this prolog string in "natural language" */ public String narrate(String prologString) { return getPrologSingleVariable(PrologKB.narrate, prologString); } /** * Retrieves characters necessary as a result of plot thread executions (asserted in Prolog) * @return a vector containing URIs of characters that are necessary */ public Vector<String> necessaryCharacters(String thread) { return getPrologSingleVariableList(PrologKB.necessaryCharacter, thread); } /** * Builds and retrieves a partial-order plan * @param intentions the intentions as prolog list * @return a plan as prolog String, or null if there was no solution. */ public String plan(String character, String intentions) { String charField; // no pun intended if (character != null && (! character.equals(""))) { Atom a = new Atom(character); charField = a.toString(); } else { charField = "_"; } StringBuilder sb = new StringBuilder(); sb.append(PrologKB.plan).append('(') .append(charField).append(',') .append(intentions).append(", Plan)."); if (logger.isLoggable(Level.INFO)) { logger.info ("Querying plan with: " + sb.toString ()); } Query q = new Query(sb.toString()); String solution = PrologKB.getInstance().getOneResult(q, new Variable("Plan")); //Map<String, Term> solution = PrologKB.getInstance().prologCallOneSolution(sb.toString()); if (logger.isLoggable(Level.INFO)) { if (solution != null) { logger.info("Successfully created plan using\n" + sb.toString()); } else { logger.info("Could not create plan using\n" + sb.toString()); } } return solution; } /** * Adapts an existing partial-order plan * @param intentions the intentions as prolog list * @return a plan as prolog String, or null if there was no solution. */ public String adaptPlan(String character, String intentions, String oldPlan) { // Speedup: only adapt plan if current plan is no longer valid. // might yield believability issues because "obvious" plan adaptions will not be chosen // (i.e. still sailing to the island to find a treasure chest // even though someone has placed another treasure chest right in front of your nose.) if (! invalidPlan(oldPlan)) { if (logger.isLoggable(Level.INFO)) { logger.info("Old plan still OK, reusing it."); } return oldPlan; } // TODO: if old plan is no longer valid, according to Trabasso et al. what should happen is that // the goal gets a negative outcome (O-), and the goal is re-instantiated (if its preconditions still hold). String charField; // no pun intended if (character != null && (! 
character.equals(""))) { Atom a = new Atom(character); charField = a.toString(); } else { charField = "_"; } StringBuilder sb = new StringBuilder(); sb.append(PrologKB.adaptPlan).append('(') .append(charField).append(',') .append(intentions).append(',') .append(oldPlan).append(", Plan)."); if (logger.isLoggable(Level.INFO)) { logger.info ("Querying adapt_plan with: " + sb.toString ()); } Query q = new Query(sb.toString()); String solution = PrologKB.getInstance().getOneResult(q, new Variable("Plan")); //Map<String, Term> solution = PrologKB.getInstance().prologCallOneSolution(sb.toString()); if (logger.isLoggable(Level.INFO)) { if (solution != null) { logger.info("Successfully created plan using\n" + sb.toString()); } else { logger.info("Could not create plan using\n" + sb.toString()); } } return solution; } //-------------------------------- // Partial order planning module //-------------------------------- /** * Retrieve plot threads that are now possible * * @return vector containing string representations of possible plot threads */ public Vector<String> possibleThreads() { return PrologKB.getInstance().getPrologSingleVariableList(PrologKB.possibleThread); } public Map<String, Term>[] prologCall(String prologString) { Query q = new Query(prologString); Map<String, Term>[] answers = q.allSolutions(); q.close(); return answers; } public Map<String, Term>[] prologCall(String prologCommand, String input, Vector<String> vars) { // Use prologCommand^(AgentID, Variable) which is in prolog in BasicCharacterAgent.pl StringBuilder queryString = new StringBuilder(); queryString.append(prologCommand).append('(').append(input); for (String v : vars) { queryString.append(',').append(v); } queryString.append(")."); Query q = new Query(queryString.toString()); //prologCommand + "(" + input + queryString.toString() + ")."); if (logger.isLoggable(Level.INFO)) { logger.info("Querying Prolog with: " + queryString.toString());//prologCommand + "(" + input + queryString.toString() + ")."); } Map<String, Term>[] answers = q.allSolutions(); q.close(); return answers; } public Map<String, Term> prologCallOneSolution(String prologString) { Query q = new Query(prologString); Map<String, Term> answer = q.oneSolution(); q.close(); return answer; } /* * @see vs.knowledge.IPrologKnowledgeManager#prologToTriple(java.lang.String, boolean) */ public List<RDFtriple> prologToList(String prologTripleList, boolean truth) { List<RDFtriple> tripleList = new Vector<RDFtriple>(); Term[] triples = null; try { Term t = Util.textToTerm(prologTripleList); triples = Util.listToTermArray(t); } catch(JPLException e) { //do nothing if (logger.isLoggable(Level.WARNING)) { logger.warning("Could not create triples from given Prolog list string: " + prologTripleList); } } for (Term t: triples) { tripleList.add(prologToTriple(t.toString(), truth)); } return tripleList; } /** * Builds an RDFtriple object from prolog string representation of an rdf triple * TODO: replace getSubject...etc by JPL parsing * * @param prologTriple the string representation of the triple * @param truth whether the triple should be interpreted as true or false * @return an RDFtriple object if successful, or null if unsuccessful */ public RDFtriple prologToTriple(String prologTriple, boolean truth) { RDFtriple t = new RDFtriple(); Term term = Util.textToTerm(prologTriple); String subject = getPrologSingleVariable(PrologKB.getSubject, prologTriple); String predicate = getPrologSingleVariable(PrologKB.getPredicate, prologTriple); String object = 
getPrologSingleVariable(PrologKB.getObject, prologTriple); if (subject.equals("") || predicate.equals("") || object.equals("")) { return null; } else { t.setSubject(removeQuotes(subject)); t.setPredicate(removeQuotes(predicate)); t.setObject(removeQuotes(object)); t.setTruth(truth); } return t; } /** * RDF query method * @param triple the RDF triple we want to query Prolog about. * @return a Query object */ public Query query(String triple) { return new Query(PrologKB.query + "(" + triple + ")"); } /** * Save knowledge base. * Not implemented yet. * @param file file name to save KB to * @return whether saving succeeded */ public boolean saveKB(String file) { // TODO Auto-generated method stub return false; } //-------------------------------- // Thread management module //-------------------------------- /** * Get second element of tuple * @param tuple a string representing the tuple * @return second element of the tuple */ public String second(String tuple) { return getPrologSingleVariable(PrologKB.second, tuple); } /** * Set the character ID for this agent (e.g. ps:leChuck) */ public boolean setAgentID(String agentID) { if (logger.isLoggable(Level.INFO)) { logger.info("Setting agent ID to: " + agentID); } StringBuilder qb = new StringBuilder(); Atom a = new Atom(agentID); qb.append(PrologKB.setAgentID).append('(').append(a.toString()).append(')'); Query q = new Query(qb.toString()); return q.hasSolution(); } /* *//* * Assert that given character is now casted * @param character the character that has been casted * @return whether asserting succeeded *//* public boolean castedCharacter(String character) { return call(PrologKB.castedCharacter, character); }*/ /* See interface */ public boolean tellRDF(String term) { Query q = new Query(PrologKB.rdfAssert + "(" + term + ")"); //new Query("assert(" + term + ")"); return q.hasSolution(); } public boolean tellRDFtriple(RDFtriple t) { if (t.getTruth() == true) { if (logger.isLoggable(Level.FINE)) { logger.fine("Asserting RDF triple: " + t); } Query q = new Query(PrologKB.rdfAssert + "(" + tripleToProlog(t) + ")"); return q.hasSolution(); } else { if (logger.isLoggable(Level.FINE)) { logger.fine("Retracting RDF triple: " + t); } Query q = new Query(PrologKB.rdfRetract + "(" + tripleToProlog(t) + ")"); return q.hasSolution(); } } /** * Returns a collection of plot thread goals for given character * * @param characterURI the URI of the character to retrieve plot thread goals for * @return a collection of goals that the character should adopt */ public Vector<String> threadGoals(String episode, String characterURI) { return getPrologSingleVariableList(PrologKB.threadGoal, episode + "," + PrologKB.addQuotes(characterURI)); } /** * Returns a collection of plot thread resolve goals for given character * * @param characterURI the URI of the character to retrieve plot thread goals for * @return a collection of goals that the character should adopt */ public Vector<String> threadResolveGoals(String episode, String characterURI) { return getPrologSingleVariableList(PrologKB.threadResolveGoal, episode + "," + PrologKB.addQuotes(characterURI)); } //-------------------------------- // Narrator module //-------------------------------- /** * Returns a collection of settings based on executed plot threads (asserted in Prolog) * @return a collection of (thread) settings as Prolog strings */ public Vector<String> threadSettings(String thread) { return getPrologSingleVariableList(PrologKB.threadSetting, thread); } 
    /*=====================================================================================================================
     * HELPER METHODS
     *=====================================================================================================================
     */

    /**
     * Returns true iff left hand side unifies with right hand side
     * @param lhs left hand side of the unification
     * @param rhs right hand side of the unification
     * @return whether unification succeeds
     */
    public boolean unifies(String lhs, String rhs) {
        Query q = new Query(lhs + " = " + rhs);
        if (q.hasSolution()) {
            if (logger.isLoggable(Level.FINE)) {
                logger.fine("Checking whether following two terms unify:\n1) " + lhs + "\n2) " + rhs + "\nThey do.");
            }
            return true;
        } else {
            if (logger.isLoggable(Level.FINE)) {
                logger.fine("Checking whether following two terms unify:\n1) " + lhs + "\n2) " + rhs + "\nThey do NOT.");
            }
            return false;
        }
    }

    /* See interface */
    public boolean untellRDF(String term) {
        Query q = new Query(PrologKB.rdfRetract + "(" + term + ")"); //new Query("retract(" + term + ")");
        return q.hasSolution();
    }
}
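/*
 * A minimal usage sketch, not part of the original class above. The knowledge-base file names and the
 * character URI are hypothetical placeholders; the calls themselves (getInstance, consult, loadKB, setAgentID,
 * getPossibleGoals, adoptGoal, plan, narrate) are the methods defined in PrologKB above.
 */
class PrologKBUsageSketch {
    static void sketch() {
        PrologKB kb = PrologKB.getInstance();
        kb.consult("prolog/StoryWorld.pl");   // hypothetical Prolog source (forward slashes required)
        kb.loadKB("ontology/world.owl");      // hypothetical RDF/OWL knowledge base
        kb.setAgentID("ps:leChuck");          // example ID taken from the setAgentID documentation above

        // Let Prolog propose desires, adopt the first one, and try to build a plan for it.
        for (String goal : kb.getPossibleGoals("ps:leChuck")) {
            if (kb.adoptGoal(goal)) {
                String plan = kb.plan("ps:leChuck", "[" + goal + "]"); // intentions passed as a Prolog list
                System.out.println(plan == null ? "no plan found" : kb.narrate(plan));
                break;
            }
        }
    }
}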
logicmoo/virtstoryteller
src/vs/knowledge/PrologKB.java
213,929
package edu.phema.quantify; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import java.util.*; public class ElmQuantities { public Stack<PhemaAnalysisDepths> depths; public PhemaAnalysisDimensions dimensions; public ElmQuantities() { depths = new Stack<>(); // push the global depth counter depths.push(new PhemaAnalysisDepths()); dimensions = new PhemaAnalysisDimensions(); } public void pushDepthTracker() { depths.push(new PhemaAnalysisDepths()); } public PhemaAnalysisDepths popDepthTracker() { return depths.pop(); } ///////////////////////////////////////////////////////////////////////////// // // ANALYSIS DIMENSIONS: Object for counting relevant metrics in a given context // ///////////////////////////////////////////////////////////////////////////// public class PhemaAnalysisDepths { // Logical expressions public int phemaLogicalMaxDepth = 0; private int phemaLogicalDepth = 0; public void incrementPhemaLogicalDepth() { phemaLogicalDepth++; if (phemaLogicalDepth > phemaLogicalMaxDepth) { phemaLogicalMaxDepth = phemaLogicalDepth; } } public void decrementPhemaLogicalDepth() { phemaLogicalDepth--; } // Comparison expressions public int phemaComparisonMaxDepth = 0; private int phemaComparisonDepth = 0; public void incrementPhemaComparisonDepth() { phemaComparisonDepth++; if (phemaComparisonDepth > phemaComparisonMaxDepth) { phemaComparisonMaxDepth = phemaComparisonDepth; } } public void decrementPhemaComparisonDepth() { phemaComparisonDepth--; } // Arithmetic expressions public int phemaArithmeticMaxDepth = 0; private int phemaArithmeticDepth = 0; public void incrementPhemaArithmeticDepth() { phemaArithmeticDepth++; if (phemaArithmeticDepth > phemaArithmeticMaxDepth) { phemaArithmeticMaxDepth = phemaArithmeticDepth; } } public void decrementPhemaArithmeticDepth() { phemaArithmeticDepth--; } // Aggregate expressions public int phemaAggregateMaxDepth = 0; private int phemaAggregateDepth = 0; public void incrementPhemaAggregateDepth() { phemaAggregateDepth++; if (phemaAggregateDepth > phemaAggregateMaxDepth) { phemaAggregateMaxDepth = phemaAggregateDepth; } } public void decrementPhemaAggregateDepth() { phemaAggregateDepth--; } // Data expressions public int whereClauseMaxDepth = 0; private int whereClauseDepth = 0; public void setWhereClauseDepth(int depth) { whereClauseDepth = depth; if (whereClauseDepth > whereClauseMaxDepth) { whereClauseMaxDepth = whereClauseDepth; } } public int sortClauseMaxDepth = 0; private int sortClauseDepth = 0; public void setSortClauseDepth(int depth) { sortClauseDepth = depth; if (sortClauseDepth > sortClauseMaxDepth) { sortClauseMaxDepth = sortClauseDepth; } } // Conditional expressions public int phemaConditionalMaxDepth = 0; private int phemaConditionalDepth = 0; public void incrementPhemaConditionalDepth() { phemaConditionalDepth++; if (phemaConditionalDepth > phemaConditionalMaxDepth) { phemaConditionalMaxDepth = phemaConditionalDepth; } } public void decrementPhemaConditionalDepth() { phemaConditionalDepth--; } // Temporal expressions public int phemaTemporalMaxDepth = 0; private int phemaTemporalDepth = 0; public void incrementPhemaTemporalDepth() { phemaTemporalDepth++; if (phemaTemporalDepth > phemaTemporalMaxDepth) { phemaTemporalMaxDepth = phemaTemporalDepth; } } public void decrementPhemaTemporalDepth() { 
phemaTemporalDepth--; } // Total expressions public int phemaExpressionMaxDepth = 0; private int phemaExpressionDepth = 0; public void incrementPhemaExpressionDepth() { phemaExpressionDepth++; if (phemaExpressionDepth > phemaExpressionMaxDepth) { phemaExpressionMaxDepth = phemaExpressionDepth; } } public void decrementPhemaExpressionDepth() { phemaExpressionDepth--; } // Modular expressions (statement and function calls) public int phemaModularityMaxDepth = 0; private int phemaModularityDepth = 0; public void incrementPhemaModularityDepth() { phemaModularityDepth++; if (phemaModularityDepth > phemaModularityMaxDepth) { phemaModularityMaxDepth = phemaModularityDepth; } } public void decrementPhemaModularityDepth() { phemaModularityDepth--; } // Terminology expressions public int phemaTerminologyMaxDepth = 0; private int phemaTerminologyDepth = 0; public void incrementPhemaTerminologyDepth() { phemaTerminologyDepth++; if (phemaTerminologyDepth > phemaTerminologyMaxDepth) { phemaTerminologyMaxDepth = phemaTerminologyDepth; } } public void decrementPhemaTerminologyDepth() { phemaTerminologyDepth--; } // Collection expressions public int phemaCollectionMaxDepth = 0; private int phemaCollectionDepth = 0; public void incrementPhemaCollectionDepth() { phemaCollectionDepth++; if (phemaCollectionDepth > phemaCollectionMaxDepth) { phemaCollectionMaxDepth = phemaCollectionDepth; } } public void decrementPhemaCollectionDepth() { phemaCollectionDepth--; } } public class PhemaAnalysisDimensions { // 1. Literals public PhemaLiteralCounts phemaLiteralCounts = new PhemaLiteralCounts(); // 2. Logical expressions public PhemaLogicalCounts phemaLogicalCounts = new PhemaLogicalCounts(); // 3. Comparison expressions public PhemaComparisonCounts phemaComparisonCounts = new PhemaComparisonCounts(); // 4. Arithmetic expressions public PhemaArithmeticCounts phemaArithmeticCounts = new PhemaArithmeticCounts(); // 5. Aggregate expressions public PhemaAggregateCounts phemaAggregateCounts = new PhemaAggregateCounts(); // 6. Data expressions public PhemaDataCounts phemaDataCounts = new PhemaDataCounts(); // 7. Conditional expressions public PhemaConditionalCounts phemaConditionalCounts = new PhemaConditionalCounts(); // 8. Temporal expressions public PhemaTemporalCounts phemaTemporalCounts = new PhemaTemporalCounts(); // 9. Modularity public PhemaModularityCounts phemaModularityCounts = new PhemaModularityCounts(); // 10. Terminology public PhemaTerminologyCounts phemaTerminologyCounts = new PhemaTerminologyCounts(); // 11. 
Collections public PhemaCollectionCounts phemaCollectionCounts = new PhemaCollectionCounts(); } ///////////////////////////////////////////////////////////////////////////// // // DERIVED COUNTS: The stuff we want to report // ///////////////////////////////////////////////////////////////////////////// // Used to count both simple and clinical values public class PhemaLiteralCounts { public int total; public Set<String> types = new HashSet<>(); } public class PhemaLogicalCounts { public int and; public int or; public int not; public int implies; public int xor; } public class PhemaComparisonCounts { public int equal; public int equivalent; public int notEqual; public int less; public int greater; public int lessOrEqual; public int greaterOrEqual; } public class PhemaArithmeticCounts { public int add; public int subtract; public int multiply; public int divide; public int truncatedDivide; public int modulo; public int ceiling; public int floor; public int truncate; public int abs; public int negate; public int round; public int ln; public int exp; public int log; public int power; public int successor; public int predecessor; public int minValue; public int maxValue; public int precision; public int lowBoundary; public int highBoundary; public int total; } public class PhemaAggregateCounts { public int product; public int geometricMean; public int count; public int sum; public int min; public int max; public int avg; public int median; public int mode; public int variance; public int populationVariance; public int stdDev; public int populationStdDev; public int allTrue; public int anyTrue; } public class PhemaDataCounts { public Set<String> dataModels = new HashSet<>(); public int retrieve; public Set<String> dataSources = new HashSet<>(); public int query; public int sort; public int property; public int aggregate; public int whereClauseMaxExpressionCount; public int whereClauseMaxDepth; public int sortClauseMaxExpressionCount; public int sortClauseMaxDepth; public void recordWhereClauseMaxDepth(int depth) { if (depth > whereClauseMaxDepth) { whereClauseMaxDepth = depth; } } public void recordWhereClauseExpressionCount(int count) { if (count > whereClauseMaxExpressionCount) { whereClauseMaxExpressionCount = count; } } public void recordSortClauseMaxDepth(int depth) { if (depth > sortClauseMaxDepth) { sortClauseMaxDepth = depth; } } public void recordSortClauseExpressionCount(int count) { if (count > sortClauseMaxExpressionCount) { sortClauseMaxExpressionCount = count; } } } public class PhemaConditionalCounts { public int _if; public int _case; // Debatable whether this should go here public int coalesce; } public class PhemaTemporalCounts { // interval public int start; // Patient age public int calculateAge; public int calculateAgeAt; // Other temporal operators public int durationBetween; public int differenceBetween; public int today; public int now; public int sameAs; public int sameOrBefore; public int sameOrAfter; public int end; public int contains; public int properContains; public int in; public int properIn; public int includes; public int includedIn; public int properIncludes; public int properIncludedIn; public int before; public int after; public int meets; public int meetsBefore; public int meetsAfter; public int overlaps; public int overlapsBefore; public int overlapsAfter; public int starts; public int ends; public int collapse; public int union; public int intersect; public int except; public int size; public int pointFrom; public int expand; } public class 
PhemaModularityCounts { // includes public Set<String> includes = new HashSet<>(); // totals public int expression; public int statementDef; public int functionDef; // local totals public int localStatementCalls; public int localFunctionCalls; // external totals public int externalStatementCalls; public int externalFunctionCalls; } public class PhemaTerminologyCounts { // definitions public int codeDef; public int codeSystemDef; public int conceptDef; public int valueSetDef; // references public int codeRef; public int codeSystemRef; public int conceptRef; public int valueSetRef; // operations public int inCodeSystem; public int inValueSet; public int anyInCodeSystem; public int anyInValueSet; public int subsumes; public int subsumedBy; // value set numbers public Set<String> uniqueValueSets = new HashSet<>(); public List<Integer> perValueSetCounts = new ArrayList<>(); public Map<String, Integer> perSystemCounts = new HashMap<>(); public int codes; public int systems; } public class PhemaCollectionCounts { // non-temporal interval public int start; // list operations public int exists; public int times; public int filter; public int first; public int last; public int indexOf; public int flatten; public int sort; public int forEach; public int distinct; public int current; public int singletonFrom; public int slice; public int repeat; public int iteration; // operations from intervals that also apply to lists public int contains; public int equal; public int equivalent; public int except; public int in; public int includes; public int includedIn; public int notEqual; public int properContains; public int properIn; public int properIncludes; public int properIncludedIn; public int union; public int end; public int before; public int after; public int meets; public int meetsBefore; public int meetsAfter; public int overlaps; public int overlapsBefore; public int overlapsAfter; public int starts; public int ends; public int collapse; public int intersect; public int size; public int pointFrom; public int expand; } ///////////////////////////////////////////////////////////////////////////// // // AUTOMATIC COUNTS: Raw ELM node counts // ///////////////////////////////////////////////////////////////////////////// public class ElmExpressionCounts { public int binaryExpression; public int element; public int expression; public int operatorExpression; public int unaryExpression; public int ternaryExpression; public int naryExpression; public int typeSpecifier; public int aggregateExpression; } public class ElmLiteralCounts { public int literal; public int tuple; public int tupleElement; public int tupleElementDefinition; public int instance; public int instanceElement; public int property; public int search; } public class ElmClinicalValueCounts { public int codeSystemDef; public int valueSetDef; public int codeSystemRef; public int valueSetRef; public int code; public int concept; public int quantity; public int ratio; public int codeDef; public int conceptDef; public int codeRef; public int conceptRef; } public class ElmTypeSpecifierCounts { public int namedTypeSpecifier; public int intervalTypeSpecifier; public int listTypeSpecifier; public int tupleTypeSpecifier; public int choiceTypeSpecifier; } public class ElmReuseCounts { public int library; public int usingDef; public int includeDef; public int contextDef; public int parameterDef; public int parameterRef; public int expressionDef; public int functionDef; public int functionRef; public int operandDef; public int expressionRef; public int 
operandRef; public int identifierRef; public int accessModifier; } public class ElmQueryCounts { public int aliasedQuerySource; public int letClause; public int relationshipClause; public int with; public int without; public int sortByItem; public int byDirection; public int byColumn; public int byExpression; public int sortClause; public int returnClause; public int query; public int aliasRef; public int queryLetRef; public int retrieve; public int codeFilterElement; public int dateFilterElement; public int otherFilterElement; public int includeElement; public int aggregateClause; } public class ElmComparisonCounts { public int equal; public int equivalent; public int notEqual; public int less; public int greater; public int lessOrEqual; public int greaterOrEqual; } public class ElmLogicalCounts { public int and; public int or; public int xor; public int not; public int implies; } public class ElmNullCounts { public int _null; public int isNull; public int isTrue; public int isFalse; public int coalesce; } public class ElmConditionalCounts { public int _if; public int caseItem; public int _case; } public class ElmArithmeticCounts { public int add; public int subtract; public int multiply; public int divide; public int truncatedDivide; public int modulo; public int ceiling; public int floor; public int truncate; public int abs; public int negate; public int round; public int ln; public int exp; public int log; public int power; public int successor; public int predecessor; public int minValue; public int maxValue; public int precision; public int lowBoundary; public int highBoundary; public int total; } public class ElmStringCounts { public int concatenate; public int combine; public int split; public int length; public int upper; public int lower; public int indexer; public int positionOf; public int substring; public int splitOnMatches; public int lastPositionOf; public int startsWith; public int endsWith; public int matches; public int replaceMatches; } public class ElmTemporalCounts { public int durationBetween; public int differenceBetween; public int dateFrom; public int timeFrom; public int timezoneOffsetFrom; public int dateTimeComponentFrom; public int timeOfDay; public int today; public int now; public int dateTime; public int time; public int sameAs; public int sameOrBefore; public int sameOrAfter; public int timezoneFrom; public int date; } public class ElmIntervalCounts { public int interval; public int width; public int start; public int end; public int contains; public int properContains; public int in; public int properIn; public int includes; public int includedIn; public int properIncludes; public int properIncludedIn; public int before; public int after; public int meets; public int meetsBefore; public int meetsAfter; public int overlaps; public int overlapsBefore; public int overlapsAfter; public int starts; public int ends; public int collapse; public int union; public int intersect; public int except; public int size; public int pointFrom; public int expand; } public class ElmListCounts { public int list; public int exists; public int times; public int filter; public int first; public int last; public int indexOf; public int flatten; public int sort; public int forEach; public int distinct; public int current; public int singletonFrom; public int slice; public int repeat; public int iteration; } public class ElmAggregateCounts { public int aggregate; public int product; public int geometricMean; public int count; public int sum; public int min; public int max; public 
int avg; public int median; public int mode; public int variance; public int populationVariance; public int stdDev; public int populationStdDev; public int allTrue; public int anyTrue; } public class ElmTypeCounts { public int is; public int as; public int convert; public int toBoolean; public int toConcept; public int toDateTime; public int toDecimal; public int toInteger; public int toQuantity; public int toString; public int toTime; public int canConvert; public int convertsToBoolean; public int toChars; public int convertsToDate; public int toDate; public int convertsToDateTime; public int convertsToLong; public int toLong; public int convertsToDecimal; public int convertsToInteger; public int toList; public int convertQuantity; public int canConvertQuantity; public int convertsToQuantity; public int convertsToRatio; public int toRatio; public int convertsToString; public int convertsToTime; public int children; public int descendents; } public class ElmTerminologyCounts { public int inCodeSystem; public int inValueSet; public int calculateAge; public int calculateAgeAt; public int anyInCodeSystem; public int anyInValueSet; public int subsumes; public int subsumedBy; } public class ElmErrorCounts { public int message; } public ElmExpressionCounts elmExpressionCounts = new ElmExpressionCounts(); public ElmLiteralCounts elmLiteralCounts = new ElmLiteralCounts(); public ElmClinicalValueCounts elmClinicalValueCounts = new ElmClinicalValueCounts(); public ElmTypeSpecifierCounts elmTypeSpecifierCounts = new ElmTypeSpecifierCounts(); public ElmReuseCounts elmReuseCounts = new ElmReuseCounts(); public ElmQueryCounts elmQueryCounts = new ElmQueryCounts(); public ElmComparisonCounts elmComparisonCounts = new ElmComparisonCounts(); public ElmLogicalCounts elmLogicalCounts = new ElmLogicalCounts(); public ElmNullCounts elmNullCounts = new ElmNullCounts(); public ElmConditionalCounts elmConditionalCounts = new ElmConditionalCounts(); public ElmArithmeticCounts elmArithmeticCounts = new ElmArithmeticCounts(); public ElmStringCounts elmStringCounts = new ElmStringCounts(); public ElmTemporalCounts elmTemporalCounts = new ElmTemporalCounts(); public ElmIntervalCounts elmIntervalCounts = new ElmIntervalCounts(); public ElmListCounts elmListCounts = new ElmListCounts(); public ElmAggregateCounts elmAggregateCounts = new ElmAggregateCounts(); public ElmTypeCounts elmTypeCounts = new ElmTypeCounts(); public ElmTerminologyCounts elmTerminologyCounts = new ElmTerminologyCounts(); public ElmErrorCounts elmErrorCounts = new ElmErrorCounts(); @JsonIgnore public String getJson() throws JsonProcessingException { ObjectMapper mapper = new ObjectMapper(); mapper.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); return mapper.writeValueAsString(this); } }
PheMA/elm-utils
src/main/java/edu/phema/quantify/ElmQuantities.java
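The record above ends with a Jackson-backed getJson() helper that serializes every counter group. A minimal sketch of exercising it, assuming the demo class sits in the same edu.phema.quantify package; ElmQuantitiesDemo and the counter values are hypothetical, not part of the repository:

package edu.phema.quantify;

import com.fasterxml.jackson.core.JsonProcessingException;

public class ElmQuantitiesDemo {
    public static void main(String[] args) throws JsonProcessingException {
        ElmQuantities quantities = new ElmQuantities();
        quantities.elmLogicalCounts.and = 3;      // pretend a visitor saw three And nodes
        quantities.elmComparisonCounts.equal = 1; // ...and one Equal node
        // getJson() configures alphabetical property order and indentation,
        // so the output is stable, pretty-printed JSON.
        System.out.println(quantities.getJson());
    }
}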
213,948
package entity;

import java.awt.Point;

import logger.Logger;

public abstract class Projector {
    protected Point _pos, _dir;
    protected Long _speed;
    protected Long _last_move_time, _generation_time, _max_life_time;
    protected int _attacker_ID;
    protected int _damage;
    protected int _asset_index;
    protected Collider _collider;

    public Projector(Point pos, Point dir, Collider collider, Long speed, int aID, int asset_index) {
        Init(pos, dir, collider, speed, aID, asset_index);
    }

    public Projector(Point dir, Collider collider, Long speed, int aID, int asset_index) {
        Init(collider.getPosition(), dir, collider, speed, aID, asset_index);
    }

    private void Init(Point pos, Point dir, Collider collider, Long speed, int aID, int asset_index) {
        _pos = pos;
        _dir = dir;
        _last_move_time = System.currentTimeMillis();
        _generation_time = System.currentTimeMillis();
        _max_life_time = 2000L;
        _collider = collider;
        _speed = speed;
        _attacker_ID = aID;
        _asset_index = asset_index;
    }

    protected abstract boolean canMove();

    public abstract void move();

    public abstract Projector clone();

    public abstract String getType();

    public void setCollider(Collider c) {
        assert c != null : "Null Object.";
        _collider = c;
        _pos = _collider.getPosition();
    }

    public void setDirection(Point d) {
        assert d != null : "Null Object.";
        _dir = d;
    }

    public void setPosition(Point p) {
        assert p != null : "Null Object.";
        _pos = p;
    }

    public void setSpeed(Long speed) {
        _speed = speed;
    }

    public void setAttacker(int aID) {
        _attacker_ID = aID;
    }

    public void setDamage(int damage) {
        _damage = damage;
    }

    public Collider getCollider() {
        return _collider;
    }

    public Long getSpeed() {
        return _speed;
    }

    public int getAttackerID() {
        return _attacker_ID;
    }

    public Point getPosition() {
        return _pos;
    }

    public Point getDirection() {
        return _dir;
    }

    public int getDamage() {
        return _damage;
    }

    public boolean isAlive() {
        return (System.currentTimeMillis() - _generation_time < _max_life_time);
    }

    public void Print() {
        Logger.log("Projector : ");
        Logger.log("Position : " + _pos.x + " " + _pos.y);
        Logger.log("======================");
    }

    public String toString() {
        return String.valueOf(_pos.x) + " " + String.valueOf(_pos.y) + " "
                + String.valueOf(_dir.x) + " " + String.valueOf(_dir.y) + " "
                + String.valueOf(_asset_index) + " ";
    }
}
jaidTw/FatelTale
entity/Projector.java
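Projector leaves canMove(), move(), clone() and getType() to subclasses. A hypothetical straight-line subclass is sketched below; it assumes the project's Collider type, and the reading of _speed as milliseconds between steps is a guess made purely for illustration:

package entity;

import java.awt.Point;

public class Bullet extends Projector {
    public Bullet(Point pos, Point dir, Collider collider, Long speed, int aID, int assetIndex) {
        super(pos, dir, collider, speed, aID, assetIndex);
    }

    @Override
    protected boolean canMove() {
        // Throttle movement: only step once every _speed milliseconds (assumed unit).
        return System.currentTimeMillis() - _last_move_time >= _speed;
    }

    @Override
    public void move() {
        if (!canMove()) {
            return;
        }
        _pos.translate(_dir.x, _dir.y); // advance one step along the direction vector
        _last_move_time = System.currentTimeMillis();
    }

    @Override
    public Projector clone() {
        return new Bullet(new Point(_pos), new Point(_dir), _collider, _speed, _attacker_ID, _asset_index);
    }

    @Override
    public String getType() {
        return "Bullet";
    }
}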
213,950
public class KoKoEatingBanana {
    public static void main(String[] args) {
        int[] piles = {3, 6, 7, 11};
        System.out.println(minEatingSpeed(piles, 8));
    }

    static int minEatingSpeed(int[] piles, int h) {
        int s = 1;
        int e = piles[0];
        for (int pile : piles) {
            e = Math.max(e, pile);
        }
        while (s < e) {
            int m = s + (e - s) / 2;
            if (canEatAllWithSpeed(piles, m, h)) {
                e = m;
            } else {
                s = m + 1;
            }
        }
        return s;
    }

    static boolean canEatAllWithSpeed(int[] piles, int allowedBananas, int h) {
        int hoursNeeded = 0;
        for (int pile : piles) {
            // Integer division only counts the full hours: with 4 bananas allowed per hour
            // and a pile of 10, 10 / 4 = 2 hours cover 8 bananas and 2 are left over, so one
            // extra hour is needed whenever pile % allowedBananas != 0. The same check also
            // handles piles smaller than the speed: for a pile of 3 at speed 4, 3 / 4 = 0 and
            // the remainder adds the single hour that pile actually takes.
            hoursNeeded += pile / allowedBananas;
            if (pile % allowedBananas != 0) {
                hoursNeeded++;
            }
        }
        return hoursNeeded <= h;
    }
}
Vanshika2063/Leetcode-Questions
Medium/KoKoEatingBanana.java
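The per-pile cost in canEatAllWithSpeed is a ceiling division written with an explicit remainder check. A branch-free equivalent, as a sketch (hoursFor is a hypothetical helper name), uses a long intermediate so pile + k - 1 cannot overflow:

static int hoursFor(int pile, int k) {
    return (int) ((pile + (long) k - 1) / k); // ceil(pile / k)
}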
213,951
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.DataInputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.RateLimiter; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.config.CFMetaData; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.config.Schema; import org.apache.cassandra.db.composites.CellName; import org.apache.cassandra.db.composites.Composite; import org.apache.cassandra.db.composites.Composites; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.db.marshal.UUIDType; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.exceptions.WriteTimeoutException; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.metrics.HintedHandoffMetrics; import org.apache.cassandra.net.MessageOut; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.*; import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.UUIDGen; import org.cliffc.high_scale_lib.NonBlockingHashSet; /** * The hint schema looks like this: * * CREATE TABLE hints ( * target_id uuid, * hint_id timeuuid, * message_version int, * mutation blob, * PRIMARY KEY (target_id, hint_id, message_version) * ) WITH COMPACT STORAGE; * * Thus, for each node in the cluster we treat its uuid as the partition key; each hint is a logical row * (physical composite column) containing the mutation to replay and associated metadata. 
* * When FailureDetector signals that a node that was down is back up, we page through * the hinted mutations and send them over one at a time, waiting for * hinted_handoff_throttle_delay in between each. * * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. */ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=HintedHandoffManager"; public static final HintedHandOffManager instance = new HintedHandOffManager(); private static final Logger logger = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int PAGE_SIZE = 128; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. public final HintedHandoffMetrics metrics = new HintedHandoffMetrics(); private volatile boolean hintedHandOffPaused = false; static final int maxHintTTL = Integer.parseInt(System.getProperty("cassandra.maxHintTTL", String.valueOf(Integer.MAX_VALUE))); private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<InetAddress>(); private final ThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getMaxHintsThread(), Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("HintedHandoff", Thread.MIN_PRIORITY), "internal"); private final ColumnFamilyStore hintStore = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF); /** * Returns a mutation representing a Hint to be sent to <code>targetId</code> * as soon as it becomes available again. */ public Mutation hintFor(Mutation mutation, long now, int ttl, UUID targetId) { assert ttl > 0; InetAddress endpoint = StorageService.instance.getTokenMetadata().getEndpointForHostId(targetId); // during tests we may not have a matching endpoint, but this would be unexpected in real clusters if (endpoint != null) metrics.incrCreatedHints(endpoint); else logger.warn("Unable to find matching endpoint for target {} when storing a hint", targetId); UUID hintId = UUIDGen.getTimeUUID(); // serialize the hint with id and version as a composite column name CellName name = CFMetaData.HintsCf.comparator.makeCellName(hintId, MessagingService.current_version); ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, Mutation.serializer, MessagingService.current_version)); ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Schema.instance.getCFMetaData(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF)); cf.addColumn(name, value, now, ttl); return new Mutation(Keyspace.SYSTEM_KS, UUIDType.instance.decompose(targetId), cf); } /* * determine the TTL for the hint Mutation * this is set at the smallest GCGraceSeconds for any of the CFs in the RM * this ensures that deletes aren't "undone" by delivery of an old hint */ public static int calculateHintTTL(Mutation mutation) { int ttl = maxHintTTL; for (ColumnFamily cf : mutation.getColumnFamilies()) ttl = Math.min(ttl, cf.metadata().getGcGraceSeconds()); return ttl; } public void start() { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); } catch (Exception e) { throw new RuntimeException(e); } logger.debug("Created HHOM instance, registered MBean."); Runnable runnable = new Runnable() { public void run() { scheduleAllDeliveries(); metrics.log(); } }; StorageService.optionalTasks.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES); } private static 
void deleteHint(ByteBuffer tokenBytes, CellName columnName, long timestamp) { Mutation mutation = new Mutation(Keyspace.SYSTEM_KS, tokenBytes); mutation.delete(SystemKeyspace.HINTS_CF, columnName, timestamp); mutation.applyUnsafe(); // don't bother with commitlog since we're going to flush as soon as we're done with delivery } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint); } catch (UnknownHostException e) { logger.warn("Unable to find {}, not a hostname or ipaddr of a node", ipOrHostname); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint) { if (!StorageService.instance.getTokenMetadata().isMember(endpoint)) return; UUID hostId = StorageService.instance.getTokenMetadata().getHostId(endpoint); ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); final Mutation mutation = new Mutation(Keyspace.SYSTEM_KS, hostIdBytes); mutation.delete(SystemKeyspace.HINTS_CF, System.currentTimeMillis()); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger.info("Deleting any stored hints for {}", endpoint); mutation.apply(); compact(); } catch (Exception e) { logger.warn("Could not delete hints for {}: {}", endpoint, e); } } }; StorageService.optionalTasks.submit(runnable); } //foobar public void truncateAllHints() throws ExecutionException, InterruptedException { Runnable runnable = new Runnable() { public void run() { try { logger.info("Truncating all stored hints."); Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF).truncateBlocking(); } catch (Exception e) { logger.warn("Could not truncate all hints.", e); } } }; StorageService.optionalTasks.submit(runnable).get(); } @VisibleForTesting protected Future<?> compact() { hintStore.forceBlockingFlush(); ArrayList<Descriptor> descriptors = new ArrayList<Descriptor>(); for (SSTable sstable : hintStore.getDataTracker().getUncompactingSSTables()) descriptors.add(sstable.descriptor); return CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000)); } private static boolean pagingFinished(ColumnFamily hintColumnFamily, Composite startColumn) { // done if no hints found or the start column (same as last column processed in previous iteration) is the only one return hintColumnFamily == null || (!startColumn.isEmpty() && hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn((CellName)startColumn) != null); } private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint) != null && gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Didin't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); waited = 0; // then wait for the correct schema version. 
// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) while (gossiper.getEndpointStateForEndpoint(endpoint) != null && !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); logger.debug("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) { if (hintStore.isEmpty()) return; // nothing to do, don't confuse users by logging a no-op handoff // check if hints delivery has been paused if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); return; } logger.debug("Checking remote({}) schema before delivering hints", endpoint); try { waitForSchemaAgreement(endpoint); } catch (TimeoutException e) { return; } if (!FailureDetector.instance.isAlive(endpoint)) { logger.debug("Endpoint {} died before hint delivery, aborting", endpoint); return; } doDeliverHintsToEndpoint(endpoint); } /* * 1. Get the key of the endpoint we need to handoff * 2. For each column, deserialize the mutation and send it to the endpoint * 3. Delete the subcolumn if the write was successful * 4. Force a flush * 5. Do major compaction to clean up all deletes etc. */ private void doDeliverHintsToEndpoint(InetAddress endpoint) { // find the hints for the node using its token. UUID hostId = Gossiper.instance.getHostId(endpoint); logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint); final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes); final AtomicInteger rowsReplayed = new AtomicInteger(0); Composite startColumn = Composites.EMPTY; int pageSize = calculatePageSize(); logger.debug("Using pageSize of {}", pageSize); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? 
Double.MAX_VALUE : throttleInKB * 1024); boolean finished = false; delivery: while (true) { long now = System.currentTimeMillis(); QueryFilter filter = QueryFilter.getSliceFilter(epkey, SystemKeyspace.HINTS_CF, startColumn, Composites.EMPTY, false, pageSize, now); ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000)); if (pagingFinished(hintsPage, startColumn)) { logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint); finished = true; break; } // check if node is still alive and we should continue delivery process if (!FailureDetector.instance.isAlive(endpoint)) { logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed); break; } List<WriteResponseHandler> responseHandlers = Lists.newArrayList(); for (final Cell hint : hintsPage) { // check if hints delivery has been paused during the process if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); break delivery; } // Skip tombstones: // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond // in which the local deletion timestamp was generated on the last column in the old page, in which // case the hint will have no columns (since it's deleted) but will still be included in the resultset // since (even with gcgs=0) it's still a "relevant" tombstone. if (!hint.isLive()) continue; startColumn = hint.name(); int version = Int32Type.instance.compose(hint.name().get(1)); DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value())); Mutation mutation; try { mutation = Mutation.serializer.deserialize(in, version); } catch (UnknownColumnFamilyException e) { logger.debug("Skipping delivery of hint for deleted columnfamily", e); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } catch (IOException e) { throw new AssertionError(e); } for (UUID cfId : mutation.getColumnFamilyIds()) { if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId)) { logger.debug("Skipping delivery of hint for truncated columnfamily {}", cfId); mutation = mutation.without(cfId); } } if (mutation.isEmpty()) { deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } MessageOut<Mutation> message = mutation.createMessage(); rateLimiter.acquire(message.serializedSize(MessagingService.current_version)); Runnable callback = new Runnable() { public void run() { rowsReplayed.incrementAndGet(); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); } }; WriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.SIMPLE, callback); MessagingService.instance().sendRR(message, endpoint, responseHandler, false); responseHandlers.add(responseHandler); } for (WriteResponseHandler handler : responseHandlers) { try { handler.get(); } catch (WriteTimeoutException e) { logger.info("Timed out replaying hints to {}; aborting ({} delivered)", endpoint, rowsReplayed); break delivery; } } } if (finished || rowsReplayed.get() >= DatabaseDescriptor.getTombstoneWarnThreshold()) { try { compact().get(); } catch (Exception e) { throw new RuntimeException(e); } } } // read less columns (mutations) per page if they are very large private int calculatePageSize() { int meanColumnCount = hintStore.getMeanColumns(); if (meanColumnCount <= 0) return PAGE_SIZE; int averageColumnSize = (int) (hintStore.getMeanRowSize() / meanColumnCount); if (averageColumnSize <= 0) return PAGE_SIZE; // page size of 1 does not allow actual 
paging b/c of >= behavior on startColumn return Math.max(2, Math.min(PAGE_SIZE, DatabaseDescriptor.getInMemoryCompactionLimit() / averageColumnSize)); } /** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { if (logger.isDebugEnabled()) logger.debug("Started scheduleAllDeliveries"); IPartitioner p = StorageService.getPartitioner(); RowPosition minPos = p.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos, p); IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of()); List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis()); for (Row row : rows) { UUID hostId = UUIDGen.getUUID(row.key.getKey()); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target); } if (logger.isDebugEnabled()) logger.debug("Finished scheduleAllDeliveries"); } /* * This method is used to deliver hints to a particular endpoint. * When we learn that some endpoint is back up we deliver the data * to him via an event driven mechanism. */ public void scheduleHintDelivery(final InetAddress to) { // We should not deliver hints to the same host in 2 different threads if (!queuedDeliveries.add(to)) return; logger.debug("Scheduling delivery of Hints to {}", to); executor.execute(new Runnable() { public void run() { try { deliverHintsToEndpoint(to); } finally { queuedDeliveries.remove(to); } } }); } public void scheduleHintDelivery(String to) throws UnknownHostException { scheduleHintDelivery(InetAddress.getByName(to)); } public void pauseHintsDelivery(boolean b) { hintedHandOffPaused = b; } public List<String> listEndpointsPendingHints() { Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory(); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<String>(); for (Row row : getHintsSlice(1)) { if (row.cf != null) //ignore removed rows result.addFirst(tokenFactory.toString(row.key.getToken())); } return result; } private List<Row> getHintsSlice(int columnCount) { // Get count # of columns... SliceQueryFilter predicate = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, columnCount); // From keys "" to ""... IPartitioner<?> partitioner = StorageService.getPartitioner(); RowPosition minPos = partitioner.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos); try { RangeSliceCommand cmd = new RangeSliceCommand(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF, System.currentTimeMillis(), predicate, range, null, LARGE_NUMBER); return StorageProxy.getRangeSlice(cmd, ConsistencyLevel.ONE); } catch (Exception e) { logger.info("HintsCF getEPPendingHints timed out."); throw new RuntimeException(e); } } }
ibmsoe/cassandra
src/java/org/apache/cassandra/db/HintedHandOffManager.java
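doDeliverHintsToEndpoint above divides the configured hinted-handoff rate across the other nodes before building a Guava RateLimiter (CASSANDRA-5272). A standalone sketch of just that computation, with the cluster size and configured KB/s passed in as hypothetical parameters instead of read from DatabaseDescriptor and TokenMetadata:

import com.google.common.util.concurrent.RateLimiter;

final class HintThrottleDemo {
    static RateLimiter hintRateLimiter(int configuredThrottleInKB, int clusterSize) {
        // The configured value is a total outbound budget, so split it across
        // the other endpoints; like the original, this assumes at least two nodes.
        int throttleInKB = configuredThrottleInKB / (clusterSize - 1);
        // 0 in cassandra.yaml means "unthrottled", modeled as an effectively infinite rate.
        return RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
    }
}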
213,954
/**
 * @author MAAG
 */
public interface IFigura3D {
    public double calcularVolumen();
}
malonso-uvg/Ejemplos_OOP2022
EjemploHerencia2/src/IFigura3D.java
213,955
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeoutException; import static com.google.common.base.Charsets.UTF_8; import org.apache.cassandra.utils.FBUtilities; import org.apache.commons.lang.ArrayUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.management.MBeanServer; import javax.management.ObjectName; import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.filter.QueryFilter; import org.apache.cassandra.db.filter.QueryPath; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.net.Message; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.*; import org.apache.cassandra.thrift.*; import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.WrappedRunnable; import org.cliffc.high_scale_lib.NonBlockingHashSet; /** * For each endpoint for which we have hints, there is a row in the system hints CF. * The key for this row is ByteBuffer.wrap(string), i.e. "127.0.0.1". * * SuperColumns in that row are keys for which we have hinted data. * Subcolumns names within that supercolumn are keyspace+CF, concatenated with SEPARATOR. * Subcolumn values are always empty; instead, we store the row data "normally" * in the application table it belongs in. * * When FailureDetector signals that a node that was down is back up, we read its * hints row to see what rows we need to forward data for, then reach each row in its * entirety and send it over. * * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. * * HHM never deletes the row from Application tables; usually (but not for CL.ANY!) * the row belongs on this node, as well. instead, we rely on cleanup compactions * to remove data that doesn't belong. (Cleanup compactions may be started manually * -- on a per node basis -- with "nodeprobe cleanup.") * * TODO this avoids our hint rows from growing excessively large by offloading the * message data into application tables. But, this means that cleanup compactions * will nuke HH data. 
Probably better would be to store the RowMutation messages * in a HHData (non-super) CF, modifying the above to store a UUID value in the * HH subcolumn value, which we use as a key to a [standard] HHData system CF * that would contain the message bytes. */ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final HintedHandOffManager instance = new HintedHandOffManager(); public static final String HINTS_CF = "HintsColumnFamily"; private static final Logger logger_ = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int PAGE_SIZE = 10000; private static final String SEPARATOR = "-"; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<InetAddress>(); private final ExecutorService executor_ = new JMXEnabledThreadPoolExecutor("HintedHandoff", DatabaseDescriptor.getCompactionThreadPriority()); public HintedHandOffManager() { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName("org.apache.cassandra.db:type=HintedHandoffManager")); } catch (Exception e) { throw new RuntimeException(e); } } public void registerMBean() { logger_.debug("Created HHOM instance, registered MBean."); } private static boolean sendMessage(InetAddress endpoint, String tableName, String cfName, ByteBuffer key) throws IOException { if (!Gossiper.instance.isKnownEndpoint(endpoint)) { logger_.warn("Hints found for endpoint " + endpoint + " which is not part of the gossip network. discarding."); return true; } if (!FailureDetector.instance.isAlive(endpoint)) { return false; } Table table = Table.open(tableName); DecoratedKey dkey = StorageService.getPartitioner().decorateKey(key); ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName); ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER; while (true) { QueryFilter filter = QueryFilter.getSliceFilter(dkey, new QueryPath(cfs.getColumnFamilyName()), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, PAGE_SIZE); ColumnFamily cf = cfs.getColumnFamily(filter); if (pagingFinished(cf, startColumn)) break; if (cf.getColumnNames().isEmpty()) { logger_.debug("Nothing to hand off for {}", dkey); break; } startColumn = cf.getColumnNames().last(); RowMutation rm = new RowMutation(tableName, key); rm.add(cf); Message message = rm.makeRowMutationMessage(); IWriteResponseHandler responseHandler = WriteResponseHandler.create(endpoint); MessagingService.instance().sendRR(message, endpoint, responseHandler); try { responseHandler.get(); } catch (TimeoutException e) { return false; } try { Thread.sleep(DatabaseDescriptor.getHintedHandoffThrottleDelay()); } catch (InterruptedException e) { throw new AssertionError(e); } } return true; } private static void deleteHintKey(ByteBuffer endpointAddress, ByteBuffer key, ByteBuffer tableCF, long timestamp) throws IOException { RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, endpointAddress); rm.delete(new QueryPath(HINTS_CF, key, tableCF), timestamp); rm.apply(); } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint); } catch (UnknownHostException e) { logger_.warn("Unable to find "+ipOrHostname+", not a hostname or ipaddr of a node?:"); e.printStackTrace(); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint) { final String ipaddr = 
endpoint.getHostAddress(); final ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF); final RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, ByteBufferUtil.bytes(ipaddr)); rm.delete(new QueryPath(HINTS_CF), System.currentTimeMillis()); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger_.info("Deleting any stored hints for " + ipaddr); rm.apply(); hintStore.forceFlush(); CompactionManager.instance.submitMajor(hintStore, 0, Integer.MAX_VALUE); } catch (Exception e) { logger_.warn("Could not delete hints for " + ipaddr + ": " + e); } } }; StorageService.scheduledTasks.execute(runnable); } private static boolean pagingFinished(ColumnFamily hintColumnFamily, ByteBuffer startColumn) { // done if no hints found or the start column (same as last column processed in previous iteration) is the only one return hintColumnFamily == null || (hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn(startColumn) != null); } public static ByteBuffer makeCombinedName(String tableName, String columnFamily) { byte[] withsep = ArrayUtils.addAll(tableName.getBytes(UTF_8), SEPARATOR.getBytes(UTF_8)); return ByteBuffer.wrap(ArrayUtils.addAll(withsep, columnFamily.getBytes(UTF_8))); } private static String[] getTableAndCFNames(ByteBuffer joined) { int index = ByteBufferUtil.lastIndexOf(joined, SEPARATOR.getBytes(UTF_8)[0], joined.limit()); if (index == -1 || index < (joined.position() + 1)) throw new RuntimeException("Corrupted hint name " + ByteBufferUtil.bytesToHex(joined)); try { return new String[] { ByteBufferUtil.string(joined, joined.position(), index - joined.position()), ByteBufferUtil.string(joined, index + 1, joined.limit() - (index + 1)) }; } catch (CharacterCodingException e) { throw new RuntimeException(e); } } private int waitForSchemaAgreement(InetAddress endpoint) throws InterruptedException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Thread.sleep(1000); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new RuntimeException("Didin't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } waited = 0; // then wait for the correct schema version. while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getLocalAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Thread.sleep(1000); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new RuntimeException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } logger_.debug("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) throws IOException, DigestMismatchException, InvalidRequestException, TimeoutException, InterruptedException { try { logger_.debug("Checking remote schema before delivering hints"); int waited = waitForSchemaAgreement(endpoint); // sleep a random amount to stagger handoff delivery from different replicas. // (if we had to wait, then gossiper randomness took care of that for us already.) 
if (waited == 0) { int sleep = new Random().nextInt(60000); logger_.debug("Sleeping {}ms to stagger hint delivery", sleep); Thread.sleep(sleep); } if (!Gossiper.instance.getEndpointStateForEndpoint(endpoint).isAlive()) { logger_.info("Endpoint {} died before hint delivery, aborting", endpoint); return; } } finally { queuedDeliveries.remove(endpoint); } logger_.info("Started hinted handoff for endpoint " + endpoint); // 1. Get the key of the endpoint we need to handoff // 2. For each column read the list of rows: subcolumns are KS + SEPARATOR + CF // 3. Delete the subcolumn if the write was successful // 4. Force a flush // 5. Do major compaction to clean up all deletes etc. ByteBuffer endpointAsUTF8 = ByteBufferUtil.bytes(endpoint.getHostAddress()); // keys have to be UTF8 to make OPP happy DecoratedKey epkey = StorageService.getPartitioner().decorateKey(endpointAsUTF8); int rowsReplayed = 0; ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF); ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER; delivery: while (true) { QueryFilter filter = QueryFilter.getSliceFilter(epkey, new QueryPath(HINTS_CF), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, PAGE_SIZE); ColumnFamily hintColumnFamily = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), Integer.MAX_VALUE); if (pagingFinished(hintColumnFamily, startColumn)) break; for (IColumn keyColumn : hintColumnFamily.getSortedColumns()) { startColumn = keyColumn.name(); Collection<IColumn> tableCFs = keyColumn.getSubColumns(); for (IColumn tableCF : tableCFs) { String[] parts = getTableAndCFNames(tableCF.name()); if (sendMessage(endpoint, parts[0], parts[1], keyColumn.name())) { deleteHintKey(endpointAsUTF8, keyColumn.name(), tableCF.name(), tableCF.timestamp()); rowsReplayed++; } else { logger_.info("Could not complete hinted handoff to " + endpoint); break delivery; } startColumn = keyColumn.name(); } } } if (rowsReplayed > 0) { hintStore.forceFlush(); try { CompactionManager.instance.submitMajor(hintStore, 0, Integer.MAX_VALUE).get(); } catch (Exception e) { throw new RuntimeException(e); } } logger_.info(String.format("Finished hinted handoff of %s rows to endpoint %s", rowsReplayed, endpoint)); } /** called when a keyspace is dropped or rename. newTable==null in the case of a drop. */ public static void renameHints(String oldTable, String newTable) throws IOException { DecoratedKey oldTableKey = StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(oldTable)); // we're basically going to fetch, drop and add the scf for the old and new table. we need to do it piecemeal // though since there could be GB of data. 
ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF); ByteBuffer startCol = ByteBufferUtil.EMPTY_BYTE_BUFFER; long now = System.currentTimeMillis(); while (true) { QueryFilter filter = QueryFilter.getSliceFilter(oldTableKey, new QueryPath(HINTS_CF), startCol, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, PAGE_SIZE); ColumnFamily cf = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), Integer.MAX_VALUE); if (pagingFinished(cf, startCol)) break; if (newTable != null) { RowMutation insert = new RowMutation(Table.SYSTEM_TABLE, ByteBufferUtil.bytes(newTable)); insert.add(cf); insert.apply(); } RowMutation drop = new RowMutation(Table.SYSTEM_TABLE, oldTableKey.key); for (ByteBuffer key : cf.getColumnNames()) { drop.delete(new QueryPath(HINTS_CF, key), now); startCol = key; } drop.apply(); } } /* * This method is used to deliver hints to a particular endpoint. * When we learn that some endpoint is back up we deliver the data * to him via an event driven mechanism. */ public void deliverHints(final InetAddress to) { if (!queuedDeliveries.add(to)) return; Runnable r = new WrappedRunnable() { public void runMayThrow() throws Exception { deliverHintsToEndpoint(to); } }; executor_.execute(r); } public void deliverHints(String to) throws UnknownHostException { deliverHints(InetAddress.getByName(to)); } public List<String> listEndpointsPendingHints() { List<Row> rows = getHintsSlice(1); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<String>(); for (Row r : rows) { if (r.cf != null) //ignore removed rows result.addFirst(new String(r.key.key.array())); } return result; } public Map<String, Integer> countPendingHints() { List<Row> rows = getHintsSlice(Integer.MAX_VALUE); Map<String, Integer> result = new HashMap<String, Integer>(); for (Row r : rows) { if (r.cf != null) //ignore removed rows result.put(new String(r.key.key.array()), r.cf.getColumnCount()); } return result; } private List<Row> getHintsSlice(int column_count) { // ColumnParent for HintsCF... ColumnParent parent = new ColumnParent(HINTS_CF); // Get count # of columns... SlicePredicate predicate = new SlicePredicate(); SliceRange sliceRange = new SliceRange(); sliceRange.setStart(new byte[0]).setFinish(new byte[0]); sliceRange.setCount(column_count); predicate.setSlice_range(sliceRange); // From keys "" to ""... IPartitioner partitioner = StorageService.getPartitioner(); ByteBuffer empty = ByteBufferUtil.EMPTY_BYTE_BUFFER; Range range = new Range(partitioner.getToken(empty), partitioner.getToken(empty)); // Get a bunch of rows! List<Row> rows; try { rows = StorageProxy.getRangeSlice(new RangeSliceCommand("system", parent, predicate, range, LARGE_NUMBER), ConsistencyLevel.ONE); } catch (Exception e) { logger_.info("HintsCF getEPPendingHints timed out."); throw new RuntimeException(e); } return rows; } }
sunsuk7tp/MyCassandra
MyCassandra-0.2.1/src/java/org/apache/cassandra/db/HintedHandOffManager.java
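This older HintedHandOffManager encodes each hinted subcolumn name as keyspace + "-" + column family and recovers the parts by splitting at the last separator, so a keyspace name may contain "-" but a column family name may not. A self-contained sketch of that round trip (CombinedNameDemo is hypothetical):

final class CombinedNameDemo {
    static String combine(String table, String cf) {
        return table + "-" + cf;
    }

    static String[] split(String joined) {
        int idx = joined.lastIndexOf('-');
        if (idx <= 0) { // mirrors the original's corrupted-name check
            throw new IllegalArgumentException("Corrupted hint name " + joined);
        }
        return new String[] { joined.substring(0, idx), joined.substring(idx + 1) };
    }

    public static void main(String[] args) {
        String[] parts = split(combine("my-keyspace", "HintsColumnFamily"));
        System.out.println(parts[0] + " / " + parts[1]); // my-keyspace / HintsColumnFamily
    }
}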
213,959
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.DataInputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.RateLimiter; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.config.Schema; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.AbstractType; import org.apache.cassandra.db.marshal.CompositeType; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.db.marshal.UUIDType; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.exceptions.WriteTimeoutException; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.metrics.HintedHandoffMetrics; import org.apache.cassandra.net.MessageOut; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.*; import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.UUIDGen; import org.cliffc.high_scale_lib.NonBlockingHashSet; /** * The hint schema looks like this: * * CREATE TABLE hints ( * target_id uuid, * hint_id timeuuid, * message_version int, * mutation blob, * PRIMARY KEY (target_id, hint_id, message_version) * ) WITH COMPACT STORAGE; * * Thus, for each node in the cluster we treat its uuid as the partition key; each hint is a logical row * (physical composite column) containing the mutation to replay and associated metadata. * * When FailureDetector signals that a node that was down is back up, we page through * the hinted mutations and send them over one at a time, waiting for * hinted_handoff_throttle_delay in between each. 
* * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. */ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=HintedHandoffManager"; public static final HintedHandOffManager instance = new HintedHandOffManager(); private static final Logger logger = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int PAGE_SIZE = 128; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. public final HintedHandoffMetrics metrics = new HintedHandoffMetrics(); private volatile boolean hintedHandOffPaused = false; static final CompositeType comparator = CompositeType.getInstance(Arrays.<AbstractType<?>>asList(UUIDType.instance, Int32Type.instance)); private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<InetAddress>(); private final ThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getMaxHintsThread(), Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("HintedHandoff", Thread.MIN_PRIORITY), "internal"); private final ColumnFamilyStore hintStore = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF); /** * Returns a mutation representing a Hint to be sent to <code>targetId</code> * as soon as it becomes available again. */ public RowMutation hintFor(RowMutation mutation, int ttl, UUID targetId) { assert ttl > 0; InetAddress endpoint = StorageService.instance.getTokenMetadata().getEndpointForHostId(targetId); // during tests we may not have a matching endpoint, but this would be unexpected in real clusters if (endpoint != null) metrics.incrCreatedHints(endpoint); else logger.warn("Unable to find matching endpoint for target {} when storing a hint", targetId); UUID hintId = UUIDGen.getTimeUUID(); // serialize the hint with id and version as a composite column name ByteBuffer name = comparator.decompose(hintId, MessagingService.current_version); ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, RowMutation.serializer, MessagingService.current_version)); ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Schema.instance.getCFMetaData(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF)); cf.addColumn(name, value, System.currentTimeMillis(), ttl); return new RowMutation(Keyspace.SYSTEM_KS, UUIDType.instance.decompose(targetId), cf); } /* * determine the TTL for the hint RowMutation * this is set at the smallest GCGraceSeconds for any of the CFs in the RM * this ensures that deletes aren't "undone" by delivery of an old hint */ public static int calculateHintTTL(RowMutation mutation) { int ttl = Integer.MAX_VALUE; for (ColumnFamily cf : mutation.getColumnFamilies()) ttl = Math.min(ttl, cf.metadata().getGcGraceSeconds()); return ttl; } public void start() { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); } catch (Exception e) { throw new RuntimeException(e); } logger.debug("Created HHOM instance, registered MBean."); Runnable runnable = new Runnable() { public void run() { scheduleAllDeliveries(); metrics.log(); } }; StorageService.optionalTasks.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES); } private static void deleteHint(ByteBuffer tokenBytes, ByteBuffer columnName, long timestamp) { RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, tokenBytes); 
rm.delete(SystemKeyspace.HINTS_CF, columnName, timestamp); rm.applyUnsafe(); // don't bother with commitlog since we're going to flush as soon as we're done with delivery } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint); } catch (UnknownHostException e) { logger.warn("Unable to find {}, not a hostname or ipaddr of a node", ipOrHostname); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint) { if (!StorageService.instance.getTokenMetadata().isMember(endpoint)) return; UUID hostId = StorageService.instance.getTokenMetadata().getHostId(endpoint); ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); final RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, hostIdBytes); rm.delete(SystemKeyspace.HINTS_CF, System.currentTimeMillis()); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger.info("Deleting any stored hints for {}", endpoint); rm.apply(); compact(); } catch (Exception e) { logger.warn("Could not delete hints for {}: {}", endpoint, e); } } }; StorageService.optionalTasks.execute(runnable); } @VisibleForTesting protected Future<?> compact() { hintStore.forceBlockingFlush(); ArrayList<Descriptor> descriptors = new ArrayList<Descriptor>(); for (SSTable sstable : hintStore.getSSTables()) descriptors.add(sstable.descriptor); return CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000)); } private static boolean pagingFinished(ColumnFamily hintColumnFamily, ByteBuffer startColumn) { // done if no hints found or the start column (same as last column processed in previous iteration) is the only one return hintColumnFamily == null || (hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn(startColumn) != null); } private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Didin't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. 
// here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } logger.debug("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) { if (hintStore.isEmpty()) return; // nothing to do, don't confuse users by logging a no-op handoff // check if hints delivery has been paused if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); return; } logger.debug("Checking remote({}) schema before delivering hints", endpoint); try { waitForSchemaAgreement(endpoint); } catch (TimeoutException e) { return; } if (!FailureDetector.instance.isAlive(endpoint)) { logger.debug("Endpoint {} died before hint delivery, aborting", endpoint); return; } doDeliverHintsToEndpoint(endpoint); } /* * 1. Get the key of the endpoint we need to handoff * 2. For each column, deserialize the mutation and send it to the endpoint * 3. Delete the subcolumn if the write was successful * 4. Force a flush * 5. Do major compaction to clean up all deletes etc. */ private void doDeliverHintsToEndpoint(InetAddress endpoint) { // find the hints for the node using its token. UUID hostId = Gossiper.instance.getHostId(endpoint); logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint); final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes); final AtomicInteger rowsReplayed = new AtomicInteger(0); ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER; int pageSize = calculatePageSize(); logger.debug("Using pageSize of {}", pageSize); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? 
Double.MAX_VALUE : throttleInKB * 1024); delivery: while (true) { long now = System.currentTimeMillis(); QueryFilter filter = QueryFilter.getSliceFilter(epkey, SystemKeyspace.HINTS_CF, startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize, now); ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000)); if (pagingFinished(hintsPage, startColumn)) break; // check if node is still alive and we should continue delivery process if (!FailureDetector.instance.isAlive(endpoint)) { logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed); return; } List<WriteResponseHandler> responseHandlers = Lists.newArrayList(); Map<UUID, Long> truncationTimesCache = new HashMap<UUID, Long>(); for (final Column hint : hintsPage) { // check if hints delivery has been paused during the process if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); break delivery; } // Skip tombstones: // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond // in which the local deletion timestamp was generated on the last column in the old page, in which // case the hint will have no columns (since it's deleted) but will still be included in the resultset // since (even with gcgs=0) it's still a "relevant" tombstone. if (!hint.isLive(System.currentTimeMillis())) continue; startColumn = hint.name(); ByteBuffer[] components = comparator.split(hint.name()); int version = Int32Type.instance.compose(components[1]); DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value())); RowMutation rm; try { rm = RowMutation.serializer.deserialize(in, version); } catch (UnknownColumnFamilyException e) { logger.debug("Skipping delivery of hint for deleted columnfamily", e); deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp()); continue; } catch (IOException e) { throw new AssertionError(e); } truncationTimesCache.clear(); for (UUID cfId : ImmutableSet.copyOf((rm.getColumnFamilyIds()))) { Long truncatedAt = truncationTimesCache.get(cfId); if (truncatedAt == null) { ColumnFamilyStore cfs = Keyspace.open(rm.getKeyspaceName()).getColumnFamilyStore(cfId); truncatedAt = cfs.getTruncationTime(); truncationTimesCache.put(cfId, truncatedAt); } if (hint.maxTimestamp() < truncatedAt) { logger.debug("Skipping delivery of hint for truncated columnfamily {}", cfId); rm = rm.without(cfId); } } if (rm.isEmpty()) { deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp()); continue; } MessageOut<RowMutation> message = rm.createMessage(); rateLimiter.acquire(message.serializedSize(MessagingService.current_version)); Runnable callback = new Runnable() { public void run() { rowsReplayed.incrementAndGet(); deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp()); } }; WriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.UNLOGGED_BATCH, callback); MessagingService.instance().sendRR(message, endpoint, responseHandler); responseHandlers.add(responseHandler); } for (WriteResponseHandler handler : responseHandlers) { try { handler.get(); } catch (WriteTimeoutException e) { logger.info("Timed out replaying hints to {}; aborting ({} delivered)", endpoint, rowsReplayed); return; } } } logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint); try { compact().get(); } catch (Exception e) { throw new RuntimeException(e); } } private int calculatePageSize() { // read less columns (mutations) per page if 
they are very large int meanColumnCount = hintStore.getMeanColumns(); if (meanColumnCount > 0) { int averageColumnSize = (int) (hintStore.getMeanRowSize() / meanColumnCount); // page size of 1 does not allow actual paging b/c of >= behavior on startColumn return Math.max(2, Math.min(PAGE_SIZE, DatabaseDescriptor.getInMemoryCompactionLimit() / averageColumnSize)); } else { return PAGE_SIZE; } } /** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { if (logger.isDebugEnabled()) logger.debug("Started scheduleAllDeliveries"); IPartitioner p = StorageService.getPartitioner(); RowPosition minPos = p.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos, p); IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<ByteBuffer>of()); List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis()); for (Row row : rows) { UUID hostId = UUIDGen.getUUID(row.key.key); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target); } if (logger.isDebugEnabled()) logger.debug("Finished scheduleAllDeliveries"); } /* * This method is used to deliver hints to a particular endpoint. * When we learn that some endpoint is back up, we deliver the data * to it via an event-driven mechanism. */ public void scheduleHintDelivery(final InetAddress to) { // We should not deliver hints to the same host in 2 different threads if (!queuedDeliveries.add(to)) return; logger.debug("Scheduling delivery of Hints to {}", to); executor.execute(new Runnable() { public void run() { try { deliverHintsToEndpoint(to); } finally { queuedDeliveries.remove(to); } } }); } public void scheduleHintDelivery(String to) throws UnknownHostException { scheduleHintDelivery(InetAddress.getByName(to)); } public void pauseHintsDelivery(boolean b) { hintedHandOffPaused = b; } public List<String> listEndpointsPendingHints() { Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory(); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<String>(); for (Row row : getHintsSlice(1)) { if (row.cf != null) // ignore removed rows result.addFirst(tokenFactory.toString(row.key.token)); } return result; } private List<Row> getHintsSlice(int columnCount) { // Get count # of columns... SliceQueryFilter predicate = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, columnCount); // From keys "" to ""... IPartitioner<?> partitioner = StorageService.getPartitioner(); RowPosition minPos = partitioner.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos); try { RangeSliceCommand cmd = new RangeSliceCommand(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF, System.currentTimeMillis(), predicate, range, null, LARGE_NUMBER); return StorageProxy.getRangeSlice(cmd, ConsistencyLevel.ONE); } catch (Exception e) { logger.info("HintsCF getEPPendingHints timed out."); throw new RuntimeException(e); } } }
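// Illustrative sketch (hypothetical helper class, not part of the original file): doDeliverHintsToEndpoint()
// above splits the configured hinted handoff throttle (KB/s) evenly across the other nodes in the cluster
// (CASSANDRA-5272), and a per-node result of 0 disables throttling by handing Double.MAX_VALUE to the limiter.
class HintThrottleSketch
{
    /** Bytes per second that would be handed to RateLimiter.create(), given the configured KB/s and cluster size. */
    static double ratePerNodeInBytes(int throttleInKB, int clusterSize)
    {
        int perNodeKB = throttleInKB / (clusterSize - 1); // integer division, same shape as the code above
        return perNodeKB == 0 ? Double.MAX_VALUE : perNodeKB * 1024d;
    }

    public static void main(String[] args)
    {
        // 1024 KB/s shared among 4 peer nodes -> 256 KB/s each, i.e. 262144.0 bytes/s
        System.out.println(ratePerNodeInBytes(1024, 5));
        // a configured value of 0 effectively disables the limiter (prints 1.7976931348623157E308)
        System.out.println(ratePerNodeInBytes(0, 5));
    }
}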
pgaref/ACaZoo
src/java/org/apache/cassandra/db/HintedHandOffManager.java
213,960
package ui; public class DadosTarifacao { private int tipoLinha; private int matricula; private int codigoCategoria; private short indicadorTarifaCategoria; private String codigoSubcategoria; private double valorFaturadoAgua; private int consumoFaturadoAgua; private double valorTarifaMinimaAgua; private int consumoMinimoAgua; private double valorFaturadoEsgoto; private int consumoFaturadoEsgoto; private double valorTarifaMinimaEsgoto; private int consumoMinimoEsgoto; private int quantidadeContasImpressas; public DadosTarifacao(int tipoLinha) { this.tipoLinha = tipoLinha; } public int getTipoLinha() { return tipoLinha; } public void setTipoLinha(int tipoLinha) { this.tipoLinha = tipoLinha; } public int getMatricula() { return matricula; } public void setMatricula(int matricula) { this.matricula = matricula; } public int getCodigoCategoria() { return codigoCategoria; } public void setCodigoCategoria(int codigoCategoria) { this.codigoCategoria = codigoCategoria; } public short getIndicadorTarifaCategoria() { return indicadorTarifaCategoria; } public void setIndicadorTarifaCategoria(short indicadorTarifaCategoria) { this.indicadorTarifaCategoria = indicadorTarifaCategoria; } public String getCodigoSubcategoria() { return codigoSubcategoria; } public void setCodigoSubcategoria(String codigoSubcategoria) { this.codigoSubcategoria = codigoSubcategoria; } public double getValorFaturadoAgua() { return valorFaturadoAgua; } public void setValorFaturadoAgua(double valorFaturadoAgua) { this.valorFaturadoAgua = valorFaturadoAgua; } public int getConsumoFaturadoAgua() { return consumoFaturadoAgua; } public void setConsumoFaturadoAgua(int consumoFaturadoAgua) { this.consumoFaturadoAgua = consumoFaturadoAgua; } public double getValorTarifaMinimaAgua() { return valorTarifaMinimaAgua; } public void setValorTarifaMinimaAgua(double valorTarifaMinimaAgua) { this.valorTarifaMinimaAgua = valorTarifaMinimaAgua; } public int getConsumoMinimoAgua() { return consumoMinimoAgua; } public void setConsumoMinimoAgua(int consumoMinimoAgua) { this.consumoMinimoAgua = consumoMinimoAgua; } public double getValorFaturadoEsgoto() { return valorFaturadoEsgoto; } public void setValorFaturadoEsgoto(double valorFaturadoEsgoto) { this.valorFaturadoEsgoto = valorFaturadoEsgoto; } public int getConsumoFaturadoEsgoto() { return consumoFaturadoEsgoto; } public void setConsumoFaturadoEsgoto(int consumoFaturadoEsgoto) { this.consumoFaturadoEsgoto = consumoFaturadoEsgoto; } public double getValorTarifaMinimaEsgoto() { return valorTarifaMinimaEsgoto; } public void setValorTarifaMinimaEsgoto(double valorTarifaMinimaEsgoto) { this.valorTarifaMinimaEsgoto = valorTarifaMinimaEsgoto; } public int getConsumoMinimoEsgoto() { return consumoMinimoEsgoto; } public void setConsumoMinimoEsgoto(int consumoMinimoEsgoto) { this.consumoMinimoEsgoto = consumoMinimoEsgoto; } public int getQuantidadeContasImpressas() { return quantidadeContasImpressas; } public void setQuantidadeContasImpressas(int quantidadeContasImpressas) { this.quantidadeContasImpressas = quantidadeContasImpressas; } }
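// Minimal usage sketch (hypothetical values, not part of the original file): the only constructor takes
// the record type (tipoLinha); every tariff field is populated afterwards through its setter.
class DadosTarifacaoExemplo {
    public static void main(String[] args) {
        DadosTarifacao dados = new DadosTarifacao(1);
        dados.setMatricula(123456);
        dados.setCodigoCategoria(2);
        dados.setConsumoFaturadoAgua(10);
        dados.setValorFaturadoAgua(57.90);
        // prints "57.9 / 10" -- billed water amount and billed water consumption
        System.out.println(dados.getValorFaturadoAgua() + " / " + dados.getConsumoFaturadoAgua());
    }
}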
prodigasistemas/impressao-simultanea-android
src/ui/DadosTarifacao.java
213,961
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.DataInputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.RateLimiter; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor; import org.apache.cassandra.concurrent.NamedThreadFactory; import org.apache.cassandra.config.CFMetaData; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.config.Schema; import org.apache.cassandra.db.composites.CellName; import org.apache.cassandra.db.composites.Composite; import org.apache.cassandra.db.composites.Composites; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.Int32Type; import org.apache.cassandra.db.marshal.UUIDType; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.exceptions.WriteTimeoutException; import org.apache.cassandra.gms.ApplicationState; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.metrics.HintedHandoffMetrics; import org.apache.cassandra.net.MessageOut; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.*; import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.UUIDGen; import org.cliffc.high_scale_lib.NonBlockingHashSet; /** * The hint schema looks like this: * * CREATE TABLE hints ( * target_id uuid, * hint_id timeuuid, * message_version int, * mutation blob, * PRIMARY KEY (target_id, hint_id, message_version) * ) WITH COMPACT STORAGE; * * Thus, for each node in the cluster we treat its uuid as the partition key; each hint is a logical row * (physical composite column) containing the mutation to replay and associated metadata. 
* * When FailureDetector signals that a node that was down is back up, we page through * the hinted mutations and send them over one at a time, waiting for * hinted_handoff_throttle_delay in between each. * * deliverHints is also exposed to JMX so it can be run manually if FD ever misses * its cue somehow. */ public class HintedHandOffManager implements HintedHandOffManagerMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=HintedHandoffManager"; public static final HintedHandOffManager instance = new HintedHandOffManager(); private static final Logger logger = LoggerFactory.getLogger(HintedHandOffManager.class); private static final int PAGE_SIZE = 128; private static final int LARGE_NUMBER = 65536; // 64k nodes ought to be enough for anybody. public final HintedHandoffMetrics metrics = new HintedHandoffMetrics(); private volatile boolean hintedHandOffPaused = false; static final int maxHintTTL = Integer.parseInt(System.getProperty("cassandra.maxHintTTL", String.valueOf(Integer.MAX_VALUE))); private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<InetAddress>(); private final ThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getMaxHintsThread(), Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("HintedHandoff", Thread.MIN_PRIORITY), "internal"); private final ColumnFamilyStore hintStore = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF); /** * Returns a mutation representing a Hint to be sent to <code>targetId</code> * as soon as it becomes available again. */ public Mutation hintFor(Mutation mutation, long now, int ttl, UUID targetId) { assert ttl > 0; InetAddress endpoint = StorageService.instance.getTokenMetadata().getEndpointForHostId(targetId); // during tests we may not have a matching endpoint, but this would be unexpected in real clusters if (endpoint != null) metrics.incrCreatedHints(endpoint); else logger.warn("Unable to find matching endpoint for target {} when storing a hint", targetId); UUID hintId = UUIDGen.getTimeUUID(); // serialize the hint with id and version as a composite column name CellName name = CFMetaData.HintsCf.comparator.makeCellName(hintId, MessagingService.current_version); ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, Mutation.serializer, MessagingService.current_version)); ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Schema.instance.getCFMetaData(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF)); cf.addColumn(name, value, now, ttl); return new Mutation(Keyspace.SYSTEM_KS, UUIDType.instance.decompose(targetId), cf); } /* * determine the TTL for the hint Mutation * this is set at the smallest GCGraceSeconds for any of the CFs in the RM * this ensures that deletes aren't "undone" by delivery of an old hint */ public static int calculateHintTTL(Mutation mutation) { int ttl = maxHintTTL; for (ColumnFamily cf : mutation.getColumnFamilies()) ttl = Math.min(ttl, cf.metadata().getGcGraceSeconds()); return ttl; } public void start() { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); } catch (Exception e) { throw new RuntimeException(e); } logger.debug("Created HHOM instance, registered MBean."); Runnable runnable = new Runnable() { public void run() { scheduleAllDeliveries(); metrics.log(); } }; StorageService.optionalTasks.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES); } private static 
void deleteHint(ByteBuffer tokenBytes, CellName columnName, long timestamp) { Mutation mutation = new Mutation(Keyspace.SYSTEM_KS, tokenBytes); mutation.delete(SystemKeyspace.HINTS_CF, columnName, timestamp); mutation.applyUnsafe(); // don't bother with commitlog since we're going to flush as soon as we're done with delivery } public void deleteHintsForEndpoint(final String ipOrHostname) { try { InetAddress endpoint = InetAddress.getByName(ipOrHostname); deleteHintsForEndpoint(endpoint); } catch (UnknownHostException e) { logger.warn("Unable to find {}, not a hostname or ipaddr of a node", ipOrHostname); throw new RuntimeException(e); } } public void deleteHintsForEndpoint(final InetAddress endpoint) { if (!StorageService.instance.getTokenMetadata().isMember(endpoint)) return; UUID hostId = StorageService.instance.getTokenMetadata().getHostId(endpoint); ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); final Mutation mutation = new Mutation(Keyspace.SYSTEM_KS, hostIdBytes); mutation.delete(SystemKeyspace.HINTS_CF, System.currentTimeMillis()); // execute asynchronously to avoid blocking caller (which may be processing gossip) Runnable runnable = new Runnable() { public void run() { try { logger.info("Deleting any stored hints for {}", endpoint); mutation.apply(); compact(); } catch (Exception e) { logger.warn("Could not delete hints for {}: {}", endpoint, e); } } }; StorageService.optionalTasks.submit(runnable); } public void truncateAllHints() throws ExecutionException, InterruptedException { Runnable runnable = new Runnable() { public void run() { try { logger.info("Truncating all stored hints."); Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF).truncateBlocking(); } catch (Exception e) { logger.warn("Could not truncate all hints.", e); } } }; StorageService.optionalTasks.submit(runnable).get(); } @VisibleForTesting protected Future<?> compact() { hintStore.forceBlockingFlush(); ArrayList<Descriptor> descriptors = new ArrayList<Descriptor>(); for (SSTable sstable : hintStore.getDataTracker().getUncompactingSSTables()) descriptors.add(sstable.descriptor); return CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000)); } private static boolean pagingFinished(ColumnFamily hintColumnFamily, Composite startColumn) { // done if no hints found or the start column (same as last column processed in previous iteration) is the only one return hintColumnFamily == null || (!startColumn.isEmpty() && hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn((CellName)startColumn) != null); } private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException { Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. while (gossiper.getEndpointStateForEndpoint(endpoint) != null && gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Didn't receive gossiped schema from " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); waited = 0; // then wait for the correct schema version. 
// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) while (gossiper.getEndpointStateForEndpoint(endpoint) != null && !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited > 2 * StorageService.RING_DELAY) throw new TimeoutException("Could not reach schema agreement with " + endpoint + " in " + 2 * StorageService.RING_DELAY + "ms"); } if (gossiper.getEndpointStateForEndpoint(endpoint) == null) throw new TimeoutException("Node " + endpoint + " vanished while waiting for agreement"); logger.debug("schema for {} matches local schema", endpoint); return waited; } private void deliverHintsToEndpoint(InetAddress endpoint) { if (hintStore.isEmpty()) return; // nothing to do, don't confuse users by logging a no-op handoff // check if hints delivery has been paused if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); return; } logger.debug("Checking remote({}) schema before delivering hints", endpoint); try { waitForSchemaAgreement(endpoint); } catch (TimeoutException e) { return; } if (!FailureDetector.instance.isAlive(endpoint)) { logger.debug("Endpoint {} died before hint delivery, aborting", endpoint); return; } doDeliverHintsToEndpoint(endpoint); } /* * 1. Get the key of the endpoint we need to handoff * 2. For each column, deserialize the mutation and send it to the endpoint * 3. Delete the subcolumn if the write was successful * 4. Force a flush * 5. Do major compaction to clean up all deletes etc. */ private void doDeliverHintsToEndpoint(InetAddress endpoint) { // find the hints for the node using its token. UUID hostId = Gossiper.instance.getHostId(endpoint); logger.info("Started hinted handoff for host: {} with IP: {}", hostId, endpoint); final ByteBuffer hostIdBytes = ByteBuffer.wrap(UUIDGen.decompose(hostId)); DecoratedKey epkey = StorageService.getPartitioner().decorateKey(hostIdBytes); final AtomicInteger rowsReplayed = new AtomicInteger(0); Composite startColumn = Composites.EMPTY; int pageSize = calculatePageSize(); logger.debug("Using pageSize of {}", pageSize); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? 
Double.MAX_VALUE : throttleInKB * 1024); boolean finished = false; delivery: while (true) { long now = System.currentTimeMillis(); QueryFilter filter = QueryFilter.getSliceFilter(epkey, SystemKeyspace.HINTS_CF, startColumn, Composites.EMPTY, false, pageSize, now); ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000)); if (pagingFinished(hintsPage, startColumn)) { logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint); finished = true; break; } // check if node is still alive and we should continue delivery process if (!FailureDetector.instance.isAlive(endpoint)) { logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed); break; } List<WriteResponseHandler> responseHandlers = Lists.newArrayList(); for (final Cell hint : hintsPage) { // check if hints delivery has been paused during the process if (hintedHandOffPaused) { logger.debug("Hints delivery process is paused, aborting"); break delivery; } // Skip tombstones: // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond // in which the local deletion timestamp was generated on the last column in the old page, in which // case the hint will have no columns (since it's deleted) but will still be included in the resultset // since (even with gcgs=0) it's still a "relevant" tombstone. if (!hint.isLive()) continue; startColumn = hint.name(); int version = Int32Type.instance.compose(hint.name().get(1)); DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value())); Mutation mutation; try { mutation = Mutation.serializer.deserialize(in, version); } catch (UnknownColumnFamilyException e) { logger.debug("Skipping delivery of hint for deleted table", e); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } catch (IOException e) { throw new AssertionError(e); } for (UUID cfId : mutation.getColumnFamilyIds()) { if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId)) { logger.debug("Skipping delivery of hint for truncated table {}", cfId); mutation = mutation.without(cfId); } } if (mutation.isEmpty()) { deleteHint(hostIdBytes, hint.name(), hint.timestamp()); continue; } MessageOut<Mutation> message = mutation.createMessage(); rateLimiter.acquire(message.serializedSize(MessagingService.current_version)); Runnable callback = new Runnable() { public void run() { rowsReplayed.incrementAndGet(); deleteHint(hostIdBytes, hint.name(), hint.timestamp()); } }; WriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.SIMPLE, callback); MessagingService.instance().sendRR(message, endpoint, responseHandler, false); responseHandlers.add(responseHandler); } for (WriteResponseHandler handler : responseHandlers) { try { handler.get(); } catch (WriteTimeoutException e) { logger.info("Timed out replaying hints to {}; aborting ({} delivered)", endpoint, rowsReplayed); break delivery; } } } if (finished || rowsReplayed.get() >= DatabaseDescriptor.getTombstoneWarnThreshold()) { try { compact().get(); } catch (Exception e) { throw new RuntimeException(e); } } } // read less columns (mutations) per page if they are very large private int calculatePageSize() { int meanColumnCount = hintStore.getMeanColumns(); if (meanColumnCount <= 0) return PAGE_SIZE; int averageColumnSize = (int) (hintStore.getMeanRowSize() / meanColumnCount); if (averageColumnSize <= 0) return PAGE_SIZE; // page size of 1 does not allow actual paging b/c of 
>= behavior on startColumn return Math.max(2, Math.min(PAGE_SIZE, 4 * 1024 * 1024 / averageColumnSize)); } /** * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for * nodes which are never officially down/failed. */ private void scheduleAllDeliveries() { if (logger.isDebugEnabled()) logger.debug("Started scheduleAllDeliveries"); IPartitioner p = StorageService.getPartitioner(); RowPosition minPos = p.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos, p); IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of()); List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis()); for (Row row : rows) { UUID hostId = UUIDGen.getUUID(row.key.getKey()); InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId); // token may have since been removed (in which case we have just read back a tombstone) if (target != null) scheduleHintDelivery(target); } if (logger.isDebugEnabled()) logger.debug("Finished scheduleAllDeliveries"); } /* * This method is used to deliver hints to a particular endpoint. * When we learn that some endpoint is back up, we deliver the data * to it via an event-driven mechanism. */ public void scheduleHintDelivery(final InetAddress to) { // We should not deliver hints to the same host in 2 different threads if (!queuedDeliveries.add(to)) return; logger.debug("Scheduling delivery of Hints to {}", to); executor.execute(new Runnable() { public void run() { try { deliverHintsToEndpoint(to); } finally { queuedDeliveries.remove(to); } } }); } public void scheduleHintDelivery(String to) throws UnknownHostException { scheduleHintDelivery(InetAddress.getByName(to)); } public void pauseHintsDelivery(boolean b) { hintedHandOffPaused = b; } public List<String> listEndpointsPendingHints() { Token.TokenFactory tokenFactory = StorageService.getPartitioner().getTokenFactory(); // Extract the keys as strings to be reported. LinkedList<String> result = new LinkedList<String>(); for (Row row : getHintsSlice(1)) { if (row.cf != null) // ignore removed rows result.addFirst(tokenFactory.toString(row.key.getToken())); } return result; } private List<Row> getHintsSlice(int columnCount) { // Get count # of columns... SliceQueryFilter predicate = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, columnCount); // From keys "" to ""... IPartitioner<?> partitioner = StorageService.getPartitioner(); RowPosition minPos = partitioner.getMinimumToken().minKeyBound(); Range<RowPosition> range = new Range<RowPosition>(minPos, minPos); try { RangeSliceCommand cmd = new RangeSliceCommand(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF, System.currentTimeMillis(), predicate, range, null, LARGE_NUMBER); return StorageProxy.getRangeSlice(cmd, ConsistencyLevel.ONE); } catch (Exception e) { logger.info("HintsCF getEPPendingHints timed out."); throw new RuntimeException(e); } } }
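// Simplified model (hypothetical class using plain strings instead of cells; not part of the original
// file) of the paging contract above: each slice restarts at the last cell seen, *inclusive*, so a page
// containing only startColumn means no hints remain -- and a page size of 1 could never advance past the
// first cell, which is why calculatePageSize() returns at least 2.
class HintPagingSketch
{
    static java.util.SortedSet<String> page(java.util.SortedSet<String> cells, String start, int pageSize)
    {
        // tailSet is inclusive of 'start', mirroring the >= behavior on startColumn
        java.util.SortedSet<String> tail = start.isEmpty() ? cells : cells.tailSet(start);
        java.util.TreeSet<String> result = new java.util.TreeSet<String>();
        for (String cell : tail)
        {
            if (result.size() == pageSize)
                break;
            result.add(cell);
        }
        return result;
    }

    static boolean pagingFinished(java.util.SortedSet<String> page, String start)
    {
        // same shape as pagingFinished() above: empty page, or a lone repeat of the start cell
        return page.isEmpty() || (!start.isEmpty() && page.size() == 1 && page.contains(start));
    }

    public static void main(String[] args)
    {
        java.util.TreeSet<String> hints = new java.util.TreeSet<String>(java.util.Arrays.asList("a", "b", "c"));
        String start = "";
        while (true)
        {
            java.util.SortedSet<String> current = page(hints, start, 2);
            if (pagingFinished(current, start))
                break;
            start = current.last(); // resume from the last cell processed
        }
        System.out.println("finished at " + start); // "c": every hint was visited; with pageSize 1 we would stop at "a"
    }
}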
vast-engineering/cassandra
src/java/org/apache/cassandra/db/HintedHandOffManager.java
213,962
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.session; import com.datastax.oss.driver.api.core.AsyncAutoCloseable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.api.core.metrics.Metrics; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.LifecycleListener; import com.datastax.oss.driver.internal.core.metadata.DefaultNode; import com.datastax.oss.driver.internal.core.metadata.MetadataManager; import com.datastax.oss.driver.internal.core.metadata.MetadataManager.RefreshSchemaResult; import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; import com.datastax.oss.driver.internal.core.metadata.NodeStateManager; import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; import com.datastax.oss.driver.internal.core.pool.ChannelPool; import com.datastax.oss.driver.internal.core.util.Loggers; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import io.netty.util.concurrent.EventExecutor; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * The session implementation. 
* * <p>It maintains a {@link ChannelPool} to each node that the {@link LoadBalancingPolicy} set to a * non-ignored distance. It listens for distance events and node state events, in order to adjust * the pools accordingly. * * <p>It executes requests by: * * <ul> * <li>picking the appropriate processor to convert the request into a protocol message. * <li>getting a query plan from the load balancing policy * <li>trying to send the message on each pool, in the order of the query plan * </ul> */ @ThreadSafe public class DefaultSession implements CqlSession { private static final Logger LOG = LoggerFactory.getLogger(DefaultSession.class); private static final AtomicInteger INSTANCE_COUNT = new AtomicInteger(); public static CompletionStage<CqlSession> init( InternalDriverContext context, Set<EndPoint> contactPoints, CqlIdentifier keyspace) { return new DefaultSession(context, contactPoints).init(keyspace); } private final InternalDriverContext context; private final EventExecutor adminExecutor; private final String logPrefix; private final SingleThreaded singleThreaded; private final MetadataManager metadataManager; private final RequestProcessorRegistry processorRegistry; private final PoolManager poolManager; private final SessionMetricUpdater metricUpdater; private DefaultSession(InternalDriverContext context, Set<EndPoint> contactPoints) { int instanceCount = INSTANCE_COUNT.incrementAndGet(); int threshold = context.getConfig().getDefaultProfile().getInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD); LOG.debug( "Creating new session {} ({} live instances)", context.getSessionName(), instanceCount); if (threshold > 0 && instanceCount > threshold) { LOG.warn( "You have too many session instances: {} active, expected less than {} " + "(see '{}' in the configuration)", instanceCount, threshold, DefaultDriverOption.SESSION_LEAK_THRESHOLD.getPath()); } this.logPrefix = context.getSessionName(); this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); try { this.context = context; this.singleThreaded = new SingleThreaded(context, contactPoints); this.metadataManager = context.getMetadataManager(); this.processorRegistry = context.getRequestProcessorRegistry(); this.poolManager = context.getPoolManager(); this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); } catch (Throwable t) { LOG.debug( "Error creating session {} ({} live instances)", context.getSessionName(), INSTANCE_COUNT.decrementAndGet()); // Rethrow but make sure we release any resources allocated by Netty. At this stage there are // no scheduled tasks on the event loops so getNow() won't block. 
try { context.getNettyOptions().onClose().getNow(); } catch (Throwable suppressed) { Loggers.warnWithException( LOG, "[{}] Error while closing NettyOptions " + "(suppressed because we're already handling an init failure)", logPrefix, suppressed); } throw t; } } private CompletionStage<CqlSession> init(CqlIdentifier keyspace) { RunOrSchedule.on(adminExecutor, () -> singleThreaded.init(keyspace)); return singleThreaded.initFuture; } @NonNull @Override public String getName() { return context.getSessionName(); } @NonNull @Override public Metadata getMetadata() { return metadataManager.getMetadata(); } @Override public boolean isSchemaMetadataEnabled() { return metadataManager.isSchemaEnabled(); } @NonNull @Override public CompletionStage<Metadata> setSchemaMetadataEnabled(@Nullable Boolean newValue) { return metadataManager.setSchemaEnabled(newValue); } @NonNull @Override public CompletionStage<Metadata> refreshSchemaAsync() { return metadataManager .refreshSchema(null, true, true) .thenApply(RefreshSchemaResult::getMetadata); } @NonNull @Override public CompletionStage<Boolean> checkSchemaAgreementAsync() { return context.getTopologyMonitor().checkSchemaAgreement(); } @NonNull @Override public DriverContext getContext() { return context; } @NonNull @Override public Optional<CqlIdentifier> getKeyspace() { return Optional.ofNullable(poolManager.getKeyspace()); } @NonNull @Override public Optional<Metrics> getMetrics() { return context.getMetricsFactory().getMetrics(); } /** * <b>INTERNAL USE ONLY</b> -- switches the session to a new keyspace. * * <p>This is called by the driver when a {@code USE} query is successfully executed through the * session. Calling it from anywhere else is highly discouraged, as an invalid keyspace would * wreak havoc (close all connections and make the session unusable). */ @NonNull public CompletionStage<Void> setKeyspace(@NonNull CqlIdentifier newKeyspace) { return poolManager.setKeyspace(newKeyspace); } @NonNull public Map<Node, ChannelPool> getPools() { return poolManager.getPools(); } @Nullable @Override public <RequestT extends Request, ResultT> ResultT execute( @NonNull RequestT request, @NonNull GenericType<ResultT> resultType) { RequestProcessor<RequestT, ResultT> processor = processorRegistry.processorFor(request, resultType); return isClosed() ? 
processor.newFailure(new IllegalStateException("Session is closed")) : processor.process(request, this, context, logPrefix); } @Nullable public DriverChannel getChannel(@NonNull Node node, @NonNull String logPrefix) { ChannelPool pool = poolManager.getPools().get(node); if (pool == null) { LOG.trace("[{}] No pool to {}, skipping", logPrefix, node); return null; } else { DriverChannel channel = pool.next(); if (channel == null) { LOG.trace("[{}] Pool returned no channel for {}, skipping", logPrefix, node); return null; } else if (channel.closeFuture().isDone()) { LOG.trace("[{}] Pool returned closed connection to {}, skipping", logPrefix, node); return null; } else { return channel; } } } @NonNull public ConcurrentMap<ByteBuffer, RepreparePayload> getRepreparePayloads() { return poolManager.getRepreparePayloads(); } @NonNull public SessionMetricUpdater getMetricUpdater() { return metricUpdater; } @NonNull @Override public CompletionStage<Void> closeFuture() { return singleThreaded.closeFuture; } @NonNull @Override public CompletionStage<Void> closeAsync() { return closeSafely(singleThreaded::close); } @NonNull @Override public CompletionStage<Void> forceCloseAsync() { return closeSafely(singleThreaded::forceClose); } private CompletionStage<Void> closeSafely(Runnable action) { // Protect against getting closed twice: with the default NettyOptions, closing shuts down // adminExecutor, so we don't want to call RunOrSchedule the second time. if (!singleThreaded.closeFuture.isDone()) { try { RunOrSchedule.on(adminExecutor, action); } catch (RejectedExecutionException e) { // Checking the future is racy, there is still a tiny window that could get us here. LOG.warn( "[{}] Ignoring terminated executor. " + "This generally happens if you close the session multiple times concurrently, " + "and can be safely ignored if the close() call returns normally.", logPrefix, e); } } return singleThreaded.closeFuture; } private class SingleThreaded { private final InternalDriverContext context; private final Set<EndPoint> initialContactPoints; private final NodeStateManager nodeStateManager; private final SchemaListenerNotifier schemaListenerNotifier; private final CompletableFuture<CqlSession> initFuture = new CompletableFuture<>(); private boolean initWasCalled; private final CompletableFuture<Void> closeFuture = new CompletableFuture<>(); private boolean closeWasCalled; private boolean forceCloseWasCalled; private SingleThreaded(InternalDriverContext context, Set<EndPoint> contactPoints) { this.context = context; this.nodeStateManager = new NodeStateManager(context); this.initialContactPoints = contactPoints; this.schemaListenerNotifier = new SchemaListenerNotifier( context.getSchemaChangeListener(), context.getEventBus(), adminExecutor); context .getEventBus() .register( NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onNodeStateChanged)); CompletableFutures.propagateCancellation( this.initFuture, context.getTopologyMonitor().initFuture()); } private void init(CqlIdentifier keyspace) { assert adminExecutor.inEventLoop(); if (initWasCalled) { return; } initWasCalled = true; LOG.debug("[{}] Starting initialization", logPrefix); // Eagerly fetch user-facing policies right now, no need to start opening connections if // something is wrong in the configuration. 
try { context.getLoadBalancingPolicies(); context.getRetryPolicies(); context.getSpeculativeExecutionPolicies(); context.getReconnectionPolicy(); context.getAddressTranslator(); context.getNodeStateListener(); context.getSchemaChangeListener(); context.getRequestTracker(); context.getRequestThrottler(); context.getAuthProvider(); context.getSslHandlerFactory(); context.getTimestampGenerator(); } catch (Throwable error) { RunOrSchedule.on(adminExecutor, this::closePolicies); context .getNettyOptions() .onClose() .addListener( f -> { if (!f.isSuccess()) { Loggers.warnWithException( LOG, "[{}] Error while closing NettyOptions " + "(suppressed because we're already handling an init failure)", logPrefix, f.cause()); } initFuture.completeExceptionally(error); }); LOG.debug( "Error initializing new session {} ({} live instances)", context.getSessionName(), INSTANCE_COUNT.decrementAndGet()); return; } closeFuture.whenComplete( (v, error) -> LOG.debug( "Closing session {} ({} live instances)", context.getSessionName(), INSTANCE_COUNT.decrementAndGet())); MetadataManager metadataManager = context.getMetadataManager(); metadataManager.addContactPoints(initialContactPoints); context .getTopologyMonitor() .init() .thenCompose(v -> metadataManager.refreshNodes()) .thenCompose(v -> checkProtocolVersion()) .thenCompose(v -> initialSchemaRefresh()) .thenCompose(v -> initializePools(keyspace)) .whenComplete( (v, error) -> { if (error == null) { LOG.debug("[{}] Initialization complete, ready", logPrefix); notifyListeners(); initFuture.complete(DefaultSession.this); } else { LOG.debug("[{}] Initialization failed, force closing", logPrefix, error); forceCloseAsync() .whenComplete( (v1, error1) -> { if (error1 != null) { error.addSuppressed(error1); } initFuture.completeExceptionally(error); }); } }); } private CompletionStage<Void> checkProtocolVersion() { try { boolean protocolWasForced = context.getConfig().getDefaultProfile().isDefined(DefaultDriverOption.PROTOCOL_VERSION); if (!protocolWasForced) { ProtocolVersion currentVersion = context.getProtocolVersion(); ProtocolVersion bestVersion = context .getProtocolVersionRegistry() .highestCommon(metadataManager.getMetadata().getNodes().values()); if (bestVersion.getCode() < currentVersion.getCode()) { LOG.info( "[{}] Negotiated protocol version {} for the initial contact point, " + "but other nodes only support {}, downgrading", logPrefix, currentVersion, bestVersion); context.getChannelFactory().setProtocolVersion(bestVersion); // Note that, with the default topology monitor, the control connection is already // connected with currentVersion at this point. This doesn't really matter because none // of the control queries use any protocol-dependent feature. // Keep going as-is, the control connection might switch to the "correct" version later // if it reconnects to another node. 
} else if (bestVersion.getCode() > currentVersion.getCode()) { LOG.info( "[{}] Negotiated protocol version {} for the initial contact point, " + "but cluster seems to support {}, keeping the negotiated version", logPrefix, currentVersion, bestVersion); } } return CompletableFuture.completedFuture(null); } catch (Throwable throwable) { return CompletableFutures.failedFuture(throwable); } } private CompletionStage<RefreshSchemaResult> initialSchemaRefresh() { try { return metadataManager .refreshSchema(null, false, true) .exceptionally( error -> { Loggers.warnWithException( LOG, "[{}] Unexpected error while refreshing schema during initialization, " + "proceeding without schema metadata", logPrefix, error); return null; }); } catch (Throwable throwable) { return CompletableFutures.failedFuture(throwable); } } private CompletionStage<Void> initializePools(CqlIdentifier keyspace) { try { nodeStateManager.markInitialized(); context.getLoadBalancingPolicyWrapper().init(); context.getConfigLoader().onDriverInit(context); return poolManager.init(keyspace); } catch (Throwable throwable) { return CompletableFutures.failedFuture(throwable); } } private void notifyListeners() { for (LifecycleListener lifecycleListener : context.getLifecycleListeners()) { try { lifecycleListener.onSessionReady(); } catch (Throwable t) { Loggers.warnWithException( LOG, "[{}] Error while notifying {} of session ready", logPrefix, lifecycleListener, t); } } try { context.getNodeStateListener().onSessionReady(DefaultSession.this); } catch (Throwable t) { Loggers.warnWithException( LOG, "[{}] Error while notifying {} of session ready", logPrefix, context.getNodeStateListener(), t); } try { schemaListenerNotifier.onSessionReady(DefaultSession.this); } catch (Throwable t) { Loggers.warnWithException( LOG, "[{}] Error while notifying {} of session ready", logPrefix, schemaListenerNotifier, t); } try { context.getRequestTracker().onSessionReady(DefaultSession.this); } catch (Throwable t) { Loggers.warnWithException( LOG, "[{}] Error while notifying {} of session ready", logPrefix, context.getRequestTracker(), t); } } private void onNodeStateChanged(NodeStateEvent event) { assert adminExecutor.inEventLoop(); if (event.newState == null) { context.getNodeStateListener().onRemove(event.node); } else if (event.oldState == null && event.newState == NodeState.UNKNOWN) { context.getNodeStateListener().onAdd(event.node); } else if (event.newState == NodeState.UP) { context.getNodeStateListener().onUp(event.node); } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { context.getNodeStateListener().onDown(event.node); } } private void close() { assert adminExecutor.inEventLoop(); if (closeWasCalled) { return; } closeWasCalled = true; LOG.debug("[{}] Starting shutdown", logPrefix); closePolicies(); // clear metrics to prevent memory leak for (Node n : metadataManager.getMetadata().getNodes().values()) { NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); if (updater != null) updater.clearMetrics(); } if (metricUpdater != null) metricUpdater.clearMetrics(); List<CompletionStage<Void>> childrenCloseStages = new ArrayList<>(); for (AsyncAutoCloseable closeable : internalComponentsToClose()) { childrenCloseStages.add(closeable.closeAsync()); } CompletableFutures.whenAllDone( childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); } private void forceClose() { assert adminExecutor.inEventLoop(); if (forceCloseWasCalled) { return; } forceCloseWasCalled = true; LOG.debug( 
"[{}] Starting forced shutdown (was {}closed before)", logPrefix, (closeWasCalled ? "" : "not ")); // clear metrics to prevent memory leak for (Node n : metadataManager.getMetadata().getNodes().values()) { NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); if (updater != null) updater.clearMetrics(); } if (metricUpdater != null) metricUpdater.clearMetrics(); if (closeWasCalled) { // onChildrenClosed has already been scheduled for (AsyncAutoCloseable closeable : internalComponentsToClose()) { closeable.forceCloseAsync(); } } else { closePolicies(); List<CompletionStage<Void>> childrenCloseStages = new ArrayList<>(); for (AsyncAutoCloseable closeable : internalComponentsToClose()) { childrenCloseStages.add(closeable.forceCloseAsync()); } CompletableFutures.whenAllDone( childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); } } private void onChildrenClosed(List<CompletionStage<Void>> childrenCloseStages) { assert adminExecutor.inEventLoop(); for (CompletionStage<Void> stage : childrenCloseStages) { warnIfFailed(stage); } context .getNettyOptions() .onClose() .addListener( f -> { if (!f.isSuccess()) { closeFuture.completeExceptionally(f.cause()); } else { closeFuture.complete(null); } }); } private void warnIfFailed(CompletionStage<Void> stage) { CompletableFuture<Void> future = stage.toCompletableFuture(); assert future.isDone(); if (future.isCompletedExceptionally()) { Loggers.warnWithException( LOG, "[{}] Unexpected error while closing", logPrefix, CompletableFutures.getFailed(future)); } } private void closePolicies() { // This is a bit tricky: we might be closing the session because of an initialization error. // This error might have been triggered by a policy failing to initialize. If we try to access // the policy here to close it, it will fail again. So make sure we ignore that error and // proceed to close the other policies. 
List<AutoCloseable> policies = new ArrayList<>(); for (Supplier<AutoCloseable> supplier : ImmutableList.<Supplier<AutoCloseable>>of( context::getReconnectionPolicy, context::getLoadBalancingPolicyWrapper, context::getAddressTranslator, context::getConfigLoader, context::getNodeStateListener, context::getSchemaChangeListener, context::getRequestTracker, context::getRequestThrottler, context::getTimestampGenerator)) { try { policies.add(supplier.get()); } catch (Throwable t) { // Assume the policy had failed to initialize, and we don't need to close it => ignore } } try { context.getAuthProvider().ifPresent(policies::add); } catch (Throwable t) { // ignore } try { context.getSslHandlerFactory().ifPresent(policies::add); } catch (Throwable t) { // ignore } try { policies.addAll(context.getRetryPolicies().values()); } catch (Throwable t) { // ignore } try { policies.addAll(context.getSpeculativeExecutionPolicies().values()); } catch (Throwable t) { // ignore } policies.addAll(context.getLifecycleListeners()); // Finally we have a list of all the policies that initialized successfully, close them: for (AutoCloseable policy : policies) { try { policy.close(); } catch (Throwable t) { Loggers.warnWithException(LOG, "[{}] Error while closing {}", logPrefix, policy, t); } } } private List<AsyncAutoCloseable> internalComponentsToClose() { ImmutableList.Builder<AsyncAutoCloseable> components = ImmutableList.<AsyncAutoCloseable>builder() .add(poolManager, nodeStateManager, metadataManager); // Same as closePolicies(): make sure we don't trigger errors by accessing context components // that had failed to initialize: try { components.add(context.getTopologyMonitor()); } catch (Throwable t) { // ignore } try { components.add(context.getControlConnection()); } catch (Throwable t) { // ignore } return components.build(); } } }
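// Standalone sketch (hypothetical class, not part of the original file) of the two-phase shutdown
// pattern closePolicies() uses above: first probe each supplier and keep only the components that
// actually initialized, then close the survivors, so one component that failed to initialize cannot
// prevent cleanup of the others.
class SafeCloseSketch {
  static void closeAll(java.util.List<java.util.function.Supplier<AutoCloseable>> suppliers) {
    java.util.List<AutoCloseable> initialized = new java.util.ArrayList<>();
    for (java.util.function.Supplier<AutoCloseable> supplier : suppliers) {
      try {
        initialized.add(supplier.get());
      } catch (Throwable t) {
        // the component never initialized, so there is nothing to close -- ignore, as above
      }
    }
    for (AutoCloseable component : initialized) {
      try {
        component.close();
      } catch (Throwable t) {
        System.err.println("Error while closing " + component + ": " + t);
      }
    }
  }

  public static void main(String[] args) {
    java.util.List<java.util.function.Supplier<AutoCloseable>> suppliers = new java.util.ArrayList<>();
    suppliers.add(() -> () -> System.out.println("closed A"));
    suppliers.add(() -> { throw new IllegalStateException("B never initialized"); });
    suppliers.add(() -> () -> System.out.println("closed C"));
    closeAll(suppliers); // prints "closed A" then "closed C"; B's failure is swallowed in the probe phase
  }
}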
apache/cassandra-java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java
213,963
/* * Copyright (C) 2012-2014 DataStax Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.driver.core; import java.io.Closeable; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.*; import java.util.Map.Entry; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Predicates; import com.google.common.collect.*; import com.google.common.util.concurrent.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.datastax.driver.core.policies.*; /** * Information and known state of a Cassandra cluster. * <p> * This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: * <pre> * Cluster cluster = Cluster.builder().addContactPoint("192.168.0.1").build(); * Session session = cluster.connect("db1"); * * for (Row row : session.execute("SELECT * FROM table1")) * // do something ... * </pre> * <p> * A cluster object maintains a permanent connection to one of the cluster nodes * which it uses solely to maintain information on the state and current * topology of the cluster. Using the connection, the driver will discover all * the nodes currently in the cluster as well as new nodes joining the cluster * subsequently. */ public class Cluster implements Closeable { private static final Logger logger = LoggerFactory.getLogger(Cluster.class); @VisibleForTesting static final int NEW_NODE_DELAY_SECONDS = SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); private static final int NON_BLOCKING_EXECUTOR_SIZE = SystemProperties.getInt("com.datastax.driver.NON_BLOCKING_EXECUTOR_SIZE", Runtime.getRuntime().availableProcessors()); private static final ResourceBundle driverProperties = ResourceBundle.getBundle("com.datastax.driver.core.Driver"); // Some per-JVM number that allows generating unique cluster names when // multiple Cluster instances are created in the same JVM. private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); private static final int DEFAULT_THREAD_KEEP_ALIVE = 30; private static final int NOTIF_LOCK_TIMEOUT_SECONDS = SystemProperties.getInt("com.datastax.driver.NOTIF_LOCK_TIMEOUT_SECONDS", 60); final Manager manager; /** * Constructs a new Cluster instance. * <p> * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking * easier or to "intercept" its method calls. Most users shouldn't extend this class, however, and * should prefer either using the {@link #builder} or calling {@link #buildFrom} with a custom * Initializer. 
* * @param name the name to use for the cluster (this is not the Cassandra cluster name, see {@link #getClusterName}). * @param contactPoints the list of contact points to use for the new cluster. * @param configuration the configuration for the new cluster. */ protected Cluster(String name, List<InetSocketAddress> contactPoints, Configuration configuration) { this(name, contactPoints, configuration, Collections.<Host.StateListener>emptySet()); } /** * Constructs a new Cluster instance. * <p> * This constructor is mainly exposed so Cluster can be sub-classed as a means to make testing/mocking * easier or to "intercept" its method calls. Most users shouldn't extend this class, however, and * should prefer using the {@link #builder}. * * @param initializer the initializer to use. * @see #buildFrom */ protected Cluster(Initializer initializer) { this(initializer.getClusterName(), checkNotEmpty(initializer.getContactPoints()), initializer.getConfiguration(), initializer.getInitialListeners()); } private static List<InetSocketAddress> checkNotEmpty(List<InetSocketAddress> contactPoints) { if (contactPoints.isEmpty()) throw new IllegalArgumentException("Cannot build a cluster without contact points"); return contactPoints; } private Cluster(String name, List<InetSocketAddress> contactPoints, Configuration configuration, Collection<Host.StateListener> listeners) { this.manager = new Manager(name, contactPoints, configuration, listeners); } /** * Initialize this Cluster instance. * * This method creates an initial connection to one of the contact points * used to construct the {@code Cluster} instance. That connection is then * used to populate the cluster {@link Metadata}. * <p> * Calling this method is optional in the sense that any call to one of the * {@code connect} methods of this object will automatically trigger a call * to this method beforehand. It is thus only useful to call this method if * for some reason you want to populate the metadata (or test that at least * one contact point can be reached) without creating a first {@code * Session}. * <p> * Please note that this method only creates one control connection for * gathering cluster metadata. In particular, it doesn't create any connection pools. * Those are created when a new {@code Session} is created through * {@code connect}. * <p> * This method has no effect if the cluster is already initialized. * * @return this {@code Cluster} object. * * @throws NoHostAvailableException if no host amongst the contact points * can be reached. * @throws AuthenticationException if an authentication error occurs * while contacting the initial contact points. * @throws IllegalStateException if the Cluster was closed prior to calling * this method. This can occur either directly (through {@link #close()} or * {@link #closeAsync()}), or as a result of an error while initializing the * Cluster. */ public Cluster init() { this.manager.init(); return this; } /** * Build a new cluster based on the provided initializer. * <p> * Note that for building a cluster programmatically, Cluster.Builder * provides a slightly less verbose shortcut with {@link Builder#build}. * <p> * Also note that all the contact points provided by {@code * initializer} must share the same port. * * @param initializer the Cluster.Initializer to use * @return the newly created Cluster instance * * @throws IllegalArgumentException if the list of contact points provided * by {@code initializer} is empty or if not all those contact points have the same port. 
*/ public static Cluster buildFrom(Initializer initializer) { return new Cluster(initializer); } /** * Creates a new {@link Cluster.Builder} instance. * <p> * This is a convenience method for {@code new Cluster.Builder()}. * * @return the new cluster builder. */ public static Cluster.Builder builder() { return new Cluster.Builder(); } /** * Returns the current version of the driver. * <p> * This is intended for products that wrap or extend the driver, as a way to check * compatibility if end-users override the driver version in their application. * * @return the version. */ public static String getDriverVersion() { return driverProperties.getString("driver.version"); } /** * Creates a new session on this cluster but does not initialize it. * <p> * Because this method does not perform any initialization, it cannot fail. * The initialization of the session (the connection of the Session to the * Cassandra nodes) will occur if either the {@link Session#init} method is * called explicitly, or whenever the returned session object is used. * <p> * Once a session returned by this method gets initialized (see above), it * will be set to no keyspace. If you want to set such a session to a * keyspace, you will have to explicitly execute a 'USE mykeyspace' query. * <p> * Note that if you do not particularly need to defer initialization, it is * simpler to use one of the {@code connect()} methods of this class. * * @return a new, non-initialized session on this cluster. */ public Session newSession() { return manager.newSession(); } /** * Creates a new session on this cluster and initializes it. * <p> * Note that this method will initialize the newly created session, trying * to connect to the Cassandra nodes before returning. If you only want to * create a Session object without initializing it right away, see * {@link #newSession}. * * @return a new session on this cluster set to no keyspace. * * @throws NoHostAvailableException if the Cluster has not been initialized * yet ({@link #init} has not been called and this is the first connect call) * and no host amongst the contact points can be reached. * @throws AuthenticationException if an authentication error occurs while * contacting the initial contact points. * @throws IllegalStateException if the Cluster was closed prior to calling * this method. This can occur either directly (through {@link #close()} or * {@link #closeAsync()}), or as a result of an error while initializing the * Cluster. */ public Session connect() { init(); Session session = manager.newSession(); session.init(); return session; } /** * Creates a new session on this cluster, initializes it and sets the * keyspace to the provided one. * <p> * Note that this method will initialize the newly created session, trying * to connect to the Cassandra nodes before returning. If you only want to * create a Session object without initializing it right away, see * {@link #newSession}. * * @param keyspace The name of the keyspace to use for the created * {@code Session}. * @return a new session on this cluster set to keyspace * {@code keyspaceName}. * * @throws NoHostAvailableException if the Cluster has not been initialized * yet ({@link #init} has not been called and this is the first connect call) * and no host amongst the contact points can be reached, or if no host can * be contacted to set the {@code keyspace}. * @throws AuthenticationException if an authentication error occurs while * contacting the initial contact points. 
     *
     * @param keyspace The name of the keyspace to use for the created
     * {@code Session}.
     * @return a new session on this cluster set to keyspace
     * {@code keyspace}.
     *
     * @throws NoHostAvailableException if the Cluster has not been initialized
     * yet ({@link #init} has not been called and this is the first connect call)
     * and no host amongst the contact points can be reached, or if no host can
     * be contacted to set the {@code keyspace}.
     * @throws AuthenticationException if an authentication error occurs while
     * contacting the initial contact points.
     * @throws IllegalStateException if the Cluster was closed prior to calling
     * this method. This can occur either directly (through {@link #close()} or
     * {@link #closeAsync()}), or as a result of an error while initializing the
     * Cluster.
     */
    public Session connect(String keyspace) {
        long timeout = getConfiguration().getSocketOptions().getConnectTimeoutMillis();
        Session session = connect();
        try {
            ResultSetFuture future = session.executeAsync("USE " + keyspace);
            // Note: using the connection timeout isn't perfectly correct, we should probably change that someday
            Uninterruptibles.getUninterruptibly(future, timeout, TimeUnit.MILLISECONDS);
            return session;
        } catch (TimeoutException e) {
            throw new DriverInternalError(String.format("No responses after %d milliseconds while setting current keyspace. This should not happen, unless you have set up a very low connection timeout.", timeout));
        } catch (ExecutionException e) {
            throw DefaultResultSetFuture.extractCauseFromExecutionException(e);
        } catch (RuntimeException e) {
            session.close();
            throw e;
        }
    }

    /**
     * The name of this cluster object.
     * <p>
     * Note that this is not the Cassandra cluster name, but rather a name
     * assigned to this Cluster object. Currently, that name is only used
     * for one purpose: to distinguish exposed JMX metrics when multiple
     * Cluster instances live in the same JVM (which should be rare in the first
     * place). That name can be set at Cluster building time (through
     * {@link Builder#withClusterName} for instance) but will default to a
     * name like {@code cluster1} where each Cluster instance in the same JVM
     * will have a different number.
     *
     * @return the name for this cluster instance.
     */
    public String getClusterName() {
        return manager.clusterName;
    }

    /**
     * Returns read-only metadata on the connected cluster.
     * <p>
     * This includes the known nodes with their status as seen by the driver,
     * as well as the schema definitions. Since this returns metadata on the
     * connected cluster, this method may trigger the creation of a connection
     * if none has been established yet (neither {@code init()} nor {@code connect()}
     * has been called yet).
     *
     * @return the cluster metadata.
     *
     * @throws NoHostAvailableException if the Cluster has not been initialized yet
     * and no host amongst the contact points can be reached.
     * @throws AuthenticationException if an authentication error occurs
     * while contacting the initial contact points.
     * @throws IllegalStateException if the Cluster was closed prior to calling
     * this method. This can occur either directly (through {@link #close()} or
     * {@link #closeAsync()}), or as a result of an error while initializing the
     * Cluster.
     */
    public Metadata getMetadata() {
        manager.init();
        return manager.metadata;
    }

    /**
     * The cluster configuration.
     *
     * @return the cluster configuration.
     */
    public Configuration getConfiguration() {
        return manager.configuration;
    }

    /**
     * The cluster metrics.
     *
     * @return the cluster metrics, or {@code null} if metrics collection has
     * been disabled (that is if {@link Configuration#getMetricsOptions}
     * returns {@code null}).
     */
    public Metrics getMetrics() {
        return manager.metrics;
    }

    /**
     * Registers the provided listener to be notified on hosts
     * up/down/added/removed events.
     * <p>
     * Registering the same listener multiple times is a no-op.
     * <p>
     * Note that while {@link LoadBalancingPolicy} implements
     * {@code Host.StateListener}, the configured load balancing policy does not
     * need to (and should not) be registered through this method to
     * receive host-related events.
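     * <p>
     * A brief sketch of registering a listener (the print statements stand in
     * for real handling logic):
     * <pre>{@code
     * cluster.register(new Host.StateListener() {
     *     public void onAdd(Host host)       { System.out.println("added: " + host); }
     *     public void onUp(Host host)        { System.out.println("up: " + host); }
     *     public void onSuspected(Host host) { System.out.println("suspected: " + host); }
     *     public void onDown(Host host)      { System.out.println("down: " + host); }
     *     public void onRemove(Host host)    { System.out.println("removed: " + host); }
     * });
     * }</pre>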
     *
     * @param listener the new {@link Host.StateListener} to register.
     * @return this {@code Cluster} object.
     */
    public Cluster register(Host.StateListener listener) {
        manager.listeners.add(listener);
        return this;
    }

    /**
     * Unregisters the provided listener from being notified on hosts events.
     * <p>
     * This method is a no-op if {@code listener} hadn't previously been
     * registered against this Cluster.
     *
     * @param listener the {@link Host.StateListener} to unregister.
     * @return this {@code Cluster} object.
     */
    public Cluster unregister(Host.StateListener listener) {
        manager.listeners.remove(listener);
        return this;
    }

    /**
     * Registers the provided tracker to be updated with host read
     * latencies.
     * <p>
     * Registering the same tracker multiple times is a no-op.
     * <p>
     * Be wary that the registered tracker's {@code update} method will be called
     * very frequently (at the end of every query to a Cassandra host) and
     * should thus not be costly.
     * <p>
     * The main use case for a {@code LatencyTracker} is so
     * {@link LoadBalancingPolicy} can implement latency awareness.
     * Typically, {@link LatencyAwarePolicy} registers its own internal
     * {@code LatencyTracker} (automatically, you don't have to call this
     * method directly).
     *
     * @param tracker the new {@link LatencyTracker} to register.
     * @return this {@code Cluster} object.
     */
    public Cluster register(LatencyTracker tracker) {
        manager.trackers.add(tracker);
        return this;
    }

    /**
     * Unregisters the provided latency tracker from being updated
     * with host read latencies.
     * <p>
     * This method is a no-op if {@code tracker} hadn't previously been
     * registered against this Cluster.
     *
     * @param tracker the {@link LatencyTracker} to unregister.
     * @return this {@code Cluster} object.
     */
    public Cluster unregister(LatencyTracker tracker) {
        manager.trackers.remove(tracker);
        return this;
    }

    /**
     * Initiates a shutdown of this cluster instance.
     * <p>
     * This method is asynchronous and returns a future on the completion
     * of the shutdown process. As soon as the cluster is shut down, no
     * new requests will be accepted, but already submitted queries are
     * allowed to complete. This method closes all connections from all
     * sessions and reclaims all resources used by this Cluster
     * instance.
     * <p>
     * If for some reason you wish to expedite this process, the
     * {@link CloseFuture#force} method can be called on the result future.
     * <p>
     * This method has no particular effect if the cluster was already closed
     * (in which case the returned future will return immediately).
     *
     * @return a future on the completion of the shutdown process.
     */
    public CloseFuture closeAsync() {
        return manager.close();
    }

    /**
     * Initiates a shutdown of this cluster instance and blocks until
     * that shutdown completes.
     * <p>
     * This method is a shortcut for {@code closeAsync().get()}.
     */
    public void close() {
        try {
            closeAsync().get();
        } catch (ExecutionException e) {
            throw DefaultResultSetFuture.extractCauseFromExecutionException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Whether this Cluster instance has been closed.
     * <p>
     * Note that this method returns true as soon as one of the close methods
     * ({@link #closeAsync} or {@link #close}) has been called; it does not guarantee
     * that the closing is done. If you want to guarantee that the closing is done,
     * you can call {@code close()} and wait until it returns (or call the get method
     * on {@code closeAsync()} with a very short timeout and check that this doesn't time out).
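     * <p>
     * For example, a shutdown sketch along those lines (the timeout value is
     * arbitrary):
     * <pre>{@code
     * CloseFuture shutdown = cluster.closeAsync();
     * try {
     *     shutdown.get(10, TimeUnit.SECONDS);
     * } catch (TimeoutException e) {
     *     shutdown.force(); // took too long, expedite by closing connections now
     * } // InterruptedException and ExecutionException handling omitted for brevity
     * }</pre>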
     *
     * @return {@code true} if this Cluster instance has been closed, {@code false}
     * otherwise.
     */
    public boolean isClosed() {
        return manager.closeFuture.get() != null;
    }

    /**
     * Initializer for {@link Cluster} instances.
     * <p>
     * If you want to create a new {@code Cluster} instance programmatically,
     * then it is advised to use {@link Cluster.Builder} which can be obtained from the
     * {@link Cluster#builder} method.
     * <p>
     * But it is also possible to implement a custom {@code Initializer} that
     * retrieves the initialization information from a web service or a configuration file.
     */
    public interface Initializer {

        /**
         * An optional name for the created cluster.
         * <p>
         * Such a name is optional (a default name will be created otherwise) and is currently
         * only used for JMX reporting of metrics. See {@link Cluster#getClusterName} for more
         * information.
         *
         * @return the name for the created cluster or {@code null} to use an automatically
         * generated name.
         */
        public String getClusterName();

        /**
         * Returns the initial Cassandra hosts to connect to.
         *
         * @return the initial Cassandra contact points. See {@link Builder#addContactPoint}
         * for more details on contact points.
         */
        public List<InetSocketAddress> getContactPoints();

        /**
         * The configuration to use for the new cluster.
         * <p>
         * Note that some configuration can be modified after the cluster
         * initialization but some others cannot. In particular, the ones that
         * cannot be changed afterwards include:
         * <ul>
         *   <li>the port used to connect to Cassandra nodes (see {@link ProtocolOptions}).</li>
         *   <li>the policies used (see {@link Policies}).</li>
         *   <li>the authentication info provided (see {@link Configuration}).</li>
         *   <li>whether metrics are enabled (see {@link Configuration}).</li>
         * </ul>
         *
         * @return the configuration to use for the new cluster.
         */
        public Configuration getConfiguration();

        /**
         * Optional listeners to register against the newly created cluster.
         * <p>
         * Note that contrary to listeners registered post Cluster creation,
         * the listeners returned by this method will see {@link Host.StateListener#onAdd}
         * events for the initial contact points.
         *
         * @return a possibly empty collection of {@code Host.StateListener} to register
         * against the newly created cluster.
         */
        public Collection<Host.StateListener> getInitialListeners();
    }

    /**
     * Helper class to build {@link Cluster} instances.
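     * <p>
     * A typical usage sketch (addresses and credentials are placeholders):
     * <pre>{@code
     * Cluster cluster = Cluster.builder()
     *     .addContactPoints("192.168.0.1", "192.168.0.2")
     *     .withPort(9042)
     *     .withCredentials("user", "password")
     *     .build();
     * }</pre>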
     */
    public static class Builder implements Initializer {

        private String clusterName;
        private final List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
        private final List<InetAddress> rawAddresses = new ArrayList<InetAddress>();
        private int port = ProtocolOptions.DEFAULT_PORT;
        private int maxSchemaAgreementWaitSeconds = ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS;
        private int protocolVersion = -1;
        private AuthProvider authProvider = AuthProvider.NONE;

        private LoadBalancingPolicy loadBalancingPolicy;
        private ReconnectionPolicy reconnectionPolicy;
        private RetryPolicy retryPolicy;
        private AddressTranslater addressTranslater;

        private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE;
        private SSLOptions sslOptions = null;
        private boolean metricsEnabled = true;
        private boolean jmxEnabled = true;

        private PoolingOptions poolingOptions;
        private SocketOptions socketOptions;
        private QueryOptions queryOptions;

        private Collection<Host.StateListener> listeners;

        @Override
        public String getClusterName() {
            return clusterName;
        }

        @Override
        public List<InetSocketAddress> getContactPoints() {
            if (rawAddresses.isEmpty())
                return addresses;

            List<InetSocketAddress> allAddresses = new ArrayList<InetSocketAddress>(addresses);
            for (InetAddress address : rawAddresses)
                allAddresses.add(new InetSocketAddress(address, port));
            return allAddresses;
        }

        /**
         * An optional name for the created cluster.
         * <p>
         * Note: this is not related to the Cassandra cluster name (though you
         * are free to provide the same name). See {@link Cluster#getClusterName} for
         * details.
         * <p>
         * If you use this method and create more than one Cluster instance in the
         * same JVM (which should be avoided unless you need to connect to multiple
         * Cassandra clusters), you should make sure each Cluster instance gets a
         * unique name or you may have a problem with JMX reporting.
         *
         * @param name the cluster name to use for the created Cluster instance.
         * @return this Builder.
         */
        public Builder withClusterName(String name) {
            this.clusterName = name;
            return this;
        }

        /**
         * The port to use to connect to the Cassandra host.
         * <p>
         * If not set through this method, the default port (9042) will be used
         * instead.
         *
         * @param port the port to set.
         * @return this Builder.
         */
        public Builder withPort(int port) {
            this.port = port;
            return this;
        }

        /**
         * Sets the maximum time to wait for schema agreement before returning from a DDL query.
         * <p>
         * If not set through this method, the default value (10 seconds) will be used.
         *
         * @param maxSchemaAgreementWaitSeconds the new value to set.
         * @return this Builder.
         *
         * @throws IllegalArgumentException if the provided value is zero or less.
         */
        public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSeconds) {
            if (maxSchemaAgreementWaitSeconds <= 0)
                throw new IllegalArgumentException("Max schema agreement wait must be greater than zero");
            this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds;
            return this;
        }

        /**
         * The native protocol version to use.
         * <p>
         * The driver supports both versions 1 and 2 of the native protocol. Version 2
         * of the protocol has more features and should be preferred, but it is only
         * supported by Cassandra 2.0 and above, so you will have to use version 1 with
         * Cassandra 1.2 nodes.
         * <p>
         * By default, the driver will "auto-detect" which protocol version it can use
         * when connecting to the first node.
         * More precisely, it will try version 2
         * first and will fall back to version 1 if it is not supported by that first
         * node it connects to. Please note that once the version is "auto-detected",
         * it won't change: if the first node the driver connects to is a Cassandra 1.2
         * node and auto-detection is used (the default), then the native protocol
         * version 1 will be used for the lifetime of the Cluster instance.
         * <p>
         * This method allows forcing the use of a particular protocol version. Forcing
         * version 1 is always fine since all Cassandra versions (at least all those
         * supporting the native protocol in the first place) support it. However,
         * please note that a number of features of the driver won't be available if that
         * version of the protocol is in use, including result set paging,
         * {@link BatchStatement}, executing a non-prepared query with binary values
         * ({@link Session#execute(String, Object...)}), ... (those methods will throw
         * an UnsupportedFeatureException). Using protocol version 1 should thus
         * only be considered when using Cassandra 1.2, until nodes have been upgraded
         * to Cassandra 2.0.
         * <p>
         * If version 2 of the protocol is used, then Cassandra 1.2 nodes will be ignored
         * (the driver won't connect to them).
         * <p>
         * The default behavior (auto-detection) is fine in almost all cases, but you may
         * want to force a particular version if you have a Cassandra cluster with mixed
         * 1.2/2.0 nodes (i.e. during a Cassandra upgrade).
         *
         * @param version the native protocol version to use. The versions supported by
         * this driver are versions 1 and 2. Negative values are also supported to trigger
         * auto-detection (see above) but this is the default (so you don't have to call
         * this method for that behavior).
         * @return this Builder.
         *
         * @throws IllegalArgumentException if {@code version} is neither 1, 2 nor a
         * negative value.
         */
        public Builder withProtocolVersion(int version) {
            if (version >= 0 && version != 1 && version != 2)
                throw new IllegalArgumentException(String.format("Unsupported protocol version %d; valid values are 1, 2 or negative (for auto-detect).", version));

            this.protocolVersion = version;
            return this;
        }

        /**
         * Adds a contact point.
         * <p>
         * Contact points are addresses of Cassandra nodes that the driver uses
         * to discover the cluster topology. Only one contact point is required
         * (the driver will retrieve the address of the other nodes
         * automatically), but it is usually a good idea to provide more than
         * one contact point, because if that single contact point is unavailable,
         * the driver cannot initialize itself correctly.
         * <p>
         * Note that by default (that is, unless you use the {@link #withLoadBalancingPolicy}
         * method of this builder), the first successfully contacted host will be used
         * to define the local data center for the client. It follows that if you are
         * running Cassandra in a multiple data center setting, it is a good idea to
         * only provide contact points that are in the same data center as the client,
         * or to manually provide a load balancing policy that suits your needs.
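         * <p>
         * For example (the addresses below are illustrative):
         * <pre>{@code
         * Cluster.Builder builder = Cluster.builder()
         *     .addContactPoint("10.0.0.1")
         *     .addContactPoint("10.0.0.2");
         * }</pre>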
         *
         * @param address the address of the node to connect to
         * @return this Builder.
         *
         * @throws IllegalArgumentException if no IP address for {@code address}
         * could be found
         * @throws SecurityException if a security manager is present and
         * permission to resolve the host name is denied.
         */
        public Builder addContactPoint(String address) {
            // We explicitly check for nulls because InetAddress.getByName() will happily
            // accept it and use localhost (while a null here most likely means a user error,
            // not "connect to localhost")
            if (address == null)
                throw new NullPointerException();

            try {
                this.rawAddresses.add(InetAddress.getByName(address));
                return this;
            } catch (UnknownHostException e) {
                throw new IllegalArgumentException(e.getMessage());
            }
        }

        /**
         * Adds contact points.
         * <p>
         * See {@link Builder#addContactPoint} for more details on contact
         * points.
         *
         * @param addresses addresses of the nodes to add as contact points.
         * @return this Builder.
         *
         * @throws IllegalArgumentException if no IP address for at least one
         * of {@code addresses} could be found
         * @throws SecurityException if a security manager is present and
         * permission to resolve the host name is denied.
         *
         * @see Builder#addContactPoint
         */
        public Builder addContactPoints(String... addresses) {
            for (String address : addresses)
                addContactPoint(address);
            return this;
        }

        /**
         * Adds contact points.
         * <p>
         * See {@link Builder#addContactPoint} for more details on contact
         * points.
         *
         * @param addresses addresses of the nodes to add as contact points.
         * @return this Builder.
         *
         * @see Builder#addContactPoint
         */
        public Builder addContactPoints(InetAddress... addresses) {
            Collections.addAll(this.rawAddresses, addresses);
            return this;
        }

        /**
         * Adds contact points.
         * <p>
         * See {@link Builder#addContactPoint} for more details on contact
         * points.
         *
         * @param addresses addresses of the nodes to add as contact points
         * @return this Builder
         *
         * @see Builder#addContactPoint
         */
        public Builder addContactPoints(Collection<InetAddress> addresses) {
            this.rawAddresses.addAll(addresses);
            return this;
        }

        /**
         * Adds contact points.
         * <p>
         * See {@link Builder#addContactPoint} for more details on contact
         * points. Contrary to the other {@code addContactPoints} methods, this method
         * allows providing a different port for each contact point. Since Cassandra
         * nodes must always all listen on the same port, this is rarely what you
         * want, and most users should prefer the other {@code addContactPoints} methods to
         * this one. However, this can be useful if the Cassandra nodes are behind
         * a router and are not accessed directly. Note that if you are in this
         * situation (Cassandra nodes are behind a router, not directly accessible),
         * you almost surely want to provide a specific {@code AddressTranslater}
         * (through {@link #withAddressTranslater}) to translate actual Cassandra node
         * addresses to the addresses the driver should use, otherwise the driver
         * will not be able to auto-detect new nodes (and will generally not function
         * optimally).
         *
         * @param addresses addresses of the nodes to add as contact points
         * @return this Builder
         *
         * @see Builder#addContactPoint
         */
        public Builder addContactPointsWithPorts(Collection<InetSocketAddress> addresses) {
            this.addresses.addAll(addresses);
            return this;
        }

        /**
         * Configures the load balancing policy to use for the new cluster.
         * <p>
         * If no load balancing policy is set through this method,
         * {@link Policies#defaultLoadBalancingPolicy} will be used instead.
         *
         * @param policy the load balancing policy to use.
         * @return this Builder.
         */
        public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) {
            this.loadBalancingPolicy = policy;
            return this;
        }

        /**
         * Configures the reconnection policy to use for the new cluster.
         * <p>
         * If no reconnection policy is set through this method,
         * {@link Policies#DEFAULT_RECONNECTION_POLICY} will be used instead.
         *
         * @param policy the reconnection policy to use.
         * @return this Builder.
         */
        public Builder withReconnectionPolicy(ReconnectionPolicy policy) {
            this.reconnectionPolicy = policy;
            return this;
        }

        /**
         * Configures the retry policy to use for the new cluster.
         * <p>
         * If no retry policy is set through this method,
         * {@link Policies#DEFAULT_RETRY_POLICY} will be used instead.
         *
         * @param policy the retry policy to use.
         * @return this Builder.
         */
        public Builder withRetryPolicy(RetryPolicy policy) {
            this.retryPolicy = policy;
            return this;
        }

        /**
         * Configures the address translater to use for the new cluster.
         * <p>
         * See {@link AddressTranslater} for more detail on address translation,
         * but the default translater, {@link IdentityTranslater}, should be
         * correct in most cases. If unsure, stick to the default.
         *
         * @param translater the translater to use.
         * @return this Builder.
         */
        public Builder withAddressTranslater(AddressTranslater translater) {
            this.addressTranslater = translater;
            return this;
        }

        /**
         * Uses the provided credentials when connecting to Cassandra hosts.
         * <p>
         * This should be used if the Cassandra cluster has been configured to
         * use the {@code PasswordAuthenticator}. If the default {@code
         * AllowAllAuthenticator} is used instead, using this method has no
         * effect.
         *
         * @param username the username to use to login to Cassandra hosts.
         * @param password the password corresponding to {@code username}.
         * @return this Builder.
         */
        public Builder withCredentials(String username, String password) {
            this.authProvider = new PlainTextAuthProvider(username, password);
            return this;
        }

        /**
         * Uses the specified AuthProvider when connecting to Cassandra
         * hosts.
         * <p>
         * Use this method when a custom authentication scheme is in place.
         * You shouldn't call both this method and {@code withCredentials}
         * on the same {@code Builder} instance as one will supersede the
         * other.
         *
         * @param authProvider the {@link AuthProvider} to use to login to
         * Cassandra hosts.
         * @return this Builder.
         */
        public Builder withAuthProvider(AuthProvider authProvider) {
            this.authProvider = authProvider;
            return this;
        }

        /**
         * Sets the compression to use for the transport.
         *
         * @param compression the compression to set.
         * @return this Builder.
         *
         * @see ProtocolOptions.Compression
         */
        public Builder withCompression(ProtocolOptions.Compression compression) {
            this.compression = compression;
            return this;
        }

        /**
         * Disables metrics collection for the created cluster (metrics are
         * enabled by default otherwise).
         *
         * @return this builder.
         */
        public Builder withoutMetrics() {
            this.metricsEnabled = false;
            return this;
        }

        /**
         * Enables the use of SSL for the created {@code Cluster}.
         * <p>
         * Calling this method will use default SSL options (see {@link SSLOptions#SSLOptions()}).
         * This is thus a shortcut for {@code withSSL(new SSLOptions())}.
         * <p>
         * Note that if SSL is enabled, the driver will not connect to any
         * Cassandra nodes that don't have SSL enabled and it is strongly
         * advised to enable SSL on every Cassandra node if you plan on using
         * SSL in the driver.
         *
         * @return this builder.
         */
        public Builder withSSL() {
            this.sslOptions = new SSLOptions();
            return this;
        }

        /**
         * Enables the use of SSL for the created {@code Cluster} using the provided options.
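         * <p>
         * A rough sketch, assuming an application-specific {@code SSLContext}
         * is already available (the cipher suite array shown is a placeholder
         * you would choose yourself):
         * <pre>{@code
         * SSLContext sslContext = ...; // configured with your truststore/keystore
         * String[] cipherSuites = { "TLS_RSA_WITH_AES_128_CBC_SHA" };
         * builder.withSSL(new SSLOptions(sslContext, cipherSuites));
         * }</pre>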
         *
         * @param sslOptions the SSL options to use.
         *
         * @return this builder.
         */
        public Builder withSSL(SSLOptions sslOptions) {
            this.sslOptions = sslOptions;
            return this;
        }

        /**
         * Registers the provided listeners in the newly created cluster.
         * <p>
         * Note: repeated calls to this method will override the previous ones.
         *
         * @param listeners the listeners to register.
         * @return this builder.
         */
        public Builder withInitialListeners(Collection<Host.StateListener> listeners) {
            this.listeners = listeners;
            return this;
        }

        /**
         * Disables JMX reporting of the metrics.
         * <p>
         * JMX reporting is enabled by default (see {@link Metrics}) but can be
         * disabled using this option. If metrics are disabled, this is a
         * no-op.
         *
         * @return this builder.
         */
        public Builder withoutJMXReporting() {
            this.jmxEnabled = false;
            return this;
        }

        /**
         * Sets the PoolingOptions to use for the newly created Cluster.
         * <p>
         * If no pooling options are set through this method, default pooling
         * options will be used.
         *
         * @param options the pooling options to use.
         * @return this builder.
         */
        public Builder withPoolingOptions(PoolingOptions options) {
            this.poolingOptions = options;
            return this;
        }

        /**
         * Sets the SocketOptions to use for the newly created Cluster.
         * <p>
         * If no socket options are set through this method, default socket
         * options will be used.
         *
         * @param options the socket options to use.
         * @return this builder.
         */
        public Builder withSocketOptions(SocketOptions options) {
            this.socketOptions = options;
            return this;
        }

        /**
         * Sets the QueryOptions to use for the newly created Cluster.
         * <p>
         * If no query options are set through this method, default query
         * options will be used.
         *
         * @param options the query options to use.
         * @return this builder.
         */
        public Builder withQueryOptions(QueryOptions options) {
            this.queryOptions = options;
            return this;
        }

        /**
         * The configuration that will be used for the new cluster.
         * <p>
         * You <b>should not</b> modify this object directly because changes made
         * to the returned object may not be used by the cluster being built.
         * Instead, you should use the other methods of this {@code Builder}.
         *
         * @return the configuration to use for the new cluster.
         */
        @Override
        public Configuration getConfiguration() {
            Policies policies = new Policies(
                loadBalancingPolicy == null ? Policies.defaultLoadBalancingPolicy() : loadBalancingPolicy,
                reconnectionPolicy == null ? Policies.defaultReconnectionPolicy() : reconnectionPolicy,
                retryPolicy == null ? Policies.defaultRetryPolicy() : retryPolicy,
                addressTranslater == null ? Policies.defaultAddressTranslater() : addressTranslater
            );
            return new Configuration(policies,
                                     new ProtocolOptions(port, protocolVersion, maxSchemaAgreementWaitSeconds, sslOptions, authProvider).setCompression(compression),
                                     poolingOptions == null ? new PoolingOptions() : poolingOptions,
                                     socketOptions == null ? new SocketOptions() : socketOptions,
                                     metricsEnabled ? new MetricsOptions(jmxEnabled) : null,
                                     queryOptions == null ? new QueryOptions() : queryOptions);
        }

        @Override
        public Collection<Host.StateListener> getInitialListeners() {
            return listeners == null ? Collections.<Host.StateListener>emptySet() : listeners;
        }

        /**
         * Builds the cluster with the configured set of initial contact points
         * and policies.
         * <p>
         * This is a convenience method for {@code Cluster.buildFrom(this)}.
         *
         * @return the newly built Cluster instance.
         */
        public Cluster build() {
            return Cluster.buildFrom(this);
        }
    }

    private static ThreadFactory threadFactory(String nameFormat) {
        return new ThreadFactoryBuilder().setNameFormat(nameFormat).build();
    }

    static long timeSince(long startNanos, TimeUnit destUnit) {
        return destUnit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
    }

    private static String generateClusterName() {
        return "cluster" + CLUSTER_ID.incrementAndGet();
    }

    private static ListeningExecutorService makeExecutor(int threads, String name) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(threads,
                                                             threads,
                                                             DEFAULT_THREAD_KEEP_ALIVE,
                                                             TimeUnit.SECONDS,
                                                             new LinkedBlockingQueue<Runnable>(),
                                                             threadFactory(name));
        executor.allowCoreThreadTimeOut(true);
        return MoreExecutors.listeningDecorator(executor);
    }

    /**
     * The sessions and hosts managed by this Cluster instance.
     * <p>
     * Note: the reason we create a Manager object separate from Cluster is
     * that Manager is not publicly visible. For instance, we wouldn't want
     * users to be able to call the {@link #onUp} and {@link #onDown} methods.
     */
    class Manager implements Connection.DefaultResponseHandler {

        final String clusterName;
        private boolean isInit;
        private volatile boolean isFullyInit;

        // Initial contact points
        final List<InetSocketAddress> contactPoints;
        final Set<SessionManager> sessions = new CopyOnWriteArraySet<SessionManager>();

        final Metadata metadata;
        final Configuration configuration;
        final Metrics metrics;

        final Connection.Factory connectionFactory;
        final ControlConnection controlConnection;

        final ConvictionPolicy.Factory convictionPolicyFactory = new ConvictionPolicy.Simple.Factory();

        final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, threadFactory("Reconnection-%d"));
        // scheduledTasksExecutor is used to process C* notifications. So having it mono-threaded ensures notifications are
        // applied in the order received.
        final ScheduledExecutorService scheduledTasksExecutor = Executors.newScheduledThreadPool(1, threadFactory("Scheduled Tasks-%d"));

        // Executor used for tasks that shouldn't be executed on an IO thread. Used for short-lived, generally non-blocking tasks.
        final ListeningExecutorService executor;

        // An executor for tasks that might block some time, like creating new connections, but are generally not too critical.
        final ListeningExecutorService blockingExecutor;

        final ConnectionReaper reaper;

        final AtomicReference<CloseFuture> closeFuture = new AtomicReference<CloseFuture>();

        // All the queries that have been prepared (we keep them so we can re-prepare them when a node fails or a
        // new one joins the cluster).
        // Note: we could move this down to the session level, but since prepared statements are global to a node,
        // this would yield a slightly less clear behavior.
        final ConcurrentMap<MD5Digest, PreparedStatement> preparedQueries = new MapMaker().weakValues().makeMap();

        final Set<Host.StateListener> listeners;
        final Set<LatencyTracker> trackers = new CopyOnWriteArraySet<LatencyTracker>();

        private Manager(String clusterName, List<InetSocketAddress> contactPoints, Configuration configuration, Collection<Host.StateListener> listeners) {
            logger.debug("Starting new cluster with contact points " + contactPoints);

            this.clusterName = clusterName == null ?
                               generateClusterName() : clusterName;
            this.configuration = configuration;
            this.configuration.register(this);

            this.executor = makeExecutor(NON_BLOCKING_EXECUTOR_SIZE, "Cassandra Java Driver worker-%d");
            this.blockingExecutor = makeExecutor(2, "Cassandra Java Driver blocking tasks worker-%d");
            this.reaper = new ConnectionReaper();

            this.metadata = new Metadata(this);
            this.contactPoints = contactPoints;
            this.connectionFactory = new Connection.Factory(this, configuration);
            this.controlConnection = new ControlConnection(this);
            this.metrics = configuration.getMetricsOptions() == null ? null : new Metrics(this);
            this.listeners = new CopyOnWriteArraySet<Host.StateListener>(listeners);
        }

        // Initialization is not too performance-intensive and in practice there shouldn't be contention
        // on it, so synchronized is good enough.
        synchronized void init() {
            if (isClosed())
                throw new IllegalStateException("Can't use this Cluster instance because it was previously closed");
            if (isInit)
                return;
            isInit = true;

            for (InetSocketAddress address : contactPoints) {
                // We don't want to signal -- call onAdd() -- because nothing is ready
                // yet (load balancing policy, control connection, ...). All we want is to
                // create the Host object so we can initialize the control connection.
                metadata.add(address);
            }

            try {
                while (true) {
                    try {
                        // At this stage, metadata.allHosts() only contains the contact points, that's what we want to pass to LBP.init().
                        // But the control connection will initialize first and discover more hosts, so make a copy.
                        Set<Host> contactPointHosts = Sets.newHashSet(metadata.allHosts());

                        controlConnection.connect();
                        if (connectionFactory.protocolVersion < 0)
                            connectionFactory.protocolVersion = 2;

                        // The control connection can mark hosts down if it failed to connect to them; separate them out
                        Set<Host> downContactPointHosts = Sets.newHashSet();
                        for (Host host : contactPointHosts)
                            if (host.state == Host.State.DOWN)
                                downContactPointHosts.add(host);
                        contactPointHosts.removeAll(downContactPointHosts);

                        // Now that the control connection is ready, we have all the information we need about the nodes (datacenter,
                        // rack...) to initialize the load balancing policy
                        loadBalancingPolicy().init(Cluster.this, contactPointHosts);

                        for (Host host : downContactPointHosts) {
                            loadBalancingPolicy().onDown(host);
                            for (Host.StateListener listener : listeners)
                                listener.onDown(host);
                        }

                        for (Host host : metadata.allHosts()) {
                            // If the host is down at this stage, it's a contact point that the control connection failed to reach.
                            // Reconnection attempts are already scheduled, and the LBP and listeners have been notified above.
                            if (host.state == Host.State.DOWN)
                                continue;

                            // Otherwise, we want to do the equivalent of onAdd(). But since we know for sure that no sessions or prepared
                            // statements exist at this point, we can skip some of the steps (plus this avoids scheduling concurrent pool
                            // creations if a session is created right after this method returns).
                            logger.info("New Cassandra host {} added", host);

                            if (connectionFactory.protocolVersion == 2 && !supportsProtocolV2(host)) {
                                logUnsupportedVersionProtocol(host);
                                continue;
                            }

                            if (!contactPointHosts.contains(host))
                                loadBalancingPolicy().onAdd(host);

                            host.setUp();

                            for (Host.StateListener listener : listeners)
                                listener.onAdd(host);
                        }
                        isFullyInit = true;

                        return;
                    } catch (UnsupportedProtocolVersionException e) {
                        assert connectionFactory.protocolVersion < 1;
                        // For now, all C* versions support protocol version 1
                        if (e.versionUnsupported <= 1)
                            throw new DriverInternalError("Got a node that doesn't even support protocol version 1, this makes no sense", e);
                        logger.debug("{}: retrying with version {}", e.getMessage(), e.versionUnsupported - 1);
                        connectionFactory.protocolVersion = e.versionUnsupported - 1;
                    }
                }
            } catch (NoHostAvailableException e) {
                close();
                throw e;
            }
        }

        int protocolVersion() {
            return connectionFactory.protocolVersion;
        }

        Cluster getCluster() {
            return Cluster.this;
        }

        LoadBalancingPolicy loadBalancingPolicy() {
            return configuration.getPolicies().getLoadBalancingPolicy();
        }

        ReconnectionPolicy reconnectionPolicy() {
            return configuration.getPolicies().getReconnectionPolicy();
        }

        InetSocketAddress translateAddress(InetAddress address) {
            InetSocketAddress sa = new InetSocketAddress(address, connectionFactory.getPort());
            InetSocketAddress translated = configuration.getPolicies().getAddressTranslater().translate(sa);
            return translated == null ? sa : translated;
        }

        private Session newSession() {
            SessionManager session = new SessionManager(Cluster.this);
            sessions.add(session);
            return session;
        }

        boolean removeSession(Session session) {
            return sessions.remove(session);
        }

        void reportLatency(Host host, long latencyNanos) {
            for (LatencyTracker tracker : trackers) {
                tracker.update(host, latencyNanos);
            }
        }

        boolean isClosed() {
            return closeFuture.get() != null;
        }

        private CloseFuture close() {

            CloseFuture future = closeFuture.get();
            if (future != null)
                return future;

            logger.debug("Shutting down");

            // If we're shutting down, there is no point in waiting on scheduled reconnections, nor on notification
            // deliveries or blocking tasks, so we use shutdownNow
            shutdownNow(reconnectionExecutor);
            shutdownNow(scheduledTasksExecutor);
            shutdownNow(blockingExecutor);

            // but for the worker executor, we want to let submitted tasks finish unless the shutdown is forced.
            executor.shutdown();

            // We also close the metrics
            if (metrics != null)
                metrics.shutdown();

            // And the load balancing policy
            LoadBalancingPolicy loadBalancingPolicy = loadBalancingPolicy();
            if (loadBalancingPolicy instanceof CloseableLoadBalancingPolicy)
                ((CloseableLoadBalancingPolicy)loadBalancingPolicy).close();

            // Then we shutdown all connections
            List<CloseFuture> futures = new ArrayList<CloseFuture>(sessions.size() + 1);
            futures.add(controlConnection.closeAsync());
            for (Session session : sessions)
                futures.add(session.closeAsync());

            future = new ClusterCloseFuture(futures);

            // The rest will happen asynchronously, when all connections are successfully closed
            return closeFuture.compareAndSet(null, future) ?
                   future : closeFuture.get(); // We raced, it's ok, return the future that was actually set
        }

        private void shutdownNow(ExecutorService executor) {
            List<Runnable> pendingTasks = executor.shutdownNow();
            // If some tasks were submitted to this executor but not yet commenced, make sure the corresponding futures complete
            for (Runnable pendingTask : pendingTasks) {
                if (pendingTask instanceof FutureTask<?>)
                    ((FutureTask<?>)pendingTask).cancel(false);
            }
        }

        void logUnsupportedVersionProtocol(Host host) {
            logger.warn("Detected added or restarted Cassandra host {} but ignoring it since it does not support the version 2 of the native "
                      + "protocol which is currently in use. If you want to force the use of the version 1 of the native protocol, use "
                      + "Cluster.Builder#withProtocolVersion() when creating the Cluster instance.", host);
        }

        void logClusterNameMismatch(Host host, String expectedClusterName, String actualClusterName) {
            logger.warn("Detected added or restarted Cassandra host {} but ignoring it since its cluster name '{}' does not match the one "
                      + "currently known ({})", host, actualClusterName, expectedClusterName);
        }

        public ListenableFuture<?> triggerOnUp(final Host host) {
            return executor.submit(new ExceptionCatchingRunnable() {
                @Override
                public void runMayThrow() throws InterruptedException, ExecutionException {
                    onUp(host);
                }
            });
        }

        private void onUp(final Host host) throws InterruptedException, ExecutionException {
            // Note that in general we could parallelize the pool creation on
            // each session, but we shouldn't use 'executor' since we're most
            // probably already running on it (and so we could deadlock). Use the
            // blockingExecutor instead; that's what it's for.
            onUp(host, blockingExecutor);
        }

        // Use triggerOnUp unless you're sure you want to run this on the current thread.
        private void onUp(final Host host, ListeningExecutorService poolCreationExecutor) throws InterruptedException, ExecutionException {
            logger.debug("Host {} is UP", host);

            if (isClosed())
                return;

            if (connectionFactory.protocolVersion == 2 && !supportsProtocolV2(host)) {
                logUnsupportedVersionProtocol(host);
                return;
            }

            boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            if (!locked) {
                logger.warn("Could not acquire notifications lock within {} seconds, ignoring UP notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host);
                return;
            }
            try {
                // We don't want to use the public Host.isUp() as this would make us skip the rest for suspected hosts
                if (host.state == Host.State.UP)
                    return;

                // If there is a reconnection attempt scheduled for that node, cancel it
                Future<?> scheduledAttempt = host.reconnectionAttempt.getAndSet(null);
                if (scheduledAttempt != null) {
                    logger.debug("Cancelling reconnection attempt since node is UP");
                    scheduledAttempt.cancel(false);
                }

                try {
                    prepareAllQueries(host);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    // Don't propagate because we don't want to prevent other listeners from running
                } catch (UnsupportedProtocolVersionException e) {
                    logUnsupportedVersionProtocol(host);
                    return;
                } catch (ClusterNameMismatchException e) {
                    logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName);
                    return;
                }

                // Session#onUp() expects the load balancing policy to have been updated first, so that
                // Host distances are up to date. This means the policy could return the node before the
                // new pools have been created.
                // This is harmless if there is no prior pool since RequestHandler
                // will ignore the node, but we do want to make sure there is no prior pool so we don't
                // query from a pool we will shut down right away.
                for (SessionManager s : sessions)
                    s.removePool(host);
                loadBalancingPolicy().onUp(host);
                controlConnection.onUp(host);

                logger.trace("Adding/renewing host pools for newly UP host {}", host);

                List<ListenableFuture<Boolean>> futures = new ArrayList<ListenableFuture<Boolean>>(sessions.size());
                for (SessionManager s : sessions)
                    futures.add(s.forceRenewPool(host, poolCreationExecutor));

                // Only mark the node up once all sessions have re-added their pool (if the load-balancing
                // policy says it should), so that Host.isUp() doesn't return true before we're reconnected
                // to the node.
                ListenableFuture<List<Boolean>> f = Futures.allAsList(futures);
                Futures.addCallback(f, new FutureCallback<List<Boolean>>() {
                    public void onSuccess(List<Boolean> poolCreationResults) {
                        // If any of the creations failed, it will have signaled a connection failure
                        // which will trigger a reconnection to the node. So don't bother marking UP.
                        if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) {
                            logger.debug("Connection pool cannot be created, not marking {} UP", host);
                            return;
                        }

                        host.setUp();

                        for (Host.StateListener listener : listeners)
                            listener.onUp(host);
                    }

                    public void onFailure(Throwable t) {
                        // That future is not really supposed to throw unexpected exceptions
                        if (!(t instanceof InterruptedException))
                            logger.error("Unexpected error while marking node UP: while this shouldn't happen, this shouldn't be critical", t);
                    }
                });

                f.get();

                // Now, check if there aren't pools to create/remove following the addition.
                // We do that now only so that it's not called before we've set the node up.
                for (SessionManager s : sessions)
                    s.updateCreatedPools(blockingExecutor);

            } finally {
                host.notificationsLock.unlock();
            }
        }

        public ListenableFuture<?> triggerOnDown(final Host host) {
            return triggerOnDown(host, false);
        }

        public ListenableFuture<?> triggerOnDown(final Host host, final boolean isHostAddition) {
            return executor.submit(new ExceptionCatchingRunnable() {
                @Override
                public void runMayThrow() throws InterruptedException, ExecutionException {
                    onDown(host, isHostAddition, false);
                }
            });
        }

        public void onSuspected(final Host host) {
            logger.debug("Host {} is Suspected", host);

            if (isClosed())
                return;

            // We shouldn't really get there for IGNORED nodes since we shouldn't have
            // connected to one in the first place, but if we ever do, simply hand it
            // off to onDown
            if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) {
                triggerOnDown(host);
                return;
            }

            // We need to
            //  1) mark the node suspected if no one has beaten us to it
            //  2) start the reconnection attempt
            //  3) inform the load balancing policy
            // We must do 2) before 3) as we want the policy to be able to rely
            // on the reconnection attempt future.
            //
            // If multiple threads get there, we want to start reconnection attempts only
            // once, but we also don't want said threads to return from this method before
            // the load balancing policy has been informed (otherwise those threads won't
            // consider the host suspected but simply ignore it). So we synchronize.
            synchronized (host) {
                // If we've already marked the node down/suspected, ignore this
                if (!host.setSuspected() || host.reconnectionAttempt.get() != null)
                    return;

                // Start the initial reconnection attempt
                host.initialReconnectionAttempt.set(executor.submit(new ExceptionCatchingRunnable() {
                    @Override
                    public void runMayThrow() throws InterruptedException, ExecutionException {
                        boolean success;
                        try {
                            // TODO: as for the ReconnectionHandler, we could avoid "wasting" this connection
                            connectionFactory.open(host).closeAsync();
                            // Note that we want to do the pool creation on this thread because we want that
                            // when onUp returns, the host is ready for querying
                            onUp(host, MoreExecutors.newDirectExecutorService());
                            // If one of the connections in onUp failed, it signaled the error and triggered onDown,
                            // but onDown aborted because this reconnection attempt was in progress (JAVA-577).
                            // Test the state now to check that onUp succeeded (we know it's up-to-date since onUp was
                            // executed synchronously).
                            success = host.state == Host.State.UP;
                        } catch (Exception e) {
                            success = false;
                        }
                        if (!success)
                            onDown(host, false, true);
                    }
                }));

                loadBalancingPolicy().onSuspected(host);
            }

            controlConnection.onSuspected(host);
            for (SessionManager s : sessions)
                s.onSuspected(host);

            for (Host.StateListener listener : listeners)
                listener.onSuspected(host);
        }

        // Use triggerOnDown unless you're sure you want to run this on the current thread.
        private void onDown(final Host host, final boolean isHostAddition, final boolean isSuspectedVerification) throws InterruptedException, ExecutionException {
            logger.debug("Host {} is DOWN", host);

            if (isClosed())
                return;

            boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            if (!locked) {
                logger.warn("Could not acquire notifications lock within {} seconds, ignoring DOWN notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host);
                return;
            }
            try {
                // If we're SUSPECT and not the task validating the suspicion, then some other task is
                // already checking to verify if the node is really down (or if it's simply that the
                // connections were broken). So just skip this in that case.
                if (!isSuspectedVerification && host.state == Host.State.SUSPECT) {
                    logger.debug("Aborting onDown because a reconnection is running on SUSPECT host {}", host);
                    return;
                }

                // Note: we don't want to skip that method if !host.isUp() because we set isUp
                // late in onUp, and so we can rely on isUp if there is an error during onUp.
                // But if there is a reconnection attempt in progress already, then we know
                // we've already gone through that method since the last successful onUp(), so
                // we're good skipping it.
                if (host.reconnectionAttempt.get() != null) {
                    logger.debug("Aborting onDown because a reconnection is running on DOWN host {}", host);
                    return;
                }

                // Remember if we care about this node at all. We must call this before
                // we've signalled the load balancing policy, since most policies will always
                // IGNORE down nodes anyway.
                HostDistance distance = loadBalancingPolicy().distance(host);

                boolean wasUp = host.isUp();
                host.setDown();

                loadBalancingPolicy().onDown(host);
                controlConnection.onDown(host);
                for (SessionManager s : sessions)
                    s.onDown(host);

                // Contrary to the other actions of this method, there is no reason to notify listeners
                // unless the host was UP at the beginning of this function, since even if onUp fails
                // mid-method, listeners won't have been notified of the UP.
                if (wasUp) {
                    for (Host.StateListener listener : listeners)
                        listener.onDown(host);
                }

                // Don't start a reconnection if we ignore the node anyway (JAVA-314)
                if (distance == HostDistance.IGNORED)
                    return;

                // Note: we basically waste the first successful reconnection, but it's probably not a big deal
                logger.debug("{} is down, scheduling connection retries", host);
                startPeriodicReconnectionAttempt(host, isHostAddition);
            } finally {
                host.notificationsLock.unlock();
            }
        }

        void startPeriodicReconnectionAttempt(final Host host, final boolean isHostAddition) {
            new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt) {

                protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException {
                    return connectionFactory.open(host);
                }

                protected void onReconnection(Connection connection) {
                    // We don't use that first connection so close it.
                    // TODO: this is a bit wasteful, we should consider passing it to onAdd/onUp so
                    // we use it for the first HostConnectionPool created
                    connection.closeAsync();

                    // Make sure we have up-to-date info on that host before adding it (so we typically
                    // catch that an upgraded node uses a new Cassandra version).
                    if (controlConnection.refreshNodeInfo(host)) {
                        logger.debug("Successful reconnection to {}, setting host UP", host);
                        try {
                            if (isHostAddition)
                                onAdd(host);
                            else
                                onUp(host);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                        } catch (Exception e) {
                            logger.error("Unexpected error while setting node up", e);
                        }
                    } else {
                        logger.debug("Not enough info for {}, ignoring host", host);
                    }
                }

                protected boolean onConnectionException(ConnectionException e, long nextDelayMs) {
                    if (logger.isDebugEnabled())
                        logger.debug("Failed reconnection to {} ({}), scheduling retry in {} milliseconds", host, e.getMessage(), nextDelayMs);
                    return true;
                }

                protected boolean onUnknownException(Exception e, long nextDelayMs) {
                    logger.error(String.format("Unknown error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e);
                    return true;
                }

                protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) {
                    logger.error(String.format("Authentication error during reconnection to %s, scheduling retry in %d milliseconds", host, nextDelayMs), e);
                    return true;
                }

            }.start();
        }

        void startSingleReconnectionAttempt(final Host host) {
            if (isClosed() || host.isUp())
                return;

            logger.debug("Scheduling one-time reconnection to {}", host);

            // Setting an initial delay of 0 to start immediately, and all the exception handlers return false to prevent further attempts
            new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicy().newSchedule(), host.reconnectionAttempt, 0) {

                protected Connection tryReconnect() throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException {
                    return connectionFactory.open(host);
                }

                protected void onReconnection(Connection connection) {
                    // We don't use that first connection so close it.
                    // TODO: this is a bit wasteful, we should consider passing it to onAdd/onUp so
                    // we use it for the first HostConnectionPool created
                    connection.closeAsync();

                    // Make sure we have up-to-date info on that host before adding it (so we typically
                    // catch that an upgraded node uses a new Cassandra version).
                    if (controlConnection.refreshNodeInfo(host)) {
                        logger.debug("Successful reconnection to {}, setting host UP", host);
                        try {
                            onUp(host);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                        } catch (Exception e) {
                            logger.error("Unexpected error while setting node up", e);
                        }
                    } else {
                        logger.debug("Not enough info for {}, ignoring host", host);
                    }
                }

                protected boolean onConnectionException(ConnectionException e, long nextDelayMs) {
                    if (logger.isDebugEnabled())
                        logger.debug("Failed one-time reconnection to {} ({})", host, e.getMessage());
                    return false;
                }

                protected boolean onUnknownException(Exception e, long nextDelayMs) {
                    logger.error(String.format("Unknown error during one-time reconnection to %s", host), e);
                    return false;
                }

                protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) {
                    logger.error(String.format("Authentication error during one-time reconnection to %s", host), e);
                    return false;
                }
            }.start();
        }

        public ListenableFuture<?> triggerOnAdd(final Host host) {
            return executor.submit(new ExceptionCatchingRunnable() {
                @Override
                public void runMayThrow() throws InterruptedException, ExecutionException {
                    onAdd(host);
                }
            });
        }

        // Use triggerOnAdd unless you're sure you want to run this on the current thread.
        private void onAdd(final Host host) throws InterruptedException, ExecutionException {
            if (isClosed())
                return;

            logger.info("New Cassandra host {} added", host);

            if (connectionFactory.protocolVersion == 2 && !supportsProtocolV2(host)) {
                logUnsupportedVersionProtocol(host);
                return;
            }

            boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            if (!locked) {
                logger.warn("Could not acquire notifications lock within {} seconds, ignoring ADD notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host);
                return;
            }
            try {
                // Add it to the load balancing policy first and foremost, as doing so might change the decision
                // it will make for distance() on that node (not likely but we leave that possibility).
                // This does mean the policy may start returning that node for query plans, but as long
                // as no pools have been created (below) this will be ignored by RequestHandler so it's fine.
                loadBalancingPolicy().onAdd(host);

                // Next, if the host should be ignored, well, ignore it.
                if (loadBalancingPolicy().distance(host) == HostDistance.IGNORED) {
                    // We still mark the node UP though as it should be (and notify the listeners).
                    // We'll mark it down if we get a notification anyway, and we've documented that, especially
                    // for IGNORED hosts, the isUp() method is a best-effort guess.
                    host.setUp();
                    for (Host.StateListener listener : listeners)
                        listener.onAdd(host);
                    return;
                }

                try {
                    prepareAllQueries(host);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    // Don't propagate because we don't want to prevent other listeners from running
                } catch (UnsupportedProtocolVersionException e) {
                    logUnsupportedVersionProtocol(host);
                    return;
                } catch (ClusterNameMismatchException e) {
                    logClusterNameMismatch(host, e.expectedClusterName, e.actualClusterName);
                    return;
                }

                controlConnection.onAdd(host);

                List<ListenableFuture<Boolean>> futures = new ArrayList<ListenableFuture<Boolean>>(sessions.size());
                for (SessionManager s : sessions)
                    futures.add(s.maybeAddPool(host, blockingExecutor));

                // Only mark the node up once all sessions have added their pool (if the load-balancing
                // policy says it should), so that Host.isUp() doesn't return true before we're reconnected
                // to the node.
                ListenableFuture<List<Boolean>> f = Futures.allAsList(futures);
                Futures.addCallback(f, new FutureCallback<List<Boolean>>() {
                    public void onSuccess(List<Boolean> poolCreationResults) {
                        // If any of the creations failed, it will have signaled a connection failure
                        // which will trigger a reconnection to the node. So don't bother marking UP.
                        if (Iterables.any(poolCreationResults, Predicates.equalTo(false))) {
                            logger.debug("Connection pool cannot be created, not marking {} UP", host);
                            return;
                        }

                        host.setUp();

                        for (Host.StateListener listener : listeners)
                            listener.onAdd(host);
                    }

                    public void onFailure(Throwable t) {
                        // That future is not really supposed to throw unexpected exceptions
                        if (!(t instanceof InterruptedException))
                            logger.error("Unexpected error while adding node: while this shouldn't happen, this shouldn't be critical", t);
                    }
                });

                f.get();

                // Now, check if there aren't pools to create/remove following the addition.
                // We do that now only so that it's not called before we've set the node up.
                for (SessionManager s : sessions)
                    s.updateCreatedPools(blockingExecutor);

            } finally {
                host.notificationsLock.unlock();
            }
        }

        public ListenableFuture<?> triggerOnRemove(final Host host) {
            return executor.submit(new ExceptionCatchingRunnable() {
                @Override
                public void runMayThrow() throws InterruptedException, ExecutionException {
                    onRemove(host);
                }
            });
        }

        // Use triggerOnRemove unless you're sure you want to run this on the current thread.
        private void onRemove(Host host) throws InterruptedException, ExecutionException {
            if (isClosed())
                return;

            boolean locked = host.notificationsLock.tryLock(NOTIF_LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            if (!locked) {
                logger.warn("Could not acquire notifications lock within {} seconds, ignoring REMOVE notification for {}", NOTIF_LOCK_TIMEOUT_SECONDS, host);
                return;
            }
            try {
                host.setDown();

                logger.debug("Removing host {}", host);
                loadBalancingPolicy().onRemove(host);
                controlConnection.onRemove(host);
                for (SessionManager s : sessions)
                    s.onRemove(host);

                for (Host.StateListener listener : listeners)
                    listener.onRemove(host);
            } finally {
                host.notificationsLock.unlock();
            }
        }

        public boolean signalConnectionFailure(Host host, ConnectionException exception, boolean isHostAddition, boolean markSuspected) {
            // Don't signal failure until we've fully initialized the controlConnection as this might mess with
            // the protocol detection
            if (!isFullyInit || isClosed())
                return true;

            boolean isDown = host.signalConnectionFailure(exception);
            if (isDown) {
                if (isHostAddition || !markSuspected) {
                    triggerOnDown(host, isHostAddition);
                } else {
                    // Note that we do want to call onSuspected on the current thread, as the whole point is
                    // that by the time this method returns, the host's initialReconnectionAttempt will have been
                    // set and the load balancing policy informed of the suspicion. We know that onSuspected
                    // itself does little (and non-blocking) work, however.
                    onSuspected(host);
                }
            }
            return isDown;
        }

        private boolean supportsProtocolV2(Host newHost) {
            return newHost.getCassandraVersion() == null || newHost.getCassandraVersion().getMajor() >= 2;
        }

        public void removeHost(Host host, boolean isInitialConnection) {
            if (host == null)
                return;

            if (metadata.remove(host)) {
                if (isInitialConnection) {
                    logger.warn("You listed {} in your contact points, but it could not be reached at startup", host);
                } else {
                    logger.info("Cassandra host {} removed", host);
                    triggerOnRemove(host);
                }
            }
        }

        public void ensurePoolsSizing() {
            for (SessionManager session : sessions) {
                for (HostConnectionPool pool : session.pools.values())
                    pool.ensureCoreConnections();
            }
        }

        public PreparedStatement addPrepared(PreparedStatement stmt) {
            PreparedStatement previous = preparedQueries.putIfAbsent(stmt.getPreparedId().id, stmt);
            if (previous != null) {
                logger.warn("Re-preparing already prepared query {}. Please note that preparing the same query more than once is "
                          + "generally an anti-pattern and will likely affect performance. Consider preparing the statement only once.", stmt.getQueryString());

                // The one object in the cache will get GCed once it's not referenced by the client anymore since we use a weak reference.
                // So we need to make sure that the instance we do return to the user is the one that is in the cache.
                return previous;
            }
            return stmt;
        }

        private void prepareAllQueries(Host host) throws InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException {
            if (preparedQueries.isEmpty())
                return;

            logger.debug("Preparing {} prepared queries on newly up node {}", preparedQueries.size(), host);
            try {
                Connection connection = connectionFactory.open(host);

                try {
                    try {
                        ControlConnection.waitForSchemaAgreement(connection, this);
                    } catch (ExecutionException e) {
                        // As below, just move on
                    }

                    // Furthermore, along with each prepared query we keep the current keyspace at the time of preparation
                    // as we need to make sure it is the same when we re-prepare on new/restarted nodes. Most queries will use the
                    // same keyspace, so keeping it each time is slightly wasteful, but this doesn't really matter and is
                    // simpler. Besides, in prepareAllQueries we avoid setting the current keyspace more than needed.

                    // We need to make sure we prepared every query with the right current keyspace, i.e. the one originally
                    // used for preparing it. However, since it is likely that all prepared queries belong to only a handful
                    // of different keyspaces (possibly only one), and to avoid setting the current keyspace more than needed,
                    // we first sort the queries per keyspace.
                    SetMultimap<String, String> perKeyspace = HashMultimap.create();
                    for (PreparedStatement ps : preparedQueries.values()) {
                        // It's possible for a query to not have a current keyspace. But since null doesn't work well as
                        // map keys, we use the empty string instead (which is not a valid keyspace name).
                        String keyspace = ps.getQueryKeyspace() == null ?
"" : ps.getQueryKeyspace(); perKeyspace.put(keyspace, ps.getQueryString()); } for (String keyspace : perKeyspace.keySet()) { // Empty string mean no particular keyspace to set if (!keyspace.isEmpty()) connection.setKeyspace(keyspace); List<Connection.Future> futures = new ArrayList<Connection.Future>(preparedQueries.size()); for (String query : perKeyspace.get(keyspace)) { futures.add(connection.write(new Requests.Prepare(query))); } for (Connection.Future future : futures) { try { future.get(); } catch (ExecutionException e) { // This "might" happen if we drop a CF but haven't removed it's prepared queries (which we don't do // currently). It's not a big deal however as if it's a more serious problem it'll show up later when // the query is tried for execution. logger.debug("Unexpected error while preparing queries on new/newly up host", e); } } } } finally { connection.closeAsync(); } } catch (ConnectionException e) { // Ignore, not a big deal } catch (AuthenticationException e) { // That's a bad news, but ignore at this point } catch (BusyConnectionException e) { // Ignore, not a big deal } } public void submitSchemaRefresh(final String keyspace, final String table) { logger.trace("Submitting schema refresh"); executor.submit(new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { controlConnection.refreshSchema(keyspace, table); } }); } // refresh the schema using the provided connection, and notice the future with the provided resultset once done public void refreshSchemaAndSignal(final Connection connection, final DefaultResultSetFuture future, final ResultSet rs, final String keyspace, final String table) { if (logger.isDebugEnabled()) logger.debug("Refreshing schema for {}{}", keyspace == null ? "" : keyspace, table == null ? "" : '.' + table); executor.submit(new Runnable() { @Override public void run() { try { // Before refreshing the schema, wait for schema agreement so // that querying a table just after having created it don't fail. if (!ControlConnection.waitForSchemaAgreement(connection, Cluster.Manager.this)) logger.warn("No schema agreement from live replicas after {} s. The schema may not be up to date on some nodes.", configuration.getProtocolOptions().getMaxSchemaAgreementWaitSeconds()); ControlConnection.refreshSchema(connection, keyspace, table, Cluster.Manager.this, false); } catch (Exception e) { logger.error("Error during schema refresh ({}). The schema from Cluster.getMetadata() might appear stale. Asynchronously submitting job to fix.", e.getMessage()); submitSchemaRefresh(keyspace, table); } finally { // Always sets the result future.setResult(rs); } } }); } // Called when some message has been received but has been initiated from the server (streamId < 0). // This is called on an I/O thread, so all blocking operation must be done on an executor. 
@Override public void handle(Message.Response response) { if (!(response instanceof Responses.Event)) { logger.error("Received an unexpected message from the server: {}", response); return; } final ProtocolEvent event = ((Responses.Event)response).event; logger.debug("Received event {}, scheduling delivery", response); switch (event.type) { case TOPOLOGY_CHANGE: ProtocolEvent.TopologyChange tpc = (ProtocolEvent.TopologyChange)event; InetSocketAddress tpAddr = translateAddress(tpc.node.getAddress()); switch (tpc.change) { case NEW_NODE: final Host newHost = metadata.add(tpAddr); if (newHost != null) { // Cassandra tends to send notifications for new/up nodes a bit early (it is triggered once // gossip is up, but that is before the client-side server is up), so we add a delay // (otherwise the connection will likely fail and have to be retry which is wasteful). This // probably should be fixed C* side, after which we'll be able to remove this. scheduledTasksExecutor.schedule(new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { // Make sure we have up-to-date infos on that host before adding it (so we typically // catch that an upgraded node uses a new cassandra version). if (controlConnection.refreshNodeInfo(newHost)) { onAdd(newHost); } else { logger.debug("Not enough info for {}, ignoring host", newHost); } } }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); } break; case REMOVED_NODE: removeHost(metadata.getHost(tpAddr), false); break; case MOVED_NODE: executor.submit(new ExceptionCatchingRunnable() { @Override public void runMayThrow() { controlConnection.refreshNodeListAndTokenMap(); } }); break; } break; case STATUS_CHANGE: ProtocolEvent.StatusChange stc = (ProtocolEvent.StatusChange)event; InetSocketAddress stAddr = translateAddress(stc.node.getAddress()); switch (stc.status) { case UP: final Host hostUp = metadata.getHost(stAddr); if (hostUp == null) { final Host h = metadata.add(stAddr); // If hostUp is still null, it means we didn't knew about it the line before but // got beaten at adding it to the metadata by another thread. In that case, it's // fine to let the other thread win and ignore the notification here if (h == null) return; // See NEW_NODE above scheduledTasksExecutor.schedule(new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { // Make sure we have up-to-date infos on that host before adding it (so we typically // catch that an upgraded node uses a new cassandra version). if (controlConnection.refreshNodeInfo(h)) { onAdd(h); } else { logger.debug("Not enough info for {}, ignoring host", h); } } }, NEW_NODE_DELAY_SECONDS, TimeUnit.SECONDS); } else { executor.submit(new ExceptionCatchingRunnable() { @Override public void runMayThrow() throws InterruptedException, ExecutionException { // Make sure we have up-to-date infos on that host before adding it (so we typically // catch that an upgraded node uses a new cassandra version). if (controlConnection.refreshNodeInfo(hostUp)) { onUp(hostUp); } else { logger.debug("Not enough info for {}, ignoring host", hostUp); } } }); } break; case DOWN: // Note that there is a slight risk we can receive the event late and thus // mark the host down even though we already had reconnected successfully. // But it is unlikely, and don't have too much consequence since we'll try reconnecting // right away, so we favor the detection to make the Host.isUp method more reliable. 
Host hostDown = metadata.getHost(stAddr); if (hostDown != null) triggerOnDown(hostDown); break; } break; case SCHEMA_CHANGE: ProtocolEvent.SchemaChange scc = (ProtocolEvent.SchemaChange)event; switch (scc.change) { case CREATED: if (scc.table.isEmpty()) submitSchemaRefresh(scc.keyspace, null); else submitSchemaRefresh(scc.keyspace, scc.table); break; case DROPPED: if (scc.table.isEmpty()) manager.metadata.removeKeyspace(scc.keyspace); else { KeyspaceMetadata keyspace = manager.metadata.getKeyspace(scc.keyspace); if (keyspace == null) logger.warn("Received a DROPPED notification for {}.{}, but this keyspace is unknown in our metadata", scc.keyspace, scc.table); else keyspace.removeTable(scc.table); } break; case UPDATED: if (scc.table.isEmpty()) submitSchemaRefresh(scc.keyspace, null); else submitSchemaRefresh(scc.keyspace, scc.table); break; } break; } } void refreshConnectedHosts() { // Deal first with the control connection: if it's connected to a node that is not LOCAL, try // reconnecting (thus letting the loadBalancingPolicy pick a better node) Host ccHost = controlConnection.connectedHost(); if (ccHost == null || loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) controlConnection.reconnect(); for (SessionManager s : sessions) s.updateCreatedPools(executor); } void refreshConnectedHost(Host host) { // Deal with the control connection if it was using this host Host ccHost = controlConnection.connectedHost(); if (ccHost == null || ccHost.equals(host) && loadBalancingPolicy().distance(ccHost) != HostDistance.LOCAL) controlConnection.reconnect(); for (SessionManager s : sessions) s.updateCreatedPools(host, executor); } private class ClusterCloseFuture extends CloseFuture.Forwarding { ClusterCloseFuture(List<CloseFuture> futures) { super(futures); } @Override public CloseFuture force() { // The only ExecutorService we haven't forced yet is executor shutdownNow(executor); return super.force(); } @Override protected void onFuturesDone() { /* * When we reach this, all sessions should be shutdown. We've also started a shutdown * of the thread pools used by this object. Remains 2 things before marking the shutdown * as done: * 1) we need to wait for the completion of the shutdown of the Cluster threads pools. * 2) we need to shutdown the Connection.Factory, i.e. the executors used by Netty. * But at least for 2), we must not do it on the current thread because that could be * a netty worker, which we're going to shutdown. So creates some thread for that. */ (new Thread("Shutdown-checker") { public void run() { // Just wait indefinitely on the the completion of the thread pools. Provided the user // call force(), we'll never really block forever. try { reconnectionExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); scheduledTasksExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); blockingExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); // Some of the jobs on the executors can be doing query stuff, so close the // connectionFactory at the very last connectionFactory.shutdown(); reaper.shutdown(); set(null); } catch (InterruptedException e) { Thread.currentThread().interrupt(); setException(e); } } }).start(); } } } /** * Periodically ensures that closed connections are properly terminated once they have no more pending requests. 
* * This is normally done when the connection errors out, or when the last request is processed; this class acts as * a last-effort protection since unterminated connections can lead to deadlocks. If it terminates a connection, * this indicates a bug; warnings are logged so that this can be reported. * * @see Connection#tryTerminate(boolean) */ static class ConnectionReaper { private static final int INTERVAL_MS = 15000; private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1, threadFactory("Reaper-%d")); private final Map<Connection, Long> connections = new ConcurrentHashMap<Connection, Long>(); private volatile boolean shutdown; private final Runnable reaperTask = new Runnable() { @Override public void run() { long now = System.currentTimeMillis(); Iterator<Entry<Connection, Long>> iterator = connections.entrySet().iterator(); while (iterator.hasNext()) { Entry<Connection, Long> entry = iterator.next(); Connection connection = entry.getKey(); Long terminateTime = entry.getValue(); if (terminateTime <= now) { boolean terminated = connection.tryTerminate(true); if (terminated) iterator.remove(); } } } }; ConnectionReaper() { executor.scheduleWithFixedDelay(reaperTask, INTERVAL_MS, INTERVAL_MS, TimeUnit.MILLISECONDS); } void register(Connection connection, long terminateTime) { if (shutdown) { // This should not happen since the reaper is shut down after all sessions. logger.warn("Connection registered after reaper shutdown: {}", connection); connection.tryTerminate(true); } else { connections.put(connection, terminateTime); } } void shutdown() { shutdown = true; // Force shutdown to avoid waiting for the interval, and run the task manually one last time executor.shutdownNow(); reaperTask.run(); } } }
signalfx/java-driver
driver-core/src/main/java/com/datastax/driver/core/Cluster.java
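The ConnectionReaper above is one instance of a general deadline-sweep pattern: resources are registered with a terminate-by timestamp, and a single scheduled task periodically retries termination until it succeeds. A minimal self-contained sketch of that pattern follows; DeadlineReaper, Terminator, and tryTerminate are hypothetical names for illustration, not part of the driver's API.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class DeadlineReaper<R> {

    /** Callback that attempts termination; must be idempotent and return true once fully terminated. */
    interface Terminator<R> {
        boolean tryTerminate(R resource);
    }

    private final Map<R, Long> deadlines = new ConcurrentHashMap<R, Long>();
    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    private final Terminator<R> terminator;

    DeadlineReaper(Terminator<R> terminator, long intervalMs) {
        this.terminator = terminator;
        // Sweep on a fixed delay, like ConnectionReaper's reaperTask.
        executor.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                sweep();
            }
        }, intervalMs, intervalMs, TimeUnit.MILLISECONDS);
    }

    /** Ask for the resource to be terminated at (or after) the given epoch millis. */
    void register(R resource, long terminateAtMs) {
        deadlines.put(resource, terminateAtMs);
    }

    private void sweep() {
        long now = System.currentTimeMillis();
        Iterator<Map.Entry<R, Long>> it = deadlines.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<R, Long> entry = it.next();
            // Remove the entry only once termination succeeded, so failures are retried next sweep.
            if (entry.getValue() <= now && terminator.tryTerminate(entry.getKey()))
                it.remove();
        }
    }

    void shutdown() {
        executor.shutdownNow();
        sweep(); // one last manual pass, mirroring ConnectionReaper.shutdown()
    }
}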
213,964
/*
 * Copyright (c) 2016-2019 VMware, Inc. All Rights Reserved.
 *
 * This product is licensed to you under the Apache License, Version 2.0 (the "License").
 * You may not use this product except in compliance with the License.
 *
 * This product may include a number of subcomponents with separate copyright notices
 * and license terms. Your use of these subcomponents is subject to the terms and
 * conditions of the subcomponent's license, as noted in the LICENSE file.
 */
package com.vmware.mangle.services.cassandra;

import java.util.Collections;
import java.util.List;

import com.datastax.driver.core.AuthProvider;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.NettyOptions;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.ProtocolVersion;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.TimestampGenerator;
import com.datastax.driver.core.policies.AddressTranslator;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.RetryPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.cassandra.config.ClusterBuilderConfigurer;
import org.springframework.data.cassandra.config.CompressionType;
import org.springframework.data.cassandra.core.cql.keyspace.CreateKeyspaceSpecification;
import org.springframework.data.cassandra.core.cql.keyspace.DropKeyspaceSpecification;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;

/**
 * Base class for Spring Cassandra configuration that can handle creating keyspaces, executing
 * arbitrary CQL on startup & shutdown, and optionally dropping keyspaces.
 *
 * @author kumargautam
 */
@Configuration
public abstract class AbstractClusterConfiguration {

    /**
     * Returns the initialized {@link Cluster} instance.
     *
     * @return the {@link Cluster}.
     * @throws IllegalStateException if the cluster factory is not initialized.
     */
    @Bean
    public Cluster getRequiredCluster() {
        CassandraClusterFactoryBean factoryBean = cluster();
        Assert.state(factoryBean.getObject() != null, "Cluster factory not initialized");
        return factoryBean.getObject();
    }

    /**
     * Creates a {@link CassandraClusterFactoryBean} that provides a Cassandra
     * {@link com.datastax.driver.core.Cluster}. The lifecycle of
     * {@link CassandraClusterFactoryBean} executes {@link #getStartupScripts() startup} and
     * {@link #getShutdownScripts() shutdown} scripts.
     *
     * @return the {@link CassandraClusterFactoryBean}.
     * @see #cluster()
     * @see #getStartupScripts()
     * @see #getShutdownScripts()
     */
    @Bean
    public CassandraClusterFactoryBean cluster() {
        CassandraClusterFactoryBean bean = new CassandraClusterFactoryBean();

        bean.setAddressTranslator(getAddressTranslator());
        bean.setAuthProvider(getAuthProvider());
        bean.setClusterBuilderConfigurer(getClusterBuilderConfigurer());
        bean.setClusterName(getClusterName());
        bean.setCompressionType(getCompressionType());
        bean.setContactPoints(getContactPoints());
        bean.setLoadBalancingPolicy(getLoadBalancingPolicy());
        bean.setMaxSchemaAgreementWaitSeconds(getMaxSchemaAgreementWaitSeconds());
        bean.setMetricsEnabled(getMetricsEnabled());
        bean.setNettyOptions(getNettyOptions());
        bean.setPoolingOptions(getPoolingOptions());
        bean.setPort(getPort());
        bean.setProtocolVersion(getProtocolVersion());
        bean.setQueryOptions(getQueryOptions());
        bean.setReconnectionPolicy(getReconnectionPolicy());
        bean.setRetryPolicy(getRetryPolicy());
        bean.setSpeculativeExecutionPolicy(getSpeculativeExecutionPolicy());
        bean.setSocketOptions(getSocketOptions());
        bean.setTimestampGenerator(getTimestampGenerator());

        bean.setKeyspaceCreations(getKeyspaceCreations());
        bean.setKeyspaceDrops(getKeyspaceDrops());
        bean.setStartupScripts(getStartupScripts());
        bean.setShutdownScripts(getShutdownScripts());

        return bean;
    }

    /**
     * Returns the {@link AddressTranslator}.
     *
     * @return the {@link AddressTranslator}; may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract AddressTranslator getAddressTranslator();

    /**
     * Returns the {@link AuthProvider}.
     *
     * @return the {@link AuthProvider}, may be {@literal null}.
     */
    @Nullable
    protected abstract AuthProvider getAuthProvider();

    /**
     * Returns the {@link ClusterBuilderConfigurer}.
     *
     * @return the {@link ClusterBuilderConfigurer}; may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract ClusterBuilderConfigurer getClusterBuilderConfigurer();

    /**
     * Returns the cluster name.
     *
     * @return the cluster name; may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract String getClusterName();

    /**
     * Returns the {@link CompressionType}.
     *
     * @return the {@link CompressionType}, may be {@literal null}.
     */
    @Nullable
    protected abstract CompressionType getCompressionType();

    /**
     * Returns the Cassandra contact points. Defaults to {@code localhost}.
     *
     * @return the Cassandra contact points
     * @see CassandraClusterFactoryBean#DEFAULT_CONTACT_POINTS
     */
    protected String getContactPoints() {
        return CassandraClusterFactoryBean.DEFAULT_CONTACT_POINTS;
    }

    /**
     * Returns the {@link LoadBalancingPolicy}.
     *
     * @return the {@link LoadBalancingPolicy}, may be {@literal null}.
     */
    @Nullable
    protected abstract LoadBalancingPolicy getLoadBalancingPolicy();

    /**
     * Returns the maximum schema agreement wait in seconds.
     *
     * @return the maximum schema agreement wait in seconds; defaults to {@literal 10} seconds.
     */
    protected int getMaxSchemaAgreementWaitSeconds() {
        return CassandraClusterFactoryBean.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS;
    }

    /**
     * Returns whether to enable metrics. Defaults to {@literal true}.
     *
     * @return {@literal true} to enable metrics.
     * @see CassandraClusterFactoryBean#DEFAULT_METRICS_ENABLED
     */
    protected boolean getMetricsEnabled() {
        return CassandraClusterFactoryBean.DEFAULT_METRICS_ENABLED;
    }

    /**
     * Returns the {@link NettyOptions}. Defaults to {@link NettyOptions#DEFAULT_INSTANCE}.
     *
     * @return the {@link NettyOptions} to customize Netty behavior.
     * @since 1.5
     */
    protected NettyOptions getNettyOptions() {
        return NettyOptions.DEFAULT_INSTANCE;
    }

    /**
     * Returns the {@link PoolingOptions}.
     *
     * @return the {@link PoolingOptions}, may be {@literal null}.
     */
    @Nullable
    protected abstract PoolingOptions getPoolingOptions();

    /**
     * Returns the Cassandra port. Defaults to {@code 9042}.
     *
     * @return the Cassandra port
     * @see CassandraClusterFactoryBean#DEFAULT_PORT
     */
    protected String getPort() {
        return String.valueOf(CassandraClusterFactoryBean.DEFAULT_PORT);
    }

    /**
     * Returns the {@link ProtocolVersion}. Defaults to {@link ProtocolVersion#NEWEST_SUPPORTED}.
     *
     * @return the {@link ProtocolVersion}.
     * @see ProtocolVersion#NEWEST_SUPPORTED
     */
    protected ProtocolVersion getProtocolVersion() {
        return ProtocolVersion.NEWEST_SUPPORTED;
    }

    /**
     * Returns the {@link QueryOptions}.
     *
     * @return the {@link QueryOptions}, may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract QueryOptions getQueryOptions();

    /**
     * Returns the {@link ReconnectionPolicy}.
     *
     * @return the {@link ReconnectionPolicy}, may be {@literal null}.
     */
    @Nullable
    protected abstract ReconnectionPolicy getReconnectionPolicy();

    /**
     * Returns the {@link RetryPolicy}.
     *
     * @return the {@link RetryPolicy}, may be {@literal null}.
     */
    @Nullable
    protected abstract RetryPolicy getRetryPolicy();

    /**
     * Returns the {@link SpeculativeExecutionPolicy}.
     *
     * @return the {@link SpeculativeExecutionPolicy}; may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract SpeculativeExecutionPolicy getSpeculativeExecutionPolicy();

    /**
     * Returns the {@link SocketOptions}.
     *
     * @return the {@link SocketOptions}, may be {@literal null}.
     */
    @Nullable
    protected abstract SocketOptions getSocketOptions();

    /**
     * Returns the {@link TimestampGenerator}.
     *
     * @return the {@link TimestampGenerator}; may be {@literal null}.
     * @since 1.5
     */
    @Nullable
    protected abstract TimestampGenerator getTimestampGenerator();

    /**
     * Returns the list of keyspace creations to be run right after
     * {@link com.datastax.driver.core.Cluster} initialization.
     *
     * @return the list of keyspace creations, may be empty but never {@literal null}
     */
    protected List<CreateKeyspaceSpecification> getKeyspaceCreations() {
        return Collections.emptyList();
    }

    /**
     * Returns the list of keyspace drops to be run before {@link com.datastax.driver.core.Cluster}
     * shutdown.
     *
     * @return the list of keyspace drops, may be empty but never {@literal null}
     */
    protected List<DropKeyspaceSpecification> getKeyspaceDrops() {
        return Collections.emptyList();
    }

    /**
     * Returns the list of startup scripts to be run after {@link #getKeyspaceCreations() keyspace
     * creations} and after {@link com.datastax.driver.core.Cluster} initialization.
     *
     * @return the list of startup scripts, may be empty but never {@literal null}
     */
    protected List<String> getStartupScripts() {
        return Collections.emptyList();
    }

    /**
     * Returns the list of shutdown scripts to be run after {@link #getKeyspaceDrops() keyspace
     * drops} and right before {@link com.datastax.driver.core.Cluster} shutdown.
     *
     * @return the list of shutdown scripts, may be empty but never {@literal null}
     */
    protected List<String> getShutdownScripts() {
        return Collections.emptyList();
    }
}
vmware/mangle
mangle-services/src/main/java/com/vmware/mangle/services/cassandra/AbstractClusterConfiguration.java
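For illustration, a minimal sketch of a concrete subclass of the AbstractClusterConfiguration above. The class name and contact point are hypothetical, the class is assumed to sit in the same package as the base class, and every @Nullable hook simply returns null so that CassandraClusterFactoryBean keeps its defaults.

import com.datastax.driver.core.AuthProvider;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.TimestampGenerator;
import com.datastax.driver.core.policies.AddressTranslator;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.RetryPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.cassandra.config.ClusterBuilderConfigurer;
import org.springframework.data.cassandra.config.CompressionType;

@Configuration
public class LocalClusterConfiguration extends AbstractClusterConfiguration {

    // Non-abstract hook: point the factory at a single local node instead of the default.
    @Override
    protected String getContactPoints() {
        return "127.0.0.1";
    }

    // Every remaining hook is declared @Nullable in the base class, so returning null
    // leaves the corresponding option unset and the factory bean keeps its defaults.
    @Override protected AddressTranslator getAddressTranslator() { return null; }
    @Override protected AuthProvider getAuthProvider() { return null; }
    @Override protected ClusterBuilderConfigurer getClusterBuilderConfigurer() { return null; }
    @Override protected String getClusterName() { return null; }
    @Override protected CompressionType getCompressionType() { return null; }
    @Override protected LoadBalancingPolicy getLoadBalancingPolicy() { return null; }
    @Override protected PoolingOptions getPoolingOptions() { return null; }
    @Override protected QueryOptions getQueryOptions() { return null; }
    @Override protected ReconnectionPolicy getReconnectionPolicy() { return null; }
    @Override protected RetryPolicy getRetryPolicy() { return null; }
    @Override protected SpeculativeExecutionPolicy getSpeculativeExecutionPolicy() { return null; }
    @Override protected SocketOptions getSocketOptions() { return null; }
    @Override protected TimestampGenerator getTimestampGenerator() { return null; }
}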
213,965
/*
 * Copyright 2018 T-Mobile US, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tmobile.opensource.casquatch;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.CqlSessionBuilder;
import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import com.typesafe.config.ConfigValueFactory;
import lombok.extern.slf4j.Slf4j;

import java.util.*;

/**
 * Builder for {@link CasquatchDao}
 */
@SuppressWarnings({"WeakerAccess", "SpellCheckingInspection"})
@Slf4j
public class CasquatchDaoBuilder {

    protected final Map<String, Object> configMap = new HashMap<>();
    protected String prefix = null;
    protected String path = null;
    protected Config config;

    /**
     * Clear cached configuration
     */
    private void clearConfigCache() {
        this.config = null;
    }

    /**
     * Create CasquatchDao from configuration
     *
     * @return configured CasquatchDao
     */
    public CasquatchDao build() {
        return new CasquatchDao(this);
    }

    /**
     * End the current profile
     *
     * @return builder with profile ended
     */
    public CasquatchDaoBuilder endProfile() {
        this.path = null;
        return this;
    }

    /**
     * Generate configuration from files as well as runtime settings
     *
     * @return typesafe config object
     */
    public Config getConfig() {
        if (this.config == null) {
            ConfigLoader.clear();
            if (this.prefix == null) {
                this.config = ConfigLoader.casquatch();
            } else {
                this.config = ConfigLoader.casquatch(this.prefix);
            }
            if (!this.configMap.isEmpty()) {
                for (Map.Entry<String, Object> entry : this.configMap.entrySet()) {
                    if (entry.getValue() != null && !(entry.getValue() instanceof String && ((String) entry.getValue()).isEmpty())) {
                        if (log.isTraceEnabled())
                            log.trace("Runtime Property: {} -> {}", entry.getKey(), entry.getValue());
                        this.config = this.config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue()));
                    }
                }
            }
        }
        return this.config;
    }

    /**
     * Get prefix of properties
     *
     * @return prefix
     */
    public String getPrefix() {
        return this.prefix;
    }

    /**
     * Provides access to a raw session based on Casquatch config
     *
     * @return CqlSession object
     */
    public CqlSession session() {
        return this.sessionBuilder().build();
    }

    /**
     * Provides access to a raw session based on Casquatch config
     *
     * @param keyspace override the keyspace for this session
     * @return CqlSession object
     */
    public CqlSession session(String keyspace) {
        return this.sessionBuilder().withKeyspace(keyspace).build();
    }

    /**
     * Provides access to the underlying session builder based on Casquatch config
     *
     * @return CqlSessionBuilder object
     */
    public CqlSessionBuilder sessionBuilder() {
        return CqlSession.builder().withConfigLoader(new DefaultDriverConfigLoader(this::getConfig));
    }

    /**
     * Set the prefix for properties
     *
     * @param prefix property prefix
     */
    public void setPrefix(String prefix) {
        this.prefix = prefix;
    }

    /**
     * Start a profile
     *
     * @param profile name of profile
     * @return builder with profile started
     */
    public CasquatchDaoBuilder startProfile(String profile) {
        this.path = String.format("profiles.%s.", profile);
        return this;
    }

    /**
     * Prints out the config in JSON format
     *
     * @return config in JSON format
     */
    @Override
    public String toString() {
        Map<String, String> configString = new HashMap<>();
        for (Map.Entry<String, ConfigValue> entry : this.getConfig().entrySet()) {
            configString.put(entry.getKey(), entry.getValue().render());
        }
        try {
            return new ObjectMapper().writeValueAsString(configString);
        } catch (JsonProcessingException e) {
            return "Unable to convert to JSON";
        }
    }

    /**
     * Add a single value to the config
     *
     * @param key key for value
     * @param value value (object)
     * @return builder with value set
     */
    public CasquatchDaoBuilder with(String key, Object value) {
        this.clearConfigCache();
        if (this.path != null) {
            key = this.path + key;
        }
        this.configMap.put(key, value);
        return this;
    }

    /**
     * Add a list to the config
     *
     * @param key key for value
     * @param valueList list of values
     * @return builder with value set
     */
    public CasquatchDaoBuilder with(String key, List<String> valueList) {
        // Keep the cached config in sync, mirroring with(String, Object)
        this.clearConfigCache();
        if (this.path != null) {
            key = this.path + key;
        }
        List<String> list;
        if (this.configMap.containsKey(key)) {
            if (this.configMap.get(key) instanceof List) {
                //noinspection unchecked
                list = (List<String>) this.configMap.get(key);
                list.addAll(valueList);
            } else if (this.configMap.get(key) instanceof String) {
                list = new ArrayList<>();
                list.add((String) this.configMap.get(key));
                list.addAll(valueList);
            } else {
                throw new DriverException(DriverException.CATEGORIES.CASQUATCH_INVALID_CONFIGURATION, String.format("Attempted to set %s to a list but it already contained another class", key));
            }
        } else {
            list = new ArrayList<>(valueList);
        }
        this.configMap.put(key, list);
        return this;
    }

    /**
     * Add value to property list mapped to advanced.ssl-engine-factory.truststore-path
     *
     * NOTE: Generated from reference.conf files on release
     *
     * @param value value for property
     * @return builder with property set
     */
    public CasquatchDaoBuilder withAdvancedSslEngineFactoryTruststorePath(String value) {
        return this.with("advanced.ssl-engine-factory.truststore-path", value);
    }

    /**
     * Add value to property list mapped to basic.load-balancing-policy.class
     *
     * NOTE: Generated from reference.conf files on release
     *
     * @param value value for property
     * @return builder with property set
     */
    public CasquatchDaoBuilder withBasicLoadBalancingPolicyClass(String value) {
        return this.with("basic.load-balancing-policy.class", value);
    }

    /**
     * Add value to property list mapped to basic.load-balancing-policy.local-datacenter
     *
     * NOTE: Generated from reference.conf files on release
     *
     * @param value value for property
     * @return builder with property set
     */
    public CasquatchDaoBuilder withBasicLoadBalancingPolicyLocalDatacenter(String value) {
        return this.with("basic.load-balancing-policy.local-datacenter", value);
    }

    /**
     * Add value to property list mapped to advanced.ssl-engine-factory.keystore-path
     *
     * NOTE: Generated from reference.conf files on release
     *
     * @param value value for property
     * @return builder with property set
     */
    public CasquatchDaoBuilder withAdvancedSslEngineFactoryKeystorePath(String value) {
        return this.with("advanced.ssl-engine-factory.keystore-path", value);
    }

    /**
     * Add value to property list mapped to advanced.connection.pool.remote.size
     *
     * NOTE: Generated from reference.conf files on release
     *
     * @param value value for property
     * @return builder with property set
     */
    public CasquatchDaoBuilder withAdvancedConnectionPoolRemoteSize(Integer value) {
        return this.with("advanced.connection.pool.remote.size", value);
    }

    /**
     * Add
value to property list mapped to basic.request.serial-consistency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicRequestSerialConsistency(String value) { return this.with("basic.request.serial-consistency",value); } /** * Add value to property list mapped to advanced.metadata.topology-event-debouncer.window * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataTopologyEventDebouncerWindow(String value) { return this.with("advanced.metadata.topology-event-debouncer.window",value); } /** * Add value to property list mapped to advanced.address-translator.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedAddressTranslatorClass(String value) { return this.with("advanced.address-translator.class",value); } /** * Add value to property list mapped to solr-query-options.null-saving-strategy * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsNullSavingStrategy(String value) { return this.with("solr-query-options.null-saving-strategy",value); } /** * Add value to property list mapped to advanced.control-connection.schema-agreement.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedControlConnectionSchemaAgreementTimeout(String value) { return this.with("advanced.control-connection.schema-agreement.timeout",value); } /** * Add value to property list mapped to advanced.connection.warn-on-init-error * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionWarnOnInitError(Boolean value) { return this.with("advanced.connection.warn-on-init-error",value); } /** * Add value to property list mapped to basic.request.default-idempotence * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicRequestDefaultIdempotence(Boolean value) { return this.with("basic.request.default-idempotence",value); } /** * Add value to property list mapped to advanced.heartbeat.interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedHeartbeatInterval(String value) { return this.with("advanced.heartbeat.interval",value); } /** * Add value to property list mapped to advanced.metadata.topology-event-debouncer.max-events * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataTopologyEventDebouncerMaxEvents(Integer value) { return this.with("advanced.metadata.topology-event-debouncer.max-events",value); } /** * Add value to property list mapped to advanced.protocol.max-frame-length * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder 
withAdvancedProtocolMaxFrameLength(String value) { return this.with("advanced.protocol.max-frame-length",value); } /** * Add value to property list mapped to advanced.metrics.session.throttling.delay.highest-latency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionThrottlingDelayHighestLatency(String value) { return this.with("advanced.metrics.session.throttling.delay.highest-latency",value); } /** * Add value to property list mapped to advanced.metadata.schema.request-timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataSchemaRequestTimeout(String value) { return this.with("advanced.metadata.schema.request-timeout",value); } /** * Add value to property list mapped to advanced.resolve-contact-points * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedResolveContactPoints(Boolean value) { return this.with("advanced.resolve-contact-points",value); } /** * Add value to property list mapped to advanced.netty.io-group.shutdown.quiet-period * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyIoGroupShutdownQuietPeriod(Integer value) { return this.with("advanced.netty.io-group.shutdown.quiet-period",value); } /** * Add value to property list mapped to advanced.netty.timer.ticks-per-wheel * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyTimerTicksPerWheel(Integer value) { return this.with("advanced.netty.timer.ticks-per-wheel",value); } /** * Add value to property list mapped to advanced.request.log-warnings * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestLogWarnings(Boolean value) { return this.with("advanced.request.log-warnings",value); } /** * Add value to property list mapped to advanced.auth-provider.password * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedAuthProviderPassword(String value) { return this.with("advanced.auth-provider.password",value); } /** * Add value to property list mapped to query-options.profile * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withQueryOptionsProfile(String value) { return this.with("query-options.profile",value); } /** * Add value to property list mapped to advanced.netty.admin-group.shutdown.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyAdminGroupShutdownTimeout(Integer value) { return this.with("advanced.netty.admin-group.shutdown.timeout",value); } /** * Add value to property list mapped to advanced.netty.admin-group.shutdown.unit * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return 
builder with property set */ public CasquatchDaoBuilder withAdvancedNettyAdminGroupShutdownUnit(String value) { return this.with("advanced.netty.admin-group.shutdown.unit",value); } /** * Add value to property list mapped to advanced.metrics.node.cql-messages.highest-latency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsNodeCqlMessagesHighestLatency(String value) { return this.with("advanced.metrics.node.cql-messages.highest-latency",value); } /** * Add value to property list mapped to solr-query-options.limit * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsLimit(Integer value) { return this.with("solr-query-options.limit",value); } /** * Add value to property list mapped to advanced.netty.admin-group.size * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyAdminGroupSize(Integer value) { return this.with("advanced.netty.admin-group.size",value); } /** * Add value to property list mapped to advanced.retry-policy.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRetryPolicyClass(String value) { return this.with("advanced.retry-policy.class",value); } /** * Add value to property list mapped to advanced.metadata.schema.enabled * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataSchemaEnabled(Boolean value) { return this.with("advanced.metadata.schema.enabled",value); } /** * Add value to property list mapped to advanced.prepared-statements.reprepare-on-up.check-system-table * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsReprepareOnUpCheckSystemTable(Boolean value) { return this.with("advanced.prepared-statements.reprepare-on-up.check-system-table",value); } /** * Add value to property list mapped to advanced.connection.max-requests-per-connection * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionMaxRequestsPerConnection(Integer value) { return this.with("advanced.connection.max-requests-per-connection",value); } /** * Add value to property list mapped to advanced.request.trace.interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestTraceInterval(String value) { return this.with("advanced.request.trace.interval",value); } /** * Add value to property list mapped to advanced.control-connection.schema-agreement.interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedControlConnectionSchemaAgreementInterval(String value) { return this.with("advanced.control-connection.schema-agreement.interval",value); } /** * Add value to property list mapped to 
advanced.control-connection.schema-agreement.warn-on-failure * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedControlConnectionSchemaAgreementWarnOnFailure(Boolean value) { return this.with("advanced.control-connection.schema-agreement.warn-on-failure",value); } /** * Add value to property list mapped to advanced.metrics.node.enabled * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsNodeEnabled(String value) { if(value.contains(",")) { return this.with("advanced.metrics.node.enabled", Arrays.asList(value.split(","))); } else { return this.with("advanced.metrics.node.enabled", Collections.singletonList(value)); } } /** * Add value to property list mapped to advanced.metrics.node.enabled * * NOTE: Generated from reference.conf files on release * * @param value list of values for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsNodeEnabled(List<String> value) { return this.with("advanced.metrics.node.enabled",value); } /** * Add value to property list mapped to advanced.timestamp-generator.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedTimestampGeneratorClass(String value) { return this.with("advanced.timestamp-generator.class",value); } /** * Add value to property list mapped to failover-policy.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withFailoverPolicyClass(String value) { return this.with("failover-policy.class",value); } /** * Add value to property list mapped to advanced.netty.admin-group.shutdown.quiet-period * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyAdminGroupShutdownQuietPeriod(Integer value) { return this.with("advanced.netty.admin-group.shutdown.quiet-period",value); } /** * Add value to property list mapped to failover-policy.profile * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withFailoverPolicyProfile(String value) { return this.with("failover-policy.profile",value); } /** * Add value to property list mapped to basic.load-balancing-policy.filter.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicLoadBalancingPolicyFilterClass(String value) { return this.with("basic.load-balancing-policy.filter.class",value); } /** * Add value to property list mapped to advanced.ssl-engine-factory.truststore-password * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryTruststorePassword(String value) { return this.with("advanced.ssl-engine-factory.truststore-password",value); } /** * Add value to property list mapped to query-options.ignore-non-primary-keys * * NOTE: Generated from reference.conf files on release * * @param value value for property * 
@return builder with property set */ public CasquatchDaoBuilder withQueryOptionsIgnoreNonPrimaryKeys(Boolean value) { return this.with("query-options.ignore-non-primary-keys",value); } /** * Add value to property list mapped to advanced.metrics.session.throttling.delay.significant-digits * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionThrottlingDelaySignificantDigits(Integer value) { return this.with("advanced.metrics.session.throttling.delay.significant-digits",value); } /** * Add value to property list mapped to basic.request.consistency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicRequestConsistency(String value) { return this.with("basic.request.consistency",value); } /** * Add value to property list mapped to query-options.persist-nulls * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withQueryOptionsPersistNulls(Boolean value) { return this.with("query-options.persist-nulls",value); } /** * Add value to property list mapped to advanced.coalescer.reschedule-interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedCoalescerRescheduleInterval(String value) { return this.with("advanced.coalescer.reschedule-interval",value); } /** * Add value to property list mapped to query-options.allow-non-primary-keys * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withQueryOptionsAllowNonPrimaryKeys(Boolean value) { return this.with("query-options.allow-non-primary-keys",value); } /** * Add value to property list mapped to advanced.metrics.session.throttling.delay.refresh-interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionThrottlingDelayRefreshInterval(String value) { return this.with("advanced.metrics.session.throttling.delay.refresh-interval",value); } /** * Add value to property list mapped to advanced.metrics.session.enabled * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionEnabled(String value) { if(value.contains(",")) { return this.with("advanced.metrics.session.enabled", Arrays.asList(value.split(","))); } else { return this.with("advanced.metrics.session.enabled", Collections.singletonList(value)); } } /** * Add value to property list mapped to advanced.metrics.session.enabled * * NOTE: Generated from reference.conf files on release * * @param value list of values for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionEnabled(List<String> value) { return this.with("advanced.metrics.session.enabled",value); } /** * Add value to property list mapped to basic.config-reload-interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder 
withBasicConfigReloadInterval(String value) { return this.with("basic.config-reload-interval",value); } /** * Add value to property list mapped to query-options.limit * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withQueryOptionsLimit(Integer value) { return this.with("query-options.limit",value); } /** * Add value to property list mapped to advanced.netty.io-group.shutdown.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyIoGroupShutdownTimeout(Integer value) { return this.with("advanced.netty.io-group.shutdown.timeout",value); } /** * Add value to property list mapped to advanced.metrics.session.cql-requests.refresh-interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionCqlRequestsRefreshInterval(String value) { return this.with("advanced.metrics.session.cql-requests.refresh-interval",value); } /** * Add value to property list mapped to solr-query-options.consistency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsConsistency(String value) { return this.with("solr-query-options.consistency",value); } /** * Add value to property list mapped to advanced.request-tracker.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestTrackerClass(String value) { return this.with("advanced.request-tracker.class",value); } /** * Add value to property list mapped to basic.request.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicRequestTimeout(String value) { return this.with("basic.request.timeout",value); } /** * Add value to property list mapped to advanced.coalescer.max-runs-with-no-work * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedCoalescerMaxRunsWithNoWork(Integer value) { return this.with("advanced.coalescer.max-runs-with-no-work",value); } /** * Add value to property list mapped to advanced.auth-provider.username * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedAuthProviderUsername(String value) { return this.with("advanced.auth-provider.username",value); } /** * Add value to property list mapped to advanced.throttler.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedThrottlerClass(String value) { return this.with("advanced.throttler.class",value); } /** * Add value to property list mapped to advanced.reconnection-policy.max-delay * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedReconnectionPolicyMaxDelay(String value) { return 
this.with("advanced.reconnection-policy.max-delay",value); } /** * Add value to property list mapped to advanced.metadata.schema.request-page-size * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataSchemaRequestPageSize(Integer value) { return this.with("advanced.metadata.schema.request-page-size",value); } /** * Add value to property list mapped to advanced.reconnection-policy.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedReconnectionPolicyClass(String value) { return this.with("advanced.reconnection-policy.class",value); } /** * Add value to property list mapped to advanced.request.warn-if-set-keyspace * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestWarnIfSetKeyspace(Boolean value) { return this.with("advanced.request.warn-if-set-keyspace",value); } /** * Add value to property list mapped to advanced.netty.io-group.shutdown.unit * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyIoGroupShutdownUnit(String value) { return this.with("advanced.netty.io-group.shutdown.unit",value); } /** * Add value to property list mapped to advanced.schema-change-listener.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSchemaChangeListenerClass(String value) { return this.with("advanced.schema-change-listener.class",value); } /** * Add value to property list mapped to basic.contact-points * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicContactPoints(String value) { if(value.contains(",")) { return this.with("basic.contact-points", Collections.singletonList(value.split(","))); } else { return this.with("basic.contact-points", Collections.singletonList(value)); } } /** * Add value to property list mapped to basic.contact-points * * NOTE: Generated from reference.conf files on release * * @param value list of values for property * @return builder with property set */ public CasquatchDaoBuilder withBasicContactPoints(List<String> value) { return this.with("basic.contact-points",value); } /** * Add value to property list mapped to advanced.speculative-execution-policy.delay * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSpeculativeExecutionPolicyDelay(String value) { return this.with("advanced.speculative-execution-policy.delay",value); } /** * Add value to property list mapped to advanced.heartbeat.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedHeartbeatTimeout(String value) { return this.with("advanced.heartbeat.timeout",value); } /** * Add value to property list mapped to basic.request.page-size * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with 
property set */ public CasquatchDaoBuilder withBasicRequestPageSize(Integer value) { return this.with("basic.request.page-size",value); } /** * Add value to property list mapped to advanced.speculative-execution-policy.max-executions * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSpeculativeExecutionPolicyMaxExecutions(Integer value) { return this.with("advanced.speculative-execution-policy.max-executions",value); } /** * Add value to property list mapped to solr-query-options.allow-non-primary-keys * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsAllowNonPrimaryKeys(Boolean value) { return this.with("solr-query-options.allow-non-primary-keys",value); } /** * Add value to property list mapped to advanced.timestamp-generator.drift-warning.interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedTimestampGeneratorDriftWarningInterval(String value) { return this.with("advanced.timestamp-generator.drift-warning.interval",value); } /** * Add value to property list mapped to advanced.ssl-engine-factory.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryClass(String value) { return this.with("advanced.ssl-engine-factory.class",value); } /** * Add value to property list mapped to advanced.connection.init-query-timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionInitQueryTimeout(String value) { return this.with("advanced.connection.init-query-timeout",value); } /** * Add value to property list mapped to advanced.request.trace.attempts * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestTraceAttempts(Integer value) { return this.with("advanced.request.trace.attempts",value); } /** * Add value to property list mapped to advanced.ssl-engine-factory.keystore-password * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryKeystorePassword(String value) { return this.with("advanced.ssl-engine-factory.keystore-password",value); } /** * Add value to property list mapped to advanced.request.trace.consistency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedRequestTraceConsistency(String value) { return this.with("advanced.request.trace.consistency",value); } /** * Add value to property list mapped to advanced.connection.pool.local.size * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionPoolLocalSize(Integer value) { return this.with("advanced.connection.pool.local.size",value); } /** * Add value to property list mapped to solr-query-options.ignore-non-primary-keys 
* * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsIgnoreNonPrimaryKeys(Boolean value) { return this.with("solr-query-options.ignore-non-primary-keys",value); } /** * Add value to property list mapped to advanced.socket.tcp-no-delay * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSocketTcpNoDelay(Boolean value) { return this.with("advanced.socket.tcp-no-delay",value); } /** * Add value to property list mapped to advanced.metrics.node.cql-messages.significant-digits * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsNodeCqlMessagesSignificantDigits(Integer value) { return this.with("advanced.metrics.node.cql-messages.significant-digits",value); } /** * Add value to property list mapped to advanced.metrics.session.cql-requests.highest-latency * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionCqlRequestsHighestLatency(String value) { return this.with("advanced.metrics.session.cql-requests.highest-latency",value); } /** * Add value to property list mapped to advanced.metadata.schema.debouncer.window * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataSchemaDebouncerWindow(String value) { return this.with("advanced.metadata.schema.debouncer.window",value); } /** * Add value to property list mapped to query-options.null-saving-strategy * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withQueryOptionsNullSavingStrategy(String value) { return this.with("query-options.null-saving-strategy",value); } /** * Add value to property list mapped to advanced.reconnection-policy.base-delay * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedReconnectionPolicyBaseDelay(String value) { return this.with("advanced.reconnection-policy.base-delay",value); } /** * Add value to property list mapped to advanced.reconnect-on-init * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedReconnectOnInit(Boolean value) { return this.with("advanced.reconnect-on-init",value); } /** * Add value to property list mapped to advanced.node-state-listener.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNodeStateListenerClass(String value) { return this.with("advanced.node-state-listener.class",value); } /** * Add value to property list mapped to advanced.prepared-statements.reprepare-on-up.enabled * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsReprepareOnUpEnabled(Boolean value) { return 
this.with("advanced.prepared-statements.reprepare-on-up.enabled",value); } /** * Add value to property list mapped to advanced.timestamp-generator.drift-warning.threshold * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedTimestampGeneratorDriftWarningThreshold(String value) { return this.with("advanced.timestamp-generator.drift-warning.threshold",value); } /** * Add value to property list mapped to advanced.auth-provider.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedAuthProviderClass(String value) { return this.with("advanced.auth-provider.class",value); } /** * Add value to property list mapped to basic.session-keyspace * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withBasicSessionKeyspace(String value) { return this.with("basic.session-keyspace",value); } /** * Add value to property list mapped to advanced.timestamp-generator.force-java-clock * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedTimestampGeneratorForceJavaClock(Boolean value) { return this.with("advanced.timestamp-generator.force-java-clock",value); } /** * Add value to property list mapped to advanced.prepared-statements.reprepare-on-up.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsReprepareOnUpTimeout(String value) { return this.with("advanced.prepared-statements.reprepare-on-up.timeout",value); } /** * Add value to property list mapped to advanced.metadata.schema.debouncer.max-events * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataSchemaDebouncerMaxEvents(Integer value) { return this.with("advanced.metadata.schema.debouncer.max-events",value); } /** * Add value to property list mapped to advanced.prepared-statements.reprepare-on-up.max-parallelism * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsReprepareOnUpMaxParallelism(Integer value) { return this.with("advanced.prepared-statements.reprepare-on-up.max-parallelism",value); } /** * Add value to property list mapped to advanced.metrics.node.cql-messages.refresh-interval * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsNodeCqlMessagesRefreshInterval(String value) { return this.with("advanced.metrics.node.cql-messages.refresh-interval",value); } /** * Add value to property list mapped to advanced.netty.io-group.size * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyIoGroupSize(Integer value) { return this.with("advanced.netty.io-group.size",value); } /** * Add value to property list mapped to advanced.connection.set-keyspace-timeout 
* * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionSetKeyspaceTimeout(Integer value) { return this.with("advanced.connection.set-keyspace-timeout",value); } /** * Add value to property list mapped to advanced.speculative-execution-policy.class * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSpeculativeExecutionPolicyClass(String value) { return this.with("advanced.speculative-execution-policy.class",value); } /** * Add value to property list mapped to advanced.metadata.token-map.enabled * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetadataTokenMapEnabled(Boolean value) { return this.with("advanced.metadata.token-map.enabled",value); } /** * Add value to property list mapped to advanced.prepared-statements.prepare-on-all-nodes * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsPrepareOnAllNodes(Boolean value) { return this.with("advanced.prepared-statements.prepare-on-all-nodes",value); } /** * Add value to property list mapped to solr-query-options.profile * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withSolrQueryOptionsProfile(String value) { return this.with("solr-query-options.profile",value); } /** * Add value to property list mapped to advanced.ssl-engine-factory.hostname-validation * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryHostnameValidation(Boolean value) { return this.with("advanced.ssl-engine-factory.hostname-validation",value); } /** * Add value to property list mapped to advanced.control-connection.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedControlConnectionTimeout(String value) { return this.with("advanced.control-connection.timeout",value); } /** * Add value to property list mapped to advanced.prepared-statements.reprepare-on-up.max-statements * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedPreparedStatementsReprepareOnUpMaxStatements(Integer value) { return this.with("advanced.prepared-statements.reprepare-on-up.max-statements",value); } /** * Add value to property list mapped to max-requests-per-connection * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withMaxRequestsPerConnection(Integer value) { return this.with("max-requests-per-connection",value); } /** * Add value to property list mapped to profiles.ddl.basic.request.timeout * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withProfilesDdlBasicRequestTimeout(String value) { return 
this.with("profiles.ddl.basic.request.timeout",value); } /** * Add value to property list mapped to advanced.ssl-engine-factory.cipher-suites * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryCipherSuites(String value) { if(value.contains(",")) { return this.with("advanced.ssl-engine-factory.cipher-suites", Collections.singletonList(value.split(","))); } else { return this.with("advanced.ssl-engine-factory.cipher-suites", Collections.singletonList(value)); } } /** * Add value to property list mapped to advanced.ssl-engine-factory.cipher-suites * * NOTE: Generated from reference.conf files on release * * @param value list of values for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedSslEngineFactoryCipherSuites(List<String> value) { return this.with("advanced.ssl-engine-factory.cipher-suites",value); } /** * Add value to property list mapped to advanced.metrics.session.cql-requests.significant-digits * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedMetricsSessionCqlRequestsSignificantDigits(Integer value) { return this.with("advanced.metrics.session.cql-requests.significant-digits",value); } /** * Add value to property list mapped to advanced.netty.timer.tick-duration * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedNettyTimerTickDuration(String value) { return this.with("advanced.netty.timer.tick-duration",value); } /** * Add value to property list mapped to advanced.connection.max-orphan-requests * * NOTE: Generated from reference.conf files on release * * @param value value for property * @return builder with property set */ public CasquatchDaoBuilder withAdvancedConnectionMaxOrphanRequests(Integer value) { return this.with("advanced.connection.max-orphan-requests",value); } }
tmobile/casquatch
casquatch-driver/src/main/java/com/tmobile/opensource/casquatch/CasquatchDaoBuilder.java
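These generated with* methods all funnel into the same with(key, value) accumulator, so they chain freely. A minimal usage sketch, assuming the builder is obtained via CasquatchDao.builder() and finished with build() (entry-point names taken from the project's documented usage, not from this file):

CasquatchDao dao = CasquatchDao.builder()                 // assumed entry point
        .withBasicSessionKeyspace("demo_keyspace")        // maps to basic.session-keyspace
        .withBasicRequestPageSize(500)                    // maps to basic.request.page-size
        .withAdvancedReconnectOnInit(true)                // maps to advanced.reconnect-on-init
        .withAdvancedSslEngineFactoryCipherSuites("TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384")
        .build();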
213,966
package com.jwetherell.algorithms.graph; import java.util.HashMap; import java.util.List; import java.util.Map; import com.jwetherell.algorithms.data_structures.Graph; /** * Johnson's algorithm is a way to find the shortest paths between all pairs of * vertices in a sparse directed graph. It allows some of the edge weights to be * negative numbers, but no negative-weight cycles may exist. * <p> * Worst case: O(V^2 log V + VE) * <p> * @see <a href="https://en.wikipedia.org/wiki/Johnson%27s_algorithm">Johnson's Algorithm (Wikipedia)</a> * <br> * @author Justin Wetherell <[email protected]> */ public class Johnson { private Johnson() { } public static Map<Graph.Vertex<Integer>, Map<Graph.Vertex<Integer>, List<Graph.Edge<Integer>>>> getAllPairsShortestPaths(Graph<Integer> g) { if (g == null) throw (new NullPointerException("Graph must be non-NULL.")); // First, a new node 'connector' is added to the graph, connected by zero-weight edges to each of the other nodes. final Graph<Integer> graph = new Graph<Integer>(g); final Graph.Vertex<Integer> connector = new Graph.Vertex<Integer>(Integer.MAX_VALUE); // Connect the connector vertex to every existing vertex with a zero-weight edge. for (Graph.Vertex<Integer> v : graph.getVertices()) { final int indexOfV = graph.getVertices().indexOf(v); final Graph.Edge<Integer> edge = new Graph.Edge<Integer>(0, connector, graph.getVertices().get(indexOfV)); connector.addEdge(edge); graph.getEdges().add(edge); } graph.getVertices().add(connector); // Second, the Bellman–Ford algorithm is used, starting from the new vertex 'connector', to find for each vertex 'v' // the minimum weight h(v) of a path from 'connector' to 'v'. If this step detects a negative cycle, the algorithm is terminated. final Map<Graph.Vertex<Integer>, Graph.CostPathPair<Integer>> costs = BellmanFord.getShortestPaths(graph, connector); // Next the edges of the original graph are re-weighted using the values computed by the Bellman–Ford algorithm: an edge // from u to v, having length w(u,v), is given the new length w(u,v) + h(u) − h(v). for (Graph.Edge<Integer> e : graph.getEdges()) { final int weight = e.getCost(); final Graph.Vertex<Integer> u = e.getFromVertex(); final Graph.Vertex<Integer> v = e.getToVertex(); // Don't worry about the connector if (u.equals(connector) || v.equals(connector)) continue; // Adjust the costs final int uCost = costs.get(u).getCost(); final int vCost = costs.get(v).getCost(); final int newWeight = weight + uCost - vCost; e.setCost(newWeight); } // Finally, 'connector' is removed, and Dijkstra's algorithm is used to find the shortest paths from each node (s) to every // other vertex in the re-weighted graph.
final int indexOfConnector = graph.getVertices().indexOf(connector); graph.getVertices().remove(indexOfConnector); for (Graph.Edge<Integer> e : connector.getEdges()) { final int indexOfConnectorEdge = graph.getEdges().indexOf(e); graph.getEdges().remove(indexOfConnectorEdge); } final Map<Graph.Vertex<Integer>, Map<Graph.Vertex<Integer>, List<Graph.Edge<Integer>>>> allShortestPaths = new HashMap<Graph.Vertex<Integer>, Map<Graph.Vertex<Integer>, List<Graph.Edge<Integer>>>>(); for (Graph.Vertex<Integer> v : graph.getVertices()) { final Map<Graph.Vertex<Integer>, Graph.CostPathPair<Integer>> costPaths = Dijkstra.getShortestPaths(graph, v); final Map<Graph.Vertex<Integer>, List<Graph.Edge<Integer>>> paths = new HashMap<Graph.Vertex<Integer>, List<Graph.Edge<Integer>>>(); for (Graph.Vertex<Integer> v2 : costPaths.keySet()) { final Graph.CostPathPair<Integer> pair = costPaths.get(v2); paths.put(v2, pair.getPath()); } allShortestPaths.put(v, paths); } return allShortestPaths; } }
sureshg/java-algorithms-implementation
src/com/jwetherell/algorithms/graph/Johnson.java
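The pivotal step above is the re-weighting w'(u,v) = w(u,v) + h(u) − h(v), where h is the Bellman-Ford distance from the connector. A self-contained sketch with hypothetical weights showing why re-weighted edges come out non-negative:

// h(v) is the Bellman-Ford distance from the zero-weight connector vertex
int[] h = {0, -2, -3};          // hypothetical h(0), h(1), h(2)
int w01 = -2, w12 = -1;         // original (negative) edge weights 0->1 and 1->2
int r01 = w01 + h[0] - h[1];    // -2 + 0 - (-2) = 0, non-negative
int r12 = w12 + h[1] - h[2];    // -1 + (-2) - (-3) = 0, non-negative
// h(v) <= h(u) + w(u,v) by definition of shortest paths, so r(u,v) >= 0 always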
213,969
/* * Copyright 2014-2024 TNG Technology Consulting GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.tngtech.archunit.library.cycle_detection; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import com.tngtech.archunit.library.cycle_detection.PrimitiveDataTypes.IntStack; import static java.util.Arrays.binarySearch; /** * Extends a {@link PrimitiveGraph} with any data structures necessary to execute Johnson's algorithm for cycle detection. * The idea is mainly to keep the code readable and understandable while using many primitive data structures * for performance reasons. * <br><br> * Note that for {@code blocked} and {@code dependentlyBlocked} more sophisticated data structures like {@link Set} * and {@link Multimap} seemed to be a good fit without having a big performance impact when tested locally. */ class JohnsonComponent { private final PrimitiveGraph graph; /** * We always operate on a single strongly connected component to detect cycles within. * Note that by convention components (represented as {@code int[]}) will always be a sequence * of node indexes sorted in ascending order. */ private int[] stronglyConnectedComponent = new int[0]; /** * Records which nodes are blocked at the moment according to Johnson's algorithm. This is mainly * a performance optimization so we do not have to redundantly follow paths where we know no * cycle can exist. */ private final Set<Integer> blocked = new HashSet<>(); /** * Records if we have to unblock other nodes once we unblock a specific node. Johnson's algorithm * uses this to free nodes if we have found a cycle to create the possibility to find a further cycle * through these nodes. */ private final Multimap<Integer, Integer> dependentlyBlocked = HashMultimap.create(); /** * Contains the nodes we have currently visited. In case we ever find a path back to the starting node, * we can pop this stack and consequently obtain a cycle through the starting node. */ private final IntStack nodeStack; /** * Performance optimization. When we return the nodes adjacent to a specific node within this * strongly connected component, we initially do not know how many nodes we will return. * Thus we need to temporarily store these nodes in some array to copy that array with the correct * size in the end. * Note that allocating a new temporary array on each method call will make a <b>huge</b> difference * with regard to performance. */ private final int[] tempAdjacentNodesInComponent; private JohnsonComponent(PrimitiveGraph graph) { this.graph = graph; nodeStack = new IntStack(graph.getSize()); tempAdjacentNodesInComponent = new int[graph.getSize()]; } /** * Initialize the Johnson component with the strongly connected component returned by Tarjan's algorithm. 
* @param sortedStronglyConnectedComponent the array of node indexes sorted in ascending order */ void init(int[] sortedStronglyConnectedComponent) { this.stronglyConnectedComponent = sortedStronglyConnectedComponent; blocked.clear(); dependentlyBlocked.clear(); } int[] getAdjacentNodesOf(int nodeIndex) { int index = 0; for (int candidate : graph.getAdjacentNodesOf(nodeIndex)) { if (componentContains(candidate)) { tempAdjacentNodesInComponent[index++] = candidate; } } return Arrays.copyOf(tempAdjacentNodesInComponent, index); } private boolean componentContains(int nodeIndex) { return binarySearch(stronglyConnectedComponent, nodeIndex) >= 0; } boolean isStartNodeIndex(int nodeIndex) { return getStartNodeIndex() == nodeIndex; } int getStartNodeIndex() { return stronglyConnectedComponent[0]; } boolean isNotBlocked(int nodeIndex) { return !blocked.contains(nodeIndex); } void block(int nodeIndex) { blocked.add(nodeIndex); } void unblock(int nodeIndex) { if (!blocked.remove(nodeIndex)) { return; } for (Integer dependentlyBlockedIndex : dependentlyBlocked.get(nodeIndex)) { unblock(dependentlyBlockedIndex); } dependentlyBlocked.get(nodeIndex).clear(); } /** * Marks node {@code indexOfNodeDependentlyBlocked} as dependently blocked by {@code indexOfNode}. * This means that if we ever unblock node {@code indexOfNode} we also have to unblock * {@code indexOfNodeDependentlyBlocked}. */ void markDependentlyBlocked(int indexOfNodeDependentlyBlocked, int indexOfNode) { dependentlyBlocked.put(indexOfNode, indexOfNodeDependentlyBlocked); } void pushOnStack(int nodeIndex) { nodeStack.push(nodeIndex); } void popFromStack() { nodeStack.pop(); } int[] getStack() { return nodeStack.asArray(); } static JohnsonComponent within(PrimitiveGraph graph) { return new JohnsonComponent(graph); } }
TNG/ArchUnit
archunit/src/main/java/com/tngtech/archunit/library/cycle_detection/JohnsonComponent.java
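The subtle invariant in the class above is that unblocking must cascade through dependentlyBlocked. A small illustrative sketch of that contract, assuming a package-local PrimitiveGraph instance named graph is in scope (node indexes are arbitrary):

JohnsonComponent component = JohnsonComponent.within(graph);
component.init(new int[]{0, 2, 5});     // node indexes sorted ascending, per the class convention
component.block(2);
component.block(5);
component.markDependentlyBlocked(5, 2); // unblocking 2 must also unblock 5
component.unblock(2);                   // recursively unblocks 5 via dependentlyBlocked
assert component.isNotBlocked(2) && component.isNotBlocked(5);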
213,970
package chapter4.section4; import chapter3.section4.SeparateChainingHashTable; import chapter3.section5.HashSet; import edu.princeton.cs.algs4.StdOut; import java.util.*; /** * Created by Rene Argento on 30/12/17. */ // Runtime: O((E + V) * (C + 1)), where C is the number of cycles in the graph. // The number of cycles in a graph can be exponential. // Space complexity: O(E + V + S) where S is the sum of the number of vertices in all cycles. // Based on Johnson's original paper http://www.cs.tufts.edu/comp/150GA/homeworks/hw1/Johnson%2075.PDF and // Tushar Roy's excellent video https://www.youtube.com/watch?v=johyrWospv0 and // https://github.com/mission-peace/interview/blob/master/src/com/interview/graph/AllCyclesInDirectedGraphJohnson.java public class JohnsonAllCycles { private HashSet<Integer> blockedVerticesSet; private SeparateChainingHashTable<Integer, HashSet<Integer>> blockedVerticesMap; private Deque<Integer> stack; private List<List<Integer>> allCyclesByVertices; private Deque<DirectedEdge> stackOfEdges; private List<List<DirectedEdge>> allCyclesByEdges; private int verticesCount; public void findAllCycles(EdgeWeightedDigraphInterface edgeWeightedDigraph) { blockedVerticesSet = new HashSet<>(); blockedVerticesMap = new SeparateChainingHashTable<>(); stack = new ArrayDeque<>(); allCyclesByVertices = new ArrayList<>(); stackOfEdges = new ArrayDeque<>(); allCyclesByEdges = new ArrayList<>(); verticesCount = edgeWeightedDigraph.vertices(); KosarajuSharirSCCWeighted kosarajuSharirSCCWeighted = new KosarajuSharirSCCWeighted(edgeWeightedDigraph); List<Integer>[] stronglyConnectedComponents = kosarajuSharirSCCWeighted.getSCCs(); for (List<Integer> stronglyConnectedComponent : stronglyConnectedComponents) { if (stronglyConnectedComponent.size() == 1) { continue; } EdgeWeightedDigraphInterface sccSubGraph = createSubGraphFromSCC(edgeWeightedDigraph, stronglyConnectedComponent); for (int vertexToProcess : stronglyConnectedComponent) { if (sccSubGraph.outdegree(vertexToProcess) == 0) { continue; } // Clear blockedVerticesSet and blockedVerticesMap blockedVerticesSet = new HashSet<>(); blockedVerticesMap = new SeparateChainingHashTable<>(); findCycles(sccSubGraph, vertexToProcess, vertexToProcess, null); sccSubGraph = createSubGraphByRemovingVertex(sccSubGraph, vertexToProcess); } } } private boolean findCycles(EdgeWeightedDigraphInterface edgeWeightedDigraph, int startVertex, int currentVertex, DirectedEdge currentEdge) { boolean foundCycle = false; stack.push(currentVertex); blockedVerticesSet.add(currentVertex); if (currentEdge != null) { stackOfEdges.push(currentEdge); } for (DirectedEdge edge : edgeWeightedDigraph.adjacent(currentVertex)) { int neighbor = edge.to(); // If neighbor is the same as the start vertex, a cycle was found.
if (neighbor == startVertex) { // Add cycle with vertices to the cycles list List<Integer> cycle = new ArrayList<>(); stack.push(startVertex); cycle.addAll(stack); Collections.reverse(cycle); stack.pop(); allCyclesByVertices.add(cycle); // Add cycle with edges to the cycles list List<DirectedEdge> cycleByEdges = new ArrayList<>(); stackOfEdges.push(edge); cycleByEdges.addAll(stackOfEdges); Collections.reverse(cycleByEdges); stackOfEdges.pop(); allCyclesByEdges.add(cycleByEdges); foundCycle = true; } else if (!blockedVerticesSet.contains(neighbor)) { boolean foundCycleThroughNeighbor = findCycles(edgeWeightedDigraph, startVertex, neighbor, edge); foundCycle = foundCycle || foundCycleThroughNeighbor; } } // If a cycle was found with the current vertex, then recursively unblock it and all vertices which are // dependent on it if (foundCycle) { unblock(currentVertex); } else { // If no cycle was found, add the current vertex to its neighbors blockedVerticesMap. // If any of those neighbors ever get unblocked, then unblock the current vertex as well. for (DirectedEdge edge : edgeWeightedDigraph.adjacent(currentVertex)) { int neighbor = edge.to(); HashSet<Integer> dependentVerticesFromNeighbor = blockedVerticesMap.get(neighbor); if (dependentVerticesFromNeighbor == null) { dependentVerticesFromNeighbor = new HashSet<>(); dependentVerticesFromNeighbor.add(currentVertex); blockedVerticesMap.put(neighbor, dependentVerticesFromNeighbor); } else { dependentVerticesFromNeighbor.add(currentVertex); } } } stack.pop(); if (!stackOfEdges.isEmpty()) { stackOfEdges.pop(); } return foundCycle; } private void unblock(int vertex) { blockedVerticesSet.delete(vertex); HashSet<Integer> dependentVertices = blockedVerticesMap.get(vertex); if (dependentVertices != null) { for (int dependentVertex : dependentVertices.keys()) { if (blockedVerticesSet.contains(dependentVertex)) { unblock(dependentVertex); } } blockedVerticesMap.delete(vertex); } } private EdgeWeightedDigraphInterface createSubGraphFromSCC(EdgeWeightedDigraphInterface edgeWeightedDigraph, List<Integer> stronglyConnectedComponent) { HashSet<Integer> verticesInSCC = new HashSet<>(); for (int vertex : stronglyConnectedComponent) { verticesInSCC.add(vertex); } EdgeWeightedDigraphInterface subGraph = new EdgeWeightedDigraph(edgeWeightedDigraph.vertices()); for (int vertex = 0; vertex < edgeWeightedDigraph.vertices(); vertex++) { for (DirectedEdge edge : edgeWeightedDigraph.adjacent(vertex)) { int neighbor = edge.to(); if (!verticesInSCC.contains(vertex) || !verticesInSCC.contains(neighbor)) { continue; } subGraph.addEdge(new DirectedEdge(vertex, edge.to(), edge.weight())); } } return subGraph; } // Creates a subgraph with vertexToRemove removed private EdgeWeightedDigraphInterface createSubGraphByRemovingVertex(EdgeWeightedDigraphInterface edgeWeightedDigraph, int vertexToRemove) { EdgeWeightedDigraphInterface subGraph = new EdgeWeightedDigraph(edgeWeightedDigraph.vertices()); for (int vertex = 0; vertex < edgeWeightedDigraph.vertices(); vertex++) { for (DirectedEdge edge : edgeWeightedDigraph.adjacent(vertex)) { int neighbor = edge.to(); if (vertex == vertexToRemove || neighbor == vertexToRemove) { continue; } subGraph.addEdge(new DirectedEdge(vertex, edge.to(), edge.weight())); } } return subGraph; } public List<List<Integer>> getAllCyclesByVertices() { return allCyclesByVertices; } @SuppressWarnings("unchecked") public List<List<Integer>> getAllCyclesByVerticesInOrder() { List<List<Integer>>[] cyclesByInitialVertex = (List<List<Integer>>[]) new 
ArrayList[verticesCount]; for (int cycles = 0; cycles < cyclesByInitialVertex.length; cycles++) { cyclesByInitialVertex[cycles] = new ArrayList<>(); } for (List<Integer> cycle : allCyclesByVertices) { int initialVertex = cycle.get(0); cyclesByInitialVertex[initialVertex].add(cycle); } List<List<Integer>> allCyclesInOrder = new ArrayList<>(); for (List<List<Integer>> cycles : cyclesByInitialVertex) { allCyclesInOrder.addAll(cycles); } return allCyclesInOrder; } public List<List<DirectedEdge>> getAllCyclesByEdges() { return allCyclesByEdges; } public static void main(String[] args) { JohnsonAllCycles johnsonAllCycles = new JohnsonAllCycles(); EdgeWeightedDigraphInterface edgeWeightedDigraph = new EdgeWeightedDigraph(9); edgeWeightedDigraph.addEdge(new DirectedEdge(0, 1, 2)); edgeWeightedDigraph.addEdge(new DirectedEdge(0, 7, -1)); edgeWeightedDigraph.addEdge(new DirectedEdge(0, 4, 1)); edgeWeightedDigraph.addEdge(new DirectedEdge(1, 8, 4)); edgeWeightedDigraph.addEdge(new DirectedEdge(1, 6, 2)); edgeWeightedDigraph.addEdge(new DirectedEdge(1, 2, 1)); edgeWeightedDigraph.addEdge(new DirectedEdge(2, 0, -3)); edgeWeightedDigraph.addEdge(new DirectedEdge(2, 1, 6)); edgeWeightedDigraph.addEdge(new DirectedEdge(2, 5, 2)); edgeWeightedDigraph.addEdge(new DirectedEdge(2, 3, 1)); edgeWeightedDigraph.addEdge(new DirectedEdge(5, 3, -2)); edgeWeightedDigraph.addEdge(new DirectedEdge(3, 4, 3)); edgeWeightedDigraph.addEdge(new DirectedEdge(4, 1, -2)); edgeWeightedDigraph.addEdge(new DirectedEdge(7, 8, 4)); edgeWeightedDigraph.addEdge(new DirectedEdge(8, 7, 4)); johnsonAllCycles.findAllCycles(edgeWeightedDigraph); List<List<Integer>> allCycles = johnsonAllCycles.getAllCyclesByVerticesInOrder(); StdOut.println("All cycles"); allCycles.forEach(cycle -> { StringJoiner joiner = new StringJoiner("->"); cycle.forEach(vertex -> joiner.add(String.valueOf(vertex))); StdOut.println(joiner); }); StdOut.println("\nExpected:"); StdOut.println("0->1->2->0\n" + "0->4->1->2->0\n" + "1->2->3->4->1\n" + "1->2->5->3->4->1\n" + "1->2->1\n" + "7->8->7"); List<List<DirectedEdge>> allCyclesByEdges = johnsonAllCycles.getAllCyclesByEdges(); StdOut.println("\nAll cycles (and edge weights)"); allCyclesByEdges.forEach(cycle -> { StringJoiner joiner = new StringJoiner(" "); cycle.forEach(edge -> joiner.add(String.valueOf(edge))); StdOut.println(joiner); }); StdOut.println("\nExpected:"); StdOut.println("7->8 4.00 8->7 4.00\n" + "0->1 2.00 1->2 1.00 2->0 -3.00\n" + "0->4 1.00 4->1 -2.00 1->2 1.00 2->0 -3.00\n" + "1->2 1.00 2->3 1.00 3->4 3.00 4->1 -2.00\n" + "1->2 1.00 2->5 2.00 5->3 -2.00 3->4 3.00 4->1 -2.00\n" + "1->2 1.00 2->1 6.00"); } }
reneargento/algorithms-sedgewick-wayne
src/chapter4/section4/JohnsonAllCycles.java
213,971
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ package org.nd4j.linalg.dimensionalityreduction; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ops.random.impl.GaussianDistribution; import org.nd4j.linalg.api.rng.Random; import org.nd4j.linalg.exception.ND4JIllegalStateException; import org.nd4j.linalg.factory.Nd4j; import java.util.ArrayList; import java.util.Arrays; import java.util.List; public class RandomProjection { private int components; private Random rng; private double eps; private boolean autoMode; public RandomProjection(double eps, Random rng){ this.rng = rng; this.eps = eps; this.autoMode = true; } public RandomProjection(double eps){ this(eps, Nd4j.getRandom()); } public RandomProjection(int components, Random rng){ this.rng = rng; this.components = components; this.autoMode = false; } public RandomProjection(int components){ this(components, Nd4j.getRandom()); } /** * Find a safe number of components to project this to, through * the Johnson-Lindenstrauss lemma * The minimum number n' of components to guarantee the eps-embedding is * given by: * * n' >= 4 log(n) / (eps² / 2 - eps³ / 3) * * see http://cseweb.ucsd.edu/~dasgupta/papers/jl.pdf §2.1 * @param n Number of samples. If an array is given, it will compute * a safe number of components array-wise. * @param eps Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma. * Will compute array-wise if an array is given. * @return the minimum number of components, one entry per (eps, n) combination */ public static List<Integer> johnsonLindenstraussMinDim(int[] n, double... eps){ boolean basicCheck = n == null || n.length == 0 || eps == null || eps.length == 0; if (basicCheck) throw new IllegalArgumentException("Johnson-Lindenstrauss dimension estimation requires > 0 components and at least a relative error"); for (double epsilon: eps){ if (epsilon <= 0 || epsilon >= 1) { throw new IllegalArgumentException("A relative error should be in ]0, 1["); } } List<Integer> res = new ArrayList<>(n.length * eps.length); for (double epsilon : eps){ double denom = (Math.pow(epsilon, 2) / 2 - Math.pow(epsilon, 3) / 3); for (int components: n){ res.add((int) (4 * Math.log(components) / denom)); } } return res; } public static List<Long> johnsonLindenstraussMinDim(long[] n, double... eps){ boolean basicCheck = n == null || n.length == 0 || eps == null || eps.length == 0; if (basicCheck) throw new IllegalArgumentException("Johnson-Lindenstrauss dimension estimation requires > 0 components and at least a relative error"); for (double epsilon: eps){ if (epsilon <= 0 || epsilon >= 1) { throw new IllegalArgumentException("A relative error should be in ]0, 1["); } } List<Long> res = new ArrayList<>(n.length * eps.length); for (double epsilon : eps){ double denom = (Math.pow(epsilon, 2) / 2 - Math.pow(epsilon, 3) / 3); for (long components: n){ res.add((long) (4 * Math.log(components) / denom)); } } return res; } public static List<Integer> johnsonLindenStraussMinDim(int n, double... eps){ return johnsonLindenstraussMinDim(new int[]{n}, eps); } public static List<Long> johnsonLindenStraussMinDim(long n, double... eps){ return johnsonLindenstraussMinDim(new long[]{n}, eps); } /** * Generate a dense Gaussian random matrix. * * The n' components of the random matrix are drawn from * N(0, 1.0 / n'). * * @param shape the shape of the matrix to generate * @param rng the random number generator to draw from * @return the generated random projection matrix */ private INDArray gaussianRandomMatrix(long[] shape, Random rng){ Nd4j.checkShapeValues(shape); INDArray res = Nd4j.create(shape); GaussianDistribution op1 = new GaussianDistribution(res, 0.0, 1.0 / Math.sqrt(shape[0])); Nd4j.getExecutioner().exec(op1, rng); return res; } private long[] projectionMatrixShape; private INDArray _projectionMatrix; private INDArray getProjectionMatrix(long[] shape, Random rng){ if (! Arrays.equals(projectionMatrixShape, shape) || _projectionMatrix == null) { _projectionMatrix = gaussianRandomMatrix(shape, rng); projectionMatrixShape = shape; /* remember the shape so the matrix is actually cached between calls */ } return _projectionMatrix; } /** * * Compute the target shape of the projection matrix * @param shape the shape of the data tensor * @param eps the relative error used in the Johnson-Lindenstrauss estimation * @param auto whether to use the JL estimation instead of the user specification * @param targetDimension the target size for the projection, ignored when auto is true * */ private static int[] targetShape(int[] shape, double eps, int targetDimension, boolean auto){ int components = targetDimension; if (auto) components = johnsonLindenStraussMinDim(shape[0], eps).get(0); // JL or user spec edge cases if (auto && (components <= 0 || components > shape[1])){ throw new ND4JIllegalStateException(String.format("Estimation led to a target dimension of %d, which is invalid", components)); } return new int[]{ shape[1], components}; } private static long[] targetShape(long[] shape, double eps, int targetDimension, boolean auto){ long components = targetDimension; if (auto) components = johnsonLindenStraussMinDim(shape[0], eps).get(0); // JL or user spec edge cases if (auto && (components <= 0 || components > shape[1])){ throw new ND4JIllegalStateException(String.format("Estimation led to a target dimension of %d, which is invalid", components)); } return new long[]{ shape[1], components}; } /** * Compute the target shape of a suitable projection matrix * @param X the Data tensor * @param eps the relative error used in the Johnson-Lindenstrauss estimation * @return the shape of the projection matrix to use */ public static long[] targetShape(INDArray X, double eps) { return targetShape(X.shape(), eps, -1, true); } /** * Compute the target shape of a suitable projection matrix * @param X the Data Tensor * @param targetDimension a desired dimension * @return the shape of the projection matrix to use */ protected static long[] targetShape(INDArray X, int targetDimension) { return targetShape(X.shape(), -1, targetDimension, false); } /** * Create a copy random projection by
using matrix product with a random matrix * @param data * @return the projected matrix */ public INDArray project(INDArray data){ long[] tShape = targetShape(data.shape(), eps, components, autoMode); return data.mmul(getProjectionMatrix(tShape, this.rng)); } /** * Create a copy random projection by using matrix product with a random matrix * * @param data * @param result a placeholder result * @return */ public INDArray project(INDArray data, INDArray result){ long[] tShape = targetShape(data.shape(), eps, components, autoMode); return data.mmuli(getProjectionMatrix(tShape, this.rng), result); } /** * Create an in-place random projection by using in-place matrix product with a random matrix * @param data * @return the projected matrix */ public INDArray projecti(INDArray data){ long[] tShape = targetShape(data.shape(), eps, components, autoMode); return data.mmuli(getProjectionMatrix(tShape, this.rng)); } /** * Create an in-place random projection by using in-place matrix product with a random matrix * * @param data * @param result a placeholder result * @return */ public INDArray projecti(INDArray data, INDArray result){ long[] tShape = targetShape(data.shape(), eps, components, autoMode); return data.mmuli(getProjectionMatrix(tShape, this.rng), result); } }
deeplearning4j/deeplearning4j
nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/dimensionalityreduction/RandomProjection.java
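Plugging concrete numbers into the bound above shows that the safe dimension depends only on the sample count and the allowed distortion, not on the input dimensionality. A worked instance (sample values chosen for illustration):

int n = 100_000;                                            // number of samples
double eps = 0.1;                                           // allowed distortion
double denom = Math.pow(eps, 2) / 2 - Math.pow(eps, 3) / 3; // ~0.0046667
int minDim = (int) (4 * Math.log(n) / denom);               // ~9868 components
// same value as RandomProjection.johnsonLindenstraussMinDim(new int[]{100_000}, 0.1).get(0)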
213,972
/* * Copyright 2002-2016 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.aopalliance.aop; /** * Tag interface for Advice. Implementations can be any type * of advice, such as Interceptors. * * @author Rod Johnson * @version $Id: Advice.java,v 1.1 2004/03/19 17:02:16 johnsonr Exp $ */ public interface Advice { }
spring-projects/spring-framework
spring-aop/src/main/java/org/aopalliance/aop/Advice.java
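Concrete advice types come from AOP Alliance subinterfaces; for example org.aopalliance.intercept.MethodInterceptor extends Interceptor, which extends Advice. A minimal sketch of an interceptor (the timing logic is illustrative only, not part of the API):

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;

public class TimingInterceptor implements MethodInterceptor { // an Advice by inheritance
    @Override
    public Object invoke(MethodInvocation invocation) throws Throwable {
        long start = System.nanoTime();
        try {
            return invocation.proceed(); // run the advised method
        } finally {
            System.out.println(invocation.getMethod().getName()
                    + " took " + (System.nanoTime() - start) + " ns");
        }
    }
}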
213,973
/** * Min cost max flow implementation using Johnson's algorithm (initial Bellman-Ford + subsequent * Dijkstra runs) as a method of finding augmenting paths. * * <p>Tested against: - https://open.kattis.com/problems/mincostmaxflow - * https://open.kattis.com/problems/jobpostings * * <p>Time Complexity: O(E²Vlog(V)) * * @author William Fiset, [email protected] */ package com.williamfiset.algorithms.graphtheory.networkflow; import static java.lang.Math.min; import java.util.*; public class MinCostMaxFlowJohnsons extends NetworkFlowSolverBase { /** * Creates an instance of a flow network solver. Use the {@link NetworkFlowSolverBase#addEdge} * method to add edges to the graph. * * @param n - The number of nodes in the graph including source and sink nodes. * @param s - The index of the source node, 0 <= s < n * @param t - The index of the sink node, 0 <= t < n, t != s */ public MinCostMaxFlowJohnsons(int n, int s, int t) { super(n, s, t); } private void init() { long[] dist = new long[n]; Arrays.fill(dist, INF); dist[s] = 0; // Run Bellman-Ford algorithm to get the optimal distance to each node, O(VE) for (int i = 0; i < n - 1; i++) for (List<Edge> edges : graph) for (Edge edge : edges) if (edge.remainingCapacity() > 0 && dist[edge.from] + edge.cost < dist[edge.to]) dist[edge.to] = dist[edge.from] + edge.cost; adjustEdgeCosts(dist); } // Adjust edge costs to be non-negative for Dijkstra's algorithm, O(E) private void adjustEdgeCosts(long[] dist) { for (int from = 0; from < n; from++) { for (Edge edge : graph[from]) { if (edge.remainingCapacity() > 0) { edge.cost += dist[from] - dist[edge.to]; } else { edge.cost = 0; } } } } @Override public void solve() { init(); // Sum up the bottlenecks on each augmenting path to find the max flow and min cost. List<Edge> path; while ((path = getAugmentingPath()).size() != 0) { // Find bottle neck edge value along path. long bottleNeck = Long.MAX_VALUE; for (Edge edge : path) bottleNeck = min(bottleNeck, edge.remainingCapacity()); // Retrace path while augmenting the flow for (Edge edge : path) { edge.augment(bottleNeck); minCost += bottleNeck * edge.originalCost; } maxFlow += bottleNeck; } } // Finds an augmenting path from the source node to the sink using Johnson's // shortest path algorithm. First, Bellman-Ford is run to get the shortest // path from the source to every node, and then the graph is cost adjusted // to remove negative edge weights so that Dijkstra's can be used in // subsequent runs for improved time complexity. private List<Edge> getAugmentingPath() { class Node implements Comparable<Node> { int id; long value; public Node(int id, long value) { this.id = id; this.value = value; } @Override public int compareTo(Node other) { /* Long.compare avoids the overflow risk of casting (value - other.value) to int */ return Long.compare(value, other.value); } } long[] dist = new long[n]; Arrays.fill(dist, INF); dist[s] = 0; markAllNodesAsUnvisited(); Edge[] prev = new Edge[n]; PriorityQueue<Node> pq = new PriorityQueue<>(); pq.offer(new Node(s, 0)); // Run Dijkstra's to find augmenting path.
while (!pq.isEmpty()) { Node node = pq.poll(); visit(node.id); if (dist[node.id] < node.value) continue; List<Edge> edges = graph[node.id]; for (int i = 0; i < edges.size(); i++) { Edge edge = edges.get(i); if (visited(edge.to)) continue; long newDist = dist[edge.from] + edge.cost; if (edge.remainingCapacity() > 0 && newDist < dist[edge.to]) { prev[edge.to] = edge; dist[edge.to] = newDist; pq.offer(new Node(edge.to, dist[edge.to])); } } } LinkedList<Edge> path = new LinkedList<>(); if (dist[t] == INF) return path; adjustEdgeCosts(dist); for (Edge edge = prev[t]; edge != null; edge = prev[edge.from]) path.addFirst(edge); return path; } }
williamfiset/Algorithms
src/main/java/com/williamfiset/algorithms/graphtheory/networkflow/MinCostMaxFlowJohnsons.java
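A hedged usage sketch: addEdge(from, to, capacity, cost) and the lazy getMaxFlow()/getMinCost() accessors are assumed to be provided by NetworkFlowSolverBase in this repository (not shown here):

int n = 4, s = 0, t = 3;                             // 4 nodes, source 0, sink 3
MinCostMaxFlowJohnsons solver = new MinCostMaxFlowJohnsons(n, s, t);
solver.addEdge(s, 1, 10, 2);                         // capacity 10, cost 2 per unit of flow
solver.addEdge(s, 2, 5, 1);
solver.addEdge(1, t, 10, 3);
solver.addEdge(2, t, 5, 4);
long maxFlow = solver.getMaxFlow();                  // 15; assumed to run solve() once on demand
long minCost = solver.getMinCost();                  // 10*(2+3) + 5*(1+4) = 75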
213,975
package com.hbm.config; import net.minecraftforge.common.config.Configuration; public class MobConfig { public static boolean enableMaskman = true; public static int maskmanDelay = 60 * 60 * 60; public static int maskmanChance = 3; public static int maskmanMinRad = 50; public static boolean maskmanUnderground = true; public static boolean enableRaids = false; public static int raidDelay = 30 * 60 * 60; public static int raidChance = 3; public static int raidAmount = 15; public static int raidDrones = 5; public static int raidAttackDelay = 40; public static int raidAttackReach = 2; public static int raidAttackDistance = 32; public static boolean enableElementals = true; public static int elementalDelay = 30 * 60 * 60; public static int elementalChance = 2; public static int elementalAmount = 10; public static int elementalDistance = 32; public static boolean enableDucks = true; public static boolean enableMobGear = true; public static boolean enableHives = true; public static int hiveSpawn = 256; public static double scoutThreshold = 5; public static int scoutSwarmSpawnChance = 2; public static boolean waypointDebug = false; public static int largeHiveChance = 5; public static int largeHiveThreshold = 30; public static int swarmCooldown = 120 * 20; public static int baseSwarmSize = 5; public static double swarmScalingMult = 1.2; public static int sootStep = 50; public static int[] glyphidChance = {50, -40, 0}; public static int[] brawlerChance = {5, 35, 1}; public static int[] bombardierChance = {20, -15, 1}; public static int[] blasterChance = {-15, 40, 5}; public static int[] diggerChance = {-15, 25, 5}; public static int[] behemothChance = {-30, 45, 10}; public static int[] brendaChance = {-50, 60, 20}; public static int[] johnsonChance = {-50, 60, 50}; public static double spawnMax = 50; public static boolean enableInfestation = true; public static double baseInfestChance = 5; public static double targetingThreshold = 1; public static boolean rampantMode = false; public static boolean rampantNaturalScoutSpawn = false; public static double rampantScoutSpawnThresh = 14; public static int rampantScoutSpawnChance = 1400; public static boolean scoutInitialSpawn = false; public static boolean rampantExtendedTargetting = false; public static boolean rampantDig = false; public static boolean rampantGlyphidGuidance = false; public static double rampantSmokeStackOverride = 0.4; public static double pollutionMult = 3; public static void loadFromConfig(Configuration config) { final String CATEGORY = CommonConfig.CATEGORY_MOBS; enableMaskman = CommonConfig.createConfigBool(config, CATEGORY, "12.M00_enableMaskman", "Whether mask man should spawn", true); maskmanDelay = CommonConfig.createConfigInt(config, CATEGORY, "12.M01_maskmanDelay", "How many world ticks need to pass for a check to be performed", 60 * 60 * 60); maskmanChance = CommonConfig.createConfigInt(config, CATEGORY, "12.M02_maskmanChance", "1:x chance to spawn mask man, must be at least 1", 3); maskmanMinRad = CommonConfig.createConfigInt(config, CATEGORY, "12.M03_maskmanMinRad", "The amount of radiation needed for mask man to spawn", 50); maskmanUnderground = CommonConfig.createConfigBool(config, CATEGORY, "12.M04_maskmanUnderound", "Whether players need to be underground for mask man to spawn", true); enableRaids = CommonConfig.createConfigBool(config, CATEGORY, "12.F00_enableFBIRaids", "Whether there should be FBI raids", false); raidDelay = CommonConfig.createConfigInt(config, CATEGORY, "12.F01_raidDelay", "How many world ticks need 
to pass for a check to be performed", 30 * 60 * 60); raidChance = CommonConfig.createConfigInt(config, CATEGORY, "12.F02_raidChance", "1:x chance to spawn a raid, must be at least 1", 3); raidAmount = CommonConfig.createConfigInt(config, CATEGORY, "12.F03_raidAmount", "How many FBI agents are spawned each raid", 15); raidAttackDelay = CommonConfig.createConfigInt(config, CATEGORY, "12.F04_raidAttackDelay", "Time between individual attempts to break machines", 40); raidAttackReach = CommonConfig.createConfigInt(config, CATEGORY, "12.F05_raidAttackReach", "How far away machines can be broken", 2); raidAttackDistance = CommonConfig.createConfigInt(config, CATEGORY, "12.F06_raidAttackDistance", "How far away agents will spawn from the targeted player", 32); raidDrones = CommonConfig.createConfigInt(config, CATEGORY, "12.F07_raidDrones", "How many quadcopter drones are spawned each raid", 5); enableElementals = CommonConfig.createConfigBool(config, CATEGORY, "12.E00_enableMeltdownElementals", "Whether there should be radiation elementals", true); elementalDelay = CommonConfig.createConfigInt(config, CATEGORY, "12.E01_elementalDelay", "How many world ticks need to pass for a check to be performed", 30 * 60 * 60); elementalChance = CommonConfig.createConfigInt(config, CATEGORY, "12.E02_elementalChance", "1:x chance to spawn elementals, must be at least 1", 2); elementalAmount = CommonConfig.createConfigInt(config, CATEGORY, "12.E03_elementalAmount", "How many elementals are spawned each raid", 10); elementalDistance = CommonConfig.createConfigInt(config, CATEGORY, "12.E04_elementalAttackDistance", "How far away elementals will spawn from the targeted player", 32); enableDucks = CommonConfig.createConfigBool(config, CATEGORY, "12.D00_enableDucks", "Whether pressing O should allow the player to duck", true); enableMobGear = CommonConfig.createConfigBool(config, CATEGORY, "12.D01_enableMobGear", "Whether zombies and skeletons should have additional gear when spawning", true); enableHives = CommonConfig.createConfigBool(config, CATEGORY, "12.G00_enableHives", "Whether glyphid hives should spawn", true); hiveSpawn = CommonConfig.createConfigInt(config, CATEGORY, "12.G01_hiveSpawn", "The average amount of chunks per hive", 256); scoutThreshold = CommonConfig.createConfigDouble(config, CATEGORY, "12.G02_scoutThreshold", "Minimum amount of soot for scouts to spawn", 1); spawnMax = CommonConfig.createConfigDouble(config, CATEGORY, "12.G07_spawnMax", "Maximum amount of glyphids being able to exist at once through natural spawning", 50); targetingThreshold = CommonConfig.createConfigDouble(config, CATEGORY, "12.G08_targetingThreshold", "Minimum amount of soot required for glyphids' extended targeting range to activate", 1D); scoutSwarmSpawnChance = CommonConfig.createConfigInt(config, CATEGORY,"12.G10_scoutSwarmSpawn", "How likely are scouts to spawn in swarms, 1 in x chance format", 3); largeHiveChance = CommonConfig.createConfigInt(config, CATEGORY,"12.G11_largeHiveChance", "The chance for a large hive to spawn, formula: 1/x", 5); largeHiveThreshold = CommonConfig.createConfigInt(config, CATEGORY,"12.G12_largeHiveThreshold", "The soot threshold for a large hive to spawn", 20); waypointDebug = CommonConfig.createConfigBool(config, CATEGORY,"12.G13_waypointDebug", "Allows glyphid waypoints to be seen, mainly used for debugging, also useful as an aid against them", false); //Infested structures enableInfestation= CommonConfig.createConfigBool(config, CATEGORY, "12.I01_enableInfestation", "Whether structures 
infested with glyphids should spawn", true); baseInfestChance = CommonConfig.createConfigDouble(config, CATEGORY, "12.I02_baseInfestChance", "The chance for infested structures to spawn", 5); //Glyphid spawn stuff config.addCustomCategoryComment(CATEGORY, "General Glyphid spawn logic configuration\n" + "\n" + "The first number is the base chance which applies at 0 soot,\n" + "the second number is the modifier that applies with soot based on the formula below,\n" + "the third number is a hard minimum of soot for this type to spawn.\n" + "Negative base chances mean that glyphids won't spawn outright, negative modifiers mean that the type becomes less likely with higher soot.\n" + "The formula for glyphid spawning chance is: (base chance + (modifier - modifier / max( (soot + 1)/3, 3 )))\n" + "The formula for glyphid swarm scaling is: (baseSwarmSize * Math.max(swarmScalingMult * soot/sootStep, 1))"); baseSwarmSize = CommonConfig.createConfigInt(config, CATEGORY, "12.GS01_baseSwarmSize", "The basic, soot-less swarm size", 5); swarmScalingMult = CommonConfig.createConfigDouble(config, CATEGORY, "12.GS02_swarmScalingMult", "By how much the swarm size should scale per soot amount determined below", 1.2); sootStep = CommonConfig.createConfigInt(config, CATEGORY, "12.GS03_sootStep", "The soot amount the above multiplier applies to the swarm size", 50); swarmCooldown = CommonConfig.createConfigInt(config, CATEGORY, "12.GS04_swarmCooldown", "How often do glyphid swarms spawn, in seconds", 120) * 20; glyphidChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC01_glyphidChance", "Base Spawn chance and soot modifier for a glyphid grunt", new int[]{50, -45, 0}); brawlerChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC02_brawlerChance", "Base Spawn chance and soot modifier for a glyphid brawler", new int[]{10, 30, 1}); bombardierChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC03_bombardierChance", "Base Spawn chance and soot modifier for a glyphid bombardier", new int[]{20, -15, 1}); blasterChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC04_blasterChance", "Base Spawn chance and soot modifier for a glyphid blaster", new int[]{-5, 40, 5}); diggerChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC05_diggerChance", "Base Spawn chance and soot modifier for a glyphid digger", new int[]{-15, 25, 5}); behemothChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC06_behemothChance", "Base Spawn chance and soot modifier for a glyphid behemoth", new int[]{-30, 45, 10}); brendaChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC07_brendaChance", "Base Spawn chance and soot modifier for a glyphid brenda", new int[]{-50, 60, 20}); johnsonChance = CommonConfig.createConfigIntList(config, CATEGORY, "12.GC08_johnsonChance", "Base Spawn chance and soot modifier for Big Man Johnson", new int[]{-50, 60, 50}); String rampantDesc = "Rampant Mode changes glyphid behavior and spawning to be more aggressive, changes include:\n" + "\n" + "Glyphid Scouts will naturally spawn alongside normal mobs if soot levels are above a certain threshold\n" + "Glyphids will always have the extended targeting enabled\n" + "Glyphids can dig to waypoints\n" + "The Glyphids will always expand toward your base\n" + "Scouts will spawn from the start, making glyphids start expanding off the bat\n" + "Smokestacks have reduced efficiency, only reducing soot by 40%\n"; config.addCustomCategoryComment(CATEGORY,rampantDesc); rampantMode =
CommonConfig.createConfigBool(config, CATEGORY, "12.R01_rampantMode", "The main rampant mode toggle, enables all other features associated with it", false); config.addCustomCategoryComment(CATEGORY, "The individual features of rampant can be used regardless of whether the main rampant toggle is enabled or not"); rampantNaturalScoutSpawn = CommonConfig.createConfigBool(config, CATEGORY,"12.R02_rampantScoutSpawn", "Whether scouts should spawn natually in highly polluted chunks", false); rampantScoutSpawnThresh = CommonConfig.createConfigDouble(config, CATEGORY, "12.R02.1_rampantScoutSpawnThresh", "How much soot is needed for scouts to naturally spawn", 13); rampantScoutSpawnChance = CommonConfig.createConfigInt(config, CATEGORY, "12.R02.2_rampantScoutSpawnChance", "How often scouts naturally spawn per mob population, 1/x format, the bigger the number, the more uncommon the scouts", 1400); rampantExtendedTargetting = CommonConfig.createConfigBool(config, CATEGORY,"12.R03_rampantExtendedTargeting", "Whether Glyphids should have the extended targetting always enabled", false); rampantDig = CommonConfig.createConfigBool(config, CATEGORY,"12.R04_rampantDig", "Whether Glyphids should be able to dig to waypoints", false); rampantGlyphidGuidance = CommonConfig.createConfigBool(config, CATEGORY,"12.R05_rampantGlyphidGuidance", "Whether Glyphids should always expand toward a player's spawnpoint", false); rampantSmokeStackOverride = CommonConfig.createConfigDouble(config, CATEGORY, "12.R06_rampantSmokeStackOverride", "How much should the smokestack multiply soot by when on rampant mode", 0.4); scoutInitialSpawn = CommonConfig.createConfigBool(config, CATEGORY,"12.R07_scoutInitialSpawn", "Whether glyphid scouts should be able to spawn on the first swarm of a hive, causes glyphids to expand significantly faster", false); pollutionMult = CommonConfig.createConfigDouble(config, CATEGORY, "12.R08_pollutionMult", "A multiplier for soot emitted, whether you want to increase or decrease it", 1); if(rampantMode){ rampantNaturalScoutSpawn = true; rampantExtendedTargetting = true; rampantDig = true; rampantGlyphidGuidance = true; scoutSwarmSpawnChance = 1; scoutThreshold = 0.1; if(pollutionMult == 1) { pollutionMult = 3; } if (bombardierChance[2] == 1){ bombardierChance[2] = 0; } RadiationConfig.sootFogThreshold *= pollutionMult; } } }
HbmMods/Hbm-s-Nuclear-Tech-GIT
src/main/java/com/hbm/config/MobConfig.java
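The swarm-scaling formula documented in the category comment is easy to check by hand. A worked instance using the default values (the soot level is hypothetical):

int baseSwarmSize = 5;          // default
double swarmScalingMult = 1.2;  // default
int sootStep = 50;              // default
double soot = 125;              // hypothetical pollution level
// baseSwarmSize * max(swarmScalingMult * soot / sootStep, 1) = 5 * max(3.0, 1) = 15
int swarmSize = (int) (baseSwarmSize * Math.max(swarmScalingMult * soot / sootStep, 1));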
213,978
/* * (C) Copyright 2017-2023, by Dimitrios Michail and Contributors. * * JGraphT : a free Java graph-theory library * * See the CONTRIBUTORS.md file distributed with this work for additional * information regarding copyright ownership. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0, or the * GNU Lesser General Public License v2.1 or later * which is available at * http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html. * * SPDX-License-Identifier: EPL-2.0 OR LGPL-2.1-or-later */ package org.jgrapht.alg.shortestpath; import org.jgrapht.*; import org.jgrapht.alg.util.*; import org.jgrapht.graph.*; import org.jgrapht.graph.builder.*; import org.jgrapht.util.*; import java.util.*; /** * Johnson's all pairs shortest paths algorithm. * * <p> * Finds the shortest paths between all pairs of vertices in a sparse graph. Edge weights can be * negative, but no negative-weight cycles may exist. It first executes the Bellman-Ford algorithm * to compute a transformation of the input graph that removes all negative weights, allowing * Dijkstra's algorithm to be used on the transformed graph. * * <p> * Running time is $O(n m + n^2 \log n)$. * * <p> * Since Johnson's algorithm creates additional vertices, this implementation requires the user to * provide a graph which is initialized with a vertex supplier. * * <p> * In case the algorithm detects a negative weight cycle it will throw an exception of type * {@link NegativeCycleDetectedException} which will contain the detected negative weight cycle. * * @param <V> the graph vertex type * @param <E> the graph edge type * * @author Dimitrios Michail */ public class JohnsonShortestPaths<V, E> extends BaseShortestPathAlgorithm<V, E> { private double[][] distance; private E[][] pred; private Map<V, Integer> vertexIndices; private final Comparator<Double> comparator; /** * Construct a new instance. * * @param graph the input graph */ public JohnsonShortestPaths(Graph<V, E> graph) { this(graph, ToleranceDoubleComparator.DEFAULT_EPSILON); } /** * Construct a new instance. 
* * @param graph the input graph * @param epsilon tolerance when comparing floating point values */ public JohnsonShortestPaths(Graph<V, E> graph, double epsilon) { super(graph); this.comparator = new ToleranceDoubleComparator(epsilon); } /** * {@inheritDoc} * * @throws IllegalArgumentException in case the provided vertex factory creates vertices which * are already in the original graph * @throws NegativeCycleDetectedException in case a negative weight cycle is detected */ @Override public GraphPath<V, E> getPath(V source, V sink) { if (!graph.containsVertex(source)) { throw new IllegalArgumentException(GRAPH_MUST_CONTAIN_THE_SOURCE_VERTEX); } if (!graph.containsVertex(sink)) { throw new IllegalArgumentException(GRAPH_MUST_CONTAIN_THE_SINK_VERTEX); } run(); if (source.equals(sink)) { return GraphWalk.singletonWalk(graph, source, 0d); } int vSource = vertexIndices.get(source); int vSink = vertexIndices.get(sink); V cur = sink; E e = pred[vSource][vSink]; if (e == null) { return null; } LinkedList<E> edgeList = new LinkedList<>(); while (e != null) { edgeList.addFirst(e); cur = Graphs.getOppositeVertex(graph, e, cur); e = pred[vSource][vertexIndices.get(cur)]; } return new GraphWalk<>(graph, source, sink, null, edgeList, distance[vSource][vSink]); } /** * {@inheritDoc} * * @throws IllegalArgumentException in case the provided vertex factory creates vertices which * are already in the original graph */ @Override public double getPathWeight(V source, V sink) { if (!graph.containsVertex(source)) { throw new IllegalArgumentException(GRAPH_MUST_CONTAIN_THE_SOURCE_VERTEX); } if (!graph.containsVertex(sink)) { throw new IllegalArgumentException(GRAPH_MUST_CONTAIN_THE_SINK_VERTEX); } run(); return distance[vertexIndices.get(source)][vertexIndices.get(sink)]; } /** * {@inheritDoc} * * @throws IllegalArgumentException in case the provided vertex factory creates vertices which * are already in the original graph * @throws NegativeCycleDetectedException in case a negative weight cycle is detected */ @Override public SingleSourcePaths<V, E> getPaths(V source) { run(); return new JohnsonSingleSourcePaths(source); } /** * Executes the actual algorithm. */ private void run() { if (pred != null) { return; } GraphTests.requireDirectedOrUndirected(graph); E detectedNegativeEdge = null; for (E e : graph.edgeSet()) { if (comparator.compare(graph.getEdgeWeight(e), 0.0) < 0) { detectedNegativeEdge = e; break; } } if (detectedNegativeEdge != null) { if (graph.getType().isUndirected()) { V source = graph.getEdgeSource(detectedNegativeEdge); double weight = graph.getEdgeWeight(detectedNegativeEdge); GraphWalk<V, E> cycle = new GraphWalk<>( graph, source, source, Arrays.asList(detectedNegativeEdge, detectedNegativeEdge), 2d * weight); throw new NegativeCycleDetectedException( GRAPH_CONTAINS_A_NEGATIVE_WEIGHT_CYCLE, cycle); } runWithNegativeEdgeWeights(graph); } else { runWithPositiveEdgeWeights(graph); } } /** * Graph has no edges with negative weights. Only perform the last step of Johnson's algorithm: * run Dijkstra's algorithm from every vertex. * * @param g the input graph */ private void runWithPositiveEdgeWeights(Graph<V, E> g) { /* * Create vertex numbering for array representation of results. 
         */
        vertexIndices = computeVertexIndices(g);
        final int n = g.vertexSet().size();
        distance = new double[n][n];
        pred = TypeUtil.uncheckedCast(new Object[n][n]);

        /*
         * Execute Dijkstra multiple times
         */
        for (V v : g.vertexSet()) {
            DijkstraClosestFirstIterator<V, E> it =
                new DijkstraClosestFirstIterator<>(g, v, Double.POSITIVE_INFINITY);
            while (it.hasNext()) {
                it.next();
            }
            Map<V, Pair<Double, E>> distanceAndPredecessorMap = it.getDistanceAndPredecessorMap();

            // transform result
            for (V u : g.vertexSet()) {
                Pair<Double, E> pair = distanceAndPredecessorMap
                    .getOrDefault(u, Pair.of(Double.POSITIVE_INFINITY, null));
                distance[vertexIndices.get(v)][vertexIndices.get(u)] = pair.getFirst();
                pred[vertexIndices.get(v)][vertexIndices.get(u)] = pair.getSecond();
            }
        }
    }

    /**
     * Graph contains edges with negative weights. Transform the input graph, thereby ensuring that
     * there are no edges with negative weights. Then run Dijkstra's algorithm for all vertices.
     *
     * @param g the input graph
     */
    private void runWithNegativeEdgeWeights(Graph<V, E> g)
    {
        /*
         * Compute vertex weights using Bellman-Ford
         */
        Map<V, Double> vertexWeights = computeVertexWeights(g);

        /*
         * Compute new non-negative edge weights
         */
        Map<E, Double> newEdgeWeights = new HashMap<>();
        for (E e : g.edgeSet()) {
            V u = g.getEdgeSource(e);
            V v = g.getEdgeTarget(e);
            double weight = g.getEdgeWeight(e);
            newEdgeWeights.put(e, weight + vertexWeights.get(u) - vertexWeights.get(v));
        }

        /*
         * Create graph with new edge weights
         */
        Graph<V, E> newEdgeWeightsGraph = new AsWeightedGraph<>(g, newEdgeWeights);

        /*
         * Create vertex numbering, for array representation of results
         */
        vertexIndices = computeVertexIndices(g);
        final int n = g.vertexSet().size();
        distance = new double[n][n];
        pred = TypeUtil.uncheckedCast(new Object[n][n]);

        /*
         * Run Dijkstra using new weights for all vertices
         */
        for (V v : g.vertexSet()) {
            DijkstraClosestFirstIterator<V, E> it = new DijkstraClosestFirstIterator<>(
                newEdgeWeightsGraph, v, Double.POSITIVE_INFINITY);
            while (it.hasNext()) {
                it.next();
            }
            Map<V, Pair<Double, E>> distanceAndPredecessorMap = it.getDistanceAndPredecessorMap();

            // transform distances to original weights
            for (V u : g.vertexSet()) {
                Pair<Double, E> oldPair = distanceAndPredecessorMap.get(u);
                Pair<Double, E> newPair;
                if (oldPair != null) {
                    newPair = Pair.of(
                        oldPair.getFirst() - vertexWeights.get(v) + vertexWeights.get(u),
                        oldPair.getSecond());
                } else {
                    newPair = Pair.of(Double.POSITIVE_INFINITY, null);
                }
                distance[vertexIndices.get(v)][vertexIndices.get(u)] = newPair.getFirst();
                pred[vertexIndices.get(v)][vertexIndices.get(u)] = newPair.getSecond();
            }
        }
    }

    /**
     * Compute vertex weights for edge re-weighting using Bellman-Ford.
* * @param g the input graph * @return the vertex weights */ private Map<V, Double> computeVertexWeights(Graph<V, E> g) { assert g.getType().isDirected(); // create extra graph Graph<V, E> extraGraph = GraphTypeBuilder .<V, E> directed().allowingMultipleEdges(true).allowingSelfLoops(true) .edgeSupplier(graph.getEdgeSupplier()).vertexSupplier(graph.getVertexSupplier()) .buildGraph(); // add new vertex V s = extraGraph.addVertex(); if (s == null) { throw new IllegalArgumentException( "Invalid vertex supplier (does not return unique vertices on each call)."); } // add new edges with zero weight Map<E, Double> zeroWeightFunction = new HashMap<>(); for (V v : g.vertexSet()) { extraGraph.addVertex(v); zeroWeightFunction.put(extraGraph.addEdge(s, v), 0d); } /* * Union extra and input graph */ Graph<V, E> unionGraph = new AsGraphUnion<>(new AsWeightedGraph<>(extraGraph, zeroWeightFunction), g); /* * Run Bellman-Ford from new vertex */ SingleSourcePaths<V, E> paths = new BellmanFordShortestPath<>(unionGraph).getPaths(s); Map<V, Double> weights = new HashMap<>(); for (V v : g.vertexSet()) { weights.put(v, paths.getWeight(v)); } return weights; } /** * Compute a unique integer for each vertex of the graph * * @param g the graph * @return a map with the result */ private Map<V, Integer> computeVertexIndices(Graph<V, E> g) { Map<V, Integer> numbering = new HashMap<>(); int num = 0; for (V v : g.vertexSet()) { numbering.put(v, num++); } return numbering; } class JohnsonSingleSourcePaths implements SingleSourcePaths<V, E> { private V source; public JohnsonSingleSourcePaths(V source) { this.source = source; } @Override public Graph<V, E> getGraph() { return graph; } @Override public V getSourceVertex() { return source; } @Override public double getWeight(V sink) { return JohnsonShortestPaths.this.getPathWeight(source, sink); } @Override public GraphPath<V, E> getPath(V sink) { return JohnsonShortestPaths.this.getPath(source, sink); } } }
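/*
 * Usage sketch (illustrative, not part of the library source): builds a small
 * directed weighted graph containing a negative edge and queries Johnson's
 * algorithm. The vertex and edge suppliers are required because the algorithm
 * adds an auxiliary source vertex during the Bellman-Ford re-weighting phase.
 * Class and variable names below are chosen for illustration only.
 */
class JohnsonShortestPathsUsageSketch
{
    public static void main(String[] args)
    {
        // Graph initialized with suppliers, as JohnsonShortestPaths requires.
        Graph<Integer, DefaultWeightedEdge> g = GraphTypeBuilder
            .<Integer, DefaultWeightedEdge> directed().weighted(true)
            .vertexSupplier(SupplierUtil.createIntegerSupplier())
            .edgeSupplier(SupplierUtil.createDefaultWeightedEdgeSupplier())
            .buildGraph();

        Integer a = g.addVertex();
        Integer b = g.addVertex();
        Integer c = g.addVertex();
        g.setEdgeWeight(g.addEdge(a, b), 4.0);
        g.setEdgeWeight(g.addEdge(b, c), -2.0); // negative weight, but no negative cycle
        g.setEdgeWeight(g.addEdge(a, c), 3.0);

        JohnsonShortestPaths<Integer, DefaultWeightedEdge> alg = new JohnsonShortestPaths<>(g);
        GraphPath<Integer, DefaultWeightedEdge> path = alg.getPath(a, c);
        System.out.println(path.getWeight()); // 2.0, via a -> b -> c
    }
}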
d-michail/jgrapht
jgrapht-core/src/main/java/org/jgrapht/alg/shortestpath/JohnsonShortestPaths.java
213,979
// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE package org.bytedeco.bullet.BulletCollision; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; import static org.bytedeco.javacpp.presets.javacpp.*; import org.bytedeco.bullet.LinearMath.*; import static org.bytedeco.bullet.global.LinearMath.*; import static org.bytedeco.bullet.global.BulletCollision.*; // #ifdef NO_VIRTUAL_INTERFACE // #else /** btSimplexSolverInterface can incrementally calculate distance between origin and up to 4 vertices * Used by GJK or Linear Casting. Can be implemented by the Johnson-algorithm or alternative approaches based on * voronoi regions or barycentric coordinates */ @Properties(inherit = org.bytedeco.bullet.presets.BulletCollision.class) public class btSimplexSolverInterface extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public btSimplexSolverInterface(Pointer p) { super(p); } public native void reset(); public native void addVertex(@Const @ByRef btVector3 w, @Const @ByRef btVector3 p, @Const @ByRef btVector3 q); public native @Cast("bool") boolean closest(@ByRef btVector3 v); public native @Cast("btScalar") double maxVertex(); public native @Cast("bool") boolean fullSimplex(); public native int getSimplex(btVector3 pBuf, btVector3 qBuf, btVector3 yBuf); public native @Cast("bool") boolean inSimplex(@Const @ByRef btVector3 w); public native void backup_closest(@ByRef btVector3 v); public native @Cast("bool") boolean emptySimplex(); public native void compute_points(@ByRef btVector3 p1, @ByRef btVector3 p2); public native int numVertices(); }
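/*
 * Usage sketch (illustrative, not part of the generated bindings): feeds the
 * simplex solver a single support point and asks for the point of the current
 * simplex closest to the origin. It assumes the concrete btVoronoiSimplexSolver
 * binding from upstream Bullet is available in this package and extends this
 * interface; in typical code the solver is handed to a GJK pair detector
 * rather than driven by hand.
 */
class BtSimplexSolverUsageSketch {
    public static void main(String[] args) {
        btSimplexSolverInterface solver = new btVoronoiSimplexSolver();
        solver.reset();
        // w = p - q is the support point in Minkowski-difference space.
        btVector3 p = new btVector3(1.0, 0.0, 0.0);
        btVector3 q = new btVector3(0.0, 0.0, 0.0);
        btVector3 w = new btVector3(1.0, 0.0, 0.0);
        solver.addVertex(w, p, q);
        btVector3 v = new btVector3();
        if (solver.closest(v)) {
            // Accessor names follow the upstream btVector3 API.
            System.out.println("closest: " + v.x() + " " + v.y() + " " + v.z());
        }
    }
}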
bytedeco/javacpp-presets
bullet/src/gen/java/org/bytedeco/bullet/BulletCollision/btSimplexSolverInterface.java
213,982
/* * Copyright 2002-2012 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.aop; /** * Interface supplying the information necessary to describe an introduction. * * <p>{@link IntroductionAdvisor IntroductionAdvisors} must implement this * interface. If an {@link org.aopalliance.aop.Advice} implements this, * it may be used as an introduction without an {@link IntroductionAdvisor}. * In this case, the advice is self-describing, providing not only the * necessary behavior, but describing the interfaces it introduces. * * @author Rod Johnson * @since 1.1.1 */ public interface IntroductionInfo { /** * Return the additional interfaces introduced by this Advisor or Advice. * @return the introduced interfaces */ Class<?>[] getInterfaces(); }
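/*
 * Illustrative sketch: a self-describing introduction advice that implements
 * IntroductionInfo directly. The Auditable mixin interface below is
 * hypothetical; Spring's own DelegatingIntroductionInterceptor follows the
 * same pattern, combining advice behavior with this metadata so that no
 * separate IntroductionAdvisor is needed.
 */
class AuditableIntroductionInfo implements IntroductionInfo {

	/** Hypothetical mixin interface introduced into advised objects. */
	interface Auditable {
		long getLastModifiedMillis();
	}

	@Override
	public Class<?>[] getInterfaces() {
		// The additional interface this introduction makes available.
		return new Class<?>[] {Auditable.class};
	}
}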
spring-projects/spring-framework
spring-aop/src/main/java/org/springframework/aop/IntroductionInfo.java