diff --git a/README.textile b/README.textile
index 219de856b3564..95204a8e412ba 100644
--- a/README.textile
+++ b/README.textile
@@ -207,7 +207,7 @@ h1. License
 This software is licensed under the Apache 2 license, quoted below.
 
-Copyright 2009-2012 Shay Banon and ElasticSearch 
+Copyright 2009-2013 Shay Banon and ElasticSearch 
 
 Licensed under the Apache License, Version 2.0 (the "License"); you may not
 use this file except in compliance with the License. You may obtain a copy of
@@ -220,4 +220,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 License for the specific language governing permissions and limitations under
 the License.
-
\ No newline at end of file + diff --git a/pom.xml b/pom.xml index 88d8402300e7a..eea412d37fe7e 100644 --- a/pom.xml +++ b/pom.xml @@ -30,7 +30,7 @@ - 4.1.0 + 4.2.0 @@ -143,7 +143,7 @@ com.google.guava guava - 14.0 + 14.0.1 compile diff --git a/src/main/assemblies/targz-bin.xml b/src/main/assemblies/targz-bin.xml index 0e3650b5e816b..f587844bb5425 100644 --- a/src/main/assemblies/targz-bin.xml +++ b/src/main/assemblies/targz-bin.xml @@ -27,6 +27,7 @@ lib/sigar *.dll + **winnt** diff --git a/src/main/java/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.java b/src/main/java/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.java new file mode 100644 index 0000000000000..ec8ad4d12f312 --- /dev/null +++ b/src/main/java/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.java @@ -0,0 +1,71 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.lucene.analysis.miscellaneous; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; + +import java.io.IOException; + + +/** + * This TokenFilterĀ emits each incoming token twice once as keyword and once non-keyword, in other words once with + * {@link KeywordAttribute#setKeyword(boolean)} set to true and once set to false. + * This is useful if used with a stem filter that respects the {@link KeywordAttribute} to index the stemmed and the + * un-stemmed version of a term into the same field. + */ +//LUCENE MONITOR - this will be included in Lucene 4.3. (it's a plain copy of the lucene version) + +public final class KeywordRepeatFilter extends TokenFilter { + private final KeywordAttribute keywordAttribute = addAttribute(KeywordAttribute.class); + private final PositionIncrementAttribute posIncAttr = addAttribute(PositionIncrementAttribute.class); + private State state; + + /** + * Construct a token stream filtering the given input. 
+ */ + public KeywordRepeatFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (state != null) { + restoreState(state); + posIncAttr.setPositionIncrement(0); + keywordAttribute.setKeyword(false); + state = null; + return true; + } + if (input.incrementToken()) { + state = captureState(); + keywordAttribute.setKeyword(true); + return true; + } + return false; + } + + @Override + public void reset() throws IOException { + super.reset(); + state = null; + } +} diff --git a/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java index 7e1e5eadd305f..e45aa93fd5efb 100644 --- a/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java +++ b/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java @@ -25,7 +25,7 @@ * minimumNumberShouldMatch specification that uses the actual num of high frequent terms * to calculate the minimum matching terms. */ -public class ExtendedCommonTermsQuery extends XCommonTermsQuery { +public class ExtendedCommonTermsQuery extends CommonTermsQuery { public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, boolean disableCoord) { super(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoord); @@ -38,7 +38,7 @@ public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float m private String minNumShouldMatchSpec; @Override - protected int getMinimumNumberShouldMatch(int numOptional) { + protected int calcLowFreqMinimumNumberShouldMatch(int numOptional) { if (minNumShouldMatchSpec == null) { return 0; } diff --git a/src/main/java/org/apache/lucene/queries/XCommonTermsQuery.java b/src/main/java/org/apache/lucene/queries/XCommonTermsQuery.java deleted file mode 100644 index f83649d8021a1..0000000000000 --- a/src/main/java/org/apache/lucene/queries/XCommonTermsQuery.java +++ /dev/null @@ -1,381 +0,0 @@ -package org.apache.lucene.queries; - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
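A hedged sketch of how the KeywordRepeatFilter added above is typically wired into an analysis chain. The analyzer class name is illustrative and not part of this change; PorterStemFilter honours the KeywordAttribute (so it skips the keyword-marked copy), and RemoveDuplicatesTokenFilter is a common companion that drops the second copy when stemming leaves a term unchanged.

```java
import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.Version;

// Illustrative analyzer (hypothetical name): indexes the stemmed and the
// original form of each term at the same position.
public final class StemAndKeepOriginalAnalyzer extends Analyzer {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer source = new StandardTokenizer(Version.LUCENE_42, reader);
        TokenStream sink = new KeywordRepeatFilter(source);   // emit each token twice: keyword + non-keyword
        sink = new PorterStemFilter(sink);                     // stems only the non-keyword copy
        sink = new RemoveDuplicatesTokenFilter(sink);          // drop the duplicate if stemming changed nothing
        return new TokenStreamComponents(source, sink);
    }
}
```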
- */ -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -import org.apache.lucene.index.AtomicReaderContext; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.ToStringUtils; - -/** - * A query that executes high-frequency terms in a optional sub-query to prevent - * slow queries due to "common" terms like stopwords. This query basically - * builds 2 queries off the {@link #add(Term) added} terms where low-frequency - * terms are added to a required boolean clause and high-frequency terms are - * added to an optional boolean clause. The optional clause is only executed if - * the required "low-frequency' clause matches. Scores produced by this query - * will be slightly different to plain {@link BooleanQuery} scorer mainly due to - * differences in the {@link Similarity#coord(int,int) number of leave queries} - * in the required boolean clause. In the most cases high-frequency terms are - * unlikely to significantly contribute to the document score unless at least - * one of the low-frequency terms are matched such that this query can improve - * query execution times significantly if applicable. - *

- * {@link XCommonTermsQuery} has several advantages over stopword filtering at - * index or query time since a term can be "classified" based on the actual - * document frequency in the index and can prevent slow queries even across - * domains without specialized stopword files. - *

- *

- * Note: if the query only contains high-frequency terms the query is - * rewritten into a plain conjunction query ie. all high-frequency terms need to - * match in order to match a document. - *

- */ -//LUCENE MONITOR - Copied from CommonTermsQuery changes are tracked with //CHANGE -public class XCommonTermsQuery extends Query { - /* - * TODO maybe it would make sense to abstract this even further and allow to - * rewrite to dismax rather than boolean. Yet, this can already be subclassed - * to do so. - */ - protected final List terms = new ArrayList(); - protected final boolean disableCoord; - protected final float maxTermFrequency; - protected final Occur lowFreqOccur; - protected final Occur highFreqOccur; - protected float lowFreqBoost = 1.0f; - protected float highFreqBoost = 1.0f; - //CHANGE made minNr... a float for fractions - protected float minNrShouldMatch = 0; - - /** - * Creates a new {@link XCommonTermsQuery} - * - * @param highFreqOccur - * {@link Occur} used for high frequency terms - * @param lowFreqOccur - * {@link Occur} used for low frequency terms - * @param maxTermFrequency - * a value in [0..1] (or absolute number >=1) representing the - * maximum threshold of a terms document frequency to be considered a - * low frequency term. - * @throws IllegalArgumentException - * if {@link Occur#MUST_NOT} is pass as lowFreqOccur or - * highFreqOccur - */ - public XCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, - float maxTermFrequency) { - this(highFreqOccur, lowFreqOccur, maxTermFrequency, false); - } - - /** - * Creates a new {@link XCommonTermsQuery} - * - * @param highFreqOccur - * {@link Occur} used for high frequency terms - * @param lowFreqOccur - * {@link Occur} used for low frequency terms - * @param maxTermFrequency - * a value in [0..1] (or absolute number >=1) representing the - * maximum threshold of a terms document frequency to be considered a - * low frequency term. - * @param disableCoord - * disables {@link Similarity#coord(int,int)} in scoring for the low - * / high frequency sub-queries - * @throws IllegalArgumentException - * if {@link Occur#MUST_NOT} is pass as lowFreqOccur or - * highFreqOccur - */ - public XCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, - float maxTermFrequency, boolean disableCoord) { - if (highFreqOccur == Occur.MUST_NOT) { - throw new IllegalArgumentException( - "highFreqOccur should be MUST or SHOULD but was MUST_NOT"); - } - if (lowFreqOccur == Occur.MUST_NOT) { - throw new IllegalArgumentException( - "lowFreqOccur should be MUST or SHOULD but was MUST_NOT"); - } - this.disableCoord = disableCoord; - this.highFreqOccur = highFreqOccur; - this.lowFreqOccur = lowFreqOccur; - this.maxTermFrequency = maxTermFrequency; - } - - /** - * Adds a term to the {@link CommonTermsQuery} - * - * @param term - * the term to add - */ - public void add(Term term) { - if (term == null) { - throw new IllegalArgumentException("Term must not be null"); - } - this.terms.add(term); - } - - @Override - public Query rewrite(IndexReader reader) throws IOException { - if (this.terms.isEmpty()) { - return new BooleanQuery(); - } else if (this.terms.size() == 1) { - final TermQuery tq = new TermQuery(this.terms.get(0)); - tq.setBoost(getBoost()); - return tq; - } - final List leaves = reader.leaves(); - final int maxDoc = reader.maxDoc(); - final TermContext[] contextArray = new TermContext[terms.size()]; - final Term[] queryTerms = this.terms.toArray(new Term[0]); - collectTermContext(reader, leaves, contextArray, queryTerms); - return buildQuery(maxDoc, contextArray, queryTerms); - } - - //CHANGE added to get num optional - protected int getMinimumNumberShouldMatch(int numOptional) { - if (minNrShouldMatch >= 1.0f) { - return 
(int) minNrShouldMatch; - } - return (int) (minNrShouldMatch * numOptional); - } - - protected Query buildQuery(final int maxDoc, - final TermContext[] contextArray, final Term[] queryTerms) { - BooleanQuery lowFreq = new BooleanQuery(disableCoord); - BooleanQuery highFreq = new BooleanQuery(disableCoord); - highFreq.setBoost(highFreqBoost); - lowFreq.setBoost(lowFreqBoost); - - BooleanQuery query = new BooleanQuery(true); - - for (int i = 0; i < queryTerms.length; i++) { - TermContext termContext = contextArray[i]; - if (termContext == null) { - lowFreq.add(new TermQuery(queryTerms[i]), lowFreqOccur); - } else { - if ((maxTermFrequency >= 1f && termContext.docFreq() > maxTermFrequency) - || (termContext.docFreq() > (int) Math.ceil(maxTermFrequency - * (float) maxDoc))) { - highFreq - .add(new TermQuery(queryTerms[i], termContext), highFreqOccur); - } else { - lowFreq.add(new TermQuery(queryTerms[i], termContext), lowFreqOccur); - } - } - - } - if (lowFreqOccur == Occur.SHOULD) { - lowFreq.setMinimumNumberShouldMatch(getMinimumNumberShouldMatch(lowFreq.clauses().size())); - } - if (lowFreq.clauses().isEmpty()) { - /* - * if lowFreq is empty we rewrite the high freq terms in a conjunction to - * prevent slow queries. - */ - if (highFreqOccur == Occur.MUST) { - highFreq.setBoost(getBoost()); - return highFreq; - } else { - BooleanQuery highFreqConjunction = new BooleanQuery(); - for (BooleanClause booleanClause : highFreq) { - highFreqConjunction.add(booleanClause.getQuery(), Occur.MUST); - } - highFreqConjunction.setBoost(getBoost()); - return highFreqConjunction; - - } - } else if (highFreq.clauses().isEmpty()) { - // only do low freq terms - we don't have high freq terms - lowFreq.setBoost(getBoost()); - return lowFreq; - } else { - query.add(highFreq, Occur.SHOULD); - query.add(lowFreq, Occur.MUST); - query.setBoost(getBoost()); - return query; - } - } - - public void collectTermContext(IndexReader reader, - List leaves, TermContext[] contextArray, - Term[] queryTerms) throws IOException { - TermsEnum termsEnum = null; - for (AtomicReaderContext context : leaves) { - final Fields fields = context.reader().fields(); - if (fields == null) { - // reader has no fields - continue; - } - for (int i = 0; i < queryTerms.length; i++) { - Term term = queryTerms[i]; - TermContext termContext = contextArray[i]; - final Terms terms = fields.terms(term.field()); - if (terms == null) { - // field does not exist - continue; - } - termsEnum = terms.iterator(termsEnum); - assert termsEnum != null; - - if (termsEnum == TermsEnum.EMPTY) continue; - if (termsEnum.seekExact(term.bytes(), false)) { - if (termContext == null) { - contextArray[i] = new TermContext(reader.getContext(), - termsEnum.termState(), context.ord, termsEnum.docFreq(), - termsEnum.totalTermFreq()); - } else { - termContext.register(termsEnum.termState(), context.ord, - termsEnum.docFreq(), termsEnum.totalTermFreq()); - } - - } - - } - } - } - - /** - * Returns true iff {@link Similarity#coord(int,int)} is disabled in scoring - * for the high and low frequency query instance. The top level query will - * always disable coords. - */ - public boolean isCoordDisabled() { - return disableCoord; - } - - /** - * Specifies a minimum number of the optional BooleanClauses which must be - * satisfied in order to produce a match on the low frequency terms query - * part. - * - *

- * By default no optional clauses are necessary for a match (unless there are - * no required clauses). If this method is used, then the specified number of - * clauses is required. - *

- * - * @param min - * the number of optional clauses that must match - */ - //CHANGE accepts now a float - public void setMinimumNumberShouldMatch(float min) { - this.minNrShouldMatch = min; - } - - /** - * Gets the minimum number of the optional BooleanClauses which must be - * satisfied. - */ - //CHANGE returns now a float - public float getMinimumNumberShouldMatch() { - return minNrShouldMatch; - } - - @Override - public void extractTerms(Set terms) { - terms.addAll(this.terms); - } - - @Override - public String toString(String field) { - StringBuilder buffer = new StringBuilder(); - boolean needParens = (getBoost() != 1.0) - || (getMinimumNumberShouldMatch() > 0); - if (needParens) { - buffer.append("("); - } - for (int i = 0; i < terms.size(); i++) { - Term t = terms.get(i); - buffer.append(new TermQuery(t).toString()); - - if (i != terms.size() - 1) buffer.append(", "); - } - if (needParens) { - buffer.append(")"); - } - if (getMinimumNumberShouldMatch() > 0) { - buffer.append('~'); - buffer.append(getMinimumNumberShouldMatch()); - } - if (getBoost() != 1.0f) { - buffer.append(ToStringUtils.boost(getBoost())); - } - return buffer.toString(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (disableCoord ? 1231 : 1237); - result = prime * result + Float.floatToIntBits(highFreqBoost); - result = prime * result - + ((highFreqOccur == null) ? 0 : highFreqOccur.hashCode()); - result = prime * result + Float.floatToIntBits(lowFreqBoost); - result = prime * result - + ((lowFreqOccur == null) ? 0 : lowFreqOccur.hashCode()); - result = prime * result + Float.floatToIntBits(maxTermFrequency); - result = prime * result + Float.floatToIntBits(minNrShouldMatch); - result = prime * result + ((terms == null) ? 0 : terms.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - XCommonTermsQuery other = (XCommonTermsQuery) obj; - if (disableCoord != other.disableCoord) return false; - if (Float.floatToIntBits(highFreqBoost) != Float - .floatToIntBits(other.highFreqBoost)) return false; - if (highFreqOccur != other.highFreqOccur) return false; - if (Float.floatToIntBits(lowFreqBoost) != Float - .floatToIntBits(other.lowFreqBoost)) return false; - if (lowFreqOccur != other.lowFreqOccur) return false; - if (Float.floatToIntBits(maxTermFrequency) != Float - .floatToIntBits(other.maxTermFrequency)) return false; - if (minNrShouldMatch != other.minNrShouldMatch) return false; - if (terms == null) { - if (other.terms != null) return false; - } else if (!terms.equals(other.terms)) return false; - return true; - } - - //CHANGE added - public List terms() { - return this.terms; - } - -} diff --git a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index be1ad9f03013e..bda22ed935440 100644 --- a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -129,45 +129,14 @@ public void reset(QueryParserSettings settings) { } /** - * We override this one so we can get the fuzzy part to be treated as string, so people can do: "age:10~5". 
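For context, a hedged sketch of the query-string syntax this parser override enables; the index name and field values below are placeholders, not part of the change, and the new handleBareFuzzy override appears just below.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

// Hypothetical usage: the fuzzy "slop" after '~' is passed through as a string,
// so numeric fields can use "age:10~5" and date math such as
// "timestamp:2012-10-10~5d" can be handled the same way.
public final class FuzzyQueryStringExample {
    public static SearchResponse search(Client client) {
        return client.prepareSearch("logs")                      // "logs" is a placeholder index
                .setQuery(QueryBuilders.queryString("age:10~5"))
                .execute().actionGet();
    }
}
```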
Note, - * we would love to support also "timestamp:2012-10-10~5d", but sadly the parser expects only numbers after the ~, - * hopefully we can change that in Lucene. + * We override this one so we can get the fuzzy part to be treated as string, so people can do: "age:10~5" or "timestamp:2012-10-10~5d" */ @Override - Query handleBareTokenQuery(String qfield, Token term, Token fuzzySlop, boolean prefix, boolean wildcard, boolean fuzzy, boolean regexp) throws ParseException { - Query q; - - String termImage = discardEscapeChar(term.image); - if (wildcard) { - q = getWildcardQuery(qfield, term.image); - } else if (prefix) { - q = getPrefixQuery(qfield, - discardEscapeChar(term.image.substring - (0, term.image.length() - 1))); - } else if (regexp) { - q = getRegexpQuery(qfield, term.image.substring(1, term.image.length() - 1)); - } else if (fuzzy) { -// float fms = fuzzyMinSim; -// try { -// fms = Float.valueOf(fuzzySlop.image.substring(1)).floatValue(); -// } catch (Exception ignored) { -// } -// if (fms < 0.0f) { -// throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !"); -// } else if (fms >= 1.0f && fms != (int) fms) { -// throw new ParseException("Fractional edit distances are not allowed!"); -// } -// q = getFuzzyQuery(qfield, termImage, fms); - if (fuzzySlop.image.length() == 1) { - q = getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim)); - } else { - q = getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1)); - } - } else { - - q = getFieldQuery(qfield, termImage, false); + Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException { + if (fuzzySlop.image.length() == 1) { + return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim)); } - return q; + return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1)); } @Override diff --git a/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 2473829025c4a..3cc6c8032f65f 100644 --- a/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -96,8 +96,6 @@ void flatten(Query sourceQuery, IndexReader reader, Collection flatQuerie flatten(sourceQuery.rewrite(reader), reader, flatQueries); } else if (sourceQuery instanceof FiltersFunctionScoreQuery) { flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries); - } else if (sourceQuery instanceof ExtendedCommonTermsQuery) { - flatten(((ExtendedCommonTermsQuery)sourceQuery).rewrite(reader), reader, flatQueries); } else if (sourceQuery instanceof MultiPhraseQuery) { MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery); convertMultiPhraseQuery(0, new int[q.getTermArrays().size()] , q, q.getTermArrays(), q.getPositions(), reader, flatQueries); diff --git a/src/main/java/org/apache/lucene/store/OpenBufferedIndexOutput.java b/src/main/java/org/apache/lucene/store/OpenBufferedIndexOutput.java deleted file mode 100644 index 8a5d1f9b832ff..0000000000000 --- a/src/main/java/org/apache/lucene/store/OpenBufferedIndexOutput.java +++ /dev/null @@ -1,151 +0,0 @@ -package org.apache.lucene.store; - -import java.io.IOException; - -/** - * Exactly the same as Lucene {@link BufferedIndexOutput} but with the ability to set the buffer size - */ -// LUCENE MONITOR -public abstract class OpenBufferedIndexOutput extends IndexOutput { - - public static final int 
DEFAULT_BUFFER_SIZE = BufferedIndexOutput.BUFFER_SIZE; - - final int BUFFER_SIZE; - - private final byte[] buffer; - private long bufferStart = 0; // position in file of buffer - private int bufferPosition = 0; // position in buffer - - protected OpenBufferedIndexOutput(int BUFFER_SIZE) { - this.BUFFER_SIZE = BUFFER_SIZE; - this.buffer = new byte[BUFFER_SIZE]; - } - - /** - * Writes a single byte. - * - * @see IndexInput#readByte() - */ - @Override - public void writeByte(byte b) throws IOException { - if (bufferPosition >= BUFFER_SIZE) - flush(); - buffer[bufferPosition++] = b; - } - - /** - * Writes an array of bytes. - * - * @param b the bytes to write - * @param length the number of bytes to write - * @see IndexInput#readBytes(byte[], int, int) - */ - @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { - int bytesLeft = BUFFER_SIZE - bufferPosition; - // is there enough space in the buffer? - if (bytesLeft >= length) { - // we add the data to the end of the buffer - System.arraycopy(b, offset, buffer, bufferPosition, length); - bufferPosition += length; - // if the buffer is full, flush it - if (BUFFER_SIZE - bufferPosition == 0) - flush(); - } else { - // is data larger then buffer? - if (length > BUFFER_SIZE) { - // we flush the buffer - if (bufferPosition > 0) - flush(); - // and write data at once - flushBuffer(b, offset, length); - bufferStart += length; - } else { - // we fill/flush the buffer (until the input is written) - int pos = 0; // position in the input data - int pieceLength; - while (pos < length) { - pieceLength = (length - pos < bytesLeft) ? length - pos : bytesLeft; - System.arraycopy(b, pos + offset, buffer, bufferPosition, pieceLength); - pos += pieceLength; - bufferPosition += pieceLength; - // if the buffer is full, flush it - bytesLeft = BUFFER_SIZE - bufferPosition; - if (bytesLeft == 0) { - flush(); - bytesLeft = BUFFER_SIZE; - } - } - } - } - } - - /** - * Forces any buffered output to be written. - */ - @Override - public void flush() throws IOException { - flushBuffer(buffer, bufferPosition); - bufferStart += bufferPosition; - bufferPosition = 0; - } - - /** - * Expert: implements buffer write. Writes bytes at the current position in - * the output. - * - * @param b the bytes to write - * @param len the number of bytes to write - */ - private void flushBuffer(byte[] b, int len) throws IOException { - flushBuffer(b, 0, len); - } - - /** - * Expert: implements buffer write. Writes bytes at the current position in - * the output. - * - * @param b the bytes to write - * @param offset the offset in the byte array - * @param len the number of bytes to write - */ - protected abstract void flushBuffer(byte[] b, int offset, int len) throws IOException; - - /** - * Closes this stream to further operations. - */ - @Override - public void close() throws IOException { - flush(); - } - - /** - * Returns the current position in this file, where the next write will - * occur. - * - * @see #seek(long) - */ - @Override - public long getFilePointer() { - return bufferStart + bufferPosition; - } - - /** - * Sets current position in this file, where the next write will occur. - * - * @see #getFilePointer() - */ - @Override - public void seek(long pos) throws IOException { - flush(); - bufferStart = pos; - } - - /** - * The number of bytes in the file. 
- */ - @Override - public abstract long length() throws IOException; - - -} diff --git a/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java b/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java index 9d744add44bb7..3e6c74625aebc 100644 --- a/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java +++ b/src/main/java/org/apache/lucene/store/RateLimitedFSDirectory.java @@ -1,10 +1,28 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.lucene.store; +import org.apache.lucene.store.IOContext.Context; + import java.io.IOException; import java.util.Collection; -import org.apache.lucene.store.IOContext.Context; - public final class RateLimitedFSDirectory extends Directory { private final FSDirectory delegate; @@ -13,7 +31,7 @@ public final class RateLimitedFSDirectory extends Directory { private final StoreRateLimiting.Listener rateListener; public RateLimitedFSDirectory(FSDirectory wrapped, StoreRateLimiting.Provider rateLimitingProvider, - StoreRateLimiting.Listener rateListener) { + StoreRateLimiting.Listener rateListener) { this.delegate = wrapped; this.rateLimitingProvider = rateLimitingProvider; this.rateListener = rateListener; @@ -142,7 +160,7 @@ static final class RateLimitedIndexOutput extends BufferedIndexOutput { private final StoreRateLimiting.Listener rateListener; RateLimitedIndexOutput(final RateLimiter rateLimiter, final StoreRateLimiting.Listener rateListener, final IndexOutput delegate) { - // TODO should we make buffer size configurable + // TODO if Lucene exposed in BufferedIndexOutput#getBufferSize, we could initialize it if the delegate is buffered if (delegate instanceof BufferedIndexOutput) { bufferedDelegate = (BufferedIndexOutput) delegate; this.delegate = delegate; diff --git a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index 77cc2942d6b16..96840a94f075e 100644 --- a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -1,3 +1,21 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.lucene.store; import org.elasticsearch.ElasticSearchIllegalArgumentException; @@ -35,8 +53,8 @@ public static Type fromString(String type) throws ElasticSearchIllegalArgumentEx } } - private final RateLimiter.SimpleRateLimiter rateLimiter = new RateLimiter.SimpleRateLimiter(0); - private volatile RateLimiter.SimpleRateLimiter actualRateLimiter; + private final XSimpleRateLimiter rateLimiter = new XSimpleRateLimiter(0); + private volatile XSimpleRateLimiter actualRateLimiter; private volatile Type type; diff --git a/src/main/java/org/apache/lucene/store/XSimpleRateLimiter.java b/src/main/java/org/apache/lucene/store/XSimpleRateLimiter.java new file mode 100644 index 0000000000000..20a375fe17cfa --- /dev/null +++ b/src/main/java/org/apache/lucene/store/XSimpleRateLimiter.java @@ -0,0 +1,98 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.lucene.store; + +import org.apache.lucene.util.ThreadInterruptedException; + +// LUCENE UPGRADE - this is a copy of a RateLimiter.SimpleRateLimiter fixing bug #2785 Lucene 4.3 should fix that +public final class XSimpleRateLimiter extends RateLimiter { + private volatile double mbPerSec; + private volatile double nsPerByte; + private volatile long lastNS; + + // TODO: we could also allow eg a sub class to dynamically + // determine the allowed rate, eg if an app wants to + // change the allowed rate over time or something + + /** mbPerSec is the MB/sec max IO rate */ + public XSimpleRateLimiter(double mbPerSec) { + setMbPerSec(mbPerSec); + } + + /** + * Sets an updated mb per second rate limit. + */ + @Override + public void setMbPerSec(double mbPerSec) { + this.mbPerSec = mbPerSec; + nsPerByte = 1000000000. / (1024*1024*mbPerSec); + + } + + /** + * The current mb per second rate limit. + */ + @Override + public double getMbPerSec() { + return this.mbPerSec; + } + + /** Pauses, if necessary, to keep the instantaneous IO + * rate at or below the target. NOTE: multiple threads + * may safely use this, however the implementation is + * not perfectly thread safe but likely in practice this + * is harmless (just means in some rare cases the rate + * might exceed the target). It's best to call this + * with a biggish count, not one byte at a time. 
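A hedged sketch of the intended calling pattern follows; the copy helper and stream names are illustrative, not part of this change. The point is to pause once per sizable chunk rather than per byte.

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.lucene.store.XSimpleRateLimiter;

// Hypothetical helper: copies a stream while keeping IO at or below mbPerSec,
// calling pause() once per chunk ("biggish counts"), not per byte.
final class ThrottledCopy {
    static void copy(InputStream source, OutputStream dest, double mbPerSec) throws IOException {
        XSimpleRateLimiter limiter = new XSimpleRateLimiter(mbPerSec);
        byte[] chunk = new byte[64 * 1024];
        int read;
        while ((read = source.read(chunk)) != -1) {
            dest.write(chunk, 0, read);
            limiter.pause(read); // sleeps only when the instantaneous rate exceeds the target
        }
    }
}
```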
+ * @return the pause time in nano seconds + * */ + @Override + public long pause(long bytes) { + if (bytes == 1) { + return 0; + } + + // TODO: this is purely instantaneous rate; maybe we + // should also offer decayed recent history one? + final long targetNS = lastNS = lastNS + ((long) (bytes * nsPerByte)); + final long startNs; + long curNS = startNs = System.nanoTime(); + if (lastNS < curNS) { + lastNS = curNS; + } + + // While loop because Thread.sleep doesn't always sleep + // enough: + while(true) { + final long pauseNS = targetNS - curNS; + if (pauseNS > 0) { + try { + Thread.sleep((int) (pauseNS/1000000), (int) (pauseNS % 1000000)); + } catch (InterruptedException ie) { + throw new ThreadInterruptedException(ie); + } + curNS = System.nanoTime(); + continue; + } + break; + } + + return curNS - startNs; + } + } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index e723f8fdededf..2a6cfbdff4f38 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -36,6 +36,8 @@ import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; import org.elasticsearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; @@ -110,6 +112,8 @@ import org.elasticsearch.action.percolate.TransportPercolateAction; import org.elasticsearch.action.search.*; import org.elasticsearch.action.search.type.*; +import org.elasticsearch.action.suggest.SuggestAction; +import org.elasticsearch.action.suggest.TransportSuggestAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateAction; @@ -171,6 +175,7 @@ protected void configure() { registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); + registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); registerAction(IndicesStatusAction.INSTANCE, TransportIndicesStatusAction.class); @@ -202,6 +207,7 @@ protected void configure() { registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class, TransportIndexDeleteAction.class, TransportShardDeleteAction.class); registerAction(CountAction.INSTANCE, TransportCountAction.class); + registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class); registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class); registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class, TransportShardMultiGetAction.class); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java 
b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 402e1209789c9..a8320dca68a8e 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeOperationRequest; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -37,18 +38,13 @@ public class ClusterHealthRequest extends MasterNodeOperationRequest { private String[] indices; - private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS); - private ClusterHealthStatus waitForStatus; - private int waitForRelocatingShards = -1; - private int waitForActiveShards = -1; - private String waitForNodes = ""; - private boolean local = false; + private Priority waitForEvents = null; ClusterHealthRequest() { } @@ -138,6 +134,15 @@ public boolean local() { return this.local; } + public ClusterHealthRequest waitForEvents(Priority waitForEvents) { + this.waitForEvents = waitForEvents; + return this; + } + + public Priority waitForEvents() { + return this.waitForEvents; + } + @Override public ActionRequestValidationException validate() { return null; @@ -163,6 +168,9 @@ public void readFrom(StreamInput in) throws IOException { waitForActiveShards = in.readInt(); waitForNodes = in.readString(); local = in.readBoolean(); + if (in.readBoolean()) { + waitForEvents = Priority.fromByte(in.readByte()); + } } @Override @@ -187,5 +195,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(waitForActiveShards); out.writeString(waitForNodes); out.writeBoolean(local); + if (waitForEvents == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeByte(waitForEvents.value()); + } } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index eaf231e91b263..bc0ae8fb99fd3 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.internal.InternalClusterAdminClient; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; /** @@ -82,6 +83,11 @@ public ClusterHealthRequestBuilder setWaitForNodes(String waitForNodes) { return this; } + public ClusterHealthRequestBuilder setWaitForEvents(Priority waitForEvents) { + request.waitForEvents(waitForEvents); + return this; + } + @Override protected void doExecute(ActionListener listener) { ((ClusterAdminClient) client).health(request, listener); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index b88a9207b4712..cfec939925188 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ 
b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -36,6 +37,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + /** * */ @@ -70,8 +74,37 @@ protected ClusterHealthResponse newResponse() { return new ClusterHealthResponse(); } + @Override + protected boolean localExecute(ClusterHealthRequest request) { + return request.local(); + } + @Override protected ClusterHealthResponse masterOperation(ClusterHealthRequest request, ClusterState unusedState) throws ElasticSearchException { + long endTime = System.currentTimeMillis() + request.timeout().millis(); + + if (request.waitForEvents() != null) { + final CountDownLatch latch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(ClusterState clusterState) { + latch.countDown(); + } + }); + + try { + latch.await(request.timeout().millis(), TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // ignore + } + } + + int waitFor = 5; if (request.waitForStatus() == null) { waitFor--; @@ -93,7 +126,6 @@ protected ClusterHealthResponse masterOperation(ClusterHealthRequest request, Cl ClusterState clusterState = clusterService.state(); return clusterHealth(request, clusterState); } - long endTime = System.currentTimeMillis() + request.timeout().millis(); while (true) { int waitForCounter = 0; ClusterState clusterState = clusterService.state(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index b75b88a0ff704..f1ea7d40645d6 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -95,8 +95,13 @@ public ClusterState execute(ClusterState currentState) { transientSettings.put(currentState.metaData().transientSettings()); for (Map.Entry entry : request.transientSettings().getAsMap().entrySet()) { if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) { - transientSettings.put(entry.getKey(), entry.getValue()); - changed = true; + String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue()); + if (error == null) { + transientSettings.put(entry.getKey(), entry.getValue()); + changed = true; + } else { + logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error); + } } else { logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey()); } @@ -106,8 +111,13 @@ public ClusterState execute(ClusterState currentState) { 
persistentSettings.put(currentState.metaData().persistentSettings()); for (Map.Entry entry : request.persistentSettings().getAsMap().entrySet()) { if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) { - changed = true; - persistentSettings.put(entry.getKey(), entry.getValue()); + String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue()); + if (error == null) { + persistentSettings.put(entry.getKey(), entry.getValue()); + changed = true; + } else { + logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error); + } } else { logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java new file mode 100644 index 0000000000000..c42d961dffc22 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -0,0 +1,45 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.action.admin.cluster.ClusterAction; +import org.elasticsearch.client.ClusterAdminClient; + +/** + */ +public class ClusterSearchShardsAction extends ClusterAction { + + public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction(); + public static final String NAME = "cluster/shards/search_shards"; + + private ClusterSearchShardsAction() { + super(NAME); + } + + @Override + public ClusterSearchShardsResponse newResponse() { + return new ClusterSearchShardsResponse(); + } + + @Override + public ClusterSearchShardsRequestBuilder newRequestBuilder(ClusterAdminClient client) { + return new ClusterSearchShardsRequestBuilder(client); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java new file mode 100644 index 0000000000000..1b8073bcb27af --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -0,0 +1,97 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.cluster.routing.ImmutableShardRouting; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + */ +public class ClusterSearchShardsGroup implements Streamable, ToXContent { + + private String index; + private int shardId; + ShardRouting[] shards; + + ClusterSearchShardsGroup() { + + } + + public ClusterSearchShardsGroup(String index, int shardId, ShardRouting[] shards) { + this.index = index; + this.shardId = shardId; + this.shards = shards; + } + + public static ClusterSearchShardsGroup readSearchShardsGroupResponse(StreamInput in) throws IOException { + ClusterSearchShardsGroup response = new ClusterSearchShardsGroup(); + response.readFrom(in); + return response; + } + + public String getIndex() { + return index; + } + + public int getShardId() { + return shardId; + } + + public ShardRouting[] getShards() { + return shards; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + index = in.readString(); + shardId = in.readVInt(); + shards = new ShardRouting[in.readVInt()]; + for (int i = 0; i < shards.length; i++) { + shards[i] = ImmutableShardRouting.readShardRoutingEntry(in, index, shardId); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shardId); + out.writeVInt(shards.length); + for (ShardRouting shardRouting : shards) { + shardRouting.writeToThin(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (ShardRouting shard : getShards()) { + shard.toXContent(builder, params); + } + builder.endArray(); + return builder; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java new file mode 100644 index 0000000000000..65724a236e8e1 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -0,0 +1,188 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.IgnoreIndices; +import org.elasticsearch.action.support.master.MasterNodeOperationRequest; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + */ +public class ClusterSearchShardsRequest extends MasterNodeOperationRequest { + private String[] indices; + @Nullable + private String routing; + @Nullable + private String preference; + private boolean local = false; + private String[] types = Strings.EMPTY_ARRAY; + private IgnoreIndices ignoreIndices = IgnoreIndices.DEFAULT; + + + public ClusterSearchShardsRequest() { + } + + public ClusterSearchShardsRequest(String... indices) { + indices(indices); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + /** + * Sets the indices the search will be executed on. + */ + public ClusterSearchShardsRequest indices(String... indices) { + if (indices == null) { + throw new ElasticSearchIllegalArgumentException("indices must not be null"); + } else { + for (int i = 0; i < indices.length; i++) { + if (indices[i] == null) { + throw new ElasticSearchIllegalArgumentException("indices[" + i + "] must not be null"); + } + } + } + this.indices = indices; + return this; + } + + /** + * The indices + */ + public String[] indices() { + return indices; + } + + public IgnoreIndices ignoreIndices() { + return ignoreIndices; + } + + public ClusterSearchShardsRequest ignoreIndices(IgnoreIndices ignoreIndices) { + this.ignoreIndices = ignoreIndices; + return this; + } + + /** + * The document types to execute the search against. Defaults to be executed against + * all types. + */ + public String[] types() { + return types; + } + + /** + * The document types to execute the search against. Defaults to be executed against + * all types. + */ + public ClusterSearchShardsRequest types(String... types) { + this.types = types; + return this; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public String routing() { + return this.routing; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public ClusterSearchShardsRequest routing(String routing) { + this.routing = routing; + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. + */ + public ClusterSearchShardsRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + /** + * Sets the preference to execute the search. Defaults to randomize across shards. 
Can be set to + * _local to prefer local shards, _primary to execute only on primary shards, or + * a custom value, which guarantees that the same order will be used across different requests. + */ + public ClusterSearchShardsRequest preference(String preference) { + this.preference = preference; + return this; + } + + public String preference() { + return this.preference; + } + + public ClusterSearchShardsRequest local(boolean local) { + this.local = local; + return this; + } + + public boolean local() { + return this.local; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + + indices = new String[in.readVInt()]; + for (int i = 0; i < indices.length; i++) { + indices[i] = in.readString(); + } + + routing = in.readOptionalString(); + preference = in.readOptionalString(); + + types = in.readStringArray(); + ignoreIndices = IgnoreIndices.fromId(in.readByte()); + local = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + + out.writeVInt(indices.length); + for (String index : indices) { + out.writeString(index); + } + + out.writeOptionalString(routing); + out.writeOptionalString(preference); + + out.writeStringArray(types); + out.writeByte(ignoreIndices.id()); + out.writeBoolean(local); + } + +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java new file mode 100644 index 0000000000000..9e45f73ce4675 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -0,0 +1,101 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.IgnoreIndices; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.internal.InternalClusterAdminClient; + +/** + */ +public class ClusterSearchShardsRequestBuilder extends MasterNodeOperationRequestBuilder { + + public ClusterSearchShardsRequestBuilder(ClusterAdminClient clusterClient) { + super((InternalClusterAdminClient) clusterClient, new ClusterSearchShardsRequest()); + } + + /** + * Sets the indices the search will be executed on. + */ + public ClusterSearchShardsRequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + /** + * The document types to execute the search against. Defaults to be executed against + * all types. 
+ */ + public ClusterSearchShardsRequestBuilder setTypes(String... types) { + request.types(types); + return this; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public ClusterSearchShardsRequestBuilder setRouting(String routing) { + request.routing(routing); + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. + */ + public ClusterSearchShardsRequestBuilder setRouting(String... routing) { + request.routing(routing); + return this; + } + + /** + * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to + * _local to prefer local shards, _primary to execute only on primary shards, or + * a custom value, which guarantees that the same order will be used across different requests. + */ + public ClusterSearchShardsRequestBuilder setPreference(String preference) { + request.preference(preference); + return this; + } + + /** + * Specifies what type of requested indices to ignore. For example indices that don't exist. + */ + public ClusterSearchShardsRequestBuilder setIgnoreIndices(IgnoreIndices ignoreIndices) { + request().ignoreIndices(ignoreIndices); + return this; + } + + /** + * Specifies if request should be executed on local node rather than on master. + */ + public ClusterSearchShardsRequestBuilder setLocal(boolean local) { + request().local(local); + return this; + } + + + @Override + protected void doExecute(ActionListener listener) { + ((ClusterAdminClient) client).searchShards(request, listener); + } + +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java new file mode 100644 index 0000000000000..2ae029073aca4 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -0,0 +1,107 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +/** + */ +public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent { + + private ClusterSearchShardsGroup[] groups; + private DiscoveryNode[] nodes; + + ClusterSearchShardsResponse() { + + } + + public ClusterSearchShardsGroup[] getGroups() { + return groups; + } + + public DiscoveryNode[] getNodes() { + return nodes; + } + + public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes) { + this.groups = groups; + this.nodes = nodes; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + groups = new ClusterSearchShardsGroup[in.readVInt()]; + for (int i = 0; i < groups.length; i++) { + groups[i] = ClusterSearchShardsGroup.readSearchShardsGroupResponse(in); + } + nodes = new DiscoveryNode[in.readVInt()]; + for (int i = 0; i < nodes.length; i++) { + nodes[i] = DiscoveryNode.readNode(in); + } + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(groups.length); + for (ClusterSearchShardsGroup response : groups) { + response.writeTo(out); + } + out.writeVInt(nodes.length); + for (DiscoveryNode node : nodes) { + node.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (DiscoveryNode node : nodes) { + builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("name", node.name()); + builder.field("transport_address", node.getAddress()); + if (!node.attributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry attr : node.attributes().entrySet()) { + builder.field(attr.getKey(), attr.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + builder.startArray("shards"); + for (ClusterSearchShardsGroup group : groups) { + group.toXContent(builder, params); + } + builder.endArray(); + return builder; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java new file mode 100644 index 0000000000000..e2fb0f9a33883 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -0,0 +1,103 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.shards; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Map; +import java.util.Set; + +import static com.google.common.collect.Sets.newHashSet; + +/** + */ +public class TransportClusterSearchShardsAction extends TransportMasterNodeOperationAction { + + @Inject + public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super(settings, transportService, clusterService, threadPool); + } + + @Override + protected String transportAction() { + return ClusterSearchShardsAction.NAME; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected boolean localExecute(ClusterSearchShardsRequest request) { + return request.local(); + } + + @Override + protected ClusterSearchShardsRequest newRequest() { + return new ClusterSearchShardsRequest(); + } + + @Override + protected ClusterSearchShardsResponse newResponse() { + return new ClusterSearchShardsResponse(); + } + + @Override + protected ClusterSearchShardsResponse masterOperation(ClusterSearchShardsRequest request, ClusterState state) throws ElasticSearchException { + ClusterState clusterState = clusterService.state(); + String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.ignoreIndices(), true); + Map> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); + Set nodeIds = newHashSet(); + GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + ShardRouting shard; + ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; + int currentGroup = 0; + for (ShardIterator shardIt : groupShardsIterator) { + String index = shardIt.shardId().getIndex(); + int shardId = shardIt.shardId().getId(); + ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()]; + int currentShard = 0; + shardIt.reset(); + while ((shard = shardIt.nextOrNull()) != null) { + shardRoutings[currentShard++] = shard; + nodeIds.add(shard.currentNodeId()); + } + groupResponses[currentGroup++] = new ClusterSearchShardsGroup(index, shardId, shardRoutings); + } + DiscoveryNode[] nodes = new DiscoveryNode[nodeIds.size()]; + int currentNode = 0; + for (String nodeId : nodeIds) { + 
nodes[currentNode++] = clusterState.getNodes().get(nodeId); + } + return new ClusterSearchShardsResponse(groupResponses, nodes); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index c894e4f94c5f6..378653625b0bd 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.ElasticSearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; @@ -33,11 +32,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexShardMissingException; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.service.IndexShard; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -81,21 +77,6 @@ protected boolean ignoreNonActiveExceptions() { return true; } - @Override - protected boolean ignoreException(Throwable t) { - Throwable actual = ExceptionsHelper.unwrapCause(t); - if (actual instanceof IllegalIndexShardStateException) { - return true; - } - if (actual instanceof IndexMissingException) { - return true; - } - if (actual instanceof IndexShardMissingException) { - return true; - } - return false; - } - @Override protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { int successfulShards = 0; diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 78dceea660e60..0930540d81316 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -89,7 +89,10 @@ public MultiSearchRequest add(BytesReference data, boolean contentUnsafe, continue; } - SearchRequest searchRequest = new SearchRequest(indices); + SearchRequest searchRequest = new SearchRequest(); + if (indices != null) { + searchRequest.indices(indices); + } if (ignoreIndices != null) { searchRequest.ignoreIndices(ignoreIndices); } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9306c56cab42c..214f5a8d27ac7 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -91,14 +91,14 @@ public SearchRequest() { * will run against all indices. */ public SearchRequest(String... indices) { - this.indices = indices; + indices(indices); } /** * Constructs a new search request against the provided indices with the given search source. 
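Taken together, the classes above form a new cluster admin API that reports which shards (and nodes) a search would be executed on. A minimal usage sketch, using only the builder methods and response accessors visible in this change; the index name, routing value and printed fields are illustrative:

import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.node.DiscoveryNode;

public class SearchShardsExample {

    // Prints the nodes that would serve a search against "twitter" routed with "user1".
    public static void printSearchShards(Client client) {
        ClusterSearchShardsResponse response = client.admin().cluster()
                .prepareSearchShards("twitter")   // indices the hypothetical search targets
                .setRouting("user1")              // optional routing value(s)
                .setPreference("_local")          // same preference values as a regular search
                .setLocal(true)                   // resolve from the local cluster state, not the master
                .execute().actionGet();

        System.out.println(response.getGroups().length + " shard groups returned");
        for (DiscoveryNode node : response.getNodes()) {
            System.out.println(node.getId() + " -> " + node.getAddress());
        }
    }
}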
*/ public SearchRequest(String[] indices, byte[] source) { - this.indices = indices; + indices(indices); this.source = new BytesArray(source); } @@ -135,6 +135,15 @@ public void beforeLocalFork() { * Sets the indices the search will be executed on. */ public SearchRequest indices(String... indices) { + if (indices == null) { + throw new ElasticSearchIllegalArgumentException("indices must not be null"); + } else { + for (int i = 0; i < indices.length; i++) { + if (indices[i] == null) { + throw new ElasticSearchIllegalArgumentException("indices[" + i +"] must not be null"); + } + } + } this.indices = indices; return this; } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 7f740b1dad691..40c49b2698489 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -657,9 +657,9 @@ public SearchRequestBuilder setSuggestText(String globalText) { } /** - * Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.Suggestion)}. + * Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder)}. */ - public SearchRequestBuilder addSuggestion(SuggestBuilder.Suggestion suggestion) { + public SearchRequestBuilder addSuggestion(SuggestBuilder.SuggestionBuilder suggestion) { suggestBuilder().addSuggestion(suggestion); return this; } diff --git a/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java new file mode 100644 index 0000000000000..d78c4cacb6d03 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.suggest; + +import java.io.IOException; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Internal suggest request executed directly against a specific index shard. 
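The null checks added to SearchRequest#indices(String...) above turn what previously surfaced later (typically as a NullPointerException during serialization) into an immediate, descriptive failure. A small sketch of the new behaviour; the index names are illustrative:

import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.action.search.SearchRequest;

public class SearchRequestValidationExample {

    public static void main(String[] args) {
        // fine: the constructor now delegates to indices(...), which accepts non-null names
        SearchRequest ok = new SearchRequest("index1", "index2");

        try {
            // a null element is now rejected up front
            new SearchRequest("index1", null);
        } catch (ElasticSearchIllegalArgumentException e) {
            // message: "indices[1] must not be null"
        }
    }
}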
+ */ +final class ShardSuggestRequest extends BroadcastShardOperationRequest { + + private BytesReference suggestSource; + + ShardSuggestRequest() { + } + + public ShardSuggestRequest(String index, int shardId, SuggestRequest request) { + super(index, shardId, request); + this.suggestSource = request.suggest(); + } + + public BytesReference suggest() { + return suggestSource; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + suggestSource = in.readBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesReference(suggestSource); + } +} diff --git a/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java new file mode 100644 index 0000000000000..4570b1bc20051 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/ShardSuggestResponse.java @@ -0,0 +1,60 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.suggest; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.suggest.Suggest; + +import java.io.IOException; + +/** + * Internal suggest response of a shard suggest request executed directly against a specific shard. + */ +class ShardSuggestResponse extends BroadcastShardOperationResponse { + + private final Suggest suggest; + + ShardSuggestResponse() { + this.suggest = new Suggest(); + } + + public ShardSuggestResponse(String index, int shardId, Suggest suggest) { + super(index, shardId); + this.suggest = suggest; + } + + public Suggest getSuggest() { + return this.suggest; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + suggest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + suggest.writeTo(out); + } +} diff --git a/src/main/java/org/elasticsearch/jmx/AbstractJmxModule.java b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java similarity index 55% rename from src/main/java/org/elasticsearch/jmx/AbstractJmxModule.java rename to src/main/java/org/elasticsearch/action/suggest/SuggestAction.java index 6a6be47b29f91..fad368d617796 100644 --- a/src/main/java/org/elasticsearch/jmx/AbstractJmxModule.java +++ b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java @@ -17,29 +17,30 @@ * under the License. 
*/ -package org.elasticsearch.jmx; +package org.elasticsearch.action.suggest; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.Client; +import org.elasticsearch.search.suggest.Suggest; /** - * Simple based class for JMX related services with {@link #doConfigure()} only being called if - * jmx is enabled. */ -public abstract class AbstractJmxModule extends AbstractModule { +public class SuggestAction extends Action { - private final Settings settings; + public static final SuggestAction INSTANCE = new SuggestAction(); + public static final String NAME = "suggest"; - protected AbstractJmxModule(Settings settings) { - this.settings = settings; + private SuggestAction() { + super(NAME); } @Override - protected void configure() { - if (JmxService.shouldExport(settings)) { - doConfigure(); - } + public SuggestResponse newResponse() { + return new SuggestResponse(new Suggest()); } - protected abstract void doConfigure(); + @Override + public SuggestRequestBuilder newRequestBuilder(Client client) { + return new SuggestRequestBuilder(client); + } } diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java b/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java new file mode 100644 index 0000000000000..4a2f8798ab639 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java @@ -0,0 +1,168 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.suggest; + +import java.io.IOException; +import java.util.Arrays; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; + +/** + * A request to get suggestions for corrections of phrases. Best created with + * {@link org.elasticsearch.client.Requests#suggestRequest(String...)}. + *
+ *
The request requires the query source to be set either using {@link #query(org.elasticsearch.index.query.QueryBuilder)}, + * or {@link #query(byte[])}. + * + * @see SuggestResponse + * @see org.elasticsearch.client.Client#suggest(SuggestRequest) + * @see org.elasticsearch.client.Requests#suggestRequest(String...) + */ +public final class SuggestRequest extends BroadcastOperationRequest { + + static final XContentType contentType = Requests.CONTENT_TYPE; + + @Nullable + private String routing; + + @Nullable + private String preference; + + private BytesReference suggestSource; + private boolean suggestSourceUnsafe; + + SuggestRequest() { + } + + /** + * Constructs a new suggest request against the provided indices. No indices provided means it will + * run against all indices. + */ + public SuggestRequest(String... indices) { + super(indices); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + return validationException; + } + + @Override + protected void beforeStart() { + if (suggestSourceUnsafe) { + suggest(suggestSource.copyBytesArray(), false); + } + } + + /** + * The Phrase to get correction suggestions for + */ + BytesReference suggest() { + return suggestSource; + } + + /** + * set a new source for the suggest query + */ + public SuggestRequest suggest(BytesReference suggestSource) { + return suggest(suggestSource, false); + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public String routing() { + return this.routing; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public SuggestRequest routing(String routing) { + this.routing = routing; + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. + */ + public SuggestRequest routing(String... 
routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + public SuggestRequest preference(String preference) { + this.preference = preference; + return this; + } + + public String preference() { + return this.preference; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + routing = in.readOptionalString(); + preference = in.readOptionalString(); + suggest(in.readBytesReference()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(routing); + out.writeOptionalString(preference); + out.writeBytesReference(suggestSource); + } + + @Override + public String toString() { + String sSource = "_na_"; + try { + sSource = XContentHelper.convertToJson(suggestSource, false); + } catch (Exception e) { + // ignore + } + return "[" + Arrays.toString(indices) + "]" + ", suggestSource[" + sSource + "]"; + } + + public SuggestRequest suggest(BytesReference suggestSource, boolean contentUnsafe) { + this.suggestSource = suggestSource; + this.suggestSourceUnsafe = contentUnsafe; + return this; + } + + public SuggestRequest suggest(String source) { + return suggest(new BytesArray(source)); + } + +} diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java b/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java new file mode 100644 index 0000000000000..5edb9d4268e0e --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java @@ -0,0 +1,98 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.suggest; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.internal.InternalClient; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; + +import java.io.IOException; + +/** + * A suggest action request builder. 
+ */ +public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder { + + final SuggestBuilder suggest = new SuggestBuilder(); + + public SuggestRequestBuilder(Client client) { + super((InternalClient) client, new SuggestRequest()); + } + + /** + * Add a definition for suggestions to the request + */ + public SuggestRequestBuilder addSuggestion(SuggestionBuilder suggestion) { + suggest.addSuggestion(suggestion); + return this; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public SuggestRequestBuilder setRouting(String routing) { + request.routing(routing); + return this; + } + + public SuggestRequestBuilder setSuggestText(String globalText) { + this.suggest.setText(globalText); + return this; + } + + /** + * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to + * _local to prefer local shards, _primary to execute only on primary shards, + * _shards:x,y to operate on shards x & y, or a custom value, which guarantees that the same order + * will be used across different requests. + */ + public SuggestRequestBuilder setPreference(String preference) { + request.preference(preference); + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. + */ + public SuggestRequestBuilder setRouting(String... routing) { + request.routing(routing); + return this; + } + + @Override + protected void doExecute(ActionListener listener) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(SuggestRequest.contentType); + suggest.toXContent(builder, ToXContent.EMPTY_PARAMS); + request.suggest(builder.bytes()); + } catch (IOException e) { + throw new ElasticSearchException("Unable to build suggestion request", e); + } + + ((InternalClient) client).suggest(request, listener); + } +} diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java b/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java new file mode 100644 index 0000000000000..7abe441751376 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java @@ -0,0 +1,83 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
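From a caller's point of view, the suggest classes introduced in this change combine as sketched below. Only methods visible in this diff are used; building the concrete SuggestionBuilder (term suggester, phrase suggester, ...) is not part of this change, so it is taken as a parameter, and the index name and suggest text are illustrative:

import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;

public class SuggestApiExample {

    public static Suggest suggest(Client client, SuggestBuilder.SuggestionBuilder suggestion) {
        // SuggestRequestBuilder#doExecute serializes the SuggestBuilder to XContent
        // and ships it as the request's suggest source.
        SuggestResponse response = client.prepareSuggest("twitter")
                .setSuggestText("peter the greet")  // global text shared by all suggestions
                .addSuggestion(suggestion)
                .setPreference("_local")
                .execute().actionGet();
        return response.getSuggest();
    }
}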
+ */ + +package org.elasticsearch.action.suggest; + +import java.io.IOException; +import java.util.List; + +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.suggest.Suggest; + +/** + * The response of the suggest action. + */ +public final class SuggestResponse extends BroadcastOperationResponse { + + private final Suggest suggest; + + SuggestResponse(Suggest suggest) { + this.suggest = suggest; + } + + SuggestResponse(Suggest suggest, int totalShards, int successfulShards, int failedShards, List shardFailures) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.suggest = suggest; + } + + /** + * The Suggestions of the phrase. + */ + public Suggest getSuggest() { + return suggest; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.suggest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.suggest.writeTo(out); + } + + @Override + public String toString() { + String source; + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + suggest.toXContent(builder, null); + source = XContentHelper.convertToJson(builder.bytes(), true); + } catch (IOException e) { + source = "Error: " + e.getMessage(); + } + return "Suggest Response["+source+"]"; + } +} diff --git a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java new file mode 100644 index 0000000000000..f1cd2b9677f1d --- /dev/null +++ b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -0,0 +1,175 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.suggest; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.service.IndexService; +import org.elasticsearch.index.shard.service.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.SuggestPhase; +import org.elasticsearch.search.suggest.SuggestionSearchContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReferenceArray; + +import static com.google.common.collect.Lists.newArrayList; + +/** + * Defines the transport of a suggestion request across the cluster + */ +public class TransportSuggestAction extends TransportBroadcastOperationAction { + + private final IndicesService indicesService; + + private final SuggestPhase suggestPhase; + + @Inject + public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, + IndicesService indicesService) { + super(settings, threadPool, clusterService, transportService); + this.indicesService = indicesService; + this.suggestPhase = new SuggestPhase(settings); + } + + @Override + protected String executor() { + return ThreadPool.Names.SEARCH; + } + + @Override + protected String transportAction() { + return SuggestAction.NAME; + } + + @Override + protected SuggestRequest newRequest() { + return new SuggestRequest(); + } + + @Override + protected ShardSuggestRequest newShardRequest() { + return new ShardSuggestRequest(); + } + + @Override + protected ShardSuggestRequest newShardRequest(ShardRouting shard, SuggestRequest request) { + return new ShardSuggestRequest(shard.index(), shard.id(), request); + } + + @Override + protected ShardSuggestResponse newShardResponse() { + return new ShardSuggestResponse(); + } + + @Override + protected GroupShardsIterator shards(ClusterState clusterState, SuggestRequest request, String[] concreteIndices) { + Map> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, SuggestRequest request) { + 
return state.blocks().globalBlockedException(ClusterBlockLevel.READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, SuggestRequest countRequest, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); + } + + @Override + protected SuggestResponse newResponse(SuggestRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { + int successfulShards = 0; + int failedShards = 0; + + final Map> groupedSuggestions = new HashMap>(); + + List shardFailures = null; + for (int i = 0; i < shardsResponses.length(); i++) { + Object shardResponse = shardsResponses.get(i); + if (shardResponse == null) { + failedShards++; + } else if (shardResponse instanceof BroadcastShardOperationFailedException) { + failedShards++; + if (shardFailures == null) { + shardFailures = newArrayList(); + } + shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); + } else { + Suggest suggest = ((ShardSuggestResponse) shardResponse).getSuggest(); + Suggest.group(groupedSuggestions, suggest); + successfulShards++; + } + } + + return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(), successfulShards, failedShards, shardFailures); + } + + @Override + protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) throws ElasticSearchException { + IndexService indexService = indicesService.indexServiceSafe(request.index()); + IndexShard indexShard = indexService.shardSafe(request.shardId()); + final Engine.Searcher searcher = indexShard.searcher(); + XContentParser parser = null; + try { + BytesReference suggest = request.suggest(); + if (suggest != null && suggest.length() > 0) { + parser = XContentFactory.xContent(suggest).createParser(suggest); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticSearchIllegalArgumentException("suggest content missing"); + } + final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService()); + final Suggest result = suggestPhase.execute(context, searcher.reader()); + return new ShardSuggestResponse(request.index(), request.shardId(), result); + } + return new ShardSuggestResponse(request.index(), request.shardId(), new Suggest()); + } catch (Throwable ex) { + throw new ElasticSearchException("failed to execute suggest", ex); + } finally { + searcher.release(); + if (parser != null) { + parser.close(); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/jmx/MBean.java b/src/main/java/org/elasticsearch/action/suggest/package-info.java similarity index 76% rename from src/main/java/org/elasticsearch/jmx/MBean.java rename to src/main/java/org/elasticsearch/action/suggest/package-info.java index 1b3deb75eb146..2499b54c7ed2e 100644 --- a/src/main/java/org/elasticsearch/jmx/MBean.java +++ b/src/main/java/org/elasticsearch/action/suggest/package-info.java @@ -17,18 +17,7 @@ * under the License. */ -package org.elasticsearch.jmx; - -import java.lang.annotation.*; - /** - * + * Suggest action. 
*/ -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE}) -@Inherited -public @interface MBean { - String description() default ""; - - String objectName() default ""; -} +package org.elasticsearch.action.suggest; \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java index 215c330c18de0..4ec088dc6bb8c 100644 --- a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java +++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationResponse.java @@ -19,27 +19,25 @@ package org.elasticsearch.action.support.broadcast; -import com.google.common.collect.ImmutableList; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ShardOperationFailedException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; /** * Base class for all broadcast operation based responses. */ public abstract class BroadcastOperationResponse extends ActionResponse { - + private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0]; private int totalShards; private int successfulShards; private int failedShards; - private List shardFailures = ImmutableList.of(); + private ShardOperationFailedException[] shardFailures = EMPTY; protected BroadcastOperationResponse() { } @@ -48,10 +46,7 @@ protected BroadcastOperationResponse(int totalShards, int successfulShards, int this.totalShards = totalShards; this.successfulShards = successfulShards; this.failedShards = failedShards; - this.shardFailures = shardFailures; - if (shardFailures == null) { - this.shardFailures = ImmutableList.of(); - } + this.shardFailures = shardFailures == null ? EMPTY : shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]); } /** @@ -78,10 +73,7 @@ public int getFailedShards() { /** * The list of shard failures exception. 
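With shard failures now kept in an array (note the EMPTY constant and the constructor change above, and the new getShardFailures() signature below), callers of any broadcast response iterate the array directly. A small sketch; the printed fields come from the ShardOperationFailedException interface:

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;

public class ShardFailureLogging {

    // Works for any broadcast response (refresh, count, suggest, ...); the accessor
    // now returns ShardOperationFailedException[] instead of a List.
    public static void logShardFailures(BroadcastOperationResponse response) {
        for (ShardOperationFailedException failure : response.getShardFailures()) {
            System.out.println(failure.index() + "[" + failure.shardId() + "] failed: " + failure.reason());
        }
    }
}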
*/ - public List getShardFailures() { - if (shardFailures == null) { - return ImmutableList.of(); - } + public ShardOperationFailedException[] getShardFailures() { return shardFailures; } @@ -93,9 +85,9 @@ public void readFrom(StreamInput in) throws IOException { failedShards = in.readVInt(); int size = in.readVInt(); if (size > 0) { - shardFailures = new ArrayList(size); + shardFailures = new ShardOperationFailedException[size]; for (int i = 0; i < size; i++) { - shardFailures.add(readShardOperationFailed(in)); + shardFailures[i] = readShardOperationFailed(in); } } } @@ -106,7 +98,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(totalShards); out.writeVInt(successfulShards); out.writeVInt(failedShards); - out.writeVInt(shardFailures.size()); + out.writeVInt(shardFailures.length); for (ShardOperationFailedException exp : shardFailures) { exp.writeTo(out); } diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java index 3b811257caabd..8aff54f80e4f7 100644 --- a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support.broadcast; import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterService; @@ -32,6 +33,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -95,14 +99,41 @@ protected boolean accumulateExceptions() { return true; } + /** + * Override this method to ignore specific exception, note, the result should be OR'ed with the call + * to super#ignoreException since there is additional logic here.... + */ protected boolean ignoreException(Throwable t) { + if (ignoreIllegalShardState()) { + Throwable actual = ExceptionsHelper.unwrapCause(t); + if (actual instanceof IllegalIndexShardStateException) { + return true; + } + if (actual instanceof IndexMissingException) { + return true; + } + if (actual instanceof IndexShardMissingException) { + return true; + } + } return false; } + /** + * Should non active routing shard state be ignore or node, defaults to false. + */ protected boolean ignoreNonActiveExceptions() { return false; } + /** + * Should the API ignore illegal shard state cases, for example, if the shard is actually missing on the + * target node (cause it hasn't been allocated there for example). Defaults to true. 
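The new ignoreIllegalShardState() hook generalizes what TransportRefreshAction previously did in its own ignoreException() override (removed earlier in this change): missing or not-yet-started shards are now ignored by default for every broadcast action. A subclass that would rather surface those conditions as shard failures can opt out; sketched here as a fragment that would live inside a concrete TransportBroadcastOperationAction subclass:

    // Hypothetical override inside a TransportBroadcastOperationAction subclass that
    // wants IndexMissingException / IndexShardMissingException /
    // IllegalIndexShardStateException reported rather than silently ignored.
    @Override
    protected boolean ignoreIllegalShardState() {
        return false;
    }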
+ */ + protected boolean ignoreIllegalShardState() { + return true; + } + protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request); protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices); diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java index 7eab410441603..2cac917b41ae6 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.support.IgnoreIndices; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -35,6 +36,8 @@ public class IndicesReplicationOperationRequest l throw blockException; } // get actual indices - String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices()); + String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.ignoreIndices(), false); blockException = checkRequestBlock(clusterState, request, concreteIndices); if (blockException != null) { throw blockException; @@ -86,36 +86,39 @@ protected void doExecute(final Request request, final ActionListener l final AtomicReferenceArray indexResponses = new AtomicReferenceArray(concreteIndices.length); Map> routingMap = resolveRouting(clusterState, request); - - for (final String index : concreteIndices) { - Set routing = null; - if (routingMap != null) { - routing = routingMap.get(index); - } - IndexRequest indexRequest = newIndexRequestInstance(request, index, routing); - // no threading needed, all is done on the index replication one - indexRequest.listenerThreaded(false); - indexAction.execute(indexRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse result) { - indexResponses.set(indexCounter.getAndIncrement(), result); - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } + if (concreteIndices == null || concreteIndices.length == 0) { + listener.onResponse(newResponseInstance(request, indexResponses)); + } else { + for (final String index : concreteIndices) { + Set routing = null; + if (routingMap != null) { + routing = routingMap.get(index); } - - @Override - public void onFailure(Throwable e) { - e.printStackTrace(); - int index = indexCounter.getAndIncrement(); - if (accumulateExceptions()) { - indexResponses.set(index, e); + IndexRequest indexRequest = newIndexRequestInstance(request, index, routing); + // no threading needed, all is done on the index replication one + indexRequest.listenerThreaded(false); + indexAction.execute(indexRequest, new ActionListener() { + @Override + public void onResponse(IndexResponse result) { + indexResponses.set(indexCounter.getAndIncrement(), result); + if (completionCounter.decrementAndGet() == 0) { + listener.onResponse(newResponseInstance(request, indexResponses)); + } } - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, 
indexResponses)); + + @Override + public void onFailure(Throwable e) { + e.printStackTrace(); + int index = indexCounter.getAndIncrement(); + if (accumulateExceptions()) { + indexResponses.set(index, e); + } + if (completionCounter.decrementAndGet() == 0) { + listener.onResponse(newResponseInstance(request, indexResponses)); + } } - } - }); + }); + } } } diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 2e71cb3b929aa..40c3c5025e7c8 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -469,7 +469,7 @@ public void handleException(TransportException exp) { } break; } - // we should never get here, but here we go + // we won't find a primary if there are no shards in the shard iterator, retry... if (!foundPrimary) { retry(fromClusterEvent, null); return false; diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index 14f3acfef0332..755baff847281 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -45,6 +45,9 @@ import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.*; +import org.elasticsearch.action.suggest.SuggestRequest; +import org.elasticsearch.action.suggest.SuggestRequestBuilder; +import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; @@ -331,6 +334,29 @@ public interface Client { */ CountRequestBuilder prepareCount(String... indices); + /** + * Suggestion matching a specific phrase. + * + * @param request The suggest request + * @return The result future + * @see Requests#suggestRequest(String...) + */ + ActionFuture suggest(SuggestRequest request); + + /** + * Suggestions matching a specific phrase. + * + * @param request The suggest request + * @param listener A listener to be notified of the result + * @see Requests#suggestRequest(String...) + */ + void suggest(SuggestRequest request, ActionListener listener); + + /** + * Suggestions matching a specific phrase. + */ + SuggestRequestBuilder prepareSuggest(String... indices); + /** * Search across one or more indices and one or more types with a query. 
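The Client interface above also gains a listener-based variant of suggest. A non-blocking sketch that drives SuggestRequest directly rather than through the builder; the suggest source is passed as a JSON string whose exact layout depends on the suggesters in use, so the body shown is only illustrative:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.client.Client;

public class AsyncSuggestExample {

    public static void suggestAsync(Client client) {
        SuggestRequest request = new SuggestRequest("twitter").preference("_primary");
        // illustrative term-suggester body; the real syntax is defined by the suggest parsers
        request.suggest("{ \"my-suggestion\" : { \"text\" : \"peter the greet\", \"term\" : { \"field\" : \"message\" } } }");

        client.suggest(request, new ActionListener<SuggestResponse>() {
            @Override
            public void onResponse(SuggestResponse response) {
                // inspect response.getSuggest()
            }

            @Override
            public void onFailure(Throwable e) {
                // handle the failure, e.g. log it
            }
        });
    }
}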
* diff --git a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 754ac1ceeac68..f55d678c36c13 100644 --- a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -45,6 +45,9 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -235,4 +238,25 @@ public interface ClusterAdminClient { * Restarts nodes in the cluster. */ NodesRestartRequestBuilder prepareNodesRestart(String... nodesIds); + + /** + * Returns list of shards the given search would be executed on. + */ + ActionFuture searchShards(ClusterSearchShardsRequest request); + + /** + * Returns list of shards the given search would be executed on. + */ + void searchShards(ClusterSearchShardsRequest request, ActionListener listener); + + /** + * Returns list of shards the given search would be executed on. + */ + ClusterSearchShardsRequestBuilder prepareSearchShards(); + + /** + * Returns list of shards the given search would be executed on. + */ + ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices); + } diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index 5debbcb68e0a1..48dc30b70bbfd 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; @@ -369,6 +370,20 @@ public static ClusterHealthRequest clusterHealthRequest(String... indices) { return new ClusterHealthRequest(indices); } + /** + * List all shards for the give search + */ + public static ClusterSearchShardsRequest clusterSearchShardsRequest() { + return new ClusterSearchShardsRequest(); + } + + /** + * List all shards for the give search + */ + public static ClusterSearchShardsRequest clusterSearchShardsRequest(String... indices) { + return new ClusterSearchShardsRequest(indices); + } + /** * Creates a nodes info request against all the nodes. * @@ -436,4 +451,5 @@ public static NodesRestartRequest nodesRestartRequest() { public static NodesRestartRequest nodesRestartRequest(String... 
nodesIds) { return new NodesRestartRequest(nodesIds); } + } diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 09a10fd8efd6c..9261d3034ef82 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -53,6 +53,10 @@ import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.*; +import org.elasticsearch.action.suggest.SuggestAction; +import org.elasticsearch.action.suggest.SuggestRequest; +import org.elasticsearch.action.suggest.SuggestRequestBuilder; +import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; @@ -260,6 +264,21 @@ public CountRequestBuilder prepareCount(String... indices) { return new CountRequestBuilder(this).setIndices(indices); } + @Override + public ActionFuture suggest(final SuggestRequest request) { + return execute(SuggestAction.INSTANCE, request); + } + + @Override + public void suggest(final SuggestRequest request, final ActionListener listener) { + execute(SuggestAction.INSTANCE, request, listener); + } + + @Override + public SuggestRequestBuilder prepareSuggest(String... indices) { + return new SuggestRequestBuilder(this).setIndices(indices); + } + @Override public ActionFuture moreLikeThis(final MoreLikeThisRequest request) { return execute(MoreLikeThisAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java index a613b202895b2..df491d2fd33ba 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java @@ -53,6 +53,10 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; @@ -203,4 +207,26 @@ public void nodesShutdown(final NodesShutdownRequest request, final ActionListen public NodesShutdownRequestBuilder prepareNodesShutdown(String... 
nodesIds) { return new NodesShutdownRequestBuilder(this).setNodesIds(nodesIds); } + + @Override + public ActionFuture searchShards(final ClusterSearchShardsRequest request) { + return execute(ClusterSearchShardsAction.INSTANCE, request); + } + + @Override + public void searchShards(final ClusterSearchShardsRequest request, final ActionListener listener) { + execute(ClusterSearchShardsAction.INSTANCE, request, listener); + } + + @Override + public ClusterSearchShardsRequestBuilder prepareSearchShards() { + return new ClusterSearchShardsRequestBuilder(this); + } + + @Override + public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) { + return new ClusterSearchShardsRequestBuilder(this).setIndices(indices); + } + + } diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index c466e1aafada2..6bf45f631863f 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -42,6 +42,8 @@ import org.elasticsearch.action.percolate.PercolateRequest; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.*; +import org.elasticsearch.action.suggest.SuggestRequest; +import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.AdminClient; @@ -378,6 +380,16 @@ public void count(CountRequest request, ActionListener listener) internalClient.count(request, listener); } + @Override + public ActionFuture suggest(SuggestRequest request) { + return internalClient.suggest(request); + } + + @Override + public void suggest(SuggestRequest request, ActionListener listener) { + internalClient.suggest(request, listener); + } + @Override public ActionFuture search(SearchRequest request) { return internalClient.search(request); diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index ab660cc42c109..3559a769d72a2 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -209,6 +209,19 @@ public ClusterState settingsFilter(SettingsFilter settingsFilter) { return this; } + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return builder.string(); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (!params.paramAsBoolean("filter_nodes", false)) { @@ -305,7 +318,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH)); builder.startObject("settings"); - Settings settings = settingsFilter.filterSettings(indexMetaData.settings()); + Settings settings = indexMetaData.settings(); + if (settingsFilter != null) { + settings = settingsFilter.filterSettings(indexMetaData.settings()); + } for (Map.Entry entry : settings.getAsMap().entrySet()) { builder.field(entry.getKey(), entry.getValue()); } diff --git 
a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index f5a167c7f6d75..269dc5f499658 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; @@ -442,6 +443,9 @@ private void innerParse(XContentParser parser, ParseContext context) throws IOEx boolean incLocationTimestamp = false; if (context.idParsingStillNeeded() && fieldName.equals(idPart)) { if (context.locationId + 1 == id.pathElements().length) { + if (!t.isValue()) { + throw new MapperParsingException("id field must be a value but was either an object or an array"); + } context.id = parser.textOrNull(); context.idResolved = true; } else { diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d82f8df744776..2482325835236 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -365,7 +365,7 @@ public void run() { logger.warn("[{}] failed to create", e, request.index); if (indexCreated) { // Index was already partially created - need to clean up - indicesService.deleteIndex(request.index, failureReason != null ? failureReason : "failed to create index"); + indicesService.removeIndex(request.index, failureReason != null ? 
failureReason : "failed to create index"); } listener.onFailure(e); return currentState; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 7f39522770ff8..fe92351d77b74 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -180,7 +180,7 @@ public ClusterState execute(ClusterState currentState) { } } finally { for (String index : indicesToClose) { - indicesService.cleanIndex(index, "created for alias processing"); + indicesService.removeIndex(index, "created for alias processing"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 154613055040b..9d76533bdda57 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -148,7 +148,7 @@ public ClusterState execute(ClusterState currentState) { return currentState; } finally { if (createdIndex) { - indicesService.cleanIndex(index, "created for mapping processing"); + indicesService.removeIndex(index, "created for mapping processing"); } } } @@ -209,7 +209,7 @@ public ClusterState execute(ClusterState currentState) { return currentState; } finally { if (createdIndex) { - indicesService.cleanIndex(index, "created for mapping processing"); + indicesService.removeIndex(index, "created for mapping processing"); } } } @@ -409,7 +409,7 @@ public ClusterState execute(ClusterState currentState) { return currentState; } finally { for (String index : indicesToClose) { - indicesService.cleanIndex(index, "created for mapping processing"); + indicesService.removeIndex(index, "created for mapping processing"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index d93971d28c752..cd6be27cdf792 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -143,11 +143,23 @@ public void updateSettings(final Settings pSettings, final String[] indices, fin final Settings closeSettings = updatedSettingsBuilder.build(); final Set removedSettings = Sets.newHashSet(); - for (String key : updatedSettingsBuilder.internalMap().keySet()) { - if (!dynamicSettings.hasDynamicSetting(key)) { - removedSettings.add(key); + final Set errors = Sets.newHashSet(); + for (Map.Entry setting : updatedSettingsBuilder.internalMap().entrySet()) { + if (!dynamicSettings.hasDynamicSetting(setting.getKey())) { + removedSettings.add(setting.getKey()); + } else { + String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue()); + if (error != null) { + errors.add("[" + setting.getKey() + "] - " + error); + } } } + + if (!errors.isEmpty()) { + listener.onFailure(new ElasticSearchIllegalArgumentException("can't process the settings: " + errors.toString())); + return; + } + if (!removedSettings.isEmpty()) { for (String removedSetting : removedSettings) { updatedSettingsBuilder.remove(removedSetting); diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java 
b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 1df26510d5638..79fc193a37fff 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -64,9 +64,9 @@ public class IndexShardRoutingTable implements Iterable { this.counter = new AtomicInteger(ThreadLocalRandom.current().nextInt(shards.size())); ShardRouting primary = null; - List replicas = new ArrayList(); - List activeShards = new ArrayList(); - List assignedShards = new ArrayList(); + ImmutableList.Builder replicas = ImmutableList.builder(); + ImmutableList.Builder activeShards = ImmutableList.builder(); + ImmutableList.Builder assignedShards = ImmutableList.builder(); for (ShardRouting shard : shards) { if (shard.primary()) { @@ -88,9 +88,9 @@ public class IndexShardRoutingTable implements Iterable { } else { this.primaryAsList = ImmutableList.of(); } - this.replicas = ImmutableList.copyOf(replicas); - this.activeShards = ImmutableList.copyOf(activeShards); - this.assignedShards = ImmutableList.copyOf(assignedShards); + this.replicas = replicas.build(); + this.activeShards = activeShards.build(); + this.assignedShards = assignedShards.build(); } /** @@ -137,6 +137,7 @@ public boolean primaryAllocatedPostApi() { /** * Returns the shards id + * * @return id of the shard */ public ShardId shardId() { @@ -145,6 +146,7 @@ public ShardId shardId() { /** * Returns the shards id + * * @return id of the shard */ public ShardId getShardId() { @@ -162,7 +164,7 @@ public UnmodifiableIterator iterator() { public int size() { return shards.size(); } - + /** * Returns the number of this shards instances. */ @@ -172,6 +174,7 @@ public int getSize() { /** * Returns a {@link ImmutableList} of shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList shards() { @@ -180,6 +183,7 @@ public ImmutableList shards() { /** * Returns a {@link ImmutableList} of shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList getShards() { @@ -188,6 +192,7 @@ public ImmutableList getShards() { /** * Returns a {@link ImmutableList} of active shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList activeShards() { @@ -196,6 +201,7 @@ public ImmutableList activeShards() { /** * Returns a {@link ImmutableList} of active shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList getActiveShards() { @@ -204,6 +210,7 @@ public ImmutableList getActiveShards() { /** * Returns a {@link ImmutableList} of assigned shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList assignedShards() { @@ -212,6 +219,7 @@ public ImmutableList assignedShards() { /** * Returns a {@link ImmutableList} of assigned shards + * * @return a {@link ImmutableList} of shards */ public ImmutableList getAssignedShards() { @@ -220,6 +228,7 @@ public ImmutableList getAssignedShards() { /** * Returns the number of shards in a specific state + * * @param state state of the shards to count * @return number of shards in state */ diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 43818aa2fdc9e..61699b35e14ac 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ 
b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -54,6 +54,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries"; public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries"; + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; @@ -62,8 +64,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { super(settings); - this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 4); - this.concurrentRecoveries = settings.getAsInt("cluster.routing.allocation.concurrent_recoveries", settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 2)); + this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES); + this.concurrentRecoveries = settings.getAsInt("cluster.routing.allocation.concurrent_recoveries", settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)); logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); nodeSettingsService.addListener(new ApplySettings()); diff --git a/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java index 9bf5b7114d9b7..4e90b95f082f4 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java +++ b/src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java @@ -42,6 +42,7 @@ import org.elasticsearch.indices.IndexMissingException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -143,19 +144,20 @@ public int searchShardsCount(ClusterState clusterState, String[] indices, String return count; } } + + private static final Map> EMPTY_ROUTING = Collections.emptyMap(); @Override public GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) throws IndexMissingException { if (concreteIndices == null || concreteIndices.length == 0) { concreteIndices = clusterState.metaData().concreteAllOpenIndices(); } - - if (routing != null) { + routing = routing == null ? 
EMPTY_ROUTING : routing; // just use an empty map + final Set set = new HashSet(); // we use set here and not list since we might get duplicates - HashSet set = new HashSet(); for (String index : concreteIndices) { - IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index); - Set effectiveRouting = routing.get(index); + final IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index); + final Set effectiveRouting = routing.get(index); if (effectiveRouting != null) { for (String r : effectiveRouting) { int shardId = shardId(clusterState, index, null, null, r); @@ -169,23 +171,16 @@ public GroupShardsIterator searchShards(ClusterState clusterState, String[] indi set.add(iterator); } } - } - } - return new GroupShardsIterator(set); - } else { - // we use list here since we know we are not going to create duplicates - ArrayList set = new ArrayList(); - for (String index : concreteIndices) { - IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index); - for (IndexShardRoutingTable indexShard : indexRouting) { - ShardIterator iterator = preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference); - if (iterator != null) { - set.add(iterator); + } else { + for (IndexShardRoutingTable indexShard : indexRouting) { + ShardIterator iterator = preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference); + if (iterator != null) { + set.add(iterator); + } } } } - return new GroupShardsIterator(set); - } + return new GroupShardsIterator(set); } private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable indexShard, String localNodeId, DiscoveryNodes nodes, @Nullable String preference) { diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java index ee752a9d95581..014c8c0c01d05 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java @@ -71,10 +71,15 @@ public ClusterDynamicSettingsModule() { ); } - public void addDynamicSetting(String... settings) { + public void addDynamicSettings(String... 
settings) { clusterDynamicSettings.addDynamicSettings(settings); } + public void addDynamicSetting(String setting, Validator validator) { + clusterDynamicSettings.addDynamicSetting(setting, validator); + } + + @Override protected void configure() { bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings); diff --git a/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java b/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java index 61c3e1f90376f..460b74c0126d6 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java +++ b/src/main/java/org/elasticsearch/cluster/settings/DynamicSettings.java @@ -19,20 +19,20 @@ package org.elasticsearch.cluster.settings; -import com.google.common.collect.ImmutableSet; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.regex.Regex; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Map; /** */ public class DynamicSettings { - private ImmutableSet dynamicSettings = ImmutableSet.of(); + private ImmutableMap dynamicSettings = ImmutableMap.of(); public boolean hasDynamicSetting(String key) { - for (String dynamicSetting : dynamicSettings) { + for (String dynamicSetting : dynamicSettings.keySet()) { if (Regex.simpleMatch(dynamicSetting, key)) { return true; } @@ -40,10 +40,32 @@ public boolean hasDynamicSetting(String key) { return false; } + public String validateDynamicSetting(String dynamicSetting, String value) { + for (Map.Entry setting : dynamicSettings.entrySet()) { + if (Regex.simpleMatch(dynamicSetting, setting.getKey())) { + return setting.getValue().validate(dynamicSetting, value); + } + } + return null; + } + + public synchronized void addDynamicSetting(String setting, Validator validator) { + MapBuilder updatedSettings = MapBuilder.newMapBuilder(dynamicSettings); + updatedSettings.put(setting, validator); + dynamicSettings = updatedSettings.immutableMap(); + } + + public synchronized void addDynamicSetting(String setting) { + addDynamicSetting(setting, Validator.EMPTY); + } + + public synchronized void addDynamicSettings(String... settings) { - HashSet updatedSettings = new HashSet(dynamicSettings); - updatedSettings.addAll(Arrays.asList(settings)); - dynamicSettings = ImmutableSet.copyOf(updatedSettings); + MapBuilder updatedSettings = MapBuilder.newMapBuilder(dynamicSettings); + for (String setting : settings) { + updatedSettings.put(setting, Validator.EMPTY); + } + dynamicSettings = updatedSettings.immutableMap(); } } diff --git a/src/main/java/org/elasticsearch/cluster/settings/Validator.java b/src/main/java/org/elasticsearch/cluster/settings/Validator.java new file mode 100644 index 0000000000000..cf56239f412be --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/settings/Validator.java @@ -0,0 +1,52 @@ +/* + * Licensed to ElasticSearch and Shay Banon under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.settings; + +import org.elasticsearch.ElasticSearchParseException; +import org.elasticsearch.common.unit.TimeValue; + +/** + * Validates a setting, returning a failure message if applicable. + */ +public interface Validator { + + String validate(String setting, String value); + + public static final Validator EMPTY = new Validator() { + @Override + public String validate(String setting, String value) { + return null; + } + }; + + public static final Validator TIME = new Validator() { + @Override + public String validate(String setting, String value) { + try { + if (TimeValue.parseTimeValue(value, null) == null) { + return "cannot parse value [" + value + "] as time"; + } + } catch (ElasticSearchParseException ex) { + return "cannot parse value [" + value + "] as time"; + } + return null; + } + }; +} diff --git a/src/main/java/org/elasticsearch/common/Priority.java b/src/main/java/org/elasticsearch/common/Priority.java index 033243c8da83f..48178ace514bd 100644 --- a/src/main/java/org/elasticsearch/common/Priority.java +++ b/src/main/java/org/elasticsearch/common/Priority.java @@ -19,11 +19,30 @@ package org.elasticsearch.common; +import org.elasticsearch.ElasticSearchIllegalArgumentException; + /** * */ public final class Priority implements Comparable { + public static Priority fromByte(byte b) { + switch (b) { + case 0: + return URGENT; + case 1: + return HIGH; + case 2: + return NORMAL; + case 3: + return LOW; + case 4: + return LANGUID; + default: + throw new ElasticSearchIllegalArgumentException("can't find priority for [" + b + "]"); + } + } + public static Priority URGENT = new Priority((byte) 0); public static Priority HIGH = new Priority((byte) 1); public static Priority NORMAL = new Priority((byte) 2); @@ -36,6 +55,10 @@ private Priority(byte value) { this.value = value; } + public byte value() { + return this.value; + } + public int compareTo(Priority p) { return this.value - p.value; } diff --git a/src/main/java/org/elasticsearch/common/geo/GeoJSONShapeParser.java b/src/main/java/org/elasticsearch/common/geo/GeoJSONShapeParser.java index c6863037dba2e..d457c8216256b 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoJSONShapeParser.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoJSONShapeParser.java @@ -80,14 +80,17 @@ public static Shape parse(XContentParser parser) throws IOException { String fieldName = parser.currentName(); if ("type".equals(fieldName)) { - token = parser.nextToken(); + parser.nextToken(); shapeType = parser.text().toLowerCase(Locale.ENGLISH); if (shapeType == null) { throw new ElasticSearchParseException("Unknown Shape type [" + parser.text() + "]"); } } else if ("coordinates".equals(fieldName)) { - token = parser.nextToken(); + parser.nextToken(); node = parseCoordinates(parser); + } else { + parser.nextToken(); + parser.skipChildren(); } } } diff --git a/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java b/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java new file mode 100644 index 0000000000000..09b0fc79e573a --- /dev/null +++ 
b/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java @@ -0,0 +1,34 @@ +package org.elasticsearch.common.geo; + +import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; +import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; +import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy; +import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; + +import java.lang.String; + +/** + * + */ +public enum SpatialStrategy { + + TERM("term"), + RECURSIVE("recursive"); + + private final String strategyName; + + private SpatialStrategy(String strategyName) { + this.strategyName = strategyName; + } + + public String getStrategyName() { + return strategyName; + } + + public PrefixTreeStrategy create(SpatialPrefixTree grid, String fieldName) { + if (this == TERM) { + return new TermQueryPrefixTreeStrategy(grid, fieldName); + } + return new RecursivePrefixTreeStrategy(grid, fieldName); + } +} diff --git a/src/main/java/org/elasticsearch/common/inject/Key.java b/src/main/java/org/elasticsearch/common/inject/Key.java index e8ee244fbc43e..f92ff7ec991dd 100644 --- a/src/main/java/org/elasticsearch/common/inject/Key.java +++ b/src/main/java/org/elasticsearch/common/inject/Key.java @@ -506,6 +506,6 @@ static boolean isBindingAnnotation(Annotation annotation) { static boolean isBindingAnnotation( Class annotationType) { - return annotationType.isAnnotationPresent(BindingAnnotation.class); + return annotationType.getAnnotation(BindingAnnotation.class) != null; } } diff --git a/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java b/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java index 0910c827d345d..91a889e85aa2a 100644 --- a/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java +++ b/src/main/java/org/elasticsearch/common/inject/internal/Annotations.java @@ -56,7 +56,7 @@ public static Class findScopeAnnotation(Errors errors, Ann Class found = null; for (Annotation annotation : annotations) { - if (annotation.annotationType().isAnnotationPresent(ScopeAnnotation.class)) { + if (annotation.annotationType().getAnnotation(ScopeAnnotation.class) != null) { if (found != null) { errors.duplicateScopeAnnotations(found, annotation.annotationType()); } else { @@ -69,7 +69,7 @@ public static Class findScopeAnnotation(Errors errors, Ann } public static boolean isScopeAnnotation(Class annotationType) { - return annotationType.isAnnotationPresent(ScopeAnnotation.class); + return annotationType.getAnnotation(ScopeAnnotation.class) != null; } /** @@ -107,7 +107,7 @@ public static Annotation findBindingAnnotation( Annotation found = null; for (Annotation annotation : annotations) { - if (annotation.annotationType().isAnnotationPresent(BindingAnnotation.class)) { + if (annotation.annotationType().getAnnotation(BindingAnnotation.class) != null) { if (found != null) { errors.duplicateBindingAnnotations(member, found.annotationType(), annotation.annotationType()); diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java index 603a42aa2dacf..590ccd9d68b38 100644 --- a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java +++ b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethod.java @@ -53,7 +53,7 @@ public class ProviderMethod implements ProviderWithDependencies { this.dependencies = dependencies; this.method = method; this.parameterProviders = parameterProviders; - this.exposed = 
method.isAnnotationPresent(Exposed.class); + this.exposed = method.getAnnotation(Exposed.class) != null; method.setAccessible(true); } diff --git a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java index 9192e577a7297..6c29361d90f4c 100644 --- a/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java +++ b/src/main/java/org/elasticsearch/common/inject/internal/ProviderMethodsModule.java @@ -76,7 +76,7 @@ public List> getProviderMethods(Binder binder) { List> result = Lists.newArrayList(); for (Class c = delegate.getClass(); c != Object.class; c = c.getSuperclass()) { for (Method method : c.getDeclaredMethods()) { - if (method.isAnnotationPresent(Provides.class)) { + if (method.getAnnotation(Provides.class) != null) { result.add(createProviderMethod(binder, method)); } } diff --git a/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java index c7f69ae9d0302..d730357b07632 100644 --- a/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java +++ b/src/main/java/org/elasticsearch/common/inject/spi/DefaultBindingTargetVisitor.java @@ -71,6 +71,6 @@ public V visit(ConvertedConstantBinding convertedConstantBinding) { // javac says it's an error to cast ProviderBinding to Binding @SuppressWarnings("unchecked") public V visit(ProviderBinding providerBinding) { - return visitOther((Binding) providerBinding); + return visitOther((Binding) providerBinding); } } diff --git a/src/main/java/org/elasticsearch/common/joda/DateMathParser.java b/src/main/java/org/elasticsearch/common/joda/DateMathParser.java index 7366cb0d58b36..17bf4d2d852b8 100644 --- a/src/main/java/org/elasticsearch/common/joda/DateMathParser.java +++ b/src/main/java/org/elasticsearch/common/joda/DateMathParser.java @@ -198,7 +198,11 @@ private long parseStringValue(String value) { private long parseUpperInclusiveStringValue(String value) { try { - MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC); + // we create a date time for inclusive upper range, we "include" by default the day level data + // so something like 2011-01-01 will include the full first day of 2011. + // we also use 1970-01-01 as the base for it so we can handle searches like 10:12:55 (just time) + // since when we index those, the base is 1970-01-01 + MutableDateTime dateTime = new MutableDateTime(1970, 1, 1, 23, 59, 59, 999, DateTimeZone.UTC); int location = dateTimeFormatter.parser().parseInto(dateTime, value, 0); // if we parsed all the string value, we are good if (location == value.length()) { diff --git a/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java b/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java index 3d57f85dfe015..37473eca1cd01 100644 --- a/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java +++ b/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java @@ -25,6 +25,20 @@ */ public class BytesRefs { + /** + * Converts a value to a string, taking special care if its a {@link BytesRef} to call + * {@link org.apache.lucene.util.BytesRef#utf8ToString()}. 
+ */ + public static String toString(Object value) { + if (value == null) { + return null; + } + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } + return value.toString(); + } + /** * Converts an object value to BytesRef. */ @@ -37,7 +51,7 @@ public static BytesRef toBytesRef(Object value) { } return new BytesRef(value.toString()); } - + public static BytesRef toBytesRef(Object value, BytesRef spare) { if (value == null) { return null; diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 3892d02f8a2a3..fbca3cbb56295 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -42,7 +42,7 @@ */ public class Lucene { - public static final Version VERSION = Version.LUCENE_41; + public static final Version VERSION = Version.LUCENE_42; public static final Version ANALYZER_VERSION = VERSION; public static final Version QUERYPARSER_VERSION = VERSION; @@ -57,6 +57,9 @@ public static Version parseVersion(@Nullable String version, Version defaultVers if (version == null) { return defaultVersion; } + if ("4.2".equals(version)) { + return Version.LUCENE_42; + } if ("4.1".equals(version)) { return Version.LUCENE_41; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java index 9f0eb6a86e3a0..92ecab205fae6 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/XFilteredQuery.java @@ -160,7 +160,7 @@ public String toString(String s) { */ @Override public boolean equals(Object o) { - return delegate.equals(o); + return delegate.equals(o); } /** @@ -178,12 +178,17 @@ public int hashCode() { public static final CustomRandomAccessFilterStrategy CUSTOM_FILTER_STRATEGY = new CustomRandomAccessFilterStrategy(); /** - * A {@link FilterStrategy} that conditionally uses a random access filter if - * the given {@link DocIdSet} supports random access (returns a non-null value - * from {@link DocIdSet#bits()}) and - * {@link RandomAccessFilterStrategy#useRandomAccess(Bits, int)} returns - * true. Otherwise this strategy falls back to a "zig-zag join" ( - * {@link XFilteredQuery#LEAP_FROG_FILTER_FIRST_STRATEGY}) strategy . + * Extends {@link org.apache.lucene.search.FilteredQuery.RandomAccessFilterStrategy}. + *

+ * Adds a threshold value, which defaults to -1. When set to -1, it will check if the filter docSet is + * *not* a fast docSet, and if not, it will use {@link FilteredQuery#QUERY_FIRST_FILTER_STRATEGY} (since + * the assumption is that it's a "slow" filter and better computed only on whatever matched the query). + *

+ * If the threshold value is 0, it always tries to pass "down" the filter as acceptDocs, and if the filter + * can't be represented as Bits (never really), then it uses {@link FilteredQuery#LEAP_FROG_QUERY_FIRST_STRATEGY}. + *

+ * If the above conditions are not met, then it reverts to the {@link FilteredQuery.RandomAccessFilterStrategy} logic, + * with the threshold used to control {@link #useRandomAccess(org.apache.lucene.util.Bits, int)}. */ public static class CustomRandomAccessFilterStrategy extends FilteredQuery.RandomAccessFilterStrategy { @@ -217,9 +222,19 @@ public Scorer filteredScorer(AtomicReaderContext context, boolean scoreDocsInOrd } } - return super.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet); + return super.filteredScorer(context, scoreDocsInOrder, topScorer, weight, docIdSet); } + /** + * Expert: decides if a filter should be executed as "random-access" or not. + * random-access means the filter "filters" in a similar way as deleted docs are filtered + * in Lucene. This is faster when the filter accepts many documents. + * However, when the filter is very sparse, it can be faster to execute the query+filter + * as a conjunction in some cases. + *

+ * The default implementation returns true if the first document accepted by the + * filter is < threshold, if threshold is -1 (the default), then it checks for < 100. + */ protected boolean useRandomAccess(Bits bits, int firstFilterDoc) { // "default" if (threshold == -1) { @@ -229,5 +244,5 @@ protected boolean useRandomAccess(Bits bits, int firstFilterDoc) { return firstFilterDoc < threshold; } } - + } diff --git a/src/main/java/org/elasticsearch/common/lucene/spatial/XTermQueryPrefixTreeStategy.java b/src/main/java/org/elasticsearch/common/lucene/spatial/XTermQueryPrefixTreeStategy.java deleted file mode 100644 index 66fb58dfcabf0..0000000000000 --- a/src/main/java/org/elasticsearch/common/lucene/spatial/XTermQueryPrefixTreeStategy.java +++ /dev/null @@ -1,169 +0,0 @@ -package org.elasticsearch.common.lucene.spatial; - -import java.util.List; - -import org.apache.lucene.queries.TermsFilter; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.spatial.SpatialStrategy; -import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; -import org.apache.lucene.spatial.prefix.tree.Node; -import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.apache.lucene.spatial.query.SpatialArgs; -import org.apache.lucene.spatial.query.SpatialOperation; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.geo.GeoShapeConstants; -import org.elasticsearch.common.geo.ShapeBuilder; -import org.elasticsearch.common.lucene.search.TermFilter; -import org.elasticsearch.common.lucene.search.XBooleanFilter; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.FieldMapper.Names; - -import com.spatial4j.core.context.SpatialContext; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.jts.JtsGeometry; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.operation.buffer.BufferOp; -import com.vividsolutions.jts.operation.buffer.BufferParameters; - -/** - * Implementation of {@link SpatialStrategy} that uses TermQuerys / TermFilters - * to query and filter for Shapes related to other Shapes. 
- */ -public final class XTermQueryPrefixTreeStategy extends PrefixTreeStrategy { - - private static final double WITHIN_BUFFER_DISTANCE = 0.5; - private static final BufferParameters BUFFER_PARAMETERS = new BufferParameters(3, BufferParameters.CAP_SQUARE); - private final Names fieldName; - - /** - * Creates a new XTermQueryPrefixTreeStategy - * - * @param prefixTree SpatialPrefixTree that will be used to represent Shapes - * @param fieldName Name of the field the Strategy applies to - */ - public XTermQueryPrefixTreeStategy(SpatialPrefixTree prefixTree, FieldMapper.Names fieldName) { - super(prefixTree, fieldName.indexName()); - this.fieldName = fieldName; - } - - private static double resolveDistErr(Shape shape, SpatialContext ctx, double distErrPct) { - return SpatialArgs.calcDistanceFromErrPct(shape, distErrPct, ctx); - } - - public Filter createIntersectsFilter(Shape shape) { - int detailLevel = getGrid().getLevelForDistance(resolveDistErr(shape, ctx, getDistErrPct())); - List nodes = getGrid().getNodes(shape, detailLevel, false); - - BytesRef[] nodeTerms = new BytesRef[nodes.size()]; - for (int i = 0; i < nodes.size(); i++) { - nodeTerms[i] = new BytesRef(nodes.get(i).getTokenString()); - } - return new TermsFilter(fieldName.indexName(), nodeTerms); - } - - public Query createIntersectsQuery(Shape shape) { - int detailLevel = getGrid().getLevelForDistance(resolveDistErr(shape, ctx, getDistErrPct())); - List nodes = getGrid().getNodes(shape, detailLevel, false); - - BooleanQuery query = new BooleanQuery(); - for (Node node : nodes) { - query.add(new TermQuery(fieldName.createIndexNameTerm(node.getTokenString())), - BooleanClause.Occur.SHOULD); - } - - return new ConstantScoreQuery(query); - } - - public Filter createDisjointFilter(Shape shape) { - int detailLevel = getGrid().getLevelForDistance(resolveDistErr(shape, ctx, getDistErrPct())); - List nodes = getGrid().getNodes(shape, detailLevel, false); - - XBooleanFilter filter = new XBooleanFilter(); - for (Node node : nodes) { - filter.add(new TermFilter(fieldName.createIndexNameTerm(node.getTokenString())), BooleanClause.Occur.MUST_NOT); - } - - return filter; - } - - public Query createDisjointQuery(Shape shape) { - int detailLevel = getGrid().getLevelForDistance(resolveDistErr(shape, ctx, getDistErrPct())); - List nodes = getGrid().getNodes(shape, detailLevel, false); - - BooleanQuery query = new BooleanQuery(); - query.add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD); - for (Node node : nodes) { - query.add(new TermQuery(fieldName.createIndexNameTerm(node.getTokenString())), - BooleanClause.Occur.MUST_NOT); - } - - return new ConstantScoreQuery(query); - } - - public Filter createWithinFilter(Shape shape) { - Filter intersectsFilter = createIntersectsFilter(shape); - - Geometry shapeGeometry = ShapeBuilder.toJTSGeometry(shape); - Geometry buffer = BufferOp.bufferOp(shapeGeometry, WITHIN_BUFFER_DISTANCE, BUFFER_PARAMETERS); - Shape bufferedShape = new JtsGeometry(buffer.difference(shapeGeometry), GeoShapeConstants.SPATIAL_CONTEXT, true); - Filter bufferedFilter = createIntersectsFilter(bufferedShape); - - XBooleanFilter filter = new XBooleanFilter(); - filter.add(intersectsFilter, BooleanClause.Occur.SHOULD); - filter.add(bufferedFilter, BooleanClause.Occur.MUST_NOT); - - return filter; - } - - public Query createWithinQuery(Shape shape) { - Query intersectsQuery = createIntersectsQuery(shape); - - Geometry shapeGeometry = ShapeBuilder.toJTSGeometry(shape); - Geometry buffer = BufferOp.bufferOp(shapeGeometry, 
WITHIN_BUFFER_DISTANCE, BUFFER_PARAMETERS); - Shape bufferedShape = new JtsGeometry(buffer.difference(shapeGeometry), GeoShapeConstants.SPATIAL_CONTEXT, true); - Query bufferedQuery = createIntersectsQuery(bufferedShape); - - BooleanQuery query = new BooleanQuery(); - query.add(intersectsQuery, BooleanClause.Occur.SHOULD); - query.add(bufferedQuery, BooleanClause.Occur.MUST_NOT); - - return new ConstantScoreQuery(query); - } - - @Override - public Filter makeFilter(SpatialArgs args) { - if (args.getOperation() == SpatialOperation.Intersects) { - return createIntersectsFilter(args.getShape()); - - } else if (args.getOperation() == SpatialOperation.IsWithin) { - return createWithinFilter(args.getShape()); - - } else if (args.getOperation() == SpatialOperation.IsDisjointTo) { - return createDisjointFilter(args.getShape()); - - } - throw new UnsupportedOperationException("Shape Relation [" + args.getOperation().getName() + "] not currently supported"); - } - - public Query makeQuery(SpatialArgs args) { - if (args.getOperation() == SpatialOperation.Intersects) { - return createIntersectsQuery(args.getShape()); - - } else if (args.getOperation() == SpatialOperation.IsWithin) { - return createWithinQuery(args.getShape()); - - } else if (args.getOperation() == SpatialOperation.IsDisjointTo) { - return createDisjointQuery(args.getShape()); - - } - throw new UnsupportedOperationException("Shape Relation [" + args.getOperation().getName() + "] not currently supported"); - } - -} - diff --git a/src/main/java/org/elasticsearch/common/lucene/store/BufferedChecksumIndexOutput.java b/src/main/java/org/elasticsearch/common/lucene/store/BufferedChecksumIndexOutput.java index 5ec266345a1e8..533e566435693 100644 --- a/src/main/java/org/elasticsearch/common/lucene/store/BufferedChecksumIndexOutput.java +++ b/src/main/java/org/elasticsearch/common/lucene/store/BufferedChecksumIndexOutput.java @@ -19,15 +19,15 @@ package org.elasticsearch.common.lucene.store; +import org.apache.lucene.store.BufferedIndexOutput; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.OpenBufferedIndexOutput; import java.io.IOException; import java.util.zip.Checksum; /** */ -public class BufferedChecksumIndexOutput extends OpenBufferedIndexOutput { +public class BufferedChecksumIndexOutput extends BufferedIndexOutput { private final IndexOutput out; @@ -36,7 +36,7 @@ public class BufferedChecksumIndexOutput extends OpenBufferedIndexOutput { public BufferedChecksumIndexOutput(IndexOutput out, Checksum digest) { // we add 8 to be bigger than the default BufferIndexOutput buffer size so any flush will go directly // to the output without being copied over to the delegate buffer - super(OpenBufferedIndexOutput.DEFAULT_BUFFER_SIZE + 64); + super(BufferedIndexOutput.DEFAULT_BUFFER_SIZE + 64); this.out = out; this.digest = digest; } diff --git a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java index afc14f31098fd..72a16a6f01951 100644 --- a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java +++ b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java @@ -54,11 +54,12 @@ */ public class ImmutableSettings implements Settings { - private ImmutableMap settings; + public static final Settings EMPTY = new Builder().build(); + private ImmutableMap settings; private transient ClassLoader classLoader; - private ImmutableSettings(Map settings, ClassLoader classLoader) { + ImmutableSettings(Map 
settings, ClassLoader classLoader) { this.settings = ImmutableMap.copyOf(settings); this.classLoader = classLoader; } @@ -124,9 +125,30 @@ public String get(String setting) { return settings.get(toCamelCase(setting)); } + @Override + public String get(String[] settings) { + for (String setting : settings) { + String retVal = this.settings.get(setting); + if (retVal != null) { + return retVal; + } + retVal = this.settings.get(toCamelCase(setting)); + if (retVal != null) { + return retVal; + } + } + return null; + } + @Override public String get(String setting, String defaultValue) { - String retVal = settings.get(setting); + String retVal = get(setting); + return retVal == null ? defaultValue : retVal; + } + + @Override + public String get(String[] settings, String defaultValue) { + String retVal = get(settings); return retVal == null ? defaultValue : retVal; } @@ -143,6 +165,19 @@ public Float getAsFloat(String setting, Float defaultValue) { } } + @Override + public Float getAsFloat(String[] settings, Float defaultValue) throws SettingsException { + String sValue = get(settings); + if (sValue == null) { + return defaultValue; + } + try { + return Float.parseFloat(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse float setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e); + } + } + @Override public Double getAsDouble(String setting, Double defaultValue) { String sValue = get(setting); @@ -156,6 +191,20 @@ public Double getAsDouble(String setting, Double defaultValue) { } } + @Override + public Double getAsDouble(String[] settings, Double defaultValue) { + String sValue = get(settings); + if (sValue == null) { + return defaultValue; + } + try { + return Double.parseDouble(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse double setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e); + } + } + + @Override public Integer getAsInt(String setting, Integer defaultValue) { String sValue = get(setting); @@ -169,6 +218,19 @@ public Integer getAsInt(String setting, Integer defaultValue) { } } + @Override + public Integer getAsInt(String[] settings, Integer defaultValue) { + String sValue = get(settings); + if (sValue == null) { + return defaultValue; + } + try { + return Integer.parseInt(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse int setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e); + } + } + @Override public Long getAsLong(String setting, Long defaultValue) { String sValue = get(setting); @@ -182,26 +244,59 @@ public Long getAsLong(String setting, Long defaultValue) { } } + @Override + public Long getAsLong(String[] settings, Long defaultValue) { + String sValue = get(settings); + if (sValue == null) { + return defaultValue; + } + try { + return Long.parseLong(sValue); + } catch (NumberFormatException e) { + throw new SettingsException("Failed to parse long setting [" + Arrays.toString(settings) + "] with value [" + sValue + "]", e); + } + } + @Override public Boolean getAsBoolean(String setting, Boolean defaultValue) { return Booleans.parseBoolean(get(setting), defaultValue); } + @Override + public Boolean getAsBoolean(String[] settings, Boolean defaultValue) { + return Booleans.parseBoolean(get(settings), defaultValue); + } + @Override public TimeValue getAsTime(String setting, TimeValue defaultValue) { return parseTimeValue(get(setting), defaultValue); } + @Override + public TimeValue 
getAsTime(String[] settings, TimeValue defaultValue) { + return parseTimeValue(get(settings), defaultValue); + } + @Override public ByteSizeValue getAsBytesSize(String setting, ByteSizeValue defaultValue) throws SettingsException { return parseBytesSizeValue(get(setting), defaultValue); } + @Override + public ByteSizeValue getAsBytesSize(String[] settings, ByteSizeValue defaultValue) throws SettingsException { + return parseBytesSizeValue(get(settings), defaultValue); + } + @Override public SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException { return parseSizeValue(get(setting), defaultValue); } + @Override + public SizeValue getAsSize(String[] settings, SizeValue defaultValue) throws SettingsException { + return parseSizeValue(get(settings), defaultValue); + } + @SuppressWarnings({"unchecked"}) @Override public Class getAsClass(String setting, Class defaultClazz) throws NoClassSettingsException { @@ -241,7 +336,7 @@ public Class getAsClass(String setting, Class defa try { return (Class) getClassLoader().loadClass(fullClassName); } catch (ClassNotFoundException e2) { - throw new NoClassSettingsException("Failed to load class setting [" + setting + "] with value [" + get(setting) + "]", e); + throw new NoClassSettingsException("Failed to load class setting [" + setting + "] with value [" + get(setting) + "]", e2); } } } diff --git a/src/main/java/org/elasticsearch/common/settings/Settings.java b/src/main/java/org/elasticsearch/common/settings/Settings.java index c5068e0f00ab8..778ce1402b1b7 100644 --- a/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -81,16 +81,23 @@ public interface Settings { */ String get(String setting); + /** + * Returns the setting value associated with the first setting key. + */ + String get(String[] settings); + /** * Returns the setting value associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The setting value, or the default value if no value exists */ String get(String setting, String defaultValue); + /** + * Returns the setting value associated with the first setting key, if none exists, + * returns the default value provided. + */ + String get(String[] settings, String defaultValue); + /** * Returns group settings for the given setting prefix. */ @@ -99,94 +106,99 @@ public interface Settings { /** * Returns the setting value (as float) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (float) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting */ Float getAsFloat(String setting, Float defaultValue) throws SettingsException; + /** + * Returns the setting value (as float) associated with teh first setting key, if none + * exists, returns the default value provided. + */ + Float getAsFloat(String[] settings, Float defaultValue) throws SettingsException; + /** * Returns the setting value (as double) associated with the setting key. If it does not exists, * returns the default value provided. 
- * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (double) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting */ Double getAsDouble(String setting, Double defaultValue) throws SettingsException; + /** + * Returns the setting value (as double) associated with teh first setting key, if none + * exists, returns the default value provided. + */ + Double getAsDouble(String[] settings, Double defaultValue) throws SettingsException; + /** * Returns the setting value (as int) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (int) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting */ Integer getAsInt(String setting, Integer defaultValue) throws SettingsException; + /** + * Returns the setting value (as int) associated with the first setting key. If it does not exists, + * returns the default value provided. + */ + Integer getAsInt(String[] settings, Integer defaultValue) throws SettingsException; + /** * Returns the setting value (as long) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (long) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting */ Long getAsLong(String setting, Long defaultValue) throws SettingsException; + /** + * Returns the setting value (as long) associated with the setting key. If it does not exists, + * returns the default value provided. + */ + Long getAsLong(String[] settings, Long defaultValue) throws SettingsException; + /** * Returns the setting value (as boolean) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (boolean) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting */ Boolean getAsBoolean(String setting, Boolean defaultValue) throws SettingsException; + /** + * Returns the setting value (as boolean) associated with the setting key. If it does not exists, + * returns the default value provided. + */ + Boolean getAsBoolean(String[] settings, Boolean defaultValue) throws SettingsException; + /** * Returns the setting value (as time) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (time) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting - * @see TimeValue#parseTimeValue(String, org.elasticsearch.common.unit.TimeValue) */ TimeValue getAsTime(String setting, TimeValue defaultValue) throws SettingsException; + /** + * Returns the setting value (as time) associated with the setting key. If it does not exists, + * returns the default value provided. 
+ */ + TimeValue getAsTime(String[] settings, TimeValue defaultValue) throws SettingsException; + /** * Returns the setting value (as size) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (size) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting - * @see org.elasticsearch.common.unit.ByteSizeValue#parseBytesSizeValue(String, org.elasticsearch.common.unit.ByteSizeValue) */ ByteSizeValue getAsBytesSize(String setting, ByteSizeValue defaultValue) throws SettingsException; /** * Returns the setting value (as size) associated with the setting key. If it does not exists, * returns the default value provided. - * - * @param setting The setting key - * @param defaultValue The value to return if no value is associated with the setting - * @return The (size) value, or the default value if no value exists. - * @throws SettingsException Failure to parse the setting - * @see org.elasticsearch.common.unit.ByteSizeValue#parseBytesSizeValue(String, org.elasticsearch.common.unit.ByteSizeValue) + */ + ByteSizeValue getAsBytesSize(String[] settings, ByteSizeValue defaultValue) throws SettingsException; + + /** + * Returns the setting value (as size) associated with the setting key. If it does not exists, + * returns the default value provided. */ SizeValue getAsSize(String setting, SizeValue defaultValue) throws SettingsException; + /** + * Returns the setting value (as size) associated with the setting key. If it does not exists, + * returns the default value provided. + */ + SizeValue getAsSize(String[] settings, SizeValue defaultValue) throws SettingsException; + /** * Returns the setting value (as a class) associated with the setting key. If it does not exists, * returns the default class provided. 
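The new String[]-keyed getters above resolve a value from the first key that is actually set, which is how a renamed setting can keep honoring its legacy name. A minimal sketch of that lookup order, assuming purely illustrative key names (they are not taken from this change):

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class FallbackSettingKeysExample {
    public static void main(String[] args) {
        // only the legacy key is defined in this settings instance
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("index.example.flush_threshold", 5000)          // hypothetical legacy key
                .build();

        // the array variant returns the value of the first key that resolves,
        // falling back to the provided default if none of them is set
        Integer threshold = settings.getAsInt(
                new String[]{"index.example.flush_threshold_ops",    // preferred key, not set here
                        "index.example.flush_threshold"},            // legacy key, set above
                1000);

        System.out.println(threshold);                               // prints 5000
    }
}
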
diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index 2c77cd5ccdd7e..5117a4c320d39 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -62,44 +62,29 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem private static final byte[] INTERNAL_HEADER = new byte[]{1, 9, 8, 4}; private final String address; - private final int port; - private final String group; - private final int bufferSize; - private final int ttl; private final ThreadPool threadPool; - private final TransportService transportService; - private final ClusterName clusterName; - private final NetworkService networkService; + private volatile DiscoveryNodesProvider nodesProvider; private final boolean pingEnabled; - - private volatile DiscoveryNodesProvider nodesProvider; - private volatile Receiver receiver; - private volatile Thread receiverThread; - - private MulticastSocket multicastSocket; - + private volatile MulticastSocket multicastSocket; private DatagramPacket datagramPacketSend; - private DatagramPacket datagramPacketReceive; private final AtomicInteger pingIdGenerator = new AtomicInteger(); - private final Map> receivedResponses = newConcurrentMap(); private final Object sendMutex = new Object(); - private final Object receiveMutex = new Object(); public MulticastZenPing(ThreadPool threadPool, TransportService transportService, ClusterName clusterName) { @@ -376,7 +361,23 @@ public void run() { continue; } catch (Exception e) { if (running) { - logger.warn("failed to receive packet", e); + if (multicastSocket.isClosed()) { + logger.warn("multicast socket closed while running, restarting..."); + // for some reason, the socket got closed on us while we are still running + // make a best effort in trying to start the multicast socket again... 
+ threadPool.generic().execute(new Runnable() { + @Override + public void run() { + MulticastZenPing.this.stop(); + MulticastZenPing.this.start(); + } + }); + running = false; + return; + } else { + logger.warn("failed to receive packet, throttling...", e); + Thread.sleep(500); + } } continue; } @@ -422,7 +423,9 @@ public void run() { handleNodePingRequest(id, requestingNodeX, clusterName); } } catch (Exception e) { - logger.warn("unexpected exception in multicast receiver", e); + if (running) { + logger.warn("unexpected exception in multicast receiver", e); + } } } } diff --git a/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java index 08573905e80fb..6401b36231642 100644 --- a/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/local/state/meta/LocalGatewayMetaState.java @@ -219,7 +219,7 @@ public void clusterChanged(ClusterChangedEvent event) { continue; } if (!newMetaData.hasIndex(current.index())) { - logger.debug("[{}] deleting index that is no longer part of the metadata"); + logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keySet()); FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index()))); } } diff --git a/src/main/java/org/elasticsearch/http/HttpServer.java b/src/main/java/org/elasticsearch/http/HttpServer.java index dea57b11d5d00..a21b228861824 100644 --- a/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/src/main/java/org/elasticsearch/http/HttpServer.java @@ -21,15 +21,12 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticSearchException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.env.Environment; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.plugins.PluginsHelper; import org.elasticsearch.rest.*; import java.io.File; @@ -37,7 +34,6 @@ import java.util.HashMap; import java.util.Map; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.rest.RestStatus.*; /** @@ -148,39 +144,17 @@ void handlePluginSite(HttpRequest request, HttpChannel channel) { channel.sendResponse(new StringRestResponse(FORBIDDEN)); return; } + // TODO for a "/_plugin" endpoint, we should have a page that lists all the plugins? 
String path = request.rawPath().substring("/_plugin/".length()); int i1 = path.indexOf('/'); String pluginName; String sitePath; if (i1 == -1) { - // If user tries to reach "/_plugin/" endpoint, we display a page that lists all the plugins #2664 - if (!Strings.hasText(path)) { - try { - XContentBuilder json = jsonBuilder(); - if (request.hasParam("pretty")) { - json.prettyPrint(); - } - json.startObject().startArray("sites"); - - for(String plugin : PluginsHelper.sitePlugins(environment)) { - json.startObject() - .field("name", plugin) - .field("url", "/_plugin/" + plugin +"/") - .endObject(); - } - json.endArray().endObject(); - channel.sendResponse(new BytesRestResponse(json.bytes().toBytes(), - guessMimeType(guessMimeType("index.json")))); - } catch (IOException e) { - channel.sendResponse(new StringRestResponse(INTERNAL_SERVER_ERROR)); - } - - return; - } - + pluginName = path; + sitePath = null; // If a trailing / is missing, we redirect to the right page #2654 - channel.sendResponse(new HttpRedirectRestResponse(request.rawPath()+"/")); + channel.sendResponse(new HttpRedirectRestResponse(request.rawPath() + "/")); return; } else { pluginName = path.substring(0, i1); @@ -217,6 +191,7 @@ void handlePluginSite(HttpRequest request, HttpChannel channel) { } } + // TODO: Don't respond with a mime type that violates the request's Accept header private String guessMimeType(String path) { int lastDot = path.lastIndexOf('.'); diff --git a/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 35d1638f74ab1..013d3f19a5d9a 100644 --- a/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.jmx.ManagedGroupName; - -import static org.elasticsearch.index.IndexServiceManagement.buildIndexGroupName; /** * @@ -77,9 +74,4 @@ public Index index() { public String nodeName() { return indexSettings.get("name", ""); } - - @ManagedGroupName - private String managementGroupName() { - return buildIndexGroupName(index); - } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java b/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java index 07daa651248cb..bae9a0b89b211 100644 --- a/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java +++ b/src/main/java/org/elasticsearch/index/CloseableIndexComponent.java @@ -29,9 +29,6 @@ public interface CloseableIndexComponent { /** * Closes the index component. A boolean indicating if its part of an actual index * deletion or not is passed. - * - * @param delete true if the index is being deleted. 
- * @throws ElasticSearchException */ - void close(boolean delete) throws ElasticSearchException; + void close() throws ElasticSearchException; } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/IndexModule.java b/src/main/java/org/elasticsearch/index/IndexModule.java index 0052628c0350b..c664081b02f09 100644 --- a/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/src/main/java/org/elasticsearch/index/IndexModule.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.service.IndexService; import org.elasticsearch.index.service.InternalIndexService; -import org.elasticsearch.jmx.JmxService; /** * @@ -39,8 +38,5 @@ public IndexModule(Settings settings) { @Override protected void configure() { bind(IndexService.class).to(InternalIndexService.class).asEagerSingleton(); - if (JmxService.shouldExport(settings)) { - bind(IndexServiceManagement.class).asEagerSingleton(); - } } } diff --git a/src/main/java/org/elasticsearch/index/IndexServiceManagement.java b/src/main/java/org/elasticsearch/index/IndexServiceManagement.java deleted file mode 100644 index 32799fe5ac62a..0000000000000 --- a/src/main/java/org/elasticsearch/index/IndexServiceManagement.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to ElasticSearch and Shay Banon under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index; - -import org.elasticsearch.common.component.CloseableComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.service.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.jmx.JmxService; -import org.elasticsearch.jmx.MBean; -import org.elasticsearch.jmx.ManagedAttribute; - -/** - * - */ -@MBean(objectName = "", description = "") -public class IndexServiceManagement extends AbstractIndexComponent implements CloseableComponent { - - public static String buildIndexGroupName(Index index) { - return "service=indices,index=" + index.name(); - } - - private final JmxService jmxService; - - private final IndexService indexService; - - @Inject - public IndexServiceManagement(Index index, @IndexSettings Settings indexSettings, JmxService jmxService, IndexService indexService) { - super(index, indexSettings); - this.jmxService = jmxService; - this.indexService = indexService; - } - - public void close() { - jmxService.unregisterGroup(buildIndexGroupName(indexService.index())); - } - - @ManagedAttribute(description = "Index Name") - public String getIndex() { - return indexService.index().name(); - } -} diff --git a/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java index 3a61bf2dc6c15..b72b6507a4803 100644 --- a/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java @@ -32,32 +32,83 @@ */ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { - private final int maxShingleSize; - - private final boolean outputUnigrams; - - private Boolean outputUnigramsIfNoShingles; - - private String tokenSeparator; - - private int minShingleSize; + private final Factory factory; @Inject public ShingleTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) { super(index, indexSettings, name, settings); - maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE); - minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE); - outputUnigrams = settings.getAsBoolean("output_unigrams", true); - outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false); - tokenSeparator = settings.get("token_separator", ShingleFilter.TOKEN_SEPARATOR); + Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE); + Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE); + Boolean outputUnigrams = settings.getAsBoolean("output_unigrams", true); + Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false); + String tokenSeparator = settings.get("token_separator", ShingleFilter.TOKEN_SEPARATOR); + factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator); } + @Override public TokenStream create(TokenStream tokenStream) { - ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize); - filter.setOutputUnigrams(outputUnigrams); - filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles); - filter.setTokenSeparator(tokenSeparator); - return 
filter; + return factory.create(tokenStream); + } + + + public Factory getInnerFactory() { + return this.factory; + } + + public static final class Factory implements TokenFilterFactory { + private final int maxShingleSize; + + private final boolean outputUnigrams; + + private final boolean outputUnigramsIfNoShingles; + + private final String tokenSeparator; + + private int minShingleSize; + + private final String name; + + public Factory(String name) { + this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.TOKEN_SEPARATOR); + } + + Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator) { + this.maxShingleSize = maxShingleSize; + this.outputUnigrams = outputUnigrams; + this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles; + this.tokenSeparator = tokenSeparator; + this.minShingleSize = minShingleSize; + this.name = name; + } + + public TokenStream create(TokenStream tokenStream) { + ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize); + filter.setOutputUnigrams(outputUnigrams); + filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles); + filter.setTokenSeparator(tokenSeparator); + return filter; + } + + public int getMaxShingleSize() { + return maxShingleSize; + } + + public int getMinShingleSize() { + return minShingleSize; + } + + public boolean getOutputUnigrams() { + return outputUnigrams; + } + + public boolean getOutputUnigramsIfNoShingles() { + return outputUnigramsIfNoShingles; + } + + @Override + public String name() { + return name; + } } } diff --git a/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index ca5d0e06d7658..727971f57ecb4 100644 --- a/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -20,8 +20,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene40.Lucene40Codec; -import org.apache.lucene.codecs.lucene41.Lucene41Codec; +import org.apache.lucene.codecs.lucene42.Lucene42Codec; import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider; import org.elasticsearch.index.mapper.MapperService; @@ -34,7 +33,7 @@ * configured for a specific field the default postings format is used. 
*/ // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene41Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene42Codec { private final MapperService mapperService; private final PostingsFormat defaultPostingFormat; diff --git a/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java index 5bac2327bb9ab..2bf14e0c068d9 100644 --- a/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java +++ b/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java @@ -111,7 +111,7 @@ public BloomFilteredFieldsProducer(SegmentReadState state) IndexInput bloomIn = null; boolean success = false; try { - bloomIn = state.dir.openInput(bloomFileName, state.context); + bloomIn = state.directory.openInput(bloomFileName, state.context); CodecUtil.checkHeader(bloomIn, BLOOM_CODEC_NAME, BLOOM_CODEC_VERSION, BLOOM_CODEC_VERSION); // // Load the hash function used in the BloomFilter @@ -209,7 +209,7 @@ public TermsEnum iterator(TermsEnum reuse) throws IOException { } @Override - public Comparator<BytesRef> getComparator() throws IOException { + public Comparator<BytesRef> getComparator() { return delegateTerms.getComparator(); } diff --git a/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java index 45bd34a058804..94fa150145803 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java @@ -64,6 +64,11 @@ public interface AtomicFieldData