Remove BytesArray and BytesReference usage from XContentFactory #29151

Merged 2 commits on Mar 20, 2018

@@ -133,7 +133,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
*/
@Deprecated
public PercolateQueryBuilder(String field, String documentType, BytesReference document) {
this(field, documentType, Collections.singletonList(document), XContentFactory.xContentType(document));
this(field, documentType, Collections.singletonList(document), XContentHelper.xContentType(document));
}

/**
@@ -276,7 +276,7 @@ public PercolateQueryBuilder(String field, String documentType, String indexedDo
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
documentXContentType = in.readEnum(XContentType.class);
} else {
documentXContentType = XContentFactory.xContentType(documents.iterator().next());
documentXContentType = XContentHelper.xContentType(documents.iterator().next());
}
} else {
documentXContentType = null;
@@ -525,7 +525,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) {
return this; // not executed yet
} else {
return new PercolateQueryBuilder(field, documentType, Collections.singletonList(source),
XContentFactory.xContentType(source));
XContentHelper.xContentType(source));
}
}
GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentType, indexedDocumentId);
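
For context on the pattern this file and several files below share in readFrom: streams from nodes on or after 5.3.0 carry the XContentType explicitly, while older streams require guessing it from the raw bytes. A minimal, illustrative sketch of that read-side logic (the helper class and method name are hypothetical, not part of this change):

import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

class XContentTypeCompat {
    // Hypothetical helper showing the version-gated read: newer streams ship the enum,
    // older streams fall back to sniffing the payload bytes.
    static XContentType readXContentType(StreamInput in, BytesReference payload) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            return in.readEnum(XContentType.class);      // the type was serialized explicitly
        } else {
            return XContentHelper.xContentType(payload); // legacy stream: guess from the bytes
        }
    }
}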

@@ -125,7 +125,7 @@ public void readFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = in.readEnum(XContentType.class);
} else {
xContentType = XContentFactory.xContentType(content);
xContentType = XContentHelper.xContentType(content);
}
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
context = in.readOptionalString();

@@ -25,7 +25,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
@@ -43,7 +43,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
*/
@Deprecated
public PutPipelineRequest(String id, BytesReference source) {
this(id, source, XContentFactory.xContentType(source));
this(id, source, XContentHelper.xContentType(source));
}

/**
@@ -83,7 +83,7 @@ public void readFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = in.readEnum(XContentType.class);
} else {
xContentType = XContentFactory.xContentType(source);
xContentType = XContentHelper.xContentType(source);
}
}


@@ -25,8 +25,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.ingest.ConfigurationUtils;
@@ -56,7 +55,7 @@ public class SimulatePipelineRequest extends ActionRequest {
*/
@Deprecated
public SimulatePipelineRequest(BytesReference source) {
this(source, XContentFactory.xContentType(source));
this(source, XContentHelper.xContentType(source));
}

/**
@@ -78,7 +77,7 @@ public SimulatePipelineRequest(BytesReference source, XContentType xContentType)
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = in.readEnum(XContentType.class);
} else {
xContentType = XContentFactory.xContentType(source);
xContentType = XContentHelper.xContentType(source);
}
}


@@ -35,7 +35,7 @@
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@@ -265,7 +265,7 @@ public TermVectorsRequest doc(XContentBuilder documentBuilder) {
*/
@Deprecated
public TermVectorsRequest doc(BytesReference doc, boolean generateRandomId) {
return this.doc(doc, generateRandomId, XContentFactory.xContentType(doc));
return this.doc(doc, generateRandomId, XContentHelper.xContentType(doc));
}

/**
@@ -518,7 +518,7 @@ public void readFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = in.readEnum(XContentType.class);
} else {
xContentType = XContentFactory.xContentType(doc);
xContentType = XContentHelper.xContentType(doc);
}
}
routing = in.readOptionalString();

@@ -24,7 +24,7 @@
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
@@ -44,11 +44,11 @@ public static Compressor compressor(BytesReference bytes) {
// bytes should be either detected as compressed or as xcontent,
// if we have bytes that can be either detected as compressed or
// as a xcontent, we have a problem
assert XContentFactory.xContentType(bytes) == null;
assert XContentHelper.xContentType(bytes) == null;
return COMPRESSOR;
}

XContentType contentType = XContentFactory.xContentType(bytes);
XContentType contentType = XContentHelper.xContentType(bytes);
if (contentType == null) {
if (isAncient(bytes)) {
throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
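
A rough sketch of how callers typically combine this compressor detection with content-type detection: if a compressor matches, sniff the decompressed stream, otherwise sniff the raw bytes. The helper is illustrative only, and the BufferedInputStream wrapping assumes the stream variant of xContentType needs mark/reset support:

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

class SniffPossiblyCompressed {
    // Illustrative helper: detect the content type of a payload that may be compressed.
    static XContentType sniff(BytesReference bytes) throws IOException {
        Compressor compressor = CompressorFactory.compressor(bytes); // null when not compressed
        if (compressor == null) {
            return XContentHelper.xContentType(bytes);
        }
        try (InputStream raw = compressor.streamInput(bytes.streamInput())) {
            // xContentType(InputStream) peeks via mark/reset, so ensure marks are supported
            InputStream in = raw.markSupported() ? raw : new BufferedInputStream(raw);
            return XContentFactory.xContentType(in);
        }
    }
}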

@@ -22,9 +22,6 @@
import com.fasterxml.jackson.dataformat.cbor.CBORConstants;
import com.fasterxml.jackson.dataformat.smile.SmileConstants;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
@@ -221,18 +218,6 @@ public static XContent xContent(byte[] data, int offset, int length) {
return xContent(type);
}

/**
* Guesses the content type based on the provided bytes.
*
* @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type.
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/
@Deprecated
public static XContentType xContentType(byte[] data) {
return xContentType(data, 0, data.length);
}

/**
* Guesses the content type based on the provided input stream without consuming it.
*
@@ -248,8 +233,15 @@ public static XContentType xContentType(InputStream si) throws IOException {
si.mark(GUESS_HEADER_LENGTH);
try {
final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH];
final int read = Streams.readFully(si, firstBytes);
return xContentType(new BytesArray(firstBytes, 0, read));
int read = 0;
while (read < GUESS_HEADER_LENGTH) {
final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read);
if (r == -1) {
break;
}
read += r;
}
return xContentType(firstBytes, 0, read);
} finally {
si.reset();
}
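
A small usage sketch for the stream variant rewritten above, assuming the supplied stream supports mark/reset (ByteArrayInputStream does): only the first GUESS_HEADER_LENGTH bytes are peeked at, and the stream is reset afterwards so a parser can still consume it from the start.

import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

class StreamSniffExample {
    public static void main(String[] args) throws IOException {
        byte[] payload = "{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8);
        ByteArrayInputStream in = new ByteArrayInputStream(payload); // supports mark/reset
        XContentType type = XContentFactory.xContentType(in);        // peeks at the header bytes
        System.out.println(type);                                    // JSON
        System.out.println(in.available() == payload.length);        // true: the stream was reset
    }
}
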
@@ -263,24 +255,8 @@ public static XContentType xContentType(InputStream si) throws IOException {
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/
@Deprecated
public static XContentType xContentType(byte[] data, int offset, int length) {
return xContentType(new BytesArray(data, offset, length));
}

/**
* Guesses the content type based on the provided bytes and returns the corresponding {@link XContent}
*
* @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type.
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/
@Deprecated
public static XContent xContent(BytesReference bytes) {
XContentType type = xContentType(bytes);
if (type == null) {
throw new ElasticsearchParseException("Failed to derive xcontent");
}
return xContent(type);
public static XContentType xContentType(byte[] bytes) {
return xContentType(bytes, 0, bytes.length);
}

/**
@@ -291,19 +267,21 @@ public static XContent xContent(BytesReference bytes) {
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/
@Deprecated
public static XContentType xContentType(BytesReference bytes) {
int length = bytes.length();
if (length == 0) {
public static XContentType xContentType(byte[] bytes, int offset, int length) {
int totalLength = bytes.length;
if (totalLength == 0 || length == 0) {
return null;
} else if ((offset + length) > totalLength) {
return null;
}
byte first = bytes.get(0);
byte first = bytes[offset];
if (first == '{') {
return XContentType.JSON;
}
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes.get(1) == SmileConstants.HEADER_BYTE_2 && bytes.get(2) == SmileConstants.HEADER_BYTE_3) {
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
return XContentType.SMILE;
}
if (length > 2 && first == '-' && bytes.get(1) == '-' && bytes.get(2) == '-') {
if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') {
return XContentType.YAML;
}
// CBOR logic similar to CBORFactory#hasCBORFormat
@@ -312,7 +290,7 @@ public static XContentType xContentType(BytesReference bytes) {
}
if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, first) && length > 2) {
// Actually, specific "self-describe tag" is a very good indicator
if (first == (byte) 0xD9 && bytes.get(1) == (byte) 0xD9 && bytes.get(2) == (byte) 0xF7) {
if (first == (byte) 0xD9 && bytes[offset + 1] == (byte) 0xD9 && bytes[offset + 2] == (byte) 0xF7) {
return XContentType.CBOR;
}
}
@@ -324,13 +302,13 @@

int jsonStart = 0;
// JSON may be preceded by UTF-8 BOM
if (length > 3 && first == (byte) 0xEF && bytes.get(1) == (byte) 0xBB && bytes.get(2) == (byte) 0xBF) {
if (length > 3 && first == (byte) 0xEF && bytes[offset + 1] == (byte) 0xBB && bytes[offset + 2] == (byte) 0xBF) {
jsonStart = 3;
}

// a last chance for JSON
for (int i = jsonStart; i < length; i++) {
byte b = bytes.get(i);
byte b = bytes[offset + i];
if (b == '{') {
return XContentType.JSON;
}
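
To make the detection rules above concrete, a hedged example of the new byte[]/offset/length variant: JSON starts with '{', SMILE with the ':', ')', '\n' header bytes, YAML with '---'. The last call is only meant to show that the offset window is honored:

import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

import java.nio.charset.StandardCharsets;

class ByteSniffExample {
    public static void main(String[] args) {
        byte[] json = "{\"field\":1}".getBytes(StandardCharsets.UTF_8);
        byte[] yaml = "---\nfield: 1\n".getBytes(StandardCharsets.UTF_8);
        byte[] smile = new byte[] { (byte) ':', (byte) ')', (byte) '\n', 0 }; // header plus version byte

        System.out.println(XContentFactory.xContentType(json, 0, json.length));   // JSON
        System.out.println(XContentFactory.xContentType(yaml, 0, yaml.length));   // YAML
        System.out.println(XContentFactory.xContentType(smile, 0, smile.length)); // SMILE

        // A window that starts past the leading '{' no longer looks like JSON and
        // falls through to null.
        System.out.println(XContentFactory.xContentType(json, 1, json.length - 1)); // null
    }
}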

@@ -19,6 +19,7 @@

package org.elasticsearch.common.xcontent;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@@ -55,7 +56,7 @@ public static XContentParser createParser(NamedXContentRegistry xContentRegistry
final XContentType contentType = XContentFactory.xContentType(compressedInput);
return XContentFactory.xContent(contentType).createParser(xContentRegistry, deprecationHandler, compressedInput);
} else {
return XContentFactory.xContent(bytes).createParser(xContentRegistry, deprecationHandler, bytes.streamInput());
return XContentFactory.xContent(xContentType(bytes)).createParser(xContentRegistry, deprecationHandler, bytes.streamInput());
}
}

@@ -151,7 +152,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson) t

@Deprecated
public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException {
return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes));
return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes.toBytesRef().bytes));
}

public static String convertToJson(BytesReference bytes, boolean reformatJson, XContentType xContentType) throws IOException {
@@ -436,4 +437,17 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon
return BytesReference.bytes(builder);
}
}

/**
* Guesses the content type based on the provided bytes.
*
* @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type.
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/
@Deprecated
public static XContentType xContentType(BytesReference bytes) {
BytesRef br = bytes.toBytesRef();
return XContentFactory.xContentType(br.bytes, br.offset, br.length);
}
}
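
A short usage sketch of the helper added here, mirroring the createParser change above: detect the type of an arbitrary BytesReference, then ask XContentFactory for the matching parser. The empty registry and logging deprecation handler are stand-ins for whatever the call site already has:

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

class HelperSniffExample {
    public static void main(String[] args) throws IOException {
        BytesReference bytes = new BytesArray("{\"user\":\"kimchy\"}");
        XContentType type = XContentHelper.xContentType(bytes); // JSON
        try (InputStream stream = bytes.streamInput();
             XContentParser parser = XContentFactory.xContent(type)
                     .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
            Map<String, Object> map = parser.map();
            System.out.println(map); // {user=kimchy}
        }
    }
}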

@@ -48,6 +48,7 @@
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@@ -224,7 +225,7 @@ public Item(@Nullable String index, @Nullable String type, XContentBuilder doc)
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = in.readEnum(XContentType.class);
} else {
xContentType = XContentFactory.xContentType(doc);
xContentType = XContentHelper.xContentType(doc);
}
} else {
id = in.readString();

@@ -38,6 +38,7 @@
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
@@ -186,7 +187,7 @@ protected ScoreFunction doToFunction(QueryShardContext context) throws IOExcepti
AbstractDistanceScoreFunction scoreFunction;
// EMPTY is safe because parseVariable doesn't use namedObject
try (InputStream stream = functionBytes.streamInput();
XContentParser parser = XContentFactory.xContent(functionBytes)
XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(functionBytes))
.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
scoreFunction = parseVariable(fieldName, parser, context, multiValueMode);
}

@@ -38,7 +38,7 @@
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
@@ -236,7 +236,7 @@ public BytesReference getSource() {

@Override
public XContentType getXContentType() {
return XContentFactory.xContentType(source);
return XContentHelper.xContentType(source);
}
@Override
public long getVersion() {

@@ -23,7 +23,6 @@
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
@@ -37,7 +36,6 @@
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.core.internal.io.IOUtils;
import org.apache.lucene.util.ThreadInterruptedException;
import org.elasticsearch.Assertions;
import org.elasticsearch.ElasticsearchException;
@@ -66,7 +64,8 @@
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
@@ -1238,7 +1237,8 @@ public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine
// autoGeneratedID docs that are coming from the primary are updated correctly.
result = applyIndexOperation(index.seqNo(), index.primaryTerm(), index.version(),
index.versionType().versionTypeForReplicationAndRecovery(), index.getAutoGeneratedIdTimestamp(), true, origin,
source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source()))
source(shardId.getIndexName(), index.type(), index.id(), index.source(),
XContentHelper.xContentType(index.source()))
.routing(index.routing()).parent(index.parent()), onMappingUpdate);
break;
case DELETE: