@@ -0,0 +1,87 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.indexing.common.actions;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.druid.indexing.common.task.Task;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.metadata.ReplaceTaskLock;
import org.apache.druid.timeline.DataSegment;

import java.util.List;
import java.util.Map;
import java.util.Set;

public class SegmentUpgradeAction implements TaskAction<Integer>
{
private final String dataSource;
private final List<DataSegment> upgradeSegments;

@JsonCreator
public SegmentUpgradeAction(
@JsonProperty("dataSource") String dataSource,
@JsonProperty("upgradeSegments") List<DataSegment> upgradeSegments
)
{
this.dataSource = dataSource;
this.upgradeSegments = upgradeSegments;
}

@JsonProperty
public String getDataSource()
{
return dataSource;
}

@JsonProperty
public List<DataSegment> getUpgradeSegments()
{
return upgradeSegments;
}

@Override
public TypeReference<Integer> getReturnTypeReference()
{
return new TypeReference<>()
{
};
}

@Override
public Integer perform(Task task, TaskActionToolbox toolbox)
{
final String datasource = task.getDataSource();
final Map<DataSegment, ReplaceTaskLock> segmentToReplaceLock
= TaskLocks.findReplaceLocksCoveringSegments(datasource, toolbox.getTaskLockbox(), Set.copyOf(upgradeSegments));

if (segmentToReplaceLock.size() < upgradeSegments.size()) {
throw new IAE(
"Not all segments are hold by a replace lock, only [%d] segments out of total segments[%d] are hold by repalce lock",
segmentToReplaceLock.size(),
upgradeSegments.size()
);
}

return toolbox.getIndexerMetadataStorageCoordinator()
.insertIntoUpgradeSegmentsTable(segmentToReplaceLock);
}
}
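
For context, TaskAction carries Jackson's polymorphic type annotations, and the next hunk registers this class under the type name "segmentUpgrade", so the action should round-trip through JSON roughly as sketched below. This is a minimal sketch using a plain ObjectMapper and an empty segment list; Druid's injected, fully configured mapper is what actually serializes task actions, and the demo class name is hypothetical.

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.indexing.common.actions.SegmentUpgradeAction;
import org.apache.druid.indexing.common.actions.TaskAction;

import java.util.Collections;

public class SegmentUpgradeActionJsonSketch
{
  public static void main(String[] args) throws Exception
  {
    // Plain mapper for illustration only; Druid uses its own configured mapper.
    final ObjectMapper mapper = new ObjectMapper();
    final SegmentUpgradeAction action =
        new SegmentUpgradeAction("wikipedia", Collections.emptyList());

    // Expected shape, with the type name taken from the @JsonSubTypes registration:
    // {"type":"segmentUpgrade","dataSource":"wikipedia","upgradeSegments":[]}
    final String json = mapper.writeValueAsString(action);

    // Deserializing against the base type resolves back to SegmentUpgradeAction.
    final TaskAction<?> roundTripped = mapper.readValue(json, TaskAction.class);
    System.out.println(json + " -> " + roundTripped.getClass().getSimpleName());
  }
}
```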
@@ -37,6 +37,7 @@
@JsonSubTypes.Type(name = "segmentTransactionalInsert", value = SegmentTransactionalInsertAction.class),
@JsonSubTypes.Type(name = "segmentTransactionalAppend", value = SegmentTransactionalAppendAction.class),
@JsonSubTypes.Type(name = "segmentTransactionalReplace", value = SegmentTransactionalReplaceAction.class),
@JsonSubTypes.Type(name = "segmentUpgrade", value = SegmentUpgradeAction.class),
@JsonSubTypes.Type(name = "retrieveSegmentsById", value = RetrieveSegmentsByIdAction.class),
@JsonSubTypes.Type(name = "retrieveUpgradedFromSegmentIds", value = RetrieveUpgradedFromSegmentIdsAction.class),
@JsonSubTypes.Type(name = "retrieveUpgradedToSegmentIds", value = RetrieveUpgradedToSegmentIdsAction.class),
@@ -20,10 +20,12 @@
package org.apache.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonCreator;
+ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.indexing.common.LockGranularity;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.JodaUtils;
+ import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.timeline.DataSegment;
import org.joda.time.Interval;
@@ -42,18 +44,52 @@ public class CompactionIntervalSpec implements CompactionInputSpec

private final Interval interval;
@Nullable
private final List<SegmentDescriptor> uncompactedSegments;
/**
* Optional hash of all segment IDs for validation. When set, this is used in {@link #validateSegments} to verify
* that the segments haven't changed since this spec was created.
* <p>
* Note: This hash is computed and validated against ALL segments overlapping the interval, not just the
* uncompactedSegments. This is because compaction operates on all segments within the interval - compacted
* segments may need to be rewritten alongside uncompacted ones to maintain proper partitioning and sort order.
* Therefore, the validation check must apply to all segments to ensure correctness.
*/
@Nullable
private final String sha256OfSortedSegmentIds;

public CompactionIntervalSpec(Interval interval, String sha256OfSortedSegmentIds)
{
this(interval, null, sha256OfSortedSegmentIds);
}

@JsonCreator
public CompactionIntervalSpec(
@JsonProperty("interval") Interval interval,
@JsonProperty("uncompactedSegments") @Nullable
List<SegmentDescriptor> uncompactedSegments,
@JsonProperty("sha256OfSortedSegmentIds") @Nullable String sha256OfSortedSegmentIds
)
{
if (interval != null && interval.toDurationMillis() == 0) {
throw new IAE("Interval[%s] is empty, must specify a nonempty interval", interval);
}
this.interval = interval;
if (uncompactedSegments == null) {
// null means all segments within the interval are included; nothing to validate
} else if (uncompactedSegments.isEmpty()) {
throw new IAE("Cannot supply an empty list of segments as input; use either null or a non-empty list.");
} else if (interval != null) {
List<SegmentDescriptor> segmentsNotInInterval =
uncompactedSegments.stream().filter(s -> !interval.contains(s.getInterval())).collect(Collectors.toList());
if (!segmentsNotInInterval.isEmpty()) {
throw new IAE(
"Can not supply segments outside interval[%s], got segments[%s].",
interval,
segmentsNotInInterval
);
}
}
this.uncompactedSegments = uncompactedSegments;
this.sha256OfSortedSegmentIds = sha256OfSortedSegmentIds;
}

@@ -63,6 +99,14 @@ public Interval getInterval()
return interval;
}

@Nullable
@JsonProperty
@JsonInclude(JsonInclude.Include.NON_NULL)
public List<SegmentDescriptor> getUncompactedSegments()
{
return uncompactedSegments;
}

@Nullable
@JsonProperty
public String getSha256OfSortedSegmentIds()
@@ -105,21 +149,23 @@ public boolean equals(Object o)
}
CompactionIntervalSpec that = (CompactionIntervalSpec) o;
return Objects.equals(interval, that.interval) &&
+ Objects.equals(uncompactedSegments, that.uncompactedSegments) &&
Objects.equals(sha256OfSortedSegmentIds, that.sha256OfSortedSegmentIds);
}

@Override
public int hashCode()
{
- return Objects.hash(interval, sha256OfSortedSegmentIds);
+ return Objects.hash(interval, uncompactedSegments, sha256OfSortedSegmentIds);
}

@Override
public String toString()
{
return "CompactionIntervalSpec{" +
"interval=" + interval +
", sha256OfSegmentIds='" + sha256OfSortedSegmentIds + '\'' +
", uncompactedSegments=" + uncompactedSegments +
", sha256OfSortedSegmentIds='" + sha256OfSortedSegmentIds + '\'' +
'}';
}
}
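
The javadoc above leaves open how sha256OfSortedSegmentIds is computed, and the SegmentUtils import suggests Druid has its own canonical helper for it. As a hedged illustration of the general idea only (sort the segment IDs, join them into one string, hash with SHA-256), a standalone computation could look like the sketch below; the delimiter, encoding, and class name here are assumptions, not Druid's actual implementation.

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.stream.Collectors;

public final class SortedSegmentIdHashSketch
{
  /**
   * Illustrative only: sorts the segment ID strings, joins them with a comma,
   * and returns the hex-encoded SHA-256 digest of the result. Druid's actual
   * canonical form (delimiter, encoding) is defined by its own utilities.
   */
  public static String sha256OfSortedIds(List<String> segmentIds) throws NoSuchAlgorithmException
  {
    final String joined = segmentIds.stream().sorted().collect(Collectors.joining(","));
    final byte[] digest = MessageDigest.getInstance("SHA-256")
                                       .digest(joined.getBytes(StandardCharsets.UTF_8));
    final StringBuilder hex = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString();
  }
}
```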
@@ -24,9 +24,9 @@
import org.apache.druid.client.indexing.ClientCompactionRunnerInfo;
import org.apache.druid.indexer.TaskStatus;
import org.apache.druid.indexing.common.TaskToolbox;
+ import org.apache.druid.query.spec.QuerySegmentSpec;
import org.apache.druid.segment.indexing.DataSchema;
import org.apache.druid.server.coordinator.CompactionConfigValidationResult;
- import org.joda.time.Interval;

import java.util.Map;

@@ -47,7 +47,7 @@ public interface CompactionRunner
*/
TaskStatus runCompactionTasks(
CompactionTask compactionTask,
Map<Interval, DataSchema> intervalDataSchemaMap,
Map<QuerySegmentSpec, DataSchema> inputSchemas,
TaskToolbox taskToolbox
) throws Exception;

@@ -59,7 +59,7 @@ TaskStatus runCompactionTasks(
*/
CompactionConfigValidationResult validateCompactionTask(
CompactionTask compactionTask,
Map<Interval, DataSchema> intervalToDataSchemaMap
Map<QuerySegmentSpec, DataSchema> inputSchemas
);

}
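
The switch from Map<Interval, DataSchema> to Map<QuerySegmentSpec, DataSchema> lets a key describe either a whole interval or a specific set of segments, which lines up with the new uncompactedSegments list of SegmentDescriptors in CompactionIntervalSpec. Below is a hedged sketch of the two key shapes using Druid's standard QuerySegmentSpec implementations; how this PR actually constructs the keys is not shown in these hunks, and the interval, version, and partition number are made up for illustration.

```java
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
import org.apache.druid.query.spec.MultipleSpecificSegmentSpec;
import org.apache.druid.query.spec.QuerySegmentSpec;

import java.util.Collections;

public class CompactionInputKeySketch
{
  public static void main(String[] args)
  {
    // Whole-interval compaction: one key covering everything in the interval.
    final QuerySegmentSpec wholeInterval = new MultipleIntervalSegmentSpec(
        Collections.singletonList(Intervals.of("2024-01-01/2024-02-01"))
    );

    // Selective compaction: a key naming specific (e.g. uncompacted) segments.
    final QuerySegmentSpec specificSegments = new MultipleSpecificSegmentSpec(
        Collections.singletonList(
            new SegmentDescriptor(Intervals.of("2024-01-01/2024-01-02"), "v1", 0)
        )
    );

    System.out.println(wholeInterval + " | " + specificSegments);
  }
}
```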