Refactor Iceberg connector to support call distributed procedure
hantangwangd committed May 12, 2024
1 parent 67d4964 commit 3d1560a
Showing 12 changed files with 474 additions and 21 deletions.
@@ -0,0 +1,135 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.iceberg;

import com.facebook.presto.iceberg.delete.DeleteFile;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.ConnectorSplit;
import com.facebook.presto.spi.ConnectorSplitSource;
import com.facebook.presto.spi.SplitWeight;
import com.facebook.presto.spi.connector.ConnectorPartitionHandle;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Closer;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.PartitionSpecParser;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.CloseableIterator;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

import static com.facebook.presto.hive.HiveCommonSessionProperties.getNodeSelectionStrategy;
import static com.facebook.presto.iceberg.FileFormat.fromIcebergFileFormat;
import static com.facebook.presto.iceberg.IcebergUtil.getDataSequenceNumber;
import static com.facebook.presto.iceberg.IcebergUtil.getPartitionKeys;
import static com.facebook.presto.iceberg.IcebergUtil.partitionDataFromStructLike;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterators.limit;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.CompletableFuture.completedFuture;

public class CallDistributedProcedureSplitSource
implements ConnectorSplitSource
{
private CloseableIterator<FileScanTask> fileScanTaskIterator;
private Optional<Consumer<FileScanTask>> fileScanTaskConsumer;

private final TableScan tableScan;
private final Closer closer = Closer.create();
private final double minimumAssignedSplitWeight;
private final ConnectorSession session;

public CallDistributedProcedureSplitSource(
ConnectorSession session,
TableScan tableScan,
CloseableIterable<FileScanTask> fileScanTaskIterable,
Optional<Consumer<FileScanTask>> fileScanTaskConsumer,
double minimumAssignedSplitWeight)
{
this.session = requireNonNull(session, "session is null");
this.tableScan = requireNonNull(tableScan, "tableScan is null");
this.fileScanTaskIterator = fileScanTaskIterable.iterator();
this.fileScanTaskConsumer = requireNonNull(fileScanTaskConsumer, "fileScanTaskConsumer is null");
this.minimumAssignedSplitWeight = minimumAssignedSplitWeight;
closer.register(fileScanTaskIterator);
}

@Override
public CompletableFuture<ConnectorSplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, int maxSize)
{
// TODO: move this to a background thread
List<ConnectorSplit> splits = new ArrayList<>();
Iterator<FileScanTask> iterator = limit(fileScanTaskIterator, maxSize);
while (iterator.hasNext()) {
FileScanTask task = iterator.next();
fileScanTaskConsumer.ifPresent(consumer -> consumer.accept(task));
splits.add(toIcebergSplit(task));
}
return completedFuture(new ConnectorSplitBatch(splits, isFinished()));
}

@Override
public boolean isFinished()
{
return !fileScanTaskIterator.hasNext();
}

@Override
public void close()
{
try {
closer.close();
// TODO: remove this once org.apache.iceberg.io.CloseableIterator's withClose
// correctly releases the resources held by the iterator.
fileScanTaskIterator = CloseableIterator.empty();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}

private ConnectorSplit toIcebergSplit(FileScanTask task)
{
PartitionSpec spec = task.spec();
Optional<PartitionData> partitionData = partitionDataFromStructLike(spec, task.file().partition());

// TODO: We should leverage the residual expression and convert it to a TupleDomain.
// The predicate here is used by readers for predicate pushdown at the reader level,
// so when we do not use the residual expression, we are just wasting CPU cycles
// on the reader side evaluating a condition that we know will always be true.

return new IcebergSplit(
task.file().path().toString(),
task.start(),
task.length(),
fromIcebergFileFormat(task.file().format()),
ImmutableList.of(),
getPartitionKeys(task),
PartitionSpecParser.toJson(spec),
partitionData.map(PartitionData::toJson),
getNodeSelectionStrategy(session),
SplitWeight.fromProportion(Math.min(Math.max((double) task.length() / tableScan.targetSplitSize(), minimumAssignedSplitWeight), 1.0)),
task.deletes().stream().map(DeleteFile::fromIceberg).collect(toImmutableList()),
Optional.empty(),
getDataSequenceNumber(task.file()));
}
}
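
For orientation, here is a minimal sketch of how this new split source might be wired up; it is not part of the diff. The caller plans an Iceberg TableScan and hands each planned FileScanTask both to the procedure's consumer and to the split generation above. The method name and parameters are illustrative assumptions; only the constructor signature and TableScan.planFiles() come from the code shown here.

    // Illustrative sketch, not from this commit: plan an Iceberg TableScan and expose the
    // planned FileScanTasks as connector splits, letting the procedure observe each task.
    private ConnectorSplitSource planCallProcedureSplits(
            ConnectorSession session,
            org.apache.iceberg.Table icebergTable,
            Consumer<FileScanTask> scannedTaskCollector,
            double minimumAssignedSplitWeight)
    {
        TableScan tableScan = icebergTable.newScan();
        CloseableIterable<FileScanTask> fileScanTasks = tableScan.planFiles();
        return new CallDistributedProcedureSplitSource(
                session,
                tableScan,
                fileScanTasks,
                Optional.of(scannedTaskCollector),
                minimumAssignedSplitWeight);
    }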
@@ -15,6 +15,7 @@

import com.facebook.airlift.json.JsonCodec;
import com.facebook.airlift.log.Logger;
import com.facebook.presto.common.QualifiedObjectName;
import com.facebook.presto.common.Subfield;
import com.facebook.presto.common.predicate.TupleDomain;
import com.facebook.presto.common.type.BigintType;
@@ -27,10 +28,13 @@
import com.facebook.presto.iceberg.changelog.ChangelogUtil;
import com.facebook.presto.spi.ColumnHandle;
import com.facebook.presto.spi.ColumnMetadata;
import com.facebook.presto.spi.ConnectorDistributedProcedureHandle;
import com.facebook.presto.spi.ConnectorId;
import com.facebook.presto.spi.ConnectorInsertTableHandle;
import com.facebook.presto.spi.ConnectorNewTableLayout;
import com.facebook.presto.spi.ConnectorOutputTableHandle;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.ConnectorSplitSource;
import com.facebook.presto.spi.ConnectorTableHandle;
import com.facebook.presto.spi.ConnectorTableLayout;
import com.facebook.presto.spi.ConnectorTableLayoutHandle;
@@ -47,6 +51,9 @@
import com.facebook.presto.spi.connector.ConnectorOutputMetadata;
import com.facebook.presto.spi.connector.ConnectorTableVersion;
import com.facebook.presto.spi.function.StandardFunctionResolution;
import com.facebook.presto.spi.procedure.DistributedProcedure;
import com.facebook.presto.spi.procedure.IProcedureRegistry;
import com.facebook.presto.spi.procedure.Procedure;
import com.facebook.presto.spi.relation.RowExpression;
import com.facebook.presto.spi.relation.RowExpressionService;
import com.facebook.presto.spi.statistics.ColumnStatisticMetadata;
@@ -70,6 +77,7 @@
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.FileMetadata;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.PartitionSpecParser;
@@ -161,23 +169,26 @@ public abstract class IcebergAbstractMetadata
implements ConnectorMetadata
{
protected final TypeManager typeManager;
protected final IProcedureRegistry procedureRegistry;
protected final JsonCodec<CommitTaskData> commitTaskCodec;
protected final NodeVersion nodeVersion;
protected final RowExpressionService rowExpressionService;
protected Transaction transaction;
protected IcebergTransactionContext transactionContext;

private final StandardFunctionResolution functionResolution;
private final ConcurrentMap<SchemaTableName, Table> icebergTables = new ConcurrentHashMap<>();
private static final Logger log = Logger.get(IcebergAbstractMetadata.class);

public IcebergAbstractMetadata(
TypeManager typeManager,
IProcedureRegistry procedureRegistry,
StandardFunctionResolution functionResolution,
RowExpressionService rowExpressionService,
JsonCodec<CommitTaskData> commitTaskCodec,
NodeVersion nodeVersion)
{
this.typeManager = requireNonNull(typeManager, "typeManager is null");
this.procedureRegistry = requireNonNull(procedureRegistry, "procedureRegistry is null");
this.commitTaskCodec = requireNonNull(commitTaskCodec, "commitTaskCodec is null");
this.functionResolution = requireNonNull(functionResolution, "functionResolution is null");
this.rowExpressionService = requireNonNull(rowExpressionService, "rowExpressionService is null");
@@ -199,6 +210,11 @@ protected final Table getIcebergTable(ConnectorSession session, SchemaTableName

public abstract void unregisterTable(ConnectorSession clientSession, SchemaTableName schemaTableName);

public Optional<ConnectorSplitSource> getSplitSourceInCurrentCallProcedureTransaction()
{
return transactionContext == null ? Optional.empty() : transactionContext.getConnectorSplitSource();
}

/**
* This class provides the default implementation of getTableLayoutForConstraint, which will be used in the case of a Java Worker
*/
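
IcebergTransactionContext, which replaces the bare Transaction field throughout this diff, is defined in one of the changed files not loaded on this page. Inferring only from its call sites here (the two-argument constructor, getTransaction(), getConnectorSplitSource(), and destroy()), a minimal sketch might look like the following; the field names, the setter, and the body of destroy() are assumptions.

    // Minimal sketch inferred from usage in this diff, not the class shipped in the commit.
    // Assumes imports of java.util.Optional, org.apache.iceberg.Table, org.apache.iceberg.Transaction,
    // and com.facebook.presto.spi.ConnectorSplitSource.
    public class IcebergTransactionContext
    {
        private final Optional<Table> table;
        private final Transaction transaction;
        private Optional<ConnectorSplitSource> connectorSplitSource = Optional.empty();

        public IcebergTransactionContext(Optional<Table> table, Transaction transaction)
        {
            this.table = table;
            this.transaction = transaction;
        }

        public Optional<Table> getTable()
        {
            return table;
        }

        public Transaction getTransaction()
        {
            return transaction;
        }

        public Optional<ConnectorSplitSource> getConnectorSplitSource()
        {
            return connectorSplitSource;
        }

        public void setConnectorSplitSource(ConnectorSplitSource connectorSplitSource)
        {
            this.connectorSplitSource = Optional.of(connectorSplitSource);
        }

        public void destroy()
        {
            // assumption: clear state so the context is not reused after commit
            this.connectorSplitSource = Optional.empty();
        }
    }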
@@ -428,7 +444,7 @@ public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession sess

protected ConnectorInsertTableHandle beginIcebergTableInsert(IcebergTableHandle table, Table icebergTable)
{
transaction = icebergTable.newTransaction();
transactionContext = new IcebergTransactionContext(Optional.of(icebergTable), icebergTable.newTransaction());

return new IcebergWritableTableHandle(
table.getSchemaName(),
@@ -445,12 +461,13 @@ protected ConnectorInsertTableHandle beginIcebergTableInsert(IcebergTableHandle
public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics)
{
if (fragments.isEmpty()) {
transaction.commitTransaction();
transactionContext.getTransaction().commitTransaction();
transactionContext.destroy();
return Optional.empty();
}

IcebergWritableTableHandle table = (IcebergWritableTableHandle) insertHandle;
Table icebergTable = transaction.table();
Table icebergTable = transactionContext.getTransaction().table();

List<CommitTaskData> commitTasks = fragments.stream()
.map(slice -> commitTaskCodec.fromJson(slice.getBytes()))
@@ -461,7 +478,7 @@ public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session,
icebergTable.schema().findType(field.sourceId())))
.toArray(Type[]::new);

AppendFiles appendFiles = transaction.newFastAppend();
AppendFiles appendFiles = transactionContext.getTransaction().newFastAppend();
for (CommitTaskData task : commitTasks) {
DataFiles.Builder builder = DataFiles.builder(icebergTable.spec())
.withPath(task.getPath())
@@ -479,7 +496,8 @@ public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session,
}

appendFiles.commit();
transaction.commitTransaction();
transactionContext.getTransaction().commitTransaction();
transactionContext.destroy();

return Optional.of(new HiveWrittenPartitions(commitTasks.stream()
.map(CommitTaskData::getPath)
@@ -770,6 +788,44 @@ public void truncateTable(ConnectorSession session, ConnectorTableHandle tableHa
}
}

@Override
public ConnectorDistributedProcedureHandle beginCallDistributedProcedure(
ConnectorSession session,
QualifiedObjectName procedureName,
ConnectorTableLayoutHandle tableLayoutHandle,
Object[] arguments)
{
IcebergTableHandle handle = ((IcebergTableLayoutHandle) tableLayoutHandle).getTable();
Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());

if (handle.isSnapshotSpecified()) {
throw new PrestoException(NOT_SUPPORTED, "This connector does not allow table execute at a specified snapshot");
}

transactionContext = new IcebergTransactionContext(Optional.of(icebergTable), icebergTable.newTransaction());
Procedure procedure = procedureRegistry.resolve(
new ConnectorId(procedureName.getCatalogName()),
new SchemaTableName(
procedureName.getSchemaName(),
procedureName.getObjectName()));
verify(procedure instanceof DistributedProcedure, "procedure must be DistributedProcedure");
return ((DistributedProcedure) procedure).getBeginCallDistributedProcedure().begin(session, transactionContext, tableLayoutHandle, arguments);
}

@Override
public void finishCallDistributedProcedure(ConnectorSession session, ConnectorDistributedProcedureHandle tableHandle, QualifiedObjectName procedureName, Collection<Slice> fragments)
{
Procedure procedure = procedureRegistry.resolve(
new ConnectorId(procedureName.getCatalogName()),
new SchemaTableName(
procedureName.getSchemaName(),
procedureName.getObjectName()));
verify(procedure instanceof DistributedProcedure, "procedure must be DistributedProcedure");
((DistributedProcedure) procedure).getFinishCallDistributedProcedure().finish(transactionContext, tableHandle, fragments);
transactionContext.getTransaction().commitTransaction();
transactionContext.destroy();
}

@Override
public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle)
{
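
Taken together, the two entry points above define the connector-side lifecycle of a distributed procedure call. A hedged sketch of the sequence a coordinator could drive against them follows; the procedure name, the empty argument array, and the surrounding helper are placeholders, and only the method signatures come from this diff.

    // Hedged sketch of the coordinator-side sequence for CALL on a distributed procedure,
    // expressed against the metadata methods added in this diff. Assumes imports of
    // com.facebook.presto.common.QualifiedObjectName, the com.facebook.presto.spi types used
    // below, and io.airlift.slice.Slice.
    private void driveCallDistributedProcedure(
            IcebergAbstractMetadata metadata,
            ConnectorSession session,
            ConnectorTableLayoutHandle tableLayoutHandle,
            Collection<Slice> fragmentsFromWorkers)
    {
        // placeholder procedure name; real procedures are registered in the IProcedureRegistry
        QualifiedObjectName procedureName = new QualifiedObjectName("iceberg", "system", "example_procedure");

        // 1. Begin: resolves the DistributedProcedure, opens an IcebergTransactionContext,
        //    and lets the procedure stash a ConnectorSplitSource on that context.
        ConnectorDistributedProcedureHandle handle =
                metadata.beginCallDistributedProcedure(session, procedureName, tableLayoutHandle, new Object[] {});

        // 2. Execute: workers consume the splits registered on the transaction context and
        //    return serialized CommitTaskData fragments (split scheduling elided here).
        ConnectorSplitSource splitSource = metadata.getSplitSourceInCurrentCallProcedureTransaction()
                .orElseThrow(() -> new IllegalStateException("procedure did not register a split source"));

        // 3. Finish: the procedure applies the fragments, then the transaction is committed
        //    and the context destroyed.
        metadata.finishCallDistributedProcedure(session, handle, procedureName, fragmentsFromWorkers);
    }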
@@ -790,7 +846,7 @@ public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTable
}

validateTableMode(session, icebergTable);
transaction = icebergTable.newTransaction();
transactionContext = new IcebergTransactionContext(Optional.of(icebergTable), icebergTable.newTransaction());

return handle;
}
@@ -801,7 +857,7 @@ public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHan
IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());

RowDelta rowDelta = transaction.newRowDelta();
RowDelta rowDelta = transactionContext.getTransaction().newRowDelta();

List<CommitTaskData> commitTasks = fragments.stream()
.map(slice -> commitTaskCodec.fromJson(slice.getBytes()))
@@ -838,7 +894,8 @@ public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHan
}

rowDelta.commit();
transaction.commitTransaction();
transactionContext.getTransaction().commitTransaction();
transactionContext.destroy();
}

@Override
@@ -872,7 +929,15 @@ public boolean supportsMetadataDelete(ConnectorSession session, ConnectorTableHa
Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());

boolean supportsMetadataDelete = true;
for (PartitionSpec spec : icebergTable.specs().values()) {
// Get partition specs that really need to be checked
Set<PartitionSpec> partitionSpecs = handle.getIcebergTableName().getSnapshotId().map(
snapshot -> icebergTable.snapshot(snapshot).allManifests(icebergTable.io()).stream()
.map(ManifestFile::partitionSpecId)
.map(icebergTable.specs()::get)
.collect(toImmutableSet()))
.orElseGet(() -> ImmutableSet.copyOf(icebergTable.specs().values())); // No snapshot, so no data. This case doesn't matter.

for (PartitionSpec spec : partitionSpecs) {
// Currently we do not support delete when any partition column in the predicate is not transformed by identity()
Set<Integer> partitionColumnSourceIds = spec.fields().stream()
.filter(field -> field.transform().isIdentity())
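
To make the identity-transform restriction above concrete, here is a hedged illustration using the Iceberg PartitionSpec builder; the schema variable and column names are placeholders, not from this commit.

    // Assumes an org.apache.iceberg.Schema named "schema" with columns event_date and user_id.
    PartitionSpec identitySpec = PartitionSpec.builderFor(schema)
            .identity("event_date")    // every row of a matching partition satisfies an event_date predicate,
            .build();                  // so whole data files can be dropped via metadata delete
    PartitionSpec bucketedSpec = PartitionSpec.builderFor(schema)
            .bucket("user_id", 16)     // a single bucket mixes matching and non-matching user_id rows,
            .build();                  // so a predicate on user_id cannot be handled as a metadata delete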
@@ -928,15 +993,16 @@ public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandl
*/
private long removeScanFiles(Table icebergTable, Iterable<FileScanTask> scan)
{
transaction = icebergTable.newTransaction();
DeleteFiles deletes = transaction.newDelete();
transactionContext = new IcebergTransactionContext(Optional.of(icebergTable), icebergTable.newTransaction());
DeleteFiles deletes = transactionContext.getTransaction().newDelete();
AtomicLong rowsDeleted = new AtomicLong(0L);
scan.forEach(t -> {
deletes.deleteFile(t.file());
rowsDeleted.addAndGet(t.estimatedRowsCount());
});
deletes.commit();
transaction.commitTransaction();
transactionContext.getTransaction().commitTransaction();
transactionContext.destroy();
return rowsDeleted.get();
}

