64 changes: 53 additions & 11 deletions pyiceberg/table/upsert_util.py
@@ -82,14 +82,56 @@ def get_rows_to_update(source_table: pa.Table, target_table: pa.Table, join_cols
],
)

return (
source_table
# We already know that the schema is compatible, this is to fix large_ types
.cast(target_table.schema)
.join(target_table, keys=list(join_cols_set), join_type="inner", left_suffix="-lhs", right_suffix="-rhs")
.filter(diff_expr)
.drop_columns([f"{col}-rhs" for col in non_key_cols])
.rename_columns({f"{col}-lhs" if col not in join_cols else col: col for col in source_table.column_names})
# Finally cast to the original schema since it doesn't carry nullability:
# https://github.com/apache/arrow/issues/45557
).cast(target_table.schema)
try:
return (
source_table
# We already know that the schema is compatible, this is to fix large_ types
.cast(target_table.schema)
.join(target_table, keys=list(join_cols_set), join_type="inner", left_suffix="-lhs", right_suffix="-rhs")
.filter(diff_expr)
.drop_columns([f"{col}-rhs" for col in non_key_cols])
.rename_columns({f"{col}-lhs" if col not in join_cols else col: col for col in source_table.column_names})
# Finally cast to the original schema since it doesn't carry nullability:
# https://github.com/apache/arrow/issues/45557
).cast(target_table.schema)
except pa.ArrowInvalid:
# When we are not able to compare (e.g. due to unsupported types),
# fall back to updating every row in the source table that already exists in the target,
# since we cannot tell which of the matching rows actually changed.
# See: https://github.com/apache/arrow/issues/35785
MARKER_COLUMN_NAME = "__from_target"
INDEX_COLUMN_NAME = "__source_index"

if MARKER_COLUMN_NAME in join_cols_set or INDEX_COLUMN_NAME in join_cols_set:
raise ValueError(
f"{MARKER_COLUMN_NAME} and {INDEX_COLUMN_NAME} are reserved for joining "
f"DataFrames, and cannot be used as column names"
) from None

# Step 1: Prepare source index with join keys and a marker index
# Cast to target table schema, so we can do the join
# See: https://github.com/apache/arrow/issues/37542
source_index = (
source_table.cast(target_table.schema)
.select(join_cols_set)
.append_column(INDEX_COLUMN_NAME, pa.array(range(len(source_table))))
)

# Step 2: Prepare target index with join keys and a marker
target_index = target_table.select(join_cols_set).append_column(
MARKER_COLUMN_NAME, pa.array([True] * len(target_table), pa.bool_())
)

# Step 3: Perform a left outer join to find which rows from source exist in target
joined = source_index.join(target_index, keys=list(join_cols_set), join_type="left outer")

# Step 4: Restore original source order
joined = joined.sort_by(INDEX_COLUMN_NAME)

# Step 5: Create a boolean mask for rows that do exist in the target
# i.e., where marker column is true after the join
to_update_mask = pc.invert(pc.is_null(joined[MARKER_COLUMN_NAME]))

# Step 6: Filter the source table using the mask
filtered = source_table.filter(to_update_mask)

return filtered
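
For illustration, here is a minimal, self-contained sketch of the marker-column technique the fallback uses, on toy pyarrow tables (the data and column values are invented for the example; `__source_index` and `__from_target` mirror the reserved names above):

```python
import pyarrow as pa
import pyarrow.compute as pc

# Toy tables: "id" is the join key; the payload values don't matter here.
source = pa.table({"id": pa.array([2, 1], pa.int32()), "val": ["new", "maybe changed"]})
target = pa.table({"id": pa.array([1, 3], pa.int32()), "val": ["old", "keep"]})

# Steps 1-2: index the source rows and mark every target row.
source_index = source.select(["id"]).append_column("__source_index", pa.array(range(len(source))))
target_index = target.select(["id"]).append_column("__from_target", pa.array([True] * len(target), pa.bool_()))

# Steps 3-4: left outer join, then restore the original source row order.
joined = source_index.join(target_index, keys=["id"], join_type="left outer").sort_by("__source_index")

# Step 5: the marker is non-null exactly for the source rows that matched a target row.
mask = pc.invert(pc.is_null(joined["__from_target"]))

# Step 6: those rows are returned as "rows to update".
print(source.filter(mask).to_pylist())  # [{'id': 1, 'val': 'maybe changed'}]
```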
72 changes: 71 additions & 1 deletion tests/table/test_upsert.py
Expand Up @@ -30,7 +30,7 @@
from pyiceberg.table import UpsertResult
from pyiceberg.table.snapshots import Operation
from pyiceberg.table.upsert_util import create_match_filter
from pyiceberg.types import IntegerType, NestedField, StringType
from pyiceberg.types import IntegerType, NestedField, StringType, StructType
from tests.catalog.test_base import InMemoryCatalog, Table


@@ -511,6 +511,76 @@ def test_upsert_without_identifier_fields(catalog: Catalog) -> None:
tbl.upsert(df)


def test_upsert_struct_field_fails_in_join(catalog: Catalog) -> None:
Contributor: nit: change the name since it no longer fails :) I also added another test here for when a complex type is used as the join key.

Contributor (Author): I copied your test to this branch and renamed the non-failing one :-)

identifier = "default.test_upsert_struct_field_fails"
_drop_table(catalog, identifier)

schema = Schema(
NestedField(1, "id", IntegerType(), required=True),
NestedField(
2,
"nested_type",
# Struct<sub1: string, sub2: string>
StructType(
NestedField(3, "sub1", StringType(), required=True),
NestedField(4, "sub2", StringType(), required=True),
),
required=False,
),
identifier_field_ids=[1],
)

tbl = catalog.create_table(identifier, schema=schema)

arrow_schema = pa.schema(
[
pa.field("id", pa.int32(), nullable=False),
pa.field(
"nested_type",
pa.struct(
[
pa.field("sub1", pa.large_string(), nullable=False),
pa.field("sub2", pa.large_string(), nullable=False),
]
),
nullable=True,
),
]
)

initial_data = pa.Table.from_pylist(
[
{
"id": 1,
"nested_type": {"sub1": "bla1", "sub2": "bla"},
}
],
schema=arrow_schema,
)
tbl.append(initial_data)

update_data = pa.Table.from_pylist(
[
{
"id": 2,
"nested_type": {"sub1": "bla1", "sub2": "bla"},
},
{
"id": 1,
"nested_type": {"sub1": "bla1", "sub2": "bla"},
},
],
schema=arrow_schema,
)

upd = tbl.upsert(update_data, join_cols=["id"])

# The row needs to be updated even though it hasn't changed:
# when pyarrow can't compare the rows, every matching row is updated
assert upd.rows_updated == 1
assert upd.rows_inserted == 1
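
The fallback path this test exercises exists because Acero's hash join rejects nested types in non-key columns. A minimal repro, independent of pyiceberg (a sketch assuming a pyarrow version affected by apache/arrow#35785; the exact error message may differ across versions):

```python
import pyarrow as pa

left = pa.table({"id": [1], "nested": [{"sub1": "a", "sub2": "b"}]})
right = pa.table({"id": [1], "nested": [{"sub1": "a", "sub2": "b"}]})

try:
    # A struct column as a non-key (payload) field is rejected by the hash join.
    left.join(right, keys=["id"], join_type="inner", left_suffix="-lhs", right_suffix="-rhs")
except pa.ArrowInvalid as exc:
    print(exc)  # e.g. "Data type struct<...> is not supported in join non-key field nested"
```

This is the error `get_rows_to_update` catches before switching to the marker-column fallback.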


def test_upsert_with_nulls(catalog: Catalog) -> None:
identifier = "default.test_upsert_with_nulls"
_drop_table(catalog, identifier)