From ad6be9e736aa34c18f375dfa9330d0f87f43a1d7 Mon Sep 17 00:00:00 2001 From: kanthi subramanian Date: Mon, 27 Apr 2026 14:29:47 -0500 Subject: [PATCH 1/2] Added docker compose setup with 3 node etcd cluster --- .../docker-compose-etcd-3-node.yaml | 175 ++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 examples/docker-compose/docker-compose-etcd-3-node.yaml diff --git a/examples/docker-compose/docker-compose-etcd-3-node.yaml b/examples/docker-compose/docker-compose-etcd-3-node.yaml new file mode 100644 index 0000000..2152520 --- /dev/null +++ b/examples/docker-compose/docker-compose-etcd-3-node.yaml @@ -0,0 +1,175 @@ +# Iceberg REST catalog with a 3-member etcd cluster (static bootstrap). +# +# First-time / fresh data: +# docker compose -f docker-compose-etcd-3-node.yaml up +# +# Coming from a single-node stack (see docker-compose-etcd.yaml): +# - If you do NOT need to keep etcd data: stop the 1-node stack, remove the old +# etcd volume if any, then start this file (3 fresh members). +# - If you MUST keep the existing data: you cannot re-bootstrap a new 3-node +# static cluster from the same data dir. Add members to the *running* 1-node +# cluster with `etcdctl member add` and start the new nodes with +# --initial-cluster-state=existing (see etcd "runtime reconfiguration" docs).
+# +# Usage: docker compose -f docker-compose-etcd-3-node.yaml up +services: + etcd-1: + image: quay.io/coreos/etcd:v3.5.12 + restart: unless-stopped + command: + - etcd + - --name=etcd-1 + - --data-dir=/etcd-data + - --listen-client-urls=http://0.0.0.0:2379 + - --advertise-client-urls=http://etcd-1:2379 + - --listen-peer-urls=http://0.0.0.0:2380 + - --initial-advertise-peer-urls=http://etcd-1:2380 + - --initial-cluster=etcd-1=http://etcd-1:2380,etcd-2=http://etcd-2:2380,etcd-3=http://etcd-3:2380 + - --initial-cluster-token=ice-rest-catalog-etcd + - --initial-cluster-state=new + volumes: + - etcd-1-data:/etcd-data + healthcheck: + test: ["CMD", "etcdctl", "--endpoints=http://127.0.0.1:2379", "endpoint", "health"] + interval: 5s + timeout: 3s + retries: 15 + + etcd-2: + image: quay.io/coreos/etcd:v3.5.12 + restart: unless-stopped + command: + - etcd + - --name=etcd-2 + - --data-dir=/etcd-data + - --listen-client-urls=http://0.0.0.0:2379 + - --advertise-client-urls=http://etcd-2:2379 + - --listen-peer-urls=http://0.0.0.0:2380 + - --initial-advertise-peer-urls=http://etcd-2:2380 + - --initial-cluster=etcd-1=http://etcd-1:2380,etcd-2=http://etcd-2:2380,etcd-3=http://etcd-3:2380 + - --initial-cluster-token=ice-rest-catalog-etcd + - --initial-cluster-state=new + volumes: + - etcd-2-data:/etcd-data + healthcheck: + test: ["CMD", "etcdctl", "--endpoints=http://127.0.0.1:2379", "endpoint", "health"] + interval: 5s + timeout: 3s + retries: 15 + + etcd-3: + image: quay.io/coreos/etcd:v3.5.12 + restart: unless-stopped + command: + - etcd + - --name=etcd-3 + - --data-dir=/etcd-data + - --listen-client-urls=http://0.0.0.0:2379 + - --advertise-client-urls=http://etcd-3:2379 + - --listen-peer-urls=http://0.0.0.0:2380 + - --initial-advertise-peer-urls=http://etcd-3:2380 + - --initial-cluster=etcd-1=http://etcd-1:2380,etcd-2=http://etcd-2:2380,etcd-3=http://etcd-3:2380 + - --initial-cluster-token=ice-rest-catalog-etcd + - --initial-cluster-state=new + volumes: + - 
etcd-3-data:/etcd-data + healthcheck: + test: ["CMD", "etcdctl", "--endpoints=http://127.0.0.1:2379", "endpoint", "health"] + interval: 5s + timeout: 3s + retries: 15 + + minio: + image: minio/minio:RELEASE.2025-03-12T18-04-18Z + restart: unless-stopped + command: [ 'server', '/data', '--address', ':8999', '--console-address', ':9001' ] + environment: + MINIO_ROOT_USER: miniouser + MINIO_ROOT_PASSWORD: miniopassword + ports: + - '8999:8999' # 9000 is taken by clickhouse + - '9001:9001' # web console + volumes: + - minio:/data + minio-init: + image: minio/mc:RELEASE.2025-03-12T17-29-24Z + restart: on-failure # run once and exit + entrypoint: > + /bin/sh -c " + sleep 1; until /usr/bin/mc alias set local http://minio:8999 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD; do echo waiting for minio to start...; sleep 1; done; + /usr/bin/mc mb --ignore-existing local/bucket1; + exit 0; + " + environment: + MINIO_ROOT_USER: miniouser + MINIO_ROOT_PASSWORD: miniopassword + depends_on: + - minio + ice-rest-catalog: + image: altinity/ice-rest-catalog:${ICE_REST_CATALOG_TAG:-debug-with-ice-latest-master@sha256:9f5308309b98d2e0b76346cd29c22557076bfcb0070eb8f32b69b87932b9e37c} + pull_policy: ${ICE_REST_CATALOG_PULL_POLICY:-always} + restart: unless-stopped + ports: + - '5001:5000' # iceberg/http + configs: + - source: ice-rest-catalog-yaml + target: /etc/ice/ice-rest-catalog.yaml + depends_on: + etcd-1: + condition: service_healthy + etcd-2: + condition: service_healthy + etcd-3: + condition: service_healthy + minio-init: + condition: service_completed_successfully + clickhouse: + image: altinity/clickhouse-server:25.8.9.20496.altinityantalya-alpine + restart: unless-stopped + environment: + CLICKHOUSE_SKIP_USER_SETUP: "1" # insecure + ports: + - "8123:8123" # clickhouse/http + - "9000:9000" # clickhouse/native + configs: + - source: clickhouse-init + target: /docker-entrypoint-initdb.d/init-db.sh + volumes: + # for access to clickhouse-logs + - 
./data/docker-compose/clickhouse/var/log/clickhouse-server:/var/log/clickhouse-server + # - ./config.xml:/etc/clickhouse-server/conf.d/config.xml + depends_on: + - ice-rest-catalog +configs: + clickhouse-init: + content: | + #!/bin/bash + exec clickhouse client --query $" + SET allow_experimental_database_iceberg = 1; + + DROP DATABASE IF EXISTS ice; + + CREATE DATABASE ice + ENGINE = DataLakeCatalog('http://ice-rest-catalog:5000') + SETTINGS catalog_type = 'rest', + auth_header = 'Authorization: Bearer foo', + storage_endpoint = 'http://minio:8999', + warehouse = 's3://bucket1'; + " + ice-rest-catalog-yaml: + content: | + uri: etcd:http://etcd-1:2379,http://etcd-2:2379,http://etcd-3:2379 + warehouse: s3://bucket1 + s3: + endpoint: http://minio:8999 + pathStyleAccess: true + accessKeyID: miniouser + secretAccessKey: miniopassword + region: minio + bearerTokens: + - value: foo +volumes: + minio: + etcd-1-data: + etcd-2-data: + etcd-3-data: From 82c903f88d388d46f35fea2b1c68f8ce8dfbe704 Mon Sep 17 00:00:00 2001 From: kanthi subramanian Date: Tue, 28 Apr 2026 14:06:47 -0500 Subject: [PATCH 2/2] Added documentation to the etcd-cluster-setup for 3 node docker compose cluster. --- docs/etcd-cluster-setup.md | 27 ++++++++++++------- .../docker-compose-etcd-3-node.yaml | 9 +++++++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/docs/etcd-cluster-setup.md b/docs/etcd-cluster-setup.md index 2266ac6..d785132 100644 --- a/docs/etcd-cluster-setup.md +++ b/docs/etcd-cluster-setup.md @@ -10,20 +10,24 @@ This guide walks through setting up ice-rest-catalog backed by a 3-node etcd clu ## 1. 
Start the etcd Cluster -Use the provided docker-compose file to bring up etcd, MinIO, ice-rest-catalog, and ClickHouse: +Use the provided 3-node docker-compose file to bring up etcd, MinIO, ice-rest-catalog, and ClickHouse: ```bash cd examples/docker-compose -docker compose -f docker-compose-etcd.yaml up -d +docker compose -f docker-compose-etcd-3-node.yaml up -d ``` -This starts a single-node etcd by default. For a 3-node cluster, replace the `etcd` service definition with three separate nodes (etcd1, etcd2, etcd3) each with their own ports mapped to the host: +By default, etcd ports are only exposed on the compose network. To run `etcdctl` from the host +(used in step 4), uncomment the `ports` block under each etcd service in +`docker-compose-etcd-3-node.yaml`. The host ports are: -| Node | Client Port | -|-------|-------------| -| etcd1 | 12379 | -| etcd2 | 12479 | -| etcd3 | 12579 | +| Node | Host Port | Container Port | +|--------|-----------|----------------| +| etcd-1 | 12379 | 2379 | +| etcd-2 | 12479 | 2379 | +| etcd-3 | 12579 | 2379 | + +For a single-node setup, use `docker-compose-etcd.yaml` instead. ## 2. Configure ice-rest-catalog @@ -48,10 +52,13 @@ Start (or restart) ice-rest-catalog so it picks up the new config. ## 3. Insert Data -Use the ice CLI to create a table and insert a Parquet file: +Use the ice CLI to create a table and insert a Parquet file. The example `iris.parquet` lives +under `examples/scratch/` — see [examples/scratch/iris.parquet.txt](../examples/scratch/iris.parquet.txt) +for the source link if you don't already have it locally: ```bash -ice insert flowers.iris file://iris.parquet +cd examples/scratch +ice insert flowers.iris -p file://iris.parquet ``` ## 4. 
Verify Replication with etcdctl diff --git a/examples/docker-compose/docker-compose-etcd-3-node.yaml b/examples/docker-compose/docker-compose-etcd-3-node.yaml index 2152520..5d2e02a 100644 --- a/examples/docker-compose/docker-compose-etcd-3-node.yaml +++ b/examples/docker-compose/docker-compose-etcd-3-node.yaml @@ -34,6 +34,9 @@ services: interval: 5s timeout: 3s retries: 15 + # Uncomment to run etcdctl from the host against this node: + # ports: + # - "12379:2379" etcd-2: image: quay.io/coreos/etcd:v3.5.12 @@ -56,6 +59,9 @@ services: interval: 5s timeout: 3s retries: 15 + # Uncomment to run etcdctl from the host against this node: + # ports: + # - "12479:2379" etcd-3: image: quay.io/coreos/etcd:v3.5.12 @@ -78,6 +84,9 @@ services: interval: 5s timeout: 3s retries: 15 + # Uncomment to run etcdctl from the host against this node: + # ports: + # - "12579:2379" minio: image: minio/minio:RELEASE.2025-03-12T18-04-18Z