DataStore I/O Operations

DataStore supports reading from and writing to various file formats and data sources.

Reading Data

CSV Files

read_csv(filepath_or_buffer, sep=',', header='infer', names=None, 
         usecols=None, dtype=None, nrows=None, skiprows=None,
         compression=None, encoding=None, **kwargs)

Examples:

from chdb import datastore as pd

# Basic CSV read
ds = pd.read_csv("data.csv")

# With options
ds = pd.read_csv(
    "data.csv",
    sep=";",                    # Custom delimiter
    header=0,                   # Header row index
    names=['a', 'b', 'c'],      # Custom column names
    usecols=['a', 'b'],         # Only read specific columns
    dtype={'a': 'Int64'},       # Specify dtypes
    nrows=1000,                 # Read only first 1000 rows
    skiprows=1,                 # Skip first row
    compression='gzip',         # Compressed file
    encoding='utf-8'            # Encoding
)

# From URL
ds = pd.read_csv("https://example.com/data.csv")

Parquet Files

Recommended for large datasets: Parquet is a columnar format with stronger compression than row-based formats such as CSV, and it preserves column types.

read_parquet(path, columns=None, **kwargs)

Examples:

# Basic Parquet read
ds = pd.read_parquet("data.parquet")

# Read specific columns only (efficient - only reads needed data)
ds = pd.read_parquet("data.parquet", columns=['col1', 'col2', 'col3'])

# From S3
ds = pd.read_parquet("s3://bucket/data.parquet")

JSON Files

read_json(path_or_buf, orient=None, lines=False, **kwargs)

Examples:

# Standard JSON
ds = pd.read_json("data.json")

# JSON Lines (newline-delimited)
ds = pd.read_json("data.jsonl", lines=True)

# JSON with specific orientation
ds = pd.read_json("data.json", orient='records')

Excel Files

read_excel(io, sheet_name=0, header=0, names=None, **kwargs)

Examples:

# Read first sheet
ds = pd.read_excel("data.xlsx")

# Read specific sheet
ds = pd.read_excel("data.xlsx", sheet_name="Sheet1")
ds = pd.read_excel("data.xlsx", sheet_name=2)  # Third sheet

# Read multiple sheets (returns dict)
sheets = pd.read_excel("data.xlsx", sheet_name=['Sheet1', 'Sheet2'])
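
Each value in the returned dict is a separate DataStore, keyed by sheet name; for example:

sales = sheets['Sheet1']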

SQL Databases

read_sql(sql, con, **kwargs)

Examples:

# Read from SQL query
ds = pd.read_sql("SELECT * FROM users", connection)
ds = pd.read_sql("SELECT * FROM orders WHERE date > '2024-01-01'", connection)

Other Formats

# Feather (Arrow)
ds = pd.read_feather("data.feather")

# ORC
ds = pd.read_orc("data.orc")

# Pickle
ds = pd.read_pickle("data.pkl")

# Fixed-width formatted
ds = pd.read_fwf("data.txt", widths=[10, 20, 15])

# HTML tables
ds = pd.read_html("https://example.com/table.html")[0]

Writing Data

to_csv

Export to CSV format.

to_csv(path_or_buf=None, sep=',', na_rep='', header=True, 
       index=True, mode='w', compression=None, **kwargs)

Examples:

ds = pd.read_parquet("data.parquet")

# Basic export
ds.to_csv("output.csv")

# With options
ds.to_csv(
    "output.csv",
    sep=";",                    # Custom delimiter
    index=False,                # Don't include index
    header=True,                # Include header
    na_rep='NULL',              # Represent NaN as 'NULL'
    compression='gzip'          # Compress output
)

# To string
csv_string = ds.to_csv()

to_parquet

Export to Parquet format (recommended for large data).

to_parquet(path, engine='pyarrow', compression='snappy', **kwargs)

Examples:

# Basic export
ds.to_parquet("output.parquet")

# With compression options
ds.to_parquet("output.parquet", compression='gzip')
ds.to_parquet("output.parquet", compression='zstd')

# Partitioned output
ds.to_parquet(
    "output/",
    partition_cols=['year', 'month']
)
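
Assuming the partitioned directory can be read back as a single dataset (as with pandas/pyarrow), a round trip might look like this:

# Write partitions, then read the whole directory back;
# partition columns are assumed to be recovered from the directory layout
ds.to_parquet("output/", partition_cols=['year', 'month'])
ds_all = pd.read_parquet("output/")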

to_json

Export to JSON format.

to_json(path_or_buf=None, orient='records', lines=False, **kwargs)

Examples:

# Standard JSON (array of records)
ds.to_json("output.json", orient='records')

# JSON Lines (one JSON object per line)
ds.to_json("output.jsonl", lines=True)

# Different orientations
ds.to_json("output.json", orient='split')    # {columns, data, index}
ds.to_json("output.json", orient='records')  # [{col: val}, ...]
ds.to_json("output.json", orient='columns')  # {col: {idx: val}}

# To string
json_string = ds.to_json()

to_excel

Export to Excel format.

to_excel(excel_writer, sheet_name='Sheet1', index=True, **kwargs)

Examples:

# Single sheet
ds.to_excel("output.xlsx")
ds.to_excel("output.xlsx", sheet_name="Data", index=False)

# Multiple sheets
with pd.ExcelWriter("output.xlsx") as writer:
    ds1.to_excel(writer, sheet_name="Sales")
    ds2.to_excel(writer, sheet_name="Inventory")

to_sql

Export to SQL database or generate SQL string.

to_sql(name=None, con=None, schema=None, if_exists='fail', **kwargs)

Examples:

# Generate SQL query (no execution)
sql = ds.to_sql()
print(sql)
# SELECT ...
# FROM ...
# WHERE ...

# Write to database
ds.to_sql("table_name", connection, if_exists='replace')
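
As with read_sql, the connection can be a DB-API or SQLAlchemy connection. A minimal round-trip sketch using sqlite3 (the database file and table name are placeholders, and the behaviour is assumed to match pandas):

import sqlite3

connection = sqlite3.connect("example.db")   # hypothetical database file

# Write the DataStore to a table, replacing it if it already exists
ds.to_sql("table_name", connection, if_exists='replace')

# Read it back to confirm the round trip
check = pd.read_sql("SELECT * FROM table_name", connection)
connection.close()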

Other Export Methods

# To pandas DataFrame
df = ds.to_df()
df = ds.to_pandas()

# To Arrow Table
table = ds.to_arrow()

# To NumPy array
arr = ds.to_numpy()

# To dictionary
d = ds.to_dict()
d = ds.to_dict(orient='records')  # List of dicts
d = ds.to_dict(orient='list')     # Dict of lists

# To records (list of tuples)
records = ds.to_records()

# To string
s = ds.to_string()
s = ds.to_string(max_rows=100)

# To Markdown
md = ds.to_markdown()

# To HTML
html = ds.to_html()

# To LaTeX
latex = ds.to_latex()

# To clipboard
ds.to_clipboard()

# To pickle
ds.to_pickle("output.pkl")

# To feather
ds.to_feather("output.feather")

File Format Comparison

Format    Read Speed   Write Speed   File Size   Schema    Best For
Parquet   Fast         Fast          Small       Yes       Large datasets, analytics
CSV       Medium       Fast          Large       No        Compatibility, simple data
JSON      Slow         Medium        Large       Partial   APIs, nested data
Excel     Slow         Slow          Medium      Partial   Sharing with non-tech users
Feather   Very Fast    Very Fast     Medium      Yes       Inter-process, pandas
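
To see how these trade-offs play out on your own data, a quick sketch such as the following writes the same DataStore in several formats and compares file sizes (file names are placeholders; actual sizes depend entirely on the data):

import os

ds = pd.read_csv("data.csv")

# Write the same data in three formats
ds.to_csv("sample.csv", index=False)
ds.to_parquet("sample.parquet", compression='zstd')
ds.to_json("sample.json", orient='records')

for path in ["sample.csv", "sample.parquet", "sample.json"]:
    print(path, os.path.getsize(path), "bytes")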

Recommendations

  1. For analytics workloads: Use Parquet
     • Columnar format allows reading only needed columns
     • Excellent compression
     • Preserves data types

  2. For data exchange: Use CSV or JSON
     • Universal compatibility
     • Human-readable

  3. For pandas interop: Use Feather or Arrow (see the sketch after this list)
     • Fastest serialization
     • Type preservation
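
A minimal pandas-interop sketch, using export methods shown earlier on this page (the file name is a placeholder):

# Hand the data to pandas directly...
df = ds.to_pandas()

# ...or go through Feather on disk for a fast, typed round trip
ds.to_feather("exchange.feather")
ds2 = pd.read_feather("exchange.feather")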

Compression Support

Reading Compressed Files

# Auto-detect from extension
ds = pd.read_csv("data.csv.gz")
ds = pd.read_csv("data.csv.bz2")
ds = pd.read_csv("data.csv.xz")
ds = pd.read_csv("data.csv.zst")

# Explicit compression
ds = pd.read_csv("data.csv", compression='gzip')

Writing Compressed Files

# CSV with compression
ds.to_csv("output.csv.gz", compression='gzip')
ds.to_csv("output.csv.bz2", compression='bz2')

# Parquet (always compressed)
ds.to_parquet("output.parquet", compression='snappy')  # Default
ds.to_parquet("output.parquet", compression='gzip')
ds.to_parquet("output.parquet", compression='zstd')    # Best ratio
ds.to_parquet("output.parquet", compression='lz4')     # Fastest

Compression Options

Compression   Speed       Ratio       Use Case
snappy        Very Fast   Low         Default for Parquet
lz4           Very Fast   Low         Speed priority
gzip          Medium      High        Compatibility
zstd          Fast        Very High   Best balance
bz2           Slow        Very High   Maximum compression

Streaming I/O

For very large files that don't fit in memory:

Chunked Reading

# Read in chunks
for chunk in pd.read_csv("large.csv", chunksize=100000):
    # Process each chunk
    process(chunk)

# Using iterator
reader = pd.read_csv("large.csv", iterator=True)
chunk = reader.get_chunk(10000)
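
For example, a running aggregate can be computed without ever holding the whole file in memory (the 'amount' column name is an illustrative assumption):

total = 0
for chunk in pd.read_csv("large.csv", chunksize=100_000):
    # Each chunk is reduced and discarded before the next one is read
    total += chunk['amount'].sum()
print(total)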

Using ClickHouse Streaming

from chdb.datastore import DataStore

# Stream from file without loading all into memory
ds = DataStore.from_file("huge.parquet")

# Operations are lazy - only computes what's needed
result = ds.filter(ds['amount'] > 1000).head(100)

Remote Data Sources

HTTP/HTTPS

# Read from URL
ds = pd.read_csv("https://example.com/data.csv")
ds = pd.read_parquet("https://example.com/data.parquet")

S3

from chdb.datastore import DataStore

# Anonymous access
ds = DataStore.uri("s3://bucket/data.parquet?nosign=true")

# With credentials
ds = DataStore.from_s3(
    "s3://bucket/data.parquet",
    access_key_id="KEY",
    secret_access_key="SECRET"
)

GCS, Azure, HDFS

See Factory Methods for cloud storage options.


Best Practices

1. Use Parquet for Large Files

# Convert CSV to Parquet for better performance
ds = pd.read_csv("large.csv")
ds.to_parquet("large.parquet")

# Future reads are much faster
ds = pd.read_parquet("large.parquet")

2. Select Only Needed Columns

# Efficient - only reads col1 and col2
ds = pd.read_parquet("data.parquet", columns=['col1', 'col2'])

# Inefficient - reads all columns then filters
ds = pd.read_parquet("data.parquet")[['col1', 'col2']]

3. Use Compression

# Smaller file size, usually faster due to less I/O
ds.to_parquet("output.parquet", compression='zstd')

4. Batch Writes

# Write once, not in a loop
result = process_all_data(ds)
result.to_parquet("output.parquet")

# NOT this (inefficient)
for i, chunk in enumerate(chunks):
    chunk.to_parquet(f"output_{i}.parquet")