feat: build merkl airdrop csv based on user pool shares #1575

Draft · wants to merge 3 commits into main
99 changes: 99 additions & 0 deletions tools/python/gen_morpho_airdrop.py
@@ -0,0 +1,99 @@
import json

import numpy as np
import pandas as pd

from bal_tools import Subgraph


SUBGRAPH = Subgraph()
MORPHO = "0x58D97B57BB95320F9a05dC918Aef65434969c2B2"


def get_user_shares(pool, block):
query = """query PoolShares($where: PoolShare_filter, $block: Block_height) {
poolShares(where: $where, block: $block) {
user {
id
}
balance
}
}"""
params = {
"where": {
"balance_gt": 0.001,
"pool": pool,
},
"block": {"number": block},
}
raw = SUBGRAPH.fetch_graphql_data(
"subgraphs-v3",
query,
params,
url="https://api.studio.thegraph.com/query/75376/balancer-v3/version/latest",
)
return dict([(x["user"]["id"], x["balance"]) for x in raw["poolShares"]])


def get_block_from_timestamp(ts):
query = """query GetBlockFromTimestamp($where: Block_filter) {
blocks(orderBy: "number", orderDirection: "desc", where: $where) {
number
timestamp
}
}"""
params = {"where": {"timestamp_lte": ts}}
raw = SUBGRAPH.fetch_graphql_data(
"blocks",
query,
params,
url="https://api.studio.thegraph.com/query/48427/ethereum-blocks/version/latest",
)
return int(raw["blocks"][0]["number"])


def build_snapshot_df(
    pool,  # pool address
    end,  # timestamp of the last snapshot
    n=7,  # number of snapshots
    step_size=60 * 60 * 24,  # number of seconds between snapshots
):
    shares = {}
    for _ in range(n):
        block = get_block_from_timestamp(end)
        shares[block] = get_user_shares(pool=pool, block=block)
        end -= step_size
    return pd.DataFrame(shares, dtype=float).fillna(0)


def consolidate_shares(df):
    consolidated = pd.DataFrame()
    for block in df.columns:
        # calculate the percentage of the pool each user owns
        consolidated[block] = df[block] / df[block].sum()
        # weigh it by the total pool size of that block
        consolidated[block] *= df.sum()[block]
    # sum the weighted percentages per user
    consolidated["total"] = consolidated.sum(axis=1)
    # divide the weighted percentages by the sum of all weights
    consolidated["total"] = consolidated["total"] / df.sum().sum()
    return consolidated
Comment on lines +69 to +80
Collaborator Author

@Xeonus or @jalbrekt85 it would be good if one of you has time to draft-review this function; it is at the core of this whole feature/PR.

If we agree on this piece of logic, then my next step will be to add multi-pool support.
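
For illustration only (not part of this PR): a minimal sketch of one way multi-pool support could look, reusing build_snapshot_df from the diff above. It naively sums raw BPT balances per user across pools, which assumes BPT from different pools are comparable units; an actual implementation would likely weight each pool separately.

def build_multi_pool_snapshot_df(pools, end, n=7, step_size=60 * 60 * 24):
    # build one snapshot frame per pool; the frames share block columns
    # because they are derived from the same timestamps
    frames = [
        build_snapshot_df(pool=pool, end=end, n=n, step_size=step_size)
        for pool in pools
    ]
    # stack the frames and sum balances per user per block
    return pd.concat(frames).groupby(level=0).sum()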

Collaborator


Mathematically, this looks correct to me.
I see 2 edge cases:

  1. it might end up with a division by zero if df[block].sum() is 0
  2. accumulation of floating point precision issues for small balances, but that is a general limitation of data fetching

I would rewrite it to something like this so you don't repeat the same block-sum operation:

def consolidate_shares(df):
    if df.empty:
        raise ValueError("Empty dataframe provided")
    
    # Pre-calculate sums to avoid redundant computation
    block_sums = df.sum()
    total_sum = block_sums.sum()
    
    if total_sum == 0:
        raise ValueError("No shares found in any block")
    
    consolidated = pd.DataFrame()
    for block in df.columns:
        block_sum = block_sums[block]
        if block_sum == 0:
            continue
        
        # Calculate weighted ownership for this block
        consolidated[block] = (df[block] / block_sum) * block_sum
    
    # Calculate final weighted average
    consolidated["total"] = consolidated.sum(axis=1) / total_sum
    return consolidated
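
An aside on the logic under review, not a requested change: since df.sum()[block] equals df[block].sum(), the normalize-then-reweight steps cancel algebraically, so the final weighting reduces to each user's summed balance across all snapshots divided by the grand total:

# algebraically equivalent to consolidate_shares(df)["total"]
# (up to floating point rounding in the cancelled divide/multiply)
totals = df.sum(axis=1) / df.sum().sum()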



def build_airdrop(reward_token, reward_total_wei, df):
    # https://docs.merkl.xyz/merkl-mechanisms/types-of-campaign/airdrop
    df["wei"] = df["total"] * reward_total_wei
    df["wei"] = df["wei"].apply(np.floor).astype(int).astype(str)
    return {"rewardToken": reward_token, "rewards": df[["wei"]].to_dict(orient="index")}


if __name__ == "__main__":
    # get bpt balances for a pool at different timestamps
    df = build_snapshot_df(
        pool="0x89bb794097234e5e930446c0cec0ea66b35d7570", end=1734393600
    )
    # consolidate user pool shares
    df = consolidate_shares(df)
    # build airdrop object and dump to json file
    airdrop = build_airdrop(reward_token=MORPHO, reward_total_wei=1e18, df=df)
    json.dump(airdrop, open("airdrop.json", "w"), indent=2)
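
For context, the airdrop.json written by this script then takes roughly the shape below. The addresses and amounts are made up for illustration; the structure follows directly from build_airdrop and json.dump(..., indent=2).

{
  "rewardToken": "0x58D97B57BB95320F9a05dC918Aef65434969c2B2",
  "rewards": {
    "0xuser1...": {
      "wei": "250000000000000000"
    },
    "0xuser2...": {
      "wei": "750000000000000000"
    }
  }
}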
2 changes: 1 addition & 1 deletion tools/python/requirements.txt
@@ -5,7 +5,7 @@ pandas
 tabulate
 requests
 web3
-git+https://github.com/BalancerMaxis/bal_addresses@main
+git+https://github.com/BalancerMaxis/bal_addresses@dev/use-bal_tools-dev-branch
 dune-client
 pytest
 dataclasses-json