diff --git a/src/fetch/buffer_accounting.py b/src/fetch/buffer_accounting.py new file mode 100644 index 00000000..772cb113 --- /dev/null +++ b/src/fetch/buffer_accounting.py @@ -0,0 +1,39 @@ +"""Functionality for buffer accounting.""" + +from pandas import DataFrame + +BATCH_DATA_COLUMNS = ["solver", "network_fee_eth"] +SLIPPAGE_COLUMNS = [ + "solver", + "eth_slippage_wei", +] + +BUFFER_ACCOUNTING_COLUMNS = [ + "solver", + "network_fee_eth", + "slippage_eth", +] + + +def compute_buffer_accounting( + batch_data: DataFrame, slippage_data: DataFrame +) -> DataFrame: + """Compute buffer accounting per solver""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + assert set(SLIPPAGE_COLUMNS).issubset(set(slippage_data.columns)) + + buffer_accounting = batch_data[BATCH_DATA_COLUMNS].merge( + slippage_data[SLIPPAGE_COLUMNS], how="outer", on="solver", validate="one_to_one" + ) + buffer_accounting = buffer_accounting.rename( + columns={"eth_slippage_wei": "slippage_eth"} + ) + + # change all types to object to use native python types + buffer_accounting = buffer_accounting.astype(object) + + assert set(buffer_accounting.columns) == set(BUFFER_ACCOUNTING_COLUMNS) + + return buffer_accounting diff --git a/src/fetch/partner_fees.py b/src/fetch/partner_fees.py new file mode 100644 index 00000000..11954c9d --- /dev/null +++ b/src/fetch/partner_fees.py @@ -0,0 +1,62 @@ +"""Functionality for partner fees.""" + +from collections import defaultdict + +import numpy as np +import pandas as pd +from pandas import DataFrame + +from src.config import ProtocolFeeConfig + +BATCH_DATA_COLUMNS = ["partner_list", "partner_fee_eth"] + +PARTNER_FEES_COLUMNS = ["partner", "partner_fee_eth", "partner_fee_tax"] + + +def compute_partner_fees(batch_data: DataFrame, config: ProtocolFeeConfig) -> DataFrame: + """Compute partner fees per integrator""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + + partner_fee_lists = batch_data[BATCH_DATA_COLUMNS] + + partner_fees = compute_partner_fees_per_partner(partner_fee_lists, config) + + return partner_fees + + +def compute_partner_fees_per_partner( + partner_fee_lists: DataFrame, config: ProtocolFeeConfig +) -> DataFrame: + """Aggregate fees from different solvers""" + + partner_fees_dict: defaultdict[str, int] = defaultdict(int) + for _, row in partner_fee_lists.iterrows(): + if row["partner_list"] is None: + continue + + # We assume the two lists used below, i.e., + # partner_list and partner_fee_eth, + # are "aligned". 
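To illustrate the alignment assumption stated in the comment above, a minimal sketch of the aggregation into a `defaultdict`, using made-up rows (the addresses and fee values are toy data, not real batch output):

```python
from collections import defaultdict

import pandas as pd

# Toy rows: partner_list[i] is credited with partner_fee_eth[i] wei.
toy_rows = pd.DataFrame(
    {
        "partner_list": [["0xaaa", "0xbbb"], ["0xaaa"], None],
        "partner_fee_eth": [[10**15, 2 * 10**15], [5 * 10**14], None],
    }
)

fees: defaultdict[str, int] = defaultdict(int)
for _, row in toy_rows.iterrows():
    if row["partner_list"] is None:
        continue  # batches without partner fees are skipped
    for partner, fee in zip(row["partner_list"], row["partner_fee_eth"]):
        fees[partner] += int(fee)

assert fees == {"0xaaa": 15 * 10**14, "0xbbb": 2 * 10**15}
```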
+ + for partner, partner_fee in zip(row["partner_list"], row["partner_fee_eth"]): + partner_fees_dict[partner] += int(partner_fee) + + partner_fees_df = pd.DataFrame( + list(partner_fees_dict.items()), + columns=["partner", "partner_fee_eth"], + ) + + partner_fees_df["partner_fee_tax"] = np.where( + partner_fees_df["partner"] == config.reduced_cut_address, + config.partner_fee_reduced_cut, + config.partner_fee_cut, + ) + + # change all types to object to use native python types + partner_fees_df = partner_fees_df.astype(object) + + assert set(partner_fees_df.columns) == set(PARTNER_FEES_COLUMNS) + + return partner_fees_df diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 4c44d01f..69551931 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -2,13 +2,11 @@ from __future__ import annotations -import math from dataclasses import dataclass -from datetime import timedelta +from datetime import datetime, timedelta from fractions import Fraction -from typing import Callable +from functools import reduce -import numpy as np import pandas from dune_client.types import Address from pandas import DataFrame, Series @@ -16,6 +14,14 @@ from src.config import AccountingConfig from src.fetch.dune import DuneFetcher from src.fetch.prices import exchange_rate_atoms +from src.fetch.solver_info import SOLVER_INFO_COLUMNS, compute_solver_info +from src.fetch.rewards import REWARDS_COLUMNS, compute_rewards +from src.fetch.protocol_fees import PROTOCOL_FEES_COLUMNS, compute_protocol_fees +from src.fetch.partner_fees import compute_partner_fees +from src.fetch.buffer_accounting import ( + BUFFER_ACCOUNTING_COLUMNS, + compute_buffer_accounting, +) from src.logger import log_saver, set_log from src.models.accounting_period import AccountingPeriod from src.models.overdraft import Overdraft @@ -26,34 +32,16 @@ log = set_log(__name__) -PAYMENT_COLUMNS = { +SOLVER_PAYOUTS_COLUMNS = [ "solver", "primary_reward_eth", "primary_reward_cow", "quote_reward_cow", "protocol_fee_eth", "network_fee_eth", -} -SLIPPAGE_COLUMNS = { - "solver", - "solver_name", - "eth_slippage_wei", -} -REWARD_TARGET_COLUMNS = {"solver", "reward_target", "pool_address"} -SERVICE_FEE_COLUMNS = {"solver", "service_fee"} -ADDITIONAL_PAYMENT_COLUMNS = {"buffer_accounting_target", "reward_token_address"} - -COMPLETE_COLUMNS = ( - PAYMENT_COLUMNS.union(SLIPPAGE_COLUMNS) - .union(REWARD_TARGET_COLUMNS) - .union(ADDITIONAL_PAYMENT_COLUMNS) -) -NUMERICAL_COLUMNS = [ - "primary_reward_eth", - "primary_reward_cow", - "quote_reward_cow", - "protocol_fee_eth", + "slippage_eth", ] +PARTNER_PAYOUTS_COLUMNS = ["partner", "partner_fee_eth", "partner_fee_tax"] @dataclass @@ -100,11 +88,7 @@ def __init__( # pylint: disable=too-many-arguments @classmethod def from_series(cls, frame: Series) -> RewardAndPenaltyDatum: """Constructor from row in Dataframe""" - slippage = ( - int(frame["eth_slippage_wei"]) - if not math.isnan(frame["eth_slippage_wei"]) - else 0 - ) + slippage = int(frame["slippage_eth"]) + int(frame["network_fee_eth"]) solver = frame["solver"] reward_target = frame["reward_target"] if pandas.isna(reward_target): @@ -261,63 +245,20 @@ def as_payouts(self) -> list[Transfer]: return result -@dataclass -class TokenConversion: - """ - Data Structure containing token conversion methods. 
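The removed `TokenConversion` wrapper is effectively replaced by multiplying wei amounts with a `Fraction` exchange rate directly. A small sketch of that conversion, assuming an illustrative rate (the number below is made up, not a real COW price):

```python
from fractions import Fraction

# Assumed, illustrative rate: COW atoms per native-token atom.
exchange_rate_native_to_cow = Fraction(12345, 1000)

primary_reward_eth = 3 * 10**17  # 0.3 native token, in wei
primary_reward_cow = int(primary_reward_eth * exchange_rate_native_to_cow)

# Roughly what the old converter.eth_to_token(primary_reward_eth) produced,
# up to rounding to an integer atom amount.
print(f"{primary_reward_cow / 10**18:.4f} COW")
```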
- """ - - eth_to_token: Callable - - -def extend_payment_df( - pdf: DataFrame, converter: TokenConversion, config: AccountingConfig -) -> DataFrame: - """ - Extending the basic columns returned by SQL Query with some after-math: - - reward_eth as difference of payment and execution_cost - - reward_cow as conversion from ETH to cow. - """ - # Note that this can be negative! - pdf["primary_reward_cow"] = pdf["primary_reward_eth"].apply(converter.eth_to_token) - - # Pandas has poor support for large integers, must cast the constant to float here, - # otherwise the dtype would be inferred as int64 (which overflows). - - reward_per_quote = float( - min( - config.reward_config.quote_reward_cow, - converter.eth_to_token(config.reward_config.quote_reward_cap_native), - ) - ) - - log.info(f"A reward of {reward_per_quote / 10**18:.4f} COW per quote is used.") - pdf["quote_reward_cow"] = reward_per_quote * pdf["num_quotes"] - - for number_col in NUMERICAL_COLUMNS: - pdf[number_col] = pandas.to_numeric(pdf[number_col]) - - return pdf - - -def prepare_transfers( # pylint: disable=too-many-arguments - payout_df: DataFrame, +def prepare_transfers( # pylint: disable=too-many-locals + solver_payouts: DataFrame, + partner_payouts: DataFrame, period: AccountingPeriod, - final_protocol_fee_wei: int, - partner_fee_tax_wei: int, - partner_fees_wei: dict[str, int], config: AccountingConfig, ) -> PeriodPayouts: - """ - Manipulates the payout DataFrame to split into ETH and COW. - Specifically, We deduct total_rewards by total_execution_cost (both initially in ETH) - keep the execution cost in ETH and convert the difference to COW. - """ - assert COMPLETE_COLUMNS.issubset(set(payout_df.columns)) + """Create transfers from payout data.""" + + assert set(SOLVER_PAYOUTS_COLUMNS).issubset(set(solver_payouts.columns)) + assert set(PARTNER_PAYOUTS_COLUMNS).issubset(set(partner_payouts.columns)) overdrafts: list[Overdraft] = [] transfers: list[Transfer] = [] - for _, payment in payout_df.iterrows(): + for _, payment in solver_payouts.iterrows(): payout_datum = RewardAndPenaltyDatum.from_series(payment) if payout_datum.is_overdraft(): overdraft = Overdraft( @@ -330,61 +271,87 @@ def prepare_transfers( # pylint: disable=too-many-arguments overdrafts.append(overdraft) transfers += payout_datum.as_payouts() - if final_protocol_fee_wei > 0: + total_protocol_fee = int(solver_payouts["protocol_fee_eth"].sum()) + total_partner_fee = int(partner_payouts["partner_fee_eth"].sum()) + total_partner_fee_taxed = sum( + int(row["partner_fee_eth"] * (1 - row["partner_fee_tax"])) + for _, row in partner_payouts.iterrows() + ) + total_partner_fee_tax = total_partner_fee - total_partner_fee_taxed + + net_protocol_fee = total_protocol_fee - total_partner_fee + + if net_protocol_fee > 0: transfers.append( Transfer( token=None, recipient=config.protocol_fee_config.protocol_fee_safe, - amount_wei=final_protocol_fee_wei, + amount_wei=net_protocol_fee, ) ) - if partner_fee_tax_wei > 0: + if total_partner_fee_tax > 0: transfers.append( Transfer( token=None, recipient=config.protocol_fee_config.protocol_fee_safe, - amount_wei=partner_fee_tax_wei, + amount_wei=total_partner_fee_tax, ) ) - for address in partner_fees_wei: - amount_wei = partner_fees_wei[address] - assert amount_wei >= 0, f"Can't construct negative transfer of {amount_wei}" - if amount_wei > 0: + for _, row in partner_payouts.iterrows(): + partner = row["partner"] + partner_fee = int(row["partner_fee_eth"] * (1 - row["partner_fee_tax"])) + assert partner_fee >= 0, f"Can't construct 
negative transfer of {partner_fee}" + if partner_fee > 0: transfers.append( Transfer( token=None, - recipient=Address(address), - amount_wei=amount_wei, + recipient=Address(partner), + amount_wei=partner_fee, ) ) return PeriodPayouts(overdrafts, transfers) +def fetch_exchange_rates( + period_end: datetime, config: AccountingConfig +) -> tuple[Fraction, Fraction]: + """Fetch exchange rate for converting the native token to COW.""" + reward_token = config.reward_config.reward_token_address + native_token = Address(config.payment_config.wrapped_native_token_address) + wrapped_eth = config.payment_config.wrapped_eth_address + price_day = period_end - timedelta(days=1) + exchange_rate_native_to_cow = exchange_rate_atoms( + native_token, reward_token, price_day + ) + exchange_rate_native_to_eth = exchange_rate_atoms( + native_token, wrapped_eth, price_day + ) + return exchange_rate_native_to_cow, exchange_rate_native_to_eth + + def validate_df_columns( - payment_df: DataFrame, - slippage_df: DataFrame, - reward_target_df: DataFrame, - service_fee_df: DataFrame, + solver_info: DataFrame, + rewards: DataFrame, + protocol_fees: DataFrame, + buffer_accounting: DataFrame, ) -> None: - """ + """Validate data frame columns. Since we are working with dataframes rather than concrete objects, we validate that the expected columns/fields are available within our datasets. - While it is ok for the input data to contain more columns, - this method merely validates that the expected ones are there. """ - assert PAYMENT_COLUMNS.issubset( - set(payment_df.columns) - ), f"Payment validation failed with columns: {set(payment_df.columns)}" - assert SLIPPAGE_COLUMNS.issubset( - set(slippage_df.columns) - ), f"Slippage validation Failed with columns: {set(slippage_df.columns)}" - assert REWARD_TARGET_COLUMNS.issubset( - set(reward_target_df.columns) - ), f"Reward Target validation Failed with columns: {set(reward_target_df.columns)}" - assert SERVICE_FEE_COLUMNS.issubset( - set(service_fee_df.columns) - ), f"Service Fee validation Failed with columns: {set(service_fee_df.columns)}" + assert set(solver_info.columns) == set( + SOLVER_INFO_COLUMNS + ), f"Solver info validation failed with columns: {set(solver_info.columns)}" + assert set(rewards.columns) == set( + REWARDS_COLUMNS + ), f"Rewards validation failed with columns: {set(rewards.columns)}" + assert set(protocol_fees.columns) == set( + PROTOCOL_FEES_COLUMNS + ), f"Protocol fee validation failed with columns: {set(protocol_fees.columns)}" + assert set(buffer_accounting.columns) == set( + BUFFER_ACCOUNTING_COLUMNS + ), f"Buffer accounting validation failed with columns: {set(buffer_accounting.columns)}" def normalize_address_field(frame: DataFrame, column_name: str) -> None: @@ -392,95 +359,81 @@ def normalize_address_field(frame: DataFrame, column_name: str) -> None: frame[column_name] = frame[column_name].str.lower() -def construct_payout_dataframe( - payment_df: DataFrame, - slippage_df: DataFrame, - reward_target_df: DataFrame, - service_fee_df: DataFrame, - config: AccountingConfig, +def compute_solver_payouts( + solver_info: DataFrame, + rewards: DataFrame, + protocol_fees: DataFrame, + buffer_accounting: DataFrame, ) -> DataFrame: - """ - Method responsible for joining datasets related to payouts. - Namely, reward targets and slippage (coming from Dune) - with reward and execution data coming from orderbook. - """ - # 1. Assert existence of required columns. 
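A worked example of the fee split computed in `prepare_transfers` above, with made-up totals; the 0.25 and 0.5 tax rates are placeholders chosen so the arithmetic is exact, not the configured partner fee cuts:

```python
# Toy per-partner rows: fee collected (wei) and the tax rate applied to it.
partner_rows = [
    {"partner_fee_eth": 10**18, "partner_fee_tax": 0.25},
    {"partner_fee_eth": 4 * 10**17, "partner_fee_tax": 0.5},
]
total_protocol_fee = 2 * 10**18  # assumed total protocol fee for the period

total_partner_fee = sum(int(r["partner_fee_eth"]) for r in partner_rows)
total_partner_fee_taxed = sum(
    int(r["partner_fee_eth"] * (1 - r["partner_fee_tax"])) for r in partner_rows
)
total_partner_fee_tax = total_partner_fee - total_partner_fee_taxed
net_protocol_fee = total_protocol_fee - total_partner_fee

# Partners receive the taxed amounts; the protocol fee safe receives
# net_protocol_fee plus the partner fee tax.
assert total_partner_fee == 14 * 10**17
assert total_partner_fee_taxed == 95 * 10**16
assert total_partner_fee_tax == 45 * 10**16
assert net_protocol_fee == 6 * 10**17
```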
- validate_df_columns(payment_df, slippage_df, reward_target_df, service_fee_df) + """Combines solver accounting data into one payment dataframe.""" + # 1. Validate data + validate_df_columns(solver_info, rewards, protocol_fees, buffer_accounting) # 2. Normalize Join Column (and Ethereum Address Field) join_column = "solver" - normalize_address_field(payment_df, join_column) - normalize_address_field(slippage_df, join_column) - normalize_address_field(reward_target_df, join_column) - normalize_address_field(service_fee_df, join_column) - - # 3. Merge the three dataframes (joining on solver) - merged_df = ( - payment_df.merge(slippage_df, on=join_column, how="left") - .merge(reward_target_df, on=join_column, how="left") - .merge(service_fee_df, on=join_column, how="left") - ) - - # 4. Add slippage from fees to slippage - merged_df["eth_slippage_wei"] = ( - merged_df["eth_slippage_wei"].fillna(0) + merged_df["network_fee_eth"] - ) + normalize_address_field(solver_info, join_column) + normalize_address_field(rewards, join_column) + normalize_address_field(protocol_fees, join_column) + normalize_address_field(buffer_accounting, join_column) + + # 3. Merge data + solver_payouts = reduce( + lambda left, right: left.merge( + right, + how="outer", + on="solver", + validate="one_to_one", + sort=True, + ), + [rewards, protocol_fees, buffer_accounting], + ).merge(solver_info, how="left", on="solver") - # 5. Compute buffer accounting target - merged_df["buffer_accounting_target"] = np.where( - merged_df["pool_address"] != config.reward_config.cow_bonding_pool.address, - merged_df["solver"], - merged_df["reward_target"], + # 4. Set default values + solver_payouts["primary_reward_eth"] = solver_payouts["primary_reward_eth"].fillna( + 0 ) - - # 6. Add reward token address - merged_df["reward_token_address"] = ( - config.reward_config.reward_token_address.address + solver_payouts["slippage_eth"] = solver_payouts["slippage_eth"].fillna(0) + solver_payouts["protocol_fee_eth"] = solver_payouts["protocol_fee_eth"].fillna(0) + solver_payouts["network_fee_eth"] = solver_payouts["network_fee_eth"].fillna(0) + solver_payouts["service_fee"] = solver_payouts["service_fee"].fillna( + Fraction(0, 1) # type: ignore ) - merged_df["service_fee"] = merged_df["service_fee"].fillna(Fraction(0, 1)) # type: ignore - - return merged_df + return solver_payouts -def construct_partner_fee_payments( - partner_fees_df: DataFrame, config: AccountingConfig -) -> tuple[dict[str, int], int]: - """Compute actual partner fee payments taking partner fee tax into account - The result is a tuple. The first entry is a dictionary that contains the destination address of - a partner as a key, and the value is the amount in wei to be transferred to that address, stored - as an int. The second entry is the total amount of partner fees charged. - """ - - partner_fees_wei: dict[str, int] = {} - for _, row in partner_fees_df.iterrows(): - if row["partner_list"] is None: - continue - - # We assume the two lists used below, i.e., - # partner_list and partner_fee_eth, - # are "aligned". 
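A minimal sketch of the `reduce`-based outer merge used in `compute_solver_payouts` above, with toy frames that carry only a subset of the real columns:

```python
from functools import reduce

import pandas as pd

rewards = pd.DataFrame({"solver": ["0xa", "0xb"], "primary_reward_eth": [100, 200]})
protocol_fees = pd.DataFrame({"solver": ["0xb"], "protocol_fee_eth": [5]})
buffer_accounting = pd.DataFrame({"solver": ["0xa", "0xc"], "slippage_eth": [-3, 7]})

merged = reduce(
    lambda left, right: left.merge(
        right, how="outer", on="solver", validate="one_to_one", sort=True
    ),
    [rewards, protocol_fees, buffer_accounting],
)

# Solvers missing from one of the inputs show up with NaN, which the caller
# then fills with zeros (or Fraction(0, 1) for the service fee).
merged["primary_reward_eth"] = merged["primary_reward_eth"].fillna(0)
merged["protocol_fee_eth"] = merged["protocol_fee_eth"].fillna(0)
merged["slippage_eth"] = merged["slippage_eth"].fillna(0)
print(merged)
```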
- - for i in range(len(row["partner_list"])): - address = row["partner_list"][i] - if address in partner_fees_wei: - partner_fees_wei[address] += int(row["partner_fee_eth"][i]) - else: - partner_fees_wei[address] = int(row["partner_fee_eth"][i]) - total_partner_fee_wei_untaxed = 0 - total_partner_fee_wei_taxed = 0 - for address, value in partner_fees_wei.items(): - total_partner_fee_wei_untaxed += value - if address == config.protocol_fee_config.reduced_cut_address: - reduction_factor = 1 - config.protocol_fee_config.partner_fee_reduced_cut - partner_fees_wei[address] = int(reduction_factor * value) - total_partner_fee_wei_taxed += int(reduction_factor * value) - else: - reduction_factor = 1 - config.protocol_fee_config.partner_fee_cut - partner_fees_wei[address] = int(reduction_factor * value) - total_partner_fee_wei_taxed += int(reduction_factor * value) - - return partner_fees_wei, total_partner_fee_wei_untaxed +def summarize_payments( + solver_payouts: DataFrame, + partner_payouts: DataFrame, + exchange_rate_native_to_cow: Fraction, + exchange_rate_native_to_eth: Fraction, +) -> None: + """Summarize payments.""" + performance_reward = solver_payouts["primary_reward_cow"].sum() + quote_reward = solver_payouts["quote_reward_cow"].sum() + protocol_fee = solver_payouts["protocol_fee_eth"].sum() + service_fee = sum( + solver_payouts["service_fee"] + * (solver_payouts["primary_reward_cow"] + solver_payouts["quote_reward_cow"]) + ) + partner_fee = partner_payouts["partner_fee_eth"].sum() + partner_fee_taxed = sum( + row["partner_fee_eth"] * (1 - row["partner_fee_tax"]) + for _, row in partner_payouts.iterrows() + ) + partner_fee_tax = partner_fee - partner_fee_taxed + + print( + "Payment breakdown:\n" + f"Performance Reward (before fee): {performance_reward / 10 ** 18:.4f}\n" + f"Quote Reward (before fee): {quote_reward / 10 ** 18:.4f}\n" + f"COW DAO Service Fees: {service_fee / 10 ** 18:.4f}\n", + f"Protocol Fees (before partner fees): {protocol_fee / 10 ** 18:.4f}\n" + f"Partner Fees (before tax): {partner_fee / 10 ** 18:.4f}\n" + f"Partner Fees Tax: {partner_fee_tax / 10 ** 18:.4f}\n\n" + f"Exchange rate native token to COW: {exchange_rate_native_to_cow:.4f} COW/native token\n" + f"Exchange rate native token to ETH: {exchange_rate_native_to_eth:.4f} ETH/native token\n", + ) def construct_payouts( @@ -492,29 +445,16 @@ def construct_payouts( """Workflow of solver reward payout logic post-CIP27""" # pylint: disable-msg=too-many-locals + # fetch data + # TODO: move data fetching into respective files for quote_rewards_df = orderbook.get_quote_rewards(dune.start_block, dune.end_block) - batch_rewards_df = orderbook.get_solver_rewards( + batch_data = orderbook.get_solver_rewards( dune.start_block, dune.end_block, config.reward_config.batch_reward_cap_upper, config.reward_config.batch_reward_cap_lower, ) - partner_fees_df = batch_rewards_df[["partner_list", "partner_fee_eth"]] - batch_rewards_df = batch_rewards_df.drop( - ["partner_list", "partner_fee_eth"], axis=1 - ) - - assert batch_rewards_df["solver"].is_unique, "solver not unique in batch rewards" - assert quote_rewards_df["solver"].is_unique, "solver not unique in quote rewards" - merged_df = pandas.merge( - quote_rewards_df, batch_rewards_df, on="solver", how="outer" - ).fillna(0) - service_fee_df = pandas.DataFrame(dune.get_service_fee_status()) - service_fee_df["service_fee"] = [ - service_fee_flag * config.reward_config.service_fee_factor - for service_fee_flag in service_fee_df["service_fee"] - ] vouches = dune.get_vouches() if 
vouches: @@ -523,90 +463,53 @@ def construct_payouts( reward_target_df = DataFrame( columns=["solver", "solver_name", "reward_target", "pool_address"] ) - # construct slippage df - if ignore_slippage_flag or (not config.buffer_accounting_config.include_slippage): - slippage_df_temp = pandas.merge( - merged_df[["solver"]], - reward_target_df[["solver", "solver_name"]], - on="solver", - how="inner", - ) - slippage_df = slippage_df_temp.assign( - eth_slippage_wei=[0] * slippage_df_temp.shape[0] - ) - else: + # fetch slippage only if configured to do so + # otherwise set to an empty dataframe + if config.buffer_accounting_config.include_slippage and not ignore_slippage_flag: slippage_df = pandas.DataFrame(dune.get_period_slippage()) # TODO - After CIP-20 phased in, adapt query to return `solver` like all the others slippage_df = slippage_df.rename(columns={"solver_address": "solver"}) + else: + slippage_df = DataFrame(columns=["solver", "eth_slippage_wei"]) - reward_token = config.reward_config.reward_token_address - native_token = Address(config.payment_config.wrapped_native_token_address) - wrapped_eth = config.payment_config.wrapped_eth_address - price_day = dune.period.end - timedelta(days=1) - exchange_rate_native_to_cow = exchange_rate_atoms( - native_token, reward_token, price_day + # fetch conversion price + exchange_rate_native_to_cow, exchange_rate_native_to_eth = fetch_exchange_rates( + dune.period.end, config ) - exchange_rate_native_to_eth = exchange_rate_atoms( - native_token, wrapped_eth, price_day + + # compute individual components of payments + solver_info = compute_solver_info( + reward_target_df, + service_fee_df, + config, ) - converter = TokenConversion( - eth_to_token=lambda t: exchange_rate_native_to_cow * t, + rewards = compute_rewards( + batch_data, + quote_rewards_df, + exchange_rate_native_to_cow, + config.reward_config, ) - - complete_payout_df = construct_payout_dataframe( - # Fetch and extend auction data from orderbook. - payment_df=extend_payment_df( - pdf=merged_df, - # provide token conversion functions (ETH <--> COW) - converter=converter, - config=config, - ), - # Dune: Fetch Solver Slippage & Reward Targets - slippage_df=slippage_df, - reward_target_df=reward_target_df, - service_fee_df=service_fee_df, - config=config, + protocol_fees = compute_protocol_fees( + batch_data, ) - # Sort by solver before breaking this data frame into Transfer objects. 
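When slippage is ignored, `slippage_df` is just an empty frame with the expected columns. A small sketch of how the outer merge in `compute_buffer_accounting` then behaves, using toy batch data with only the relevant columns:

```python
import pandas as pd

batch_data = pd.DataFrame({"solver": ["0xa", "0xb"], "network_fee_eth": [10, 20]})
slippage_df = pd.DataFrame(columns=["solver", "eth_slippage_wei"])  # slippage ignored

buffer_accounting = batch_data[["solver", "network_fee_eth"]].merge(
    slippage_df, how="outer", on="solver", validate="one_to_one"
)
buffer_accounting = buffer_accounting.rename(
    columns={"eth_slippage_wei": "slippage_eth"}
)

# Every solver keeps its network fee; the slippage column is all NaN and is
# only filled with 0 later, in compute_solver_payouts.
print(buffer_accounting)
```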
- complete_payout_df = complete_payout_df.sort_values("solver") + buffer_accounting = compute_buffer_accounting(batch_data, slippage_df) - # compute partner fees - partner_fees_wei, total_partner_fee_wei_untaxed = construct_partner_fee_payments( - partner_fees_df, config + # combine into solver payouts and partner payouts + solver_payouts = compute_solver_payouts( + solver_info, rewards, protocol_fees, buffer_accounting ) - raw_protocol_fee_wei = int(complete_payout_df.protocol_fee_eth.sum()) - final_protocol_fee_wei = raw_protocol_fee_wei - total_partner_fee_wei_untaxed - total_partner_fee_wei_taxed = sum(partner_fees_wei.values()) - partner_fee_tax_wei = total_partner_fee_wei_untaxed - total_partner_fee_wei_taxed + partner_payouts = compute_partner_fees(batch_data, config.protocol_fee_config) - performance_reward = complete_payout_df["primary_reward_cow"].sum() - quote_reward = complete_payout_df["quote_reward_cow"].sum() - - service_fee = sum( - RewardAndPenaltyDatum.from_series(payment).total_service_fee() - for _, payment in complete_payout_df.iterrows() + summarize_payments( + solver_payouts, + partner_payouts, + exchange_rate_native_to_cow, + exchange_rate_native_to_eth, ) - log_saver.print( - "Payment breakdown (ignoring service fees):\n" - f"Performance Reward: {performance_reward / 10 ** 18:.4f}\n" - f"Quote Reward: {quote_reward / 10 ** 18:.4f}\n" - f"Protocol Fees: {final_protocol_fee_wei / 10 ** 18:.4f}\n" - f"Partner Fees Tax: {partner_fee_tax_wei / 10 ** 18:.4f}\n" - f"Partner Fees: {total_partner_fee_wei_taxed / 10 ** 18:.4f}\n" - f"COW DAO Service Fees: {service_fee / 10 ** 18:.4f}\n\n" - f"Exchange rate native token to COW: {exchange_rate_native_to_cow:.4f} COW/native token\n" - f"Exchange rate native token to ETH: {exchange_rate_native_to_eth:.4f} ETH/native token\n", - category=Category.TOTALS, - ) - payouts = prepare_transfers( - complete_payout_df, - dune.period, - final_protocol_fee_wei, - partner_fee_tax_wei, - partner_fees_wei, - config, - ) + # create transfers and overdrafts + payouts = prepare_transfers(solver_payouts, partner_payouts, dune.period, config) + for overdraft in payouts.overdrafts: log_saver.print(str(overdraft), Category.OVERDRAFT) return payouts.transfers diff --git a/src/fetch/protocol_fees.py b/src/fetch/protocol_fees.py new file mode 100644 index 00000000..2c3f3422 --- /dev/null +++ b/src/fetch/protocol_fees.py @@ -0,0 +1,25 @@ +"""Functionality for protocol fees.""" + +from pandas import DataFrame + +BATCH_DATA_COLUMNS = ["solver", "protocol_fee_eth"] + +PROTOCOL_FEES_COLUMNS = BATCH_DATA_COLUMNS + + +def compute_protocol_fees( + batch_data: DataFrame, +) -> DataFrame: + """Compute protocol fees per solver.""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + + protocol_fees = batch_data[BATCH_DATA_COLUMNS].copy() + + # change all types to object to use native python types + protocol_fees = protocol_fees.astype(object) + + assert set(protocol_fees.columns) == set(PROTOCOL_FEES_COLUMNS) + + return protocol_fees diff --git a/src/fetch/rewards.py b/src/fetch/rewards.py new file mode 100644 index 00000000..84a53af8 --- /dev/null +++ b/src/fetch/rewards.py @@ -0,0 +1,78 @@ +"""Functionality for rewards.""" + +from fractions import Fraction + +from pandas import DataFrame + +from src.config import RewardConfig +from src.logger import set_log + +log = set_log(__name__) + +BATCH_REWARDS_COLUMNS = ["solver", "primary_reward_eth"] +QUOTE_REWARDS_COLUMNS = ["solver", "num_quotes"] 
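Each of the new modules converts its result frame with `astype(object)` so that values stay native Python ints rather than NumPy `int64`. A short sketch of why that matters for wei-scale amounts (toy values only):

```python
import pandas as pd

fees = pd.DataFrame({"solver": ["0xa"], "protocol_fee_eth": [10**18]})

# With the default int64 dtype, wei-scale sums can overflow; object dtype
# keeps arbitrary-precision Python ints.
as_int64 = fees["protocol_fee_eth"].astype("int64")
as_object = fees.astype(object)["protocol_fee_eth"]

print(type(as_int64.iloc[0]))   # <class 'numpy.int64'>, bounded at ~9.2 * 10**18
print(type(as_object.iloc[0]))  # <class 'int'>, arbitrary precision
```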
+ +REWARDS_COLUMNS = [ + "solver", + "primary_reward_eth", + "primary_reward_cow", + "quote_reward_cow", + "reward_token_address", +] + +NUMERICAL_COLUMNS = [ + "primary_reward_eth", + "primary_reward_cow", + "quote_reward_cow", +] + + +def compute_rewards( + batch_data: DataFrame, + quote_rewards: DataFrame, + exchange_rate: Fraction, + reward_config: RewardConfig, +) -> DataFrame: + """Compute solver rewards""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_REWARDS_COLUMNS).issubset(set(batch_data.columns)) + assert set(QUOTE_REWARDS_COLUMNS).issubset(set(quote_rewards.columns)) + + rewards = ( + batch_data[BATCH_REWARDS_COLUMNS] + .merge( + quote_rewards[QUOTE_REWARDS_COLUMNS], + how="outer", + on="solver", + validate="one_to_one", + ) + .fillna(0) + ) + + rewards["primary_reward_cow"] = rewards["primary_reward_eth"] * float(exchange_rate) + + # Pandas has poor support for large integers, must cast the constant to float here, + # otherwise the dtype would be inferred as int64 (which overflows). + reward_per_quote = float( + min( + reward_config.quote_reward_cow, + int(reward_config.quote_reward_cap_native * exchange_rate), + ) + ) + log.info(f"A reward of {reward_per_quote / 10**18:.4f} COW per quote is used.") + + rewards["quote_reward_cow"] = reward_per_quote * rewards["num_quotes"] + rewards = rewards.drop("num_quotes", axis=1) + + rewards["reward_token_address"] = str(reward_config.reward_token_address) + + # change all types to object to use native python types + rewards = rewards.astype(object) + + # for number_col in NUMERICAL_COLUMNS: + # rewards[number_col] = pandas.to_numeric(rewards[number_col]) + + assert set(rewards.columns) == set(REWARDS_COLUMNS) + + return rewards diff --git a/src/fetch/solver_info.py b/src/fetch/solver_info.py new file mode 100644 index 00000000..96bd7395 --- /dev/null +++ b/src/fetch/solver_info.py @@ -0,0 +1,63 @@ +"""Functionality for solver information.""" + +import numpy as np +from pandas import DataFrame + +from src.config import AccountingConfig +from src.logger import set_log + +log = set_log(__name__) + +REWARD_TARGETS_COLUMNS = ["solver", "reward_target", "pool_address", "solver_name"] +SERVICE_FEES_COLUMNS = ["solver", "service_fee"] + +SOLVER_INFO_COLUMNS = [ + "solver", + "reward_target", + "buffer_accounting_target", + "solver_name", + "service_fee", +] + + +def compute_solver_info( + reward_targets: DataFrame, + service_fees: DataFrame, + config: AccountingConfig, +) -> DataFrame: + """Compute solver information""" + + # validate reward targets and service fees columns + assert set(REWARD_TARGETS_COLUMNS).issubset(set(reward_targets.columns)) + assert set(SERVICE_FEES_COLUMNS).issubset(set(service_fees.columns)) + + solver_info = reward_targets[REWARD_TARGETS_COLUMNS].merge( + service_fees[SERVICE_FEES_COLUMNS], how="outer", on="solver" + ) + + solver_info["buffer_accounting_target"] = np.where( + solver_info["pool_address"] != config.reward_config.cow_bonding_pool.address, + solver_info["solver"], + solver_info["reward_target"], + ) + solver_info = solver_info.drop("pool_address", axis=1) + + solver_info["service_fee"] = [ + ( + service_fees_flag * config.reward_config.service_fee_factor + if service_fees_flag is not None + else 0 + ) + for service_fees_flag in solver_info["service_fee"] + ] + + if not solver_info["solver"].is_unique: + duplicate_solvers = solver_info[solver_info["solver"].duplicated(keep=False)] + log.warning(f"Duplicate solvers: {duplicate_solvers}. 
Choosing first entry.") + solver_info = solver_info.drop_duplicates(subset=["solver"]) + + solver_info = solver_info.astype(object) + + assert set(solver_info.columns) == set(SOLVER_INFO_COLUMNS) + + return solver_info