diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
index fbbe1c2..af585d3 100644
--- a/.github/workflows/linter.yml
+++ b/.github/workflows/linter.yml
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7, 3.9]
+        python-version: [3.9, "3.10"]
 
     steps:
     - uses: actions/checkout@v2
diff --git a/fink_filters/filter_early_sn_candidates/filter.py b/fink_filters/filter_early_sn_candidates/filter.py
index 6663517..b3f6d33 100644
--- a/fink_filters/filter_early_sn_candidates/filter.py
+++ b/fink_filters/filter_early_sn_candidates/filter.py
@@ -16,15 +16,26 @@ from pyspark.sql.types import BooleanType
 
 from fink_utils.xmatch.simbad import return_list_of_eg_host
+from fink_utils.tg_bot.utils import get_curve
+from fink_utils.tg_bot.utils import get_cutout
+from fink_utils.tg_bot.utils import msg_handler_tg
 
 from fink_filters.tester import spark_unit_tests
 
 import pandas as pd
+import os
+
 
 def early_sn_candidates_(
-        cdsxmatch, snn_snia_vs_nonia, snn_sn_vs_all, rf_snia_vs_nonia,
-        ndethist, drb, classtar) -> pd.Series:
-    """ Return alerts considered as Early SN-Ia candidates
+    cdsxmatch,
+    snn_snia_vs_nonia,
+    snn_sn_vs_all,
+    rf_snia_vs_nonia,
+    ndethist,
+    drb,
+    classtar,
+) -> pd.Series:
+    """Return alerts considered as Early SN-Ia candidates
 
     Parameters
     ----------
@@ -82,9 +93,22 @@ def early_sn_candidates_(
 
 @pandas_udf(BooleanType(), PandasUDFType.SCALAR)
 def early_sn_candidates(
-        cdsxmatch, snn_snia_vs_nonia, snn_sn_vs_all, rf_snia_vs_nonia,
-        ndethist, drb, classtar) -> pd.Series:
-    """ Pandas UDF for early_sn_candidates_
+    cdsxmatch,
+    snn_snia_vs_nonia,
+    snn_sn_vs_all,
+    rf_snia_vs_nonia,
+    ndethist,
+    drb,
+    classtar,
+    objectId,
+    cjdc,
+    cmagpsfc,
+    csigmapsfc,
+    cdiffmaglimc,
+    cfidc,
+    cstampDatac,
+) -> pd.Series:
+    """Pandas UDF for early_sn_candidates_
 
     Parameters
     ----------
@@ -112,21 +136,87 @@ def early_sn_candidates(
     Examples
     ----------
     >>> from fink_utils.spark.utils import apply_user_defined_filter
+    >>> from fink_utils.spark.utils import concat_col
 
     >>> df = spark.read.format('parquet').load('datatest/regular')
+
+    >>> to_expand = ['jd', 'fid', 'magpsf', 'sigmapsf', 'diffmaglim']
+
+    >>> prefix = 'c'
+    >>> for colname in to_expand:
+    ...     df = concat_col(df, colname, prefix=prefix)
+
+    # quick fix for https://github.com/astrolabsoftware/fink-broker/issues/457
+    >>> for colname in to_expand:
+    ...     df = df.withColumnRenamed('c' + colname, 'c' + colname + 'c')
+
+    >>> df = df.withColumn('cstampDatac', df['cutoutScience.stampData'])
+
     >>> f = 'fink_filters.filter_early_sn_candidates.filter.early_sn_candidates'
     >>> df = apply_user_defined_filter(df, f)
     >>> print(df.count())
     5
     """
     series = early_sn_candidates_(
-        cdsxmatch, snn_snia_vs_nonia, snn_sn_vs_all, rf_snia_vs_nonia,
-        ndethist, drb, classtar
+        cdsxmatch,
+        snn_snia_vs_nonia,
+        snn_sn_vs_all,
+        rf_snia_vs_nonia,
+        ndethist,
+        drb,
+        classtar,
+    )
+
+    pdf = pd.DataFrame(
+        {
+            "objectId": objectId,
+            "magpsf": cmagpsfc,
+            "sigmapsf": csigmapsfc,
+            "diffmaglim": cdiffmaglimc,
+            "fid": cfidc,
+            "jd": cjdc,
+            "snn_snia_vs_nonia": snn_snia_vs_nonia,
+            "snn_sn_vs_all": snn_sn_vs_all,
+            "rf_snia_vs_nonia": rf_snia_vs_nonia,
+            "cstampDatac": cstampDatac,
+        }
     )
+
+    # Loop over matches
+    if "FINK_TG_TOKEN" in os.environ:
+        payloads = []
+        for _, alert in pdf[series.values].iterrows():
+            curve_png = get_curve(
+                jd=alert["jd"],
+                magpsf=alert["magpsf"],
+                sigmapsf=alert["sigmapsf"],
+                diffmaglim=alert["diffmaglim"],
+                fid=alert["fid"],
+                objectId=alert["objectId"],
+                origin="fields",
+            )
+
+            cutout = get_cutout(cutout=alert["cstampDatac"])
+
+            text = """
+*Object ID*: [{}](https://fink-portal.org/{})
+*Scores:*\n- Early SN Ia: {:.2f}\n- Ia SN vs non-Ia SN: {:.2f}\n- SN Ia and Core-Collapse vs non-SN: {:.2f}
+            """.format(
+                alert["objectId"],
+                alert["objectId"],
+                alert["rf_snia_vs_nonia"],
+                alert["snn_snia_vs_nonia"],
+                alert["snn_sn_vs_all"],
+            )
+
+            payloads.append((text, curve_png, cutout))
+
+        if len(payloads) > 0:
+            msg_handler_tg(payloads, channel_id="@fink_early_ia", init_msg="")
 
     return series
 
 
 if __name__ == "__main__":
-    """ Execute the test suite """
+    """Execute the test suite"""
 
     # Run the test suite
     globs = globals()
diff --git a/fink_filters/filter_yso_spicy_candidates/filter.py b/fink_filters/filter_yso_spicy_candidates/filter.py
index fbcca19..6e4fd65 100644
--- a/fink_filters/filter_yso_spicy_candidates/filter.py
+++ b/fink_filters/filter_yso_spicy_candidates/filter.py
@@ -47,7 +47,7 @@ def yso_spicy_candidates(
     >>> from fink_utils.spark.utils import concat_col
 
     >>> df = spark.read.format('parquet').load('datatest/spicy_yso')
-    >>> to_expand = ['jd', 'fid', 'magpsf', 'sigmapsf', 'diffmaglim'] 
+    >>> to_expand = ['jd', 'fid', 'magpsf', 'sigmapsf', 'diffmaglim']
 
     >>> prefix = 'c'
     >>> for colname in to_expand:
@@ -90,7 +90,7 @@ def yso_spicy_candidates(
                 objectId=alert["objectId"],
                 origin="fields",
             )
-            # pd.DataFrame({'magpsf': alert["magpsf"]})['magpsf']
+
             hyperlink = "[{}](https://fink-portal.org/{}): ID {} ({})".format(
                 alert["objectId"],
                 alert["objectId"],
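
For reviewers who want to poke at the new code path without a Spark session, the pure pandas entry point can be exercised directly. This is a minimal sketch, not part of the patch: the score values are hypothetical, and whether a given row passes depends on the thresholds inside the (unchanged) body of `early_sn_candidates_`.

```python
import pandas as pd

from fink_filters.filter_early_sn_candidates.filter import early_sn_candidates_

# Hypothetical single-alert input; real values come from ZTF alert fields.
mask = early_sn_candidates_(
    cdsxmatch=pd.Series(["Unknown"]),
    snn_snia_vs_nonia=pd.Series([0.9]),
    snn_sn_vs_all=pd.Series([0.9]),
    rf_snia_vs_nonia=pd.Series([0.9]),
    ndethist=pd.Series([5]),
    drb=pd.Series([0.99]),
    classtar=pd.Series([0.6]),
)

# Boolean mask with one entry per alert; True marks an early SN Ia candidate.
print(mask.values)
```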
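One design note on the Telegram branch: the whole block is a no-op unless `FINK_TG_TOKEN` is set in the environment, and the boolean Series returned by the filter is reused to slice the per-alert DataFrame, so payloads are only built for alerts that actually pass. A toy illustration of that selection pattern (object IDs are made up):

```python
import pandas as pd

pdf = pd.DataFrame(
    {
        "objectId": ["ZTF21aaaaaaa", "ZTF21bbbbbbb"],
        "rf_snia_vs_nonia": [0.9, 0.1],
    }
)
series = pd.Series([True, False])  # stand-in for the output of early_sn_candidates_

# Same pattern as in the UDF: iterate only over passing alerts.
for _, alert in pdf[series.values].iterrows():
    print(alert["objectId"])  # prints only ZTF21aaaaaaa
```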