Skip to content

Commit

Permalink
add functionality for roid fitting using polynomial function
Browse files Browse the repository at this point in the history
  • Loading branch information
FusRoman committed Oct 31, 2023
1 parent 135783f commit 4583d36
Show file tree
Hide file tree
Showing 7 changed files with 469 additions and 28 deletions.
36 changes: 18 additions & 18 deletions docker/centos7/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -39,37 +39,37 @@ COPY . fink-fat

# Install system build dependencies
RUN yum -y update \
&& yum -y install which git wget java-11-openjdk-devel \
&& echo "export JAVA_HOME=$(dirname $(dirname $(readlink -f $(type -P java))))" > /etc/profile.d/javahome.sh \
&& yum -y groupinstall "Development Tools" \
&& yum -y clean all \
&& rm -rf /var/cache
&& yum -y install which git wget java-11-openjdk-devel \
&& echo "export JAVA_HOME=$(dirname $(dirname $(readlink -f $(type -P java))))" > /etc/profile.d/javahome.sh \
&& yum -y groupinstall "Development Tools" \
&& yum -y clean all \
&& rm -rf /var/cache
# && echo "export JAVA_HOME=$(dirname $(dirname $(readlink -f $(type -P java))))" > /etc/profile.d/javahome.sh

# install python and the dependencies
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-${PYTHON_VERSION}-Linux-x86_64.sh -O ~/miniconda.sh \
&& bash ~/miniconda.sh -b -p ${USRLIBS}/miniconda
&& bash ~/miniconda.sh -b -p ${USRLIBS}/miniconda

RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
&& cd ${USRLIBS}/fink-fat/script \
&& source ./install_python_deps.sh \
&& cd ${USRLIBS}
&& cd ${USRLIBS}/fink-fat/script \
&& source ./install_python_deps.sh \
&& cd ${USRLIBS}


# install spark
RUN wget --quiet https://archive.apache.org/dist/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz \
&& tar -xf spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz \
&& rm spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz \
&& echo "export SPARK_HOME=$PWD/spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}" > /etc/profile.d/sparkhome.sh
&& tar -xf spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz \
&& rm spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz \
&& echo "export SPARK_HOME=$PWD/spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}" > /etc/profile.d/sparkhome.sh

# install OrbFit
RUN yum -y install epel-release \
&& yum -y install aria2 \
&& mkdir OrbFit \
&& cd ${USRLIBS}/fink-fat/script \
&& source ./orbFit_installer.sh --install_path ${USRLIBS}/OrbFit \
&& echo "export ORBFIT_HOME=${USRLIBS}/OrbFit" > /etc/profile.d/orbfithome.sh \
&& cd ${USRLIBS}
&& yum -y install aria2 \
&& mkdir OrbFit \
&& cd ${USRLIBS}/fink-fat/script \
&& source ./orbFit_installer.sh --install_path ${USRLIBS}/OrbFit \
&& echo "export ORBFIT_HOME=${USRLIBS}/OrbFit" > /etc/profile.d/orbfithome.sh \
&& cd ${USRLIBS}

ENV SPARK_HOME=$USRLIBS/spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}
ENV SPARKLIB=${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.10.9-src.zip
Expand Down
32 changes: 22 additions & 10 deletions fink_fat/kalman/asteroid_kalman.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,11 +165,13 @@ def predict(self, A):

if len(np.shape(A)) == 2:
prediction = self.opposite_dec_2d(prediction)
# print("--")
# print(A.T)
# print(np.dot(self.P, A.T))
# print(np.dot(A, np.dot(self.P, A.T)))
# print("--")
print("---------------------------------------------------------")
print(A.T)
print()
print(np.dot(self.P, A.T))
print()
print(np.dot(A, np.dot(self.P, A.T)))
print("---------------------------------------------------------")
P = np.dot(A, np.dot(self.P, A.T)) # + self.Q

elif len(np.shape(A)) == 3:
Expand All @@ -179,11 +181,21 @@ def predict(self, A):
# Q_exp = np.array([np.eye(A.shape[-1]) for _ in range(A.shape[0])])
f1 = np.flip(P @ A, (1, 2))
P = A @ f1 # + Q_exp
# print("==")
# print(f1)
# print((A @ f1))
# print(P)
# print("==")
# TODO
# the computation of P bug here when shape of A == 3
# fix that

print("===============================================================")
# print(A)
# print()
# print(A.T)
# print()
print(f1)
print()
print((A @ f1))
print()
print(P)
print("===============================================================")

return prediction, P

Expand Down
1 change: 1 addition & 0 deletions fink_fat/kalman/kalman_prediction.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ def predictions(
numpy array
contains the trajectory_id, the predict coordinates and the errors
"""
print(f"kalmanDfPrediction dt: {dt}")
A = np.array(
[
[
Expand Down
Empty file.
168 changes: 168 additions & 0 deletions fink_fat/roid_fitting/init_roid_fitting.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,168 @@
import numpy as np
import pandas as pd
import sys
import doctest
import fink_fat.roid_fitting.utils_roid_fit as uf
import warnings


def fit_polyast(gb_input):
    """
    Fit polynomial functions through the sky motion of one trajectory group.

    Intended to be used as the function of a pandas ``groupby.apply``: each
    group contains the alerts of a single trajectory_id, sorted by jd by the
    caller.

    Parameters
    ----------
    gb_input : pd.DataFrame
        contains a group from a groupby.
        required columns: ra, dec, jd, magpsf, fid
        assumes each group holds at least two observations — TODO confirm
        with the callers (indexing ``[1]`` below raises otherwise).

    Returns
    -------
    pd.Series
        the two first observations (ra_0/dec_0/jd_0, ra_1/dec_1/jd_1),
        the magnitude and filter of the second observation (mag_1, fid_1)
        and the polynomial coefficients fitted on each axis (xp, yp, zp).

    Examples
    --------
    >>> input = pd.DataFrame({'ra': [11, 12, 29, 21, 15], 'dec': [20, 4, 0, 17, 23], 'jd': [10, 12, 16, 24, 28], 'magpsf': [16, 12, 9, 8, 24], 'fid': [16, 27, 1, 7, 23], 'trajectory_id': [6, 6, 6, 6, 6]})
    >>> fit_polyast(input)
    ra_0                                                    11
    dec_0                                                   20
    ra_1                                                    12
    dec_1                                                    4
    jd_0                                                    10
    jd_1                                                    12
    mag_1                                                   12
    fid_1                                                   27
    xp       [0.0003689548973376653, -0.017083339224524503,...
    yp       [-0.003374983061298368, 0.13154198411573034, -...
    zp       [0.003889943130855235, -0.13737266595516961, 1...
    dtype: object
    >>> input = pd.DataFrame({'ra': [18, 19], 'dec': [1, 16], 'jd': [3, 24], 'magpsf': [13, 2], 'fid': [18, 7], 'trajectory_id': [7, 7]})
    >>> fit_polyast(input)
    ra_0                                                    18
    dec_0                                                    1
    ra_1                                                    19
    dec_1                                                   16
    jd_0                                                     3
    jd_1                                                    24
    mag_1                                                    2
    fid_1                                                    7
    xp       [-0.00022037675949518603, 0.003949178398779884...
    yp       [-5.387272454402794e-05, 0.0016443857868339133...
    zp       [0.0002562672237379922, 0.0053753063581082955,...
    dtype: object
    """

    ra, dec, jd, mag, fid = (
        gb_input["ra"].values,
        gb_input["dec"].values,
        gb_input["jd"].values,
        gb_input["magpsf"].values,
        gb_input["fid"].values,
    )

    # keep the two first observations of the trajectory: they anchor the fit
    ra_0, ra_1 = ra[0], ra[1]
    dec_0, dec_1 = dec[0], dec[1]
    jd_0, jd_1 = jd[0], jd[1]
    mag_1 = mag[1]
    fid_1 = fid[1]

    # np.RankWarning was moved to np.exceptions.RankWarning in NumPy 2.0;
    # resolve the class in a version-independent way.
    rank_warning = getattr(np, "RankWarning", None)
    if rank_warning is None:
        rank_warning = np.exceptions.RankWarning

    with warnings.catch_warnings():
        # short arcs make the polynomial fit poorly conditioned;
        # silence the rank warning on purpose.
        warnings.simplefilter("ignore", rank_warning)
        xp, yp, zp = uf.fit_traj(ra, dec, jd)

    return pd.Series(
        [ra_0, dec_0, ra_1, dec_1, jd_0, jd_1, mag_1, fid_1, xp, yp, zp],
        index=[
            "ra_0",
            "dec_0",
            "ra_1",
            "dec_1",
            "jd_0",
            "jd_1",
            "mag_1",
            "fid_1",
            "xp",
            "yp",
            "zp",
        ],
    )


def init_polyast(
    night_pdf: pd.DataFrame,
) -> pd.DataFrame:
    """
    Initialize the polynomial trajectory fits based on the seeds or tracklets.

    required columns: ra, dec, jd, magpsf, fid, trajectory_id

    Parameters
    ----------
    night_pdf : pd.DataFrame
        alerts of one night

    Returns
    -------
    pd.DataFrame
        a dataframe containing one polynomial fit per trajectory
        (see fit_polyast for the output columns)

    Examples
    --------
    >>> np.random.seed(3)
    >>> df = pd.DataFrame(
    ...     np.random.randint(0, 30, size=(30, 5)),
    ...     columns=["ra", "dec", "jd", "magpsf", "fid"],
    ... )
    >>> tr_id = np.repeat([0, 1, 2, 3, 4, 5, 6, 7], [2, 2, 5, 4, 8, 2, 5, 2])
    >>> df["trajectory_id"] = tr_id
    >>> init_polyast(df)
       trajectory_id  ra_0  ...                                                 yp                                                 zp
    0              0     8  ...  [5.8886683912253095e-05, 0.0021568132839048845...  [0.002130983101599165, 0.0036589380953885127, ...
    1              1    21  ...  [-0.0002667547833632723, -0.000121215680172234...  [-0.0003281002792150129, -0.000351108876583499...
    2              2     2  ...  [3.564698412338013e-05, 0.007580031292933427, ...  [0.0012283807329896033, -0.04112918641543257, ...
    3              3     5  ...  [-0.005899809113439577, 0.27815784005379157, -...  [-0.012428484041437362, 0.5283431168170123, -5...
    4              4    24  ...  [0.00033732318423005867, -0.020080309986114004...  [0.001367714730073901, -0.06476655293412147, 0...
    5              5    22  ...  [-0.0008599104450328825, -0.001465385641499352...  [-0.0003768027364769278, 0.0018922493129352458...
    6              6    11  ...  [-0.003374983061298368, 0.13154198411573034, -...  [0.003889943130855235, -0.13737266595516961, 1...
    7              7    18  ...  [-5.387272454402794e-05, 0.0016443857868339133...  [0.0002562672237379922, 0.0053753063581082955,...
    <BLANKLINE>
    [8 rows x 12 columns]
    >>> df = pd.DataFrame(columns=["ra", "dec", "jd", "magpsf", "fid", "trajectory_id"])
    >>> init_polyast(df)
    Empty DataFrame
    Columns: [ra_0, dec_0, ra_1, dec_1, jd_0, jd_1, mag_1, fid_1, xp, yp, zp]
    Index: []
    """
    # no alerts: return an empty dataframe with the expected schema
    if len(night_pdf) == 0:
        return pd.DataFrame(
            columns=[
                "ra_0",
                "dec_0",
                "ra_1",
                "dec_1",
                "jd_0",
                "jd_1",
                "mag_1",
                "fid_1",
                "xp",
                "yp",
                "zp",
            ]
        )

    # TODO
    # detect duplicated jd and apply a dedicated function when dt == 0

    # remove dbscan noise (cluster label -1)
    night_pdf = night_pdf[night_pdf["trajectory_id"] != -1.0]
    return (
        night_pdf.sort_values(["trajectory_id", "jd"])
        .groupby("trajectory_id")
        .apply(fit_polyast)
        .reset_index()
    )


if __name__ == "__main__":  # pragma: no cover
    # `sys` is already imported at module level; no need for __import__.
    if "unittest.util" in sys.modules:
        # Show full diff in self.assertEqual.
        sys.modules["unittest.util"]._MAX_LENGTH = 999999999

    # exit status is the number of failed doctests (0 == success)
    sys.exit(doctest.testmod()[0])
Loading

0 comments on commit 4583d36

Please sign in to comment.