llm-trading.py
'''
RBI - Research, backtest, implement
LLMs - RBI
git: https://github.com/moondevonyt/AI---LLMs-For-Automated-Trading
'''
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
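# langchainkeys is a local module in this repo (not a PyPI package) that is expected to expose open_ai_key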
import langchainkeys as l
import time
###### RBI SYSTEM ########
#### RESEARCH LLM ########
# Research trading strategies
llm_research = OpenAI(openai_api_key=l.open_ai_key, temperature=0)
research_template = """Look at recent market data for Bitcoin over the last {time_period} days and design a trading strategy for it, using an {indicator} of your choice."""
research_prompt = PromptTemplate(template=research_template, input_variables=["indicator", "time_period"])
research_chain = LLMChain(prompt=research_prompt, llm=llm_research)
# Generate trading strategies
research_result = research_chain.run({"indicator": "any indicator", "time_period": "365"})
# Retrieve the generated text
trading_strategy = research_result
print(trading_strategy)
print('')
print('done thinking of strategies... moving on to instructions for backtests...')
print('')
time.sleep(5)
#### STRATEGY INSTRUCTIONS LLM ########
# Generate step-by-step instructions for the trading strategy
llm_instructions = OpenAI(openai_api_key=l.open_ai_key, temperature=0)
instructions_template = """
Based on the generated trading strategy:
- Determine the entry condition.
- Define the exit condition.
- Specify the market stay-out condition.
Trading Strategy:
{trading_strategy}
Entry Instructions:
...
Exit Instructions:
...
Market Stay-out Instructions:
...
"""
instructions_prompt = PromptTemplate(template=instructions_template, input_variables=["trading_strategy"])
instructions_chain = LLMChain(prompt=instructions_prompt, llm=llm_instructions)
print('instructions chain built... generating step-by-step instructions...')
# Generate instructions for the trading strategy
instructions_result = instructions_chain.run({"trading_strategy": trading_strategy})
step_by_step_outlines = [instructions_result]
print(step_by_step_outlines)
# Print the step-by-step outline
for outline in step_by_step_outlines:
print(outline)
# Print completion message
print("\nAll done with the research and step-by-step instructions!")
time.sleep(8756)  # long pause so the script doesn't run past this point into the unfinished backtest code below
#### BACKTEST LLM ########
# use the ideas that the LLM above came up with and build a backtest
backtesting_strategies = research_result  # chain.run() already returns a plain string, so there is no .output attribute
# implement the backtesting logic
# TODO - backtesting_code =
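# One way the TODO above could be filled in: a hedged sketch (not part of the original
# repo) that asks another LLMChain to turn the step-by-step instructions into backtesting
# code. The prompt wording and the variable name backtesting_code are assumptions.
llm_backtest = OpenAI(openai_api_key=l.open_ai_key, temperature=0)
backtest_template = """Write Python backtesting code for the following trading strategy instructions:
{instructions}
Return only the code."""
backtest_prompt = PromptTemplate(template=backtest_template, input_variables=["instructions"])
backtest_chain = LLMChain(prompt=backtest_prompt, llm=llm_backtest)
# generate the backtesting code from the step-by-step instructions produced earlier
backtesting_code = backtest_chain.run({"instructions": instructions_result})
print(backtesting_code)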
#### BUG TESTING LLM ######
llm_debugging = OpenAI(openai_api_key=l.open_ai_key, temperature=0)
# define a prompt template for code debugging
debugging_template = """Given the backtesting code below, identify and fix any coding bugs or issues:
{backtesting_code}"""
debugging_prompt = PromptTemplate(template=debugging_template, input_variables=["backtesting_code"])
# create chain for code debugging
debugging_chain = LLMChain(prompt=debugging_prompt, llm=llm_debugging)
debugging_result = debugging_chain.run({"backtesting_code": backtesting_code})
# fix any coding bugs in the backtesting code
debugging_result = debugging_result  # run() returns the model's response as a plain string
fixed_backtesting_code = debugging_result
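# The final "implement" step from the RBI idea in the docstring isn't covered by the
# original script. As a hedged sketch only (the filename is an assumption), the debugged
# code could be written to disk for manual review before anything is ever traded with it:
with open('generated_backtest.py', 'w') as f:
    f.write(fixed_backtesting_code)
print('wrote the debugged backtest to generated_backtest.py for review')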