forked from euloggeradmin/LootNanny
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ocr.py
101 lines (70 loc) · 2.34 KB
/
ocr.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
from typing import List
import pyautogui
import pygetwindow
from PIL import Image
from pytesseract import image_to_string, pytesseract
import numpy as np
from decimal import Decimal
import re
# Matches a loot line such as "Animal Hide (12.34 PED)" or "Shrapnel [1,23 PED]".
# Group 1: item name (letters, spaces, parens). Group 2: PED value, with either
# '.' or ',' as the decimal separator (normalized to '.' by callers).
# Raw string: the original non-raw literal produced invalid escape sequences
# (\(, \d, ...) which warn on modern Python.
LOOT_RE = r"([a-zA-Z\(\) ]+) [\(\{\[](\d+[\.\,]\d+) PED[\)\]\}]"
# Point pytesseract at the Tesseract binary; hard-coded Windows install path.
pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def screenshot_window():
    """Locate the Entropia Universe client window and screenshot it.

    Returns:
        tuple: (image, width, height) where image is a PIL image cropped to
        the client window, or (None, 0, 0) if the window is not found.
    """
    # BUG FIX: the original tested the loop variable `window_name`, which
    # after an unmatched full iteration still held the *last* title seen, so
    # the not-found branch was unreachable and an arbitrary window was used.
    found_window = None
    for title in pygetwindow.getAllTitles():
        if title.startswith("Entropia Universe Client"):
            found_window = title
            break
    if not found_window:
        return None, 0, 0
    window = pygetwindow.getWindowsWithTitle(found_window)[0]
    try:
        window.activate()
    except Exception:
        pass  # best-effort focus; activation can fail on some platforms
    # Screenshot the whole screen, then crop to the client window rectangle.
    im = pyautogui.screenshot()
    top_left = window.topleft
    width = window.width
    height = window.height
    im1 = im.crop((top_left.x, top_left.y, top_left.x + width, top_left.y + height))
    return im1, width, height
def change_contrast(img, level):
    """Return a copy of *img* with its contrast adjusted by *level*.

    Positive levels increase contrast, negative levels decrease it;
    level 0 leaves every pixel value unchanged (apart from float conversion).
    """
    # Standard contrast-correction gain formula (level in roughly -255..255).
    gain = (259 * (level + 255)) / (255 * (259 - level))

    def adjust(value):
        # Scale each pixel's distance from mid-grey (128) by the gain.
        return 128 + gain * (value - 128)

    return img.point(adjust)
def get_loot_instances_from_screen():
    """OCR the game client window and return the loot entries visible on it.

    Returns:
        list[tuple[str, Decimal]]: (item_name, ped_value) pairs parsed from
        OCR'd lines matching LOOT_RE. Empty if the client window is not found
        or no loot lines are recognized.
    """
    loots = []
    # BUG FIX: screenshot_window() returns (image, width, height); the
    # original assigned the whole tuple to `img` and crashed on .convert().
    img, _width, _height = screenshot_window()
    if img is None:
        return loots  # client window not found
    img = img.convert('LA')
    data = np.array(img)
    # NOTE(review): the original called change_contrast() here and discarded
    # the result (thresholding uses the pre-contrast `data`); the dead call
    # has been removed.
    # Threshold: pixels whose grey value falls in the same 39-wide band as
    # 215 become black (text); everything else becomes white.
    converted = np.where((data // 39) == 215 // 39, 0, 255)
    img = Image.fromarray(converted.astype('uint8'))
    text = image_to_string(img)
    for line in text.split("\n"):
        match = re.match(LOOT_RE, line)
        if match:
            name, value = match.groups()
            # BUG FIX: the original parsed the value but never appended it,
            # so the function always returned an empty list.
            loots.append((name, Decimal(value.replace(",", "."))))
    return loots
def capture_target(contrast=0, banding=35, filter=225):
    """Screenshot the top-middle third of the screen and print OCR'd lines.

    Debug helper: crops the centre-top region (where the target name is
    drawn), thresholds it into black-on-white text and prints every
    non-empty line that Tesseract recognizes.
    """
    screen = pyautogui.screenshot()
    full_width, full_height = screen.size
    margin = full_width / 3
    crop_height = full_height / 3
    print((0, 0, margin, crop_height))
    region = screen.crop((margin, 0, full_width - margin, crop_height))
    region = region.convert('LA')
    pixels = np.array(region)
    region = change_contrast(region, contrast)
    # Band-quantize: pixels in the same `banding`-wide band as `filter`
    # become black text, everything else white background.
    mask = np.where((pixels // banding) == filter // banding, 0, 255)
    ocr_image = Image.fromarray(mask.astype('uint8'))
    recognized = image_to_string(ocr_image)
    for line in recognized.split("\n"):
        if line:
            print(line)