From cfb6033ee89ce52300fd6298992a737f38108144 Mon Sep 17 00:00:00 2001 From: zyddnys Date: Tue, 11 May 2021 22:11:13 -0400 Subject: [PATCH] add youdao translate & change linebreak to LF only --- CRAFT_resnet34.py | 306 +++++++++++++++++++++++----------------------- README.md | 4 +- README_EN.md | 4 +- imgproc.py | 150 +++++++++++------------ key.py | 6 +- translate_demo.py | 6 +- youdao.py | 66 ++++++++++ 7 files changed, 308 insertions(+), 234 deletions(-) create mode 100644 youdao.py diff --git a/CRAFT_resnet34.py b/CRAFT_resnet34.py index 1f68457e0..9e0f8032b 100755 --- a/CRAFT_resnet34.py +++ b/CRAFT_resnet34.py @@ -1,153 +1,153 @@ - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.nn.init as init - -from torchvision.models import resnet34 - -import einops -import math - -class ImageMultiheadSelfAttention(nn.Module) : - def __init__(self, planes): - super(ImageMultiheadSelfAttention, self).__init__() - self.attn = nn.MultiheadAttention(planes, 4) - def forward(self, x) : - res = x - n, c, h, w = x.shape - x = einops.rearrange(x, 'n c h w -> (h w) n c') - x = self.attn(x, x, x)[0] - x = einops.rearrange(x, '(h w) n c -> n c h w', n = n, c = c, h = h, w = w) - return res + x - -class double_conv(nn.Module): - def __init__(self, in_ch, mid_ch, out_ch, stride = 1, planes = 256): - super(double_conv, self).__init__() - self.planes = planes - # down = None - # if stride > 1 : - # down = nn.Sequential( - # nn.AvgPool2d(2, 2), - # nn.Conv2d(in_ch + mid_ch, self.planes * Bottleneck.expansion, kernel_size=1, stride=1, bias=False),nn.BatchNorm2d(self.planes * Bottleneck.expansion) - # ) - self.down = None - if stride > 1 : - self.down = nn.AvgPool2d(2,stride=2) - self.conv = nn.Sequential( - nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=3, padding=1, stride = 1, bias=False), - nn.BatchNorm2d(mid_ch), - nn.ReLU(inplace=True), - #Bottleneck(mid_ch, self.planes, stride, down, 2, 1, avd = True, norm_layer = nn.BatchNorm2d), - nn.Conv2d(mid_ch, out_ch, kernel_size=3, stride = 1, padding=1, bias=False), - nn.BatchNorm2d(out_ch), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - if self.down is not None : - x = self.down(x) - x = self.conv(x) - return x - -class CRAFT_net(nn.Module) : - def __init__(self) : - super(CRAFT_net, self).__init__() - self.backbone = resnet34() - - self.conv_rs = nn.Sequential( - nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 1, kernel_size=1), - nn.Sigmoid() - ) - - self.conv_as = nn.Sequential( - nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 32, kernel_size=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 1, kernel_size=1), - nn.Sigmoid() - ) - - self.conv_mask = nn.Sequential( - nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), - nn.Conv2d(32, 1, kernel_size=1), - nn.Sigmoid() - ) - - self.down_conv1 = double_conv(0, 512, 512, 2) - self.down_conv2 = double_conv(0, 512, 512, 2) - self.down_conv3 = double_conv(0, 512, 512, 2) - - self.upconv1 = double_conv(0, 512, 256) - 
self.upconv2 = double_conv(256, 512, 256) - self.upconv3 = double_conv(256, 512, 256) - self.upconv4 = double_conv(256, 512, 256, planes = 128) - self.upconv5 = double_conv(256, 256, 128, planes = 64) - self.upconv6 = double_conv(128, 128, 64, planes = 32) - self.upconv7 = double_conv(64, 64, 64, planes = 16) - - def forward_train(self, x) : - x = self.backbone.conv1(x) - x = self.backbone.bn1(x) - x = self.backbone.relu(x) - x = self.backbone.maxpool(x) # 64@384 - - h4 = self.backbone.layer1(x) # 64@384 - h8 = self.backbone.layer2(h4) # 128@192 - h16 = self.backbone.layer3(h8) # 256@96 - h32 = self.backbone.layer4(h16) # 512@48 - h64 = self.down_conv1(h32) # 512@24 - h128 = self.down_conv2(h64) # 512@12 - h256 = self.down_conv3(h128) # 512@6 - - up256 = F.interpolate(self.upconv1(h256), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 512@12 - up128 = F.interpolate(self.upconv2(torch.cat([up256, h128], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) #51264@24 - up64 = F.interpolate(self.upconv3(torch.cat([up128, h64], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@48 - up32 = F.interpolate(self.upconv4(torch.cat([up64, h32], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@96 - up16 = F.interpolate(self.upconv5(torch.cat([up32, h16], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 128@192 - up8 = F.interpolate(self.upconv6(torch.cat([up16, h8], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@384 - up4 = F.interpolate(self.upconv7(torch.cat([up8, h4], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@768 - - ascore = self.conv_as(up4) - rscore = self.conv_rs(up4) - - return torch.cat([rscore, ascore], dim = 1), self.conv_mask(up4) - - def forward(self, x) : - x = self.backbone.conv1(x) - x = self.backbone.bn1(x) - x = self.backbone.relu(x) - x = self.backbone.maxpool(x) # 64@384 - - h4 = self.backbone.layer1(x) # 64@384 - h8 = self.backbone.layer2(h4) # 128@192 - h16 = self.backbone.layer3(h8) # 256@96 - h32 = self.backbone.layer4(h16) # 512@48 - h64 = self.down_conv1(h32) # 512@24 - h128 = self.down_conv2(h64) # 512@12 - h256 = self.down_conv3(h128) # 512@6 - - up256 = F.interpolate(self.upconv1(h256), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 512@12 - up128 = F.interpolate(self.upconv2(torch.cat([up256, h128], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) #51264@24 - up64 = F.interpolate(self.upconv3(torch.cat([up128, h64], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@48 - up32 = F.interpolate(self.upconv4(torch.cat([up64, h32], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@96 - up16 = F.interpolate(self.upconv5(torch.cat([up32, h16], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 128@192 - up8 = F.interpolate(self.upconv6(torch.cat([up16, h8], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@384 - up4 = F.interpolate(self.upconv7(torch.cat([up8, h4], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@768 - - ascore = self.conv_as(up4) - rscore = self.conv_rs(up4) - - return torch.cat([rscore, ascore], dim = 1), self.conv_mask(up4) - -if __name__ == '__main__' : - net = CRAFT_net().cuda() - img = torch.randn(2, 3, 1536, 1536).cuda() - 
print(net.forward_train(img)[0].shape) + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init + +from torchvision.models import resnet34 + +import einops +import math + +class ImageMultiheadSelfAttention(nn.Module) : + def __init__(self, planes): + super(ImageMultiheadSelfAttention, self).__init__() + self.attn = nn.MultiheadAttention(planes, 4) + def forward(self, x) : + res = x + n, c, h, w = x.shape + x = einops.rearrange(x, 'n c h w -> (h w) n c') + x = self.attn(x, x, x)[0] + x = einops.rearrange(x, '(h w) n c -> n c h w', n = n, c = c, h = h, w = w) + return res + x + +class double_conv(nn.Module): + def __init__(self, in_ch, mid_ch, out_ch, stride = 1, planes = 256): + super(double_conv, self).__init__() + self.planes = planes + # down = None + # if stride > 1 : + # down = nn.Sequential( + # nn.AvgPool2d(2, 2), + # nn.Conv2d(in_ch + mid_ch, self.planes * Bottleneck.expansion, kernel_size=1, stride=1, bias=False),nn.BatchNorm2d(self.planes * Bottleneck.expansion) + # ) + self.down = None + if stride > 1 : + self.down = nn.AvgPool2d(2,stride=2) + self.conv = nn.Sequential( + nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=3, padding=1, stride = 1, bias=False), + nn.BatchNorm2d(mid_ch), + nn.ReLU(inplace=True), + #Bottleneck(mid_ch, self.planes, stride, down, 2, 1, avd = True, norm_layer = nn.BatchNorm2d), + nn.Conv2d(mid_ch, out_ch, kernel_size=3, stride = 1, padding=1, bias=False), + nn.BatchNorm2d(out_ch), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + if self.down is not None : + x = self.down(x) + x = self.conv(x) + return x + +class CRAFT_net(nn.Module) : + def __init__(self) : + super(CRAFT_net, self).__init__() + self.backbone = resnet34() + + self.conv_rs = nn.Sequential( + nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 1, kernel_size=1), + nn.Sigmoid() + ) + + self.conv_as = nn.Sequential( + nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 32, kernel_size=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 1, kernel_size=1), + nn.Sigmoid() + ) + + self.conv_mask = nn.Sequential( + nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(64, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True), + nn.Conv2d(32, 1, kernel_size=1), + nn.Sigmoid() + ) + + self.down_conv1 = double_conv(0, 512, 512, 2) + self.down_conv2 = double_conv(0, 512, 512, 2) + self.down_conv3 = double_conv(0, 512, 512, 2) + + self.upconv1 = double_conv(0, 512, 256) + self.upconv2 = double_conv(256, 512, 256) + self.upconv3 = double_conv(256, 512, 256) + self.upconv4 = double_conv(256, 512, 256, planes = 128) + self.upconv5 = double_conv(256, 256, 128, planes = 64) + self.upconv6 = double_conv(128, 128, 64, planes = 32) + self.upconv7 = double_conv(64, 64, 64, planes = 16) + + def forward_train(self, x) : + x = self.backbone.conv1(x) + x = self.backbone.bn1(x) + x = self.backbone.relu(x) + x = self.backbone.maxpool(x) # 64@384 + + h4 = self.backbone.layer1(x) # 64@384 + h8 = self.backbone.layer2(h4) # 128@192 + h16 = self.backbone.layer3(h8) # 256@96 + h32 = 
self.backbone.layer4(h16) # 512@48
+        h64 = self.down_conv1(h32) # 512@24
+        h128 = self.down_conv2(h64) # 512@12
+        h256 = self.down_conv3(h128) # 512@6
+
+        up256 = F.interpolate(self.upconv1(h256), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@12
+        up128 = F.interpolate(self.upconv2(torch.cat([up256, h128], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@24
+        up64 = F.interpolate(self.upconv3(torch.cat([up128, h64], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@48
+        up32 = F.interpolate(self.upconv4(torch.cat([up64, h32], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@96
+        up16 = F.interpolate(self.upconv5(torch.cat([up32, h16], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 128@192
+        up8 = F.interpolate(self.upconv6(torch.cat([up16, h8], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@384
+        up4 = F.interpolate(self.upconv7(torch.cat([up8, h4], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@768
+
+        ascore = self.conv_as(up4)
+        rscore = self.conv_rs(up4)
+
+        return torch.cat([rscore, ascore], dim = 1), self.conv_mask(up4)
+
+    def forward(self, x) :
+        x = self.backbone.conv1(x)
+        x = self.backbone.bn1(x)
+        x = self.backbone.relu(x)
+        x = self.backbone.maxpool(x) # 64@384
+
+        h4 = self.backbone.layer1(x) # 64@384
+        h8 = self.backbone.layer2(h4) # 128@192
+        h16 = self.backbone.layer3(h8) # 256@96
+        h32 = self.backbone.layer4(h16) # 512@48
+        h64 = self.down_conv1(h32) # 512@24
+        h128 = self.down_conv2(h64) # 512@12
+        h256 = self.down_conv3(h128) # 512@6
+
+        up256 = F.interpolate(self.upconv1(h256), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@12
+        up128 = F.interpolate(self.upconv2(torch.cat([up256, h128], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@24
+        up64 = F.interpolate(self.upconv3(torch.cat([up128, h64], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@48
+        up32 = F.interpolate(self.upconv4(torch.cat([up64, h32], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 256@96
+        up16 = F.interpolate(self.upconv5(torch.cat([up32, h16], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 128@192
+        up8 = F.interpolate(self.upconv6(torch.cat([up16, h8], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@384
+        up4 = F.interpolate(self.upconv7(torch.cat([up8, h4], dim = 1)), scale_factor = (2, 2), mode = 'bilinear', align_corners = False) # 64@768
+
+        ascore = self.conv_as(up4)
+        rscore = self.conv_rs(up4)
+
+        return torch.cat([rscore, ascore], dim = 1), self.conv_mask(up4)
+
+if __name__ == '__main__' :
+    net = CRAFT_net().cuda()
+    img = torch.randn(2, 3, 1536, 1536).cuda()
+    print(net.forward_train(img)[0].shape)
diff --git a/README.md b/README.md
index 985e8c86c..98f968582 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@ Note this may not work sometimes due to stupid google gcp kept restarting my ins
 # English README
 [README_EN.md](README_EN.md)
 # Changelogs
+### 2021-05-11
+1. 增加并默认使用有道翻译
 ### 2021-05-06
 1. 检测模型更新为基于ResNet101的DBNet
 2. OCR模型更新更深
@@ -23,7 +25,7 @@ Note this may not work sometimes due to stupid google gcp kept restarting my ins
 # 使用说明
 1. clone这个repo
 2. [下载](https://github.com/zyddnys/manga-image-translator/releases/tag/alpha-v2.2.1)ocr.ckpt、detect.ckpt和inpainting.ckpt,放到这个repo的根目录下
-3. 申请百度翻译API,把你的appid和密钥存到key.py里
+3. 申请有道翻译API,把你的APP_KEY和APP_SECRET存到key.py里
 4. 运行`python translate_demo.py --image <图片文件路径> [--use-inpainting] [--use-cuda]`,结果会存放到result文件夹里。请加上`--use-inpainting`使用图像修补,请加上`--use-cuda`使用GPU。
 # 只是初步版本,我们需要您的帮助完善
 这个项目目前只完成了简单的demo,依旧存在大量不完善的地方,我们需要您的帮助完善这个项目!
diff --git a/README_EN.md b/README_EN.md
index ece5e1403..5c32adc70 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -2,6 +2,8 @@
 https://touhou.ai/imgtrans/
 Note this may not work sometimes due to stupid google gcp kept restarting my instance. In that case you can wait for me to restart the service, which may take up to 24 hrs.
 # Changelogs
+### 2021-05-11
+1. Add youdao translate and set it as the default translator
 ### 2021-05-06
 1. Text detection model is now based on DBNet with ResNet101 backbone
 2. OCR model is now deeper
@@ -21,7 +23,7 @@ Successor to https://github.com/PatchyVideo/MMDOCR-HighPerformance
 # How to use
 1. Clone this repo
 2. [Download](https://github.com/zyddnys/manga-image-translator/releases/tag/alpha-v2.2.1)ocr.ckpt、detect.ckpt and inpainting.ckpt,put them in the root directory of this repo
-3. Apply for baidu translate API, put ypur appid and key in `key.py`
+3. Apply for youdao translate API, put your APP_KEY and APP_SECRET in `key.py`
 4. Run`python translate_demo.py --image <path_to_image> [--use-inpainting] [--use-cuda]`,result can be found in `result/`. Add `--use-inpainting` to enable inpainting, Add `--use-cuda` to use CUDA.
 # This is a hobby project, you are welcome to contribute
 Currently this only a simple demo, many imperfections exist, we need your support to make this project better!
diff --git a/imgproc.py b/imgproc.py
index 42dd7d059..c74269c55 100755
--- a/imgproc.py
+++ b/imgproc.py
@@ -1,75 +1,75 @@
-"""
-Copyright (c) 2019-present NAVER Corp.
-MIT License -""" - -# -*- coding: utf-8 -*- -import numpy as np -from skimage import io -import cv2 - -def loadImage(img_file): - img = io.imread(img_file) # RGB order - if img.shape[0] == 2: img = img[0] - if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) - if img.shape[2] == 4: img = img[:,:,:3] - img = np.array(img) - - return img - -def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)): - # should be RGB order - img = in_img.copy().astype(np.float32) - - img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32) - img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32) - return img - -def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)): - # should be RGB order - img = in_img.copy() - img *= variance - img += mean - img *= 255.0 - img = np.clip(img, 0, 255).astype(np.uint8) - return img - -def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1): - height, width, channel = img.shape - - # magnify image size - target_size = mag_ratio * square_size#max(height, width) - - # set original image size - # if target_size > square_size: - # target_size = square_size - - ratio = target_size / max(height, width) - - target_h, target_w = int(round(height * ratio)), int(round(width * ratio)) - proc = cv2.resize(img, (target_w, target_h), interpolation = interpolation) - - MULT = 256 - - # make canvas and paste image - target_h32, target_w32 = target_h, target_w - pad_h = 0 - pad_w = 0 - if target_h % MULT != 0: - pad_h = (MULT - target_h % MULT) - target_h32 = target_h + pad_h - if target_w % MULT != 0: - pad_w = (MULT - target_w % MULT) - target_w32 = target_w + pad_w - resized = np.zeros((target_h32, target_w32, channel), dtype=np.uint8) - resized[0:target_h, 0:target_w, :] = proc - target_h, target_w = target_h32, target_w32 - - size_heatmap = (int(target_w/2), int(target_h/2)) - - return resized, ratio, size_heatmap, pad_w, pad_h - -def cvt2HeatmapImg(img): - img = (np.clip(img, 0, 1) * 255).astype(np.uint8) - img = cv2.applyColorMap(img, cv2.COLORMAP_JET) - return img +""" +Copyright (c) 2019-present NAVER Corp. 
+MIT License +""" + +# -*- coding: utf-8 -*- +import numpy as np +from skimage import io +import cv2 + +def loadImage(img_file): + img = io.imread(img_file) # RGB order + if img.shape[0] == 2: img = img[0] + if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + if img.shape[2] == 4: img = img[:,:,:3] + img = np.array(img) + + return img + +def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)): + # should be RGB order + img = in_img.copy().astype(np.float32) + + img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32) + img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32) + return img + +def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)): + # should be RGB order + img = in_img.copy() + img *= variance + img += mean + img *= 255.0 + img = np.clip(img, 0, 255).astype(np.uint8) + return img + +def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1): + height, width, channel = img.shape + + # magnify image size + target_size = mag_ratio * square_size#max(height, width) + + # set original image size + # if target_size > square_size: + # target_size = square_size + + ratio = target_size / max(height, width) + + target_h, target_w = int(round(height * ratio)), int(round(width * ratio)) + proc = cv2.resize(img, (target_w, target_h), interpolation = interpolation) + + MULT = 256 + + # make canvas and paste image + target_h32, target_w32 = target_h, target_w + pad_h = 0 + pad_w = 0 + if target_h % MULT != 0: + pad_h = (MULT - target_h % MULT) + target_h32 = target_h + pad_h + if target_w % MULT != 0: + pad_w = (MULT - target_w % MULT) + target_w32 = target_w + pad_w + resized = np.zeros((target_h32, target_w32, channel), dtype=np.uint8) + resized[0:target_h, 0:target_w, :] = proc + target_h, target_w = target_h32, target_w32 + + size_heatmap = (int(target_w/2), int(target_h/2)) + + return resized, ratio, size_heatmap, pad_w, pad_h + +def cvt2HeatmapImg(img): + img = (np.clip(img, 0, 1) * 255).astype(np.uint8) + img = cv2.applyColorMap(img, cv2.COLORMAP_JET) + return img diff --git a/key.py b/key.py index 06b7c5dc1..87fe37cd3 100644 --- a/key.py +++ b/key.py @@ -1,2 +1,6 @@ +# baidu APP_ID = '' #你的appid -SECRET_KEY = '' #你的密钥 \ No newline at end of file +SECRET_KEY = '' #你的密钥 +# youdao +APP_KEY = '' # 应用ID +APP_SECRET = '' # 应用秘钥 \ No newline at end of file diff --git a/translate_demo.py b/translate_demo.py index d6b5d4395..3cd5e3129 100755 --- a/translate_demo.py +++ b/translate_demo.py @@ -462,8 +462,8 @@ def run_inpainting(model_inpainting, img, mask, max_image_size = 1024, pad_size img_inpainted = cv2.resize(img_inpainted, (width, height), interpolation = cv2.INTER_LINEAR_EXACT) return img_inpainted * mask_original + img_original * (1 - mask_original), (img_torch.cpu() * 127.5 + 127.5).squeeze_(0).permute(1, 2, 0).numpy() -from baidutrans import Translator as baidu_trans -baidu_translator = baidu_trans() +from youdao import Translator +translator = Translator() import text_render @@ -597,7 +597,7 @@ def main() : print(' -- Translating') # translate text region texts texts = '\n'.join([r.text for r in text_regions]) - trans_ret = baidu_translator.translate('ja', 'zh-CN', texts) + trans_ret = translator.translate('auto', 'zh-CHS', texts) if trans_ret : translated_sentences = [] batch = len(text_regions) diff --git a/youdao.py b/youdao.py new file mode 100644 index 000000000..aaa981f62 --- /dev/null +++ 
b/youdao.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+import uuid
+import requests
+import hashlib
+import time
+
+# credentials are read from key.py (see README), not hardcoded here
+from key import APP_KEY, APP_SECRET
+
+YOUDAO_URL = 'https://openapi.youdao.com/api'
+
+
+def encrypt(signStr):
+    # sha256 hex digest, used as the v3 request signature
+    hash_algorithm = hashlib.sha256()
+    hash_algorithm.update(signStr.encode('utf-8'))
+    return hash_algorithm.hexdigest()
+
+
+def truncate(q):
+    # 'input' field of the v3 signature: the whole query if it is at most
+    # 20 chars, otherwise first 10 chars + length + last 10 chars
+    if q is None:
+        return None
+    size = len(q)
+    return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]
+
+
+def do_request(data):
+    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+    return requests.post(YOUDAO_URL, data=data, headers=headers)
+
+
+class Translator(object):
+    def __init__(self):
+        pass
+
+    def translate(self, from_lang, to_lang, query_text):
+        data = {}
+        data['from'] = from_lang
+        data['to'] = to_lang
+        data['signType'] = 'v3'
+        curtime = str(int(time.time()))
+        data['curtime'] = curtime
+        salt = str(uuid.uuid1())
+        # sign = sha256(appKey + input + salt + curtime + appSecret)
+        signStr = APP_KEY + truncate(query_text) + salt + curtime + APP_SECRET
+        data['appKey'] = APP_KEY
+        data['q'] = query_text
+        data['salt'] = salt
+        data['sign'] = encrypt(signStr)
+        #data['vocabId'] = 'your vocab table ID'
+
+        response = do_request(data)
+        contentType = response.headers['Content-Type']
+        if contentType == "audio/mp3":
+            # TTS payload, nothing to parse
+            return []
+        result = response.json()
+        if 'translation' not in result:
+            # API error (bad sign, quota exceeded, ...), see result['errorCode']
+            return []
+        result_list = []
+        for ret in result['translation']:
+            result_list.extend(ret.split('\n'))
+        return result_list
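
Usage sketch for the new youdao translator, mirroring the call site added in translate_demo.py; assumes key.py has been filled in with a valid APP_KEY/APP_SECRET pair from the youdao console, and the example input text is illustrative only:

    from youdao import Translator

    translator = Translator()
    # 'auto' lets youdao detect the source language; 'zh-CHS' is Simplified Chinese,
    # the same language pair translate_demo.py now uses by default
    lines = translator.translate('auto', 'zh-CHS', 'これはテストです\n二行目')
    print(lines)  # one translated string per input line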