Commit: style code
hyunwoongko committed Dec 29, 2021
1 parent d7007c9 commit ccaea51
Showing 3 changed files with 9 additions and 7 deletions.
parallelformers/parallel/process.py (7 changes: 4 additions & 3 deletions)
@@ -16,17 +16,18 @@
 import io
 import os
 import pickle
+import random
 import traceback
 import types
 from contextlib import suppress
 from dataclasses import _is_dataclass_instance, asdict
-from time import time
 from inspect import signature
+from time import time
 from typing import Any, List, Union
-import torch.distributed as dist
+
 import numpy as np
-import random
 import torch
+import torch.distributed as dist
 import torch.multiprocessing as mp
 import torch.nn as nn
 from transformers.file_utils import ModelOutput
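
Taken together, this hunk only re-sorts the file's imports into the conventional PEP 8 / isort grouping: alphabetized standard-library modules first, then a blank line, then third-party packages. Reconstructed from the hunk above, the import block after this commit reads:

    import io
    import os
    import pickle
    import random
    import traceback
    import types
    from contextlib import suppress
    from dataclasses import _is_dataclass_instance, asdict
    from inspect import signature
    from time import time
    from typing import Any, List, Union

    import numpy as np
    import torch
    import torch.distributed as dist
    import torch.multiprocessing as mp
    import torch.nn as nn
    from transformers.file_utils import ModelOutput

This ordering matches the default output of the isort tool; the commit message ("style code") does not say whether a formatter or a manual pass produced it.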
parallelformers/parallelize.py (4 changes: 2 additions & 2 deletions)
@@ -77,9 +77,9 @@ def __init__(
         master_port: int = 29500,
         backend="nccl",
         verbose: str = None,
-        init_method="spawn",
+        init_method: str = "spawn",
         daemon: bool = True,
-        seed: int = None
+        seed: int = None,
     ):
         self.init_environments(
             num_gpus,
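
The two edits here are likewise purely stylistic: init_method gains the type annotation its neighboring parameters already carry, and seed gains a trailing comma so a future parameter can be appended with a one-line diff. A sketch of the visible tail of the signature after this commit (parameters above master_port sit outside the hunk and are elided; strictly, seed: int = None would be Optional[int] under PEP 484, but this commit only touches formatting):

    def __init__(
        self,
        # ... earlier parameters, not shown in this hunk ...
        master_port: int = 29500,
        backend="nccl",
        verbose: str = None,
        init_method: str = "spawn",  # annotation added by this commit
        daemon: bool = True,
        seed: int = None,  # trailing comma added by this commit
    ):
        self.init_environments(
            num_gpus,
            # ...
        )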
tests/causal_lm.py (5 changes: 3 additions & 2 deletions)
@@ -13,10 +13,11 @@
 # limitations under the License.
 
 import os
+import random
 import unittest
 from argparse import ArgumentParser
+
 import numpy as np
-import random
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
@@ -33,7 +34,7 @@ def test_generation(self, model, tokens, tokenizer):
             max_length=40,
             no_repeat_ngram_size=4,
             do_sample=True,
-            top_p=0.7
+            top_p=0.7,
         )
 
         gen = tokenizer.batch_decode(output)[0]
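
As in the other two files, the changes are pure style: import random moves up into the alphabetized standard-library group, and the final generation argument gains a trailing comma. Only the argument list and the later tokenizer.batch_decode(output) call are visible in the hunks, so the receiver is an assumption here, but the call presumably reads:

    output = model.generate(
        **tokens,  # assumed: the tokenized inputs from the test fixture
        max_length=40,
        no_repeat_ngram_size=4,
        do_sample=True,
        top_p=0.7,  # trailing comma added by this commit
    )

    gen = tokenizer.batch_decode(output)[0]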
