diff --git a/index.md b/index.md index ee8f42ac852..8b47806393e 100644 --- a/index.md +++ b/index.md @@ -1,5 +1,5 @@ --- layout: docs_redirect title: PyTorch | Redirect -redirect_url: "/xla/release/r2.4/index.html" +redirect_url: "/xla/release/r2.5/index.html" --- diff --git a/release/2.5/_images/ddp_md_mnist_with_real_data.png b/release/2.5/_images/ddp_md_mnist_with_real_data.png new file mode 100644 index 00000000000..f83c5182be6 Binary files /dev/null and b/release/2.5/_images/ddp_md_mnist_with_real_data.png differ diff --git a/release/2.5/_images/spmd_mode.png b/release/2.5/_images/spmd_mode.png new file mode 100644 index 00000000000..dd9b5cc69cc Binary files /dev/null and b/release/2.5/_images/spmd_mode.png differ diff --git a/release/2.5/_images/torchbench_pjrt_vs_xrt.svg b/release/2.5/_images/torchbench_pjrt_vs_xrt.svg new file mode 100644 index 00000000000..effe9b72be8 --- /dev/null +++ b/release/2.5/_images/torchbench_pjrt_vs_xrt.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/release/2.5/_images/torchbench_tfrt_vs_se.svg b/release/2.5/_images/torchbench_tfrt_vs_se.svg new file mode 100644 index 00000000000..161f0433b0a --- /dev/null +++ b/release/2.5/_images/torchbench_tfrt_vs_se.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/release/2.5/_modules/index.html b/release/2.5/_modules/index.html new file mode 100644 index 00000000000..503d30fd780 --- /dev/null +++ b/release/2.5/_modules/index.html @@ -0,0 +1,712 @@ + + + + + + + + + + + + Overview: module code — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+ + +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/core/xla_model.html b/release/2.5/_modules/torch_xla/core/xla_model.html new file mode 100644 index 00000000000..1a113b0e897 --- /dev/null +++ b/release/2.5/_modules/torch_xla/core/xla_model.html @@ -0,0 +1,2260 @@ + + + + + + + + + + + + torch_xla.core.xla_model — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.core.xla_model

+import contextlib
+import io
+import itertools
+import logging
+import sys
+import re
+import threading
+import time
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Set, TextIO, Tuple, TypedDict, Union
+import torch
+import torch.distributed._functional_collectives
+from torch.library import Library
+import torch.nn.functional as F
+import torch.optim as optim
+import torch_xla
+from torch_xla import runtime
+import torch_xla.core.xla_env_vars as xenv
+import torch_xla.debug.metrics_saver as ms
+import torch_xla.utils.utils as xu
+import torch_xla.utils.closures as xc
+from torch_xla.distributed.spmd.xla_sharding import ShardingSpec
+import os
+from torch_xla.experimental.deprecation import deprecated
+import torch_xla._internal.utils as _utils
+
+_DEVICES = xu.LazyProperty(lambda: torch_xla._XLAC._xla_get_devices())
+
+REDUCE_SUM = 'sum'
+REDUCE_MUL = 'mul'
+REDUCE_AND = 'and'
+REDUCE_OR = 'or'
+REDUCE_MIN = 'min'
+REDUCE_MAX = 'max'
+
+_DEVICE_CONTEXTS = dict()
+_DEVICE_CONTEXTS_LOCK = threading.Lock()
+
+XLA_LIB = Library("xla", "DEF")
+
+from . import xla_model as this_module
+
+xrt_world_size = deprecated(this_module, torch_xla.runtime.world_size,
+                            'xrt_world_size() will be removed in release 2.7.')
+get_ordinal = deprecated(
+    this_module, torch_xla.runtime.global_ordinal,
+    'xla_model.get_ordinal() will be removed in release 2.7.')
+parse_xla_device = deprecated(
+    this_module, _utils.parse_xla_device,
+    'xla_model.parse_xla_device() will be removed in release 2.7.')
+
+
+class DeviceContext(object):
+
+  def __init__(self, device: Union[str, torch.device]):
+    self.device = device
+
+
+def _get_device_context(
+    device: Optional[Union[str, torch.device]] = None) -> DeviceContext:
+  if device is None:
+    device = torch_xla._XLAC._xla_get_default_device()
+  else:
+    device = str(device)
+  with _DEVICE_CONTEXTS_LOCK:
+    devctx = _DEVICE_CONTEXTS.get(device, None)
+    if devctx is None:
+      devctx = DeviceContext(device)
+      _DEVICE_CONTEXTS[device] = devctx
+    return devctx
+
+
+def is_xla_tensor(tensor: torch.Tensor) -> bool:
+  return tensor.device.type == 'xla'
+
+
+def get_xla_supported_devices(devkind: Optional[str] = None,
+                              max_devices: Optional[int] = None) -> List[str]:
+  """Returns a list of supported devices of a given kind.
+
+  Args:
+    devkind (string..., optional): If specified, a device type such as `TPU`,
+      `CUDA`, `CPU`, or name of custom PJRT device.
+    max_devices (int, optional): The maximum number of devices to be returned of
+      that kind.
+
+  Returns:
+    The list of device strings such as ['xla:0', 'xla:1', ...]
+  """
+  # TODO(wcromar): Remove `devkind` after 2.3 release cut. We no longer support
+  # multiple device types.
+  if not devkind:
+    devices = torch_xla._XLAC._xla_get_devices()
+    return [
+        f'xla:{i}'
+        for i, _ in enumerate(devices[:max_devices] if max_devices else devices)
+    ]
+  else:
+    warnings.warn("`devkind` argument is deprecated and will be removed in a "
+                  "future release.")
+
+  xla_devices = _DEVICES.value
+  kind_devices = []
+  for i, device in enumerate(xla_devices):
+    if re.match(devkind + r':\d+$', device):
+      kind_devices.append('xla:{}'.format(i))
+  if kind_devices:
+    return kind_devices[:max_devices] if max_devices else kind_devices
+
+
+def get_local_ordinal() -> int:
+  """Retrieves the replication local ordinal of the current thread.
+
+  The local ordinals range from 0 to the number of local devices minus 1.
+
+  Returns:
+    The replication local ordinal of the current thread.
+  """
+  return runtime.local_ordinal()
+
+
+
[docs]def is_master_ordinal(local: bool = True) -> bool: + """Checks whether the current process is the master ordinal (0). + + Args: + local (bool): Whether the local or global master ordinal should be checked. + In case of multi-host replication, there is only one global master ordinal + (host 0, device 0), while there are NUM_HOSTS local master ordinals. + Default: True + + Returns: + A boolean indicating whether the current process is the master ordinal. + """ + ordinal = get_local_ordinal() if local else runtime.global_ordinal() + return ordinal == 0
+ + +def master_print(*args: Tuple[Any, ...], + fd: TextIO = sys.stdout, + local: bool = False, + flush: bool = False): + if is_master_ordinal(local=local): + print(*args, file=fd, flush=flush) + + +
[docs]def xla_device(n: Optional[int] = None, + devkind: Optional[str] = None) -> torch.device: + """Returns a given instance of an XLA device. + + Args: + n (int, optional): The specific instance (ordinal) to be returned. If + specified, the specific XLA device instance will be returned. Otherwise + the first device of `devkind` will be returned. + devkind (string..., optional): If specified, device type such as `TPU`, + `CUDA`, `CPU`, or custom PJRT device. Deprecated. + + Returns: + A `torch.device` with the requested instance. + """ + # When SPMD is enabled, we always return `xla:0` to the user, and + # under the hood we use virtual device logic for every xla tensor + if xu.check_env_flag('XLA_USE_SPMD'): + device = 'xla:0' + torch_xla._XLAC._xla_set_default_device(device) + return torch.device(device) + + return runtime.xla_device(n, devkind)
+ + +def _xla_real_device(device: torch.device) -> Any: + device_str = str(device) + m = re.match(r'xla:(\d+)$', device_str) + if not m: + raise RuntimeError('Invalid device format: {}'.format(device_str)) + return _DEVICES.value[int(m.group(1))] + + +def xla_real_devices(devices: Optional[List[torch.device]] = None) -> List[str]: + """Returns the real devices' name. + + Args: + devices: The list of torch devices such as ['xla:0', 'xla:1']. + + Returns: + A list of real devices' name such as ['CUDA:0', 'CUDA:1']. + """ + if not devices: + devices = get_xla_supported_devices() + + return [_xla_real_device(device) for device in devices] + + +
[docs]def xla_device_hw(device: Union[str, torch.device]) -> str: + """Returns the hardware type of the given device. + + Args: + device (string or torch.device): The xla device that will be mapped to the + real device. + + Returns: + A string representation of the hardware type of the given device. + """ + real_device = _xla_real_device(device) + return real_device.split(':')[0]
+ + +def xla_replication_devices( + local_devices: Optional[List[torch.device]] = None) -> List[str]: + real_devices = xla_real_devices(local_devices) + device_types = set() + for device in real_devices: + xdev = _utils.parse_xla_device(device) + device_types.add(xdev[0]) + if len(device_types) != 1: + # No replication if the device set spawns multiple device types. + raise RuntimeError( + 'Cannot replicate across different device types: devices={}/{}'.format( + local_devices, real_devices)) + device_type = device_types.pop() + kind_devices = get_xla_supported_devices() + if len(kind_devices) != len(local_devices): + # Replication can only happen among all devices of one kind. + raise RuntimeError( + 'Cannot replicate if number of devices ({}) is different from {}'. + format(len(local_devices), len(kind_devices))) + replication_devices = [] + for device in torch_xla._XLAC._xla_get_all_devices(): + # device is like 'CUDA:0' + xdev = _utils.parse_xla_device(device) + if not xdev: + raise RuntimeError('Invalid device format: {}'.format(device)) + if xdev[0] == device_type: + replication_devices.append(device) + sorted_by_ordinal = sorted( + replication_devices, + key=lambda device: _utils.parse_xla_device(device)[1]) + return sorted_by_ordinal + + +def unlazy(tensors: List[torch.Tensor]): + """Blocks the program until `tensors` are materialized. + + This API is for benchmarking, don't use it in real models. + + Args: + tensors: List of `torch.Tensor`s to materialize. For each + Tensor `t` in the list, `t.device` must be an `xla` device. 
+ """ + torch_xla._XLAC._xla_sync_multi(tensors, devices=[], wait=True) + + +def set_replication(device: torch.device, + devices: Optional[List[torch.device]]): + device = str(device) + devctx = _get_device_context(device=device) + devices = [str(x) for x in devices] + if devices: + # sample replication_devices: ['CUDA:0', 'CUDA:1', 'CUDA:2', 'CUDA:3'] + replication_devices = xla_replication_devices(devices) + torch_xla._XLAC._xla_set_replication_devices(replication_devices) + devctx.device_index = devices.index(device) + else: + torch_xla._XLAC._xla_set_replication_devices([]) + devctx.device_index = 0 + torch_xla._XLAC._set_all_reduce_token(devctx.device, None) + torch_xla._XLAC._xla_set_default_device(device) + + +class RateTracker(object): + + def __init__(self, smooth_factor: Optional[float] = None): + self._smooth_factor = xu.getenv_as( + 'RATE_TRACKER_SMOOTHING', float, + 0.4) if smooth_factor is None else smooth_factor + self._start_time = time.time() + self._partial_time = self._start_time + self._partial_count = 0.0 + self._partial_rate = None + self._count = 0.0 + + def _update(self, now: float, rate: float): + self._partial_count += self._count + self._count = 0.0 + self._partial_time = now + self._partial_rate = rate + + def add(self, count: float): + self._count += count + + def _smooth(self, current_rate: float) -> float: + if self._partial_rate is None: + smoothed_rate = current_rate + else: + smoothed_rate = ((1 - self._smooth_factor) * current_rate + + self._smooth_factor * self._partial_rate) + return smoothed_rate + + def rate(self): + now = time.time() + delta = now - self._partial_time + report_rate = 0.0 + if delta > 0: + report_rate = self._smooth(self._count / delta) + self._update(now, report_rate) + return report_rate + + def global_rate(self): + delta = time.time() - self._start_time + count = self._partial_count + self._count + return count / delta if delta > 0 else 0.0 + + +class ToXlaTensorArena(object): + + def __init__(self, 
convert_fn: Callable[[List[torch.Tensor]], + List[torch.Tensor]], + select_fn: Callable[[torch.Tensor], bool]): + self._convert_fn = convert_fn + self._select_fn = select_fn + self._tensors = [] + + def _add(self, tensor: torch.Tensor): + self._tensors.append(tensor) + + def _convert(self): + self._index = 0 + if self._tensors: + self._converted_tensors = self._convert_fn(self._tensors) + else: + self._converted_tensors = [] + + def _get_converted_tensor(self) -> torch.Tensor: + assert self._index < len(self._converted_tensors) + new_tensor = self._converted_tensors[self._index] + self._index += 1 + return new_tensor + + def _collect_tensors(self, inputs: Any): + + def collect_fn(value: Any): + self._add(value) + + xu.for_each_instance(inputs, lambda x: self._select_fn(x), collect_fn) + + def _replace_tensors(self, inputs: Any): + + def convert_fn(value: Any): + return self._get_converted_tensor() + + return xu.for_each_instance_rewrite(inputs, lambda x: self._select_fn(x), + convert_fn) + + def transform(self, inputs: Any): + self._tensors = [] + self._collect_tensors(inputs) + self._convert() + return self._replace_tensors(inputs) + + +def check_view_sharing(obj): + tensors = set() + aliases = dict() + + def tensor_info(t: torch.Tensor) -> str: + return '{}{}'.format(t.dtype, list(t.size())) + + def tensor_id(t: torch.Tensor) -> Tuple[int, str]: + if is_xla_tensor(t): + return torch_xla._XLAC._xla_get_tensor_id(t), 'xla' + return id(t), 'torch' + + def alias_id(t: torch.Tensor) -> Tuple[int, str]: + if is_xla_tensor(t): + aid = torch_xla._XLAC._xla_get_tensor_view_alias_id(t) + return None if aid == 0 else aid, 'xla' + return t.storage().data_ptr(), 'torch' + + def check_object(obj): + tid = tensor_id(obj) + if tid not in tensors: + tensors.add(tid) + aid = alias_id(obj) + if aid[0] is not None: + if aid in aliases: + oobj = aliases[aid] + raise RuntimeError( + 'Tensor ID {} ({}) is sharing a view with tensor ID {} ({})'. 
+ format(tid, tensor_info(obj), tensor_id(oobj), tensor_info(oobj))) + aliases[aid] = obj + + xu.for_each_instance(obj, lambda x: type(x) == torch.Tensor, check_object) + + +def _fetch_gradients(optimizer: optim.Optimizer) -> List[torch.Tensor]: + gradients = [] + for param_group in optimizer.__getstate__()['param_groups']: + for group, params in param_group.items(): + if group == 'params': + for p in params: + if isinstance(p, torch.Tensor) and p.grad is not None: + gradients.append(p.grad.data) + return gradients + + +def _get_all_reduce_token() -> Tuple[Any, DeviceContext]: + devctx = _get_device_context() + token = torch_xla._XLAC._get_all_reduce_token(devctx.device) + return token, devctx + + +
[docs]def all_reduce( + reduce_type: str, + inputs: Union[torch.Tensor, List[torch.Tensor]], + scale: float = 1.0, + groups: Optional[List[List[int]]] = None, + pin_layout: bool = True) -> Union[torch.Tensor, List[torch.Tensor]]: + """Performs an inplace reduce operation on the input tensor(s). + + Args: + reduce_type (string): One of ``xm.REDUCE_SUM``, ``xm.REDUCE_MUL``, + ``xm.REDUCE_AND``, ``xm.REDUCE_OR``, ``xm.REDUCE_MIN`` and + ``xm.REDUCE_MAX``. + inputs: Either a single `torch.Tensor` or a list of `torch.Tensor` to + perform the all reduce op to. + scale (float): A default scaling value to be applied after the reduce. + Default: 1.0 + groups (list, optional): A list of list, representing the replica groups for + the `all_reduce()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout for this communication op. + Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". + + Returns: + If a single `torch.Tensor` is passed, the return value is a `torch.Tensor` + holding the reduced value (across the replicas). If a list/tuple is passed, + this function performs an inplace all-reduce op on the input tensors, and + returns the list/tuple itself. 
+ """ + groups = groups or [] + + # No-op if there is only one device + if runtime.world_size() == 1 and not xu.getenv_as('XLA_ALWAYS_ALLREDUCE', + bool, False): + if isinstance(inputs, torch.Tensor): + return inputs.clone() + else: + return inputs + + if isinstance(inputs, torch.Tensor): + result = None + if scale == 1.0 and groups == [] and pin_layout: + # TODO(alanwaketan): Support groups. + # Only c10d_functional version cc ops are traceable by Dynamo. + result = torch.ops._c10d_functional.all_reduce(inputs, reduce_type, "") + else: + result = torch_xla._XLAC._xla_all_reduce(reduce_type, inputs, scale, + groups, pin_layout) + results = [result] + else: + torch_xla._XLAC._xla_all_reduce_inplace(reduce_type, inputs, scale, groups, + pin_layout) + results = inputs + return results[0] if isinstance(inputs, torch.Tensor) else results
+ + +def _all_gather_using_all_reduce( + value: torch.Tensor, + dim: int = 0, + groups: Optional[List[List[int]]] = None, + pin_layout: bool = True) -> Optional[torch.Tensor]: + """Performs an all-gather operation using all-reduce along a given dimension. + + Args: + value (torch.Tensor): The input tensor. + dim (int): The gather dimension. + Default: 0 + groups (list, optional): A list of list, representing the replica groups for + the `all_gather()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout for this communication op. + Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". + + Returns: + A tensor which has, in the ``dim`` dimension, all the values from the + participating replicas. + """ + if dim < 0: + dim = value.dim() + dim + size = value.size(dim) + padding = [0] * (2 * value.dim()) + ordinal = runtime.global_ordinal() + if groups is None: + left, right = ordinal, runtime.world_size() - 1 - ordinal + else: + ordinals = dict() + for g in groups: + for i, x in enumerate(g): + ordinals[x] = (i, len(g) - 1 - i) + left, right = ordinals[ordinal] + idx = value.dim() - 1 - dim + padding[2 * idx] = left * size + padding[2 * idx + 1] = right * size + return all_reduce(REDUCE_SUM, F.pad(value, padding), groups=groups) + + +
[docs]def all_gather(value: torch.Tensor, + dim: int = 0, + groups: Optional[List[List[int]]] = None, + output: Optional[torch.Tensor] = None, + pin_layout: bool = True) -> torch.Tensor: + """Performs an all-gather operation along a given dimension. + + Args: + value (torch.Tensor): The input tensor. + dim (int): The gather dimension. + Default: 0 + groups (list, optional): A list of list, representing the replica groups for + the `all_gather()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + output (torch.Tensor): Optional output tensor. + pin_layout (bool, optional): whether to pin the layout for this communication op. + Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". + + Returns: + A tensor which has, in the ``dim`` dimension, all the values from the + participating replicas. + """ + # _all_gather_using_all_reduce does not support list of tensors as input + if pin_layout and output == None and isinstance(value, torch.Tensor): + # There is not an easy way to pin the all_gather layout, so use all_reduce + # based all_gather for this purpose. + return _all_gather_using_all_reduce( + value, dim=dim, groups=groups, pin_layout=True) + + if dim < 0: + dim = value.dim() + dim + if groups: + shard_count = len(groups[0]) + assert all(len(group) == shard_count for group in groups), \ + "Replica groups must have the same number of replicas/shards." 
+ else: + # All replicas belong to a single group + shard_count = runtime.world_size() + + token, devctx = _get_all_reduce_token() + + if isinstance(value, torch.Tensor): + if output != None: + # Call the out of place version of the all_gather + new_token = torch_xla._XLAC._xla_all_gather_out(output, value, token, dim, + shard_count, groups or [], + pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return output + + result = torch_xla._XLAC._xla_all_gather(value, dim, shard_count, groups or + [], pin_layout) + return result + + # Now the input should be a list of Tensors. + elif isinstance(value, list) and all( + isinstance(v, torch.Tensor) for v in value): + if pin_layout: + raise RuntimeError( + "For xm.all_gather with list of tensors input, pin_layout=True is not yet supported." + ) + if output != None: + if not isinstance(output, list) or any( + not isinstance(v, torch.Tensor) for v in output): + raise TypeError( + f"`output` needs to be a list of Tensors, but given {type(output)}." + ) + if len(output) != len(value): + raise ValueError("`output` length doesn't match `input` length: " + f"{len(output)} vs {len(input)}.") + # Call the out of place version of the reduce_scatter + new_token = torch_xla._XLAC._xla_all_gather_coalesced_out( + output, value, token, dim, shard_count, groups or [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return output + + result = torch_xla._XLAC._xla_all_gather_coalesced(value, token, dim, + shard_count, groups or + [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, result[-1]) + return result[:-1] + else: + raise TypeError("`value` needs to be a Tensor or a list of Tensors, but " + f"given {type(value)}.")
+ + +class CoalescingBuckets(object): + + def __init__( + self, + func: Callable[[ + Union[torch.Tensor, + List[torch.Tensor]], Optional[Union[torch.Tensor, + List[torch.Tensor]]] + ], Union[torch.Tensor, List[torch.Tensor]]], + input_list: Any, + output_list: Optional[Any] = None, + bucket_cap_mb: int = 160): + if not isinstance(input_list, list) or any( + not isinstance(v, torch.Tensor) for v in input_list): + raise TypeError( + f"`input_list` needs to be a list of Tensors, but given {type(input_list)}." + ) + if output_list != None: + if not isinstance(output_list, list) or any( + not isinstance(v, torch.Tensor) for v in output_list): + raise TypeError( + f"`output_list` needs to be a list of Tensors, but given {type(output_list)}." + ) + if len(output_list) != len(input_list): + raise ValueError( + "`output_list` length doesn't match `input_list` length: " + f"{len(output_list)} vs {len(input_list)}.") + self._func = func + self._input_list = input_list + self._output_list = output_list + self._total = 0 + self._tensor_bucket = [] + self._output_bucket = [] if output_list else None + self._bucket_cap = bucket_cap_mb * 1024 * 1024 + self._out_tensors = [] + + def flush(self): + if len(self._tensor_bucket) == 1: + # Use non-coalesced CCOp if its just one tensor + output = self._output_bucket[0] if self._output_bucket else None + self._out_tensors.append(self._func(self._tensor_bucket[0], output)) + elif len(self._tensor_bucket): + self._out_tensors.extend( + self._func(self._tensor_bucket, self._output_bucket)) + self._total = 0 + self._tensor_bucket = [] + self._output_bucket = [] if self._output_list else None + + def add(self, tensor: torch.Tensor, idx: int): + self._total += tensor.numel() * tensor.element_size() + self._tensor_bucket.append(tensor) + if self._output_list != None: + self._output_bucket.append(self._output_list[idx]) + + def __call__(self) -> Union[torch.Tensor, List[torch.Tensor]]: + for idx, tensor in enumerate(self._input_list): + 
tensor_bytes = tensor.numel() * tensor.element_size() + + # Aim for target bucket_cap_mb: flush new tensor with bucket if bucket content + # is small (1/2 cap) but don't combine if combined total is over 2x cap + total_new = self._total + tensor_bytes + if tensor_bytes > self._bucket_cap and self._total < 0.5 * self._bucket_cap and total_new <= 2 * self._bucket_cap: + self.add(tensor, idx) + self.flush() + else: + # Bucketize till the total spills over + if total_new > self._bucket_cap: + self.flush() + self.add(tensor, idx) + + # Flush the last remaining bucket + self.flush() + + assert len(self._out_tensors) == len(self._input_list) + + return self._out_tensors + + +def all_gather_bucketized( + input_list: List[torch.Tensor], + dim: int = 0, + groups: Optional[List[List[int]]] = None, + output: Optional[torch.Tensor] = None, + pin_layout: bool = False, + bucket_cap_mb=160) -> Union[torch.Tensor, List[torch.Tensor]]: + """Performs an all-gather operation along a given dimension, with bucketization. + + Args: + See all_gather for the args: dim, groups, output, pin_layout + input_list: List of input tensors + bucket_cap_mb: Number of MegaBytes of the tensor bucket to fill before doing all-gather. + + Returns: + A list of tensors each of which has, in the ``dim`` dimension, all the values from the + participating replicas. + """ + # sanity checks + if pin_layout: + raise RuntimeError( + "For xm.all_gather_bucketized, pin_layout=True is not yet supported.") + + def _all_gather_coalesced(_input_list, _output_list=None): + return all_gather( + value=_input_list, + dim=dim, + groups=groups, + output=_output_list, + pin_layout=pin_layout) + + buckets = CoalescingBuckets( + _all_gather_coalesced, input_list, output, bucket_cap_mb=bucket_cap_mb) + return buckets() + + +
[docs]def all_to_all(value: torch.Tensor, + split_dimension: int, + concat_dimension: int, + split_count: int, + groups: Optional[List[List[int]]] = None, + pin_layout: bool = True) -> torch.Tensor: + """Performs an XLA `AllToAll()` operation on the input tensor. + + See: https://www.tensorflow.org/xla/operation_semantics#alltoall + + Args: + value (torch.Tensor): The input tensor. + split_dimension (int): The dimension upon which the split should happen. + concat_dimension (int): The dimension upon which the concat should happen. + split_count (int): The split count. + groups (list, optional): A list of list, representing the replica groups for + the `all_reduce()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout for this communication op. + Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". + + Returns: + The result `torch.Tensor` of the `all_to_all()` operation. + """ + token, devctx = _get_all_reduce_token() + result = torch_xla._XLAC._xla_all_to_all(value, token, split_dimension, + concat_dimension, split_count, + groups or [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, result[1]) + return result[0]
+ + +def collective_permute(value: torch.Tensor, + pairs: List[List[int]]) -> torch.Tensor: + """Performs a XLA `CollectivePermute()` operation on the input tensor. + + WARNING: This function is not very reliable, may produce wrong results under + certain inputs. Use it at your own risk. + + See: https://www.tensorflow.org/xla/operation_semantics#collectivepermute + + Args: + value (torch.Tensor): The input tensor. + pairs (list): A list of (source_replica_id, target_replica_id) pairs, + representing the sender and receiver for the `collective_permute()` + operation. Example: `[[0, 1], [1, 2], [2, 0]]` defines three pairs. The + tensor will be sent from replica 0 to replica 1, replica 1 to replica 2, + and replica 2 to replica 0. + + Returns: + The result `torch.Tensor` of the `collective_permute()` operation. + """ + token, devctx = _get_all_reduce_token() + result = torch_xla._XLAC._xla_collective_permute(value, token, pairs) + torch_xla._XLAC._set_all_reduce_token(devctx.device, result[1]) + return result[0] + + +def collective_broadcast(tensors: List[torch.Tensor], + root_ordinal: int = 0, + groups: Optional[List[int]] = None, + pin_layout: bool = True) -> None: + """Broadcast values of `tensors` from root replica to other replicas in-place. + + Args: + tensors (list): List of `torch.Tensor`s to broadcast. + root_ordinal (int): Ordinal of replica with values to broadcast. + groups (list, optional): A list of list, representing the replica groups for + the `all_reduce()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout for this communication op. 
+ Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". + """ + with torch.no_grad(): + # We must produce the exact same graph in each replica to prevent hanging, + # so each replica must have the same multiply op with the same parameters. + for tensor in tensors: + scale = torch.tensor( + 1 if runtime.global_ordinal() == root_ordinal else 0, + dtype=tensor.dtype) + # Transfer scale tensor as device data instead of constant 1 or 0. + xscale = send_cpu_data_to_device(scale, tensor.device) + tensor.mul_(xscale[0]) + + all_reduce(REDUCE_SUM, tensors, groups=groups, pin_layout=pin_layout) + + +def send(value: torch.Tensor, channel_id: int) -> torch.Tensor: + """Performs a XLA `Send()` operation on the input tensor. + + See: https://www.tensorflow.org/xla/operation_semantics#send + + Args: + value (torch.Tensor): The input tensor. + channel_id (int64): opaque id identifying the destination of the send op. + """ + token, devctx = _get_all_reduce_token() + # The input will be returned as result. + input_as_result, new_token = torch_xla._XLAC._xla_send( + value, token, channel_id) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return input_as_result + + +def recv(output: torch.Tensor, channel_id: int) -> torch.Tensor: + """Performs a XLA `Recv()` operation on the input tensor. + + See: https://www.tensorflow.org/xla/operation_semantics#recv + + Args: + output (torch.Tensor): The output tensor. + channel_id (int64): opaque id identifying the source of the recv op. 
+ """ + token, devctx = _get_all_reduce_token() + result, new_token = torch_xla._XLAC._xla_recv(output, token, channel_id) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return result + + +def reduce_scatter(reduce_type: str, + input: Union[torch.Tensor, List[torch.Tensor]], + scale: float, + scatter_dim: int, + shard_count: int, + groups: Optional[List[List[int]]] = None, + output: Optional[Union[torch.Tensor, + List[torch.Tensor]]] = None, + pin_layout: bool = True) -> torch.Tensor: + """Performs a XLA `ReduceScatter()` operation on the input tensor. + + See: https://www.tensorflow.org/xla/operation_semantics#reducescatter + + Args: + reduce_type (string): One of ``xm.REDUCE_SUM``, ``xm.REDUCE_MUL``, + ``xm.REDUCE_AND``, ``xm.REDUCE_OR``, ``xm.REDUCE_MIN`` and + ``xm.REDUCE_MAX``. + input: (torch.Tensor or a list of torch.Tensor): The input. If it's a list, then + it will also be the output. + scale (float): A default scaling value to be applied after the reduce. + scatter_dim (int): Dimension number to which apply scatter operation. + shard_count (int): The number of ways to split up the scatter_dim in. + groups (list): A list of list, representing the replica groups for + the `reduce_scatter()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + output: Optional output tensor if `input` is a torch.Tensor, or a list of + torch.Tensor if `input` is a list of torch.Tensor. + pin_layout (bool, optional): whether to pin the layout for this communication op. + Layout pining can prevent potential data corruption when each process that + participate in the communication has slightly different program, but it might + cause some xla compilation to fail. Unpin the layout when you see error message + like "HloModule has a mix of layout constrained". 
+ + Returns: + A `torch.Tensor` with all the values reduced across replicas. Each process + gets a shard split along the `scatter_dim`. All other dimensions are + the same as the input. + """ + token, devctx = _get_all_reduce_token() + + if isinstance(input, torch.Tensor): + if output != None: + # Call the out of place version of the reduce_scatter + new_token = torch_xla._XLAC._xla_reduce_scatter_out( + reduce_type, output, input, token, scale, scatter_dim, shard_count, + groups or [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return output + + result = torch_xla._XLAC._xla_reduce_scatter(reduce_type, input, token, + scale, scatter_dim, + shard_count, groups or [], + pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, result[1]) + return result[0] + + # Now the input should be a list of Tensors. + elif isinstance(input, list) and all( + isinstance(v, torch.Tensor) for v in input): + if output != None: + if not isinstance(output, list) or any( + not isinstance(v, torch.Tensor) for v in output): + raise TypeError( + f"`output` needs to be a list of Tensors, but given {type(output)}." 
+ ) + if len(output) != len(input): + raise ValueError("`output` length doesn't match `input` length: " + f"{len(output)} vs {len(input)}.") + # Call the out of place version of the reduce_scatter + new_token = torch_xla._XLAC._xla_reduce_scatter_coalesced_out( + reduce_type, output, input, token, scale, scatter_dim, shard_count, + groups or [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, new_token) + return output + + result = torch_xla._XLAC._xla_reduce_scatter_coalesced( + reduce_type, input, token, scale, scatter_dim, shard_count, groups or + [], pin_layout) + torch_xla._XLAC._set_all_reduce_token(devctx.device, result[-1]) + return result[:-1] + else: + raise TypeError("`input` needs to be a Tensor or a list of Tensors, but " + f"given {type(input)}.") + + +def reduce_scatter_bucketized(reduce_type: str, + input_list: Union[torch.Tensor, + List[torch.Tensor]], + scale: float, + scatter_dim: int, + shard_count: int, + groups: Optional[List[List[int]]] = None, + output: Optional[Union[ + torch.Tensor, List[torch.Tensor]]] = None, + pin_layout: bool = False, + bucket_cap_mb: int = 160) -> CoalescingBuckets: + """Performs a XLA `ReduceScatter()` operation on a list of tensors (bucketized). + + See: https://www.tensorflow.org/xla/operation_semantics#reducescatter + + Args: + see reduce_scatter for reduce_type, scale, scatter_dim, shard_count, groups, pin_layout + input_list: List of input tensors + output: Optional list of output torch.Tensor + bucket_cap_mb: Number of MegaBytes of the tensor bucket to fill before doing reduce-scatter. + + Returns: + A list of `torch.Tensors` with all the values reduced across replicas. Each process + gets a shard split along the `scatter_dim`. All other dimensions are + the same as the input. 
+ """ + + def _reduce_scatter_coalesced( + _input_list: Union[torch.Tensor, List[torch.Tensor]], + _output_list: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None + ) -> Union[torch.Tensor, List[torch.Tensor]]: + return reduce_scatter( + reduce_type=reduce_type, + input=_input_list, + scale=scale, + scatter_dim=scatter_dim, + shard_count=shard_count, + groups=groups, + output=_output_list, + pin_layout=pin_layout) + + buckets = CoalescingBuckets( + _reduce_scatter_coalesced, + input_list, + output, + bucket_cap_mb=bucket_cap_mb) + return buckets() + + +
[docs]def add_step_closure(closure: Callable[..., Any], + args: Tuple[Any] = (), + run_async: bool = False): + """Adds a closure to the list of the ones to be run at the end of the step. + + Many times during model training there is the need to print/report (print to + console, post to tensorboard, etc...) information which require the content of + intermediary tensors to be inspected. + Inspecting different tensors content in different points of the model code + requires many executions and typically causes performance issues. + Adding a step closure will ensure that it will be run after the barrier, when + all the live tensors will be already materialized to device data. + Live tensors which will include the ones captured by the closure arguments. + So using `add_step_closure()` will ensure a single execution will be + performed, even when multiple closures are queued, requiring multiple tensors + to be inspected. + Step closures will be run sequentially in the order they have been queued. + Note that even though using this API the execution will be optimized, it is + advised to throttle the printing/reporting events once every N steps. + + Args: + closure (callable): The function to be called. + args (tuple): The arguments to be passed to the closure. + run_async: If True, run the closure asynchronously. + """ + devctx = _get_device_context() + closures_type = 'async_step_closures' if run_async else 'step_closures' + step_closures = getattr(devctx, closures_type, None) + if step_closures is None: + step_closures = [] + setattr(devctx, closures_type, step_closures) + step_closures.append(lambda a=args: closure(*a))
+ + +def _run_step_closures() -> DeviceContext: + devctx = _get_device_context() + async_step_closures = getattr(devctx, 'async_step_closures', None) + if async_step_closures is not None: + devctx.async_step_closures = [] + async_closure_handler = getattr(devctx, 'async_closure_handler', None) + if async_closure_handler is None: + async_closure_handler = xc.AsyncClosureHandler() + devctx.async_closure_handler = async_closure_handler + async_closure_handler.run_all(async_step_closures) + + step_closures = getattr(devctx, 'step_closures', None) + if step_closures is not None: + devctx.step_closures = [] + for closure in step_closures: + closure() + return devctx + + +def mark_step(wait: bool = False, reset_scope: bool = True): + if xu.getenv_as('XLA_EMIT_STEPLOG', bool, False): + print( + 'torch_xla.core.xla_model::mark_step\n', + end='', + file=sys.stderr, + flush=True) + torch_xla._XLAC._xla_step_marker( + torch_xla._XLAC._xla_get_default_device(), [], + wait=xu.getenv_as('XLA_SYNC_WAIT', bool, wait), + reset_scope=reset_scope) + # Only emit metrics from the first local device index, to avoid emitting the + # same values from different threads. + if is_master_ordinal(): + ms.save_metrics() + devctx = _run_step_closures() + torch_xla._XLAC._set_all_reduce_token(devctx.device, None) + + +# TODO(lsy323): When `tensors` is empty, the some intermediate tensors will also be +# dump as outputs. Need further investigation. +
[docs]def get_stablehlo(tensors: Optional[List[torch.Tensor]] = None) -> str: + """Get StableHLO for the computation graph in string format. + + If `tensors` is not empty, the graph with `tensors` as outputs will be dump. + If `tensors` is empty, the whole computation graph will be dump. + + For inference graph, it is recommended to pass the model outputs to `tensors`. + For training graph, it is not straightforward to identify the "outputs". Using empty `tensors` is recommended. + + To enable source line info in StableHLO, please set env var XLA_HLO_DEBUG=1. + + Args: + tensors (list[torch.Tensor], optional): Tensors that represent the output/root of the StableHLO graph. + + Returns: + StableHLO Module in string format. + """ + if tensors is None: + tensors = [] + return torch_xla._XLAC._get_stablehlo( + tensors, torch_xla._XLAC._xla_get_default_device(), [], + False).decode('utf-8')
+ + +# TODO(lsy323): When `tensors` is empty, the some intermediate tensors will also be +# dump as outputs. Need further investigation. +
[docs]def get_stablehlo_bytecode(tensors: Optional[torch.Tensor] = None) -> bytes: + """Get StableHLO for the computation graph in bytecode format. + + If `tensors` is not empty, the graph with `tensors` as outputs will be dump. + If `tensors` is empty, the whole computation graph will be dump. + + For inference graph, it is recommended to pass the model outputs to `tensors`. + For training graph, it is not straightforward to identify the "outputs". Using empty `tensors` is recommended. + + Args: + tensors (list[torch.Tensor], optional): Tensors that represent the output/root of the StableHLO graph. + + Returns: + StableHLO Module in bytecode format. + """ + if tensors is None: + tensors = [] + return torch_xla._XLAC._get_stablehlo( + tensors, torch_xla._XLAC._xla_get_default_device(), [], True)
+ + +
[docs]def wait_device_ops(devices: List[str] = []): + """Waits for all the async operations on the given devices to complete. + + Args: + devices (string..., optional): The devices whose async ops need to be waited + for. If empty, all the local devices will be waited for. + """ + torch_xla._XLAC._xla_wait_device_ops(devices=devices)
+ + +def all_reduce_bucketized_gradients(gradients: List[torch.Tensor], + scale: float, + groups: Optional[List[List[int]]], + pin_layout: bool, + bucket_cap_mb: int = 0): + total = 0 + tensor_bucket = [] + bucket_cap = bucket_cap_mb * 1024 * 1024 + + for grad in gradients: + grad_bytes = grad.numel() * grad.element_size() + + # Bucketize till the total spills over + total += grad_bytes + if total > bucket_cap and len(tensor_bucket) > 0: + all_reduce( + REDUCE_SUM, + tensor_bucket, + scale=scale, + groups=groups, + pin_layout=pin_layout) + total = grad_bytes + tensor_bucket = [] + tensor_bucket.append(grad) + + # Flush the last remaining bucket + if len(tensor_bucket): + all_reduce( + REDUCE_SUM, + tensor_bucket, + scale=scale, + groups=groups, + pin_layout=pin_layout) + + +def reduce_gradients(optimizer: optim.Optimizer, + groups: Optional[List[List[int]]] = None, + pin_layout: bool = True): + """Reduces all the gradients handled by an optimizer. + + Args: + optimizer (:class:`torch.Optimizer`): The `torch.Optimizer` instance + containing the gradients to be reduced. + groups (list, optional): A list of list, representing the replica groups for + the `all_reduce()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout when reducing gradients. + See `xm.all_reduce` for details. + """ + count = runtime.world_size() + if count > 1: + gradients = _fetch_gradients(optimizer) + bucket_cap_mb = int(os.getenv('ALLREDUCE_GRADIENTS_BUCKET_SIZE_MB', 0)) + # Reverse the gradients list so that we start allreduce from the last layer + # onwards. This allows allreduce to trigger as soon as the bucket fills up and + # overlap with backward pass. 
+ if bucket_cap_mb > 0: + gradients = reversed(gradients) + all_reduce_bucketized_gradients( + gradients, + scale=1.0 / count, + groups=groups, + pin_layout=pin_layout, + bucket_cap_mb=bucket_cap_mb) + else: + all_reduce( + REDUCE_SUM, + gradients, + scale=1.0 / count, + groups=groups, + pin_layout=pin_layout) + + +
[docs]def optimizer_step(optimizer: optim.Optimizer, + barrier: bool = False, + optimizer_args: Dict = {}, + groups: Optional[List[List[int]]] = None, + pin_layout: bool = True): + """Run the provided optimizer step and sync gradidents across all devices. + + Args: + optimizer (:class:`torch.Optimizer`): The `torch.Optimizer` instance whose + `step()` function needs to be called. The `step()` function will be called + with the `optimizer_args` named arguments. + barrier (bool, optional): Whether the XLA tensor barrier should be issued in + this API. If using the PyTorch XLA `ParallelLoader` or `DataParallel` + support, this is not necessary as the barrier will be issued by the XLA + data loader iterator `next()` call. + Default: False + optimizer_args (dict, optional): Named arguments dictionary for the + `optimizer.step()` call. + groups (list, optional): A list of list, representing the replica groups for + the `all_reduce()` operation. Example: `[[0, 1, 2, 3], [4, 5, 6, 7]]` + defines two groups, one with the `[0, 1, 2, 3]` replicas and one with + the `[4, 5, 6, 7]` replicas. If `None` there will be only one group with + all the replicas in it. + pin_layout (bool, optional): whether to pin the layout when reducing gradients. + See `xm.all_reduce` for details. + + Returns: + The same value returned by the `optimizer.step()` call. + + Example: + + >>> import torch_xla.core.xla_model as xm + >>> xm.optimizer_step(self.optimizer) + """ + reduce_gradients(optimizer, groups=groups, pin_layout=pin_layout) + loss = optimizer.step(**optimizer_args) + if barrier: + mark_step() + return loss
+ + +
[docs]def save(data: Any, + file_or_path: Union[str, TextIO], + master_only: bool = True, + global_master: bool = False): + """Saves the input data into a file. + + The saved data is transferred to PyTorch CPU device before being saved, so a + following `torch.load()` will load CPU data. + Care must be taken when working with views. Instead of saving views it's + recommended that you recreate them after the tensors have been loaded and + moved to their destination device(s). + + Args: + data: The input data to be saved. Any nested combination of Python objects + (list, tuples, sets, dicts, ...). + file_or_path: The destination for the data saving operation. Either a file + path or a Python file object. If `master_only` is ``False`` the path or + file objects must point to different destinations as otherwise all the + writes from the same host will override each other. + master_only (bool, optional): Whether only the master device should save the + data. If False, the `file_or_path` argument should be a different file or + path for each of the ordinals taking part to the replication, otherwise + all the replicas on the same host will be writing to the same location. + Default: True + global_master (bool, optional): When ``master_only`` is ``True`` this flag + controls whether every host's master (if ``global_master`` is ``False``) + saves the content, or only the global master (ordinal 0). + Default: False + + Example: + + >>> import torch_xla.core.xla_model as xm + >>> xm.wait_device_ops() # wait for all pending operations to finish. + >>> xm.save(obj_to_save, path_to_save) + >>> xm.rendezvous('torch_xla.core.xla_model.save') # multi process context only + """ + should_write_data = not master_only or is_master_ordinal( + local=not global_master) + + cpu_data = _maybe_convert_to_cpu(data, convert=should_write_data) + if should_write_data: + torch.save(cpu_data, file_or_path)
+ + +def _maybe_convert_to_cpu(data: Any, convert: bool = True) -> ToXlaTensorArena: + + def convert_fn(tensors): + torch_xla._XLAC._xla_sync_multi( + tensors, devices=[], wait=True, sync_xla_data=True) + if not convert: + return tensors + return torch_xla._XLAC._xla_get_cpu_tensors(tensors) + + def select_fn(v): + return type(v) == torch.Tensor and is_xla_tensor(v) + + return ToXlaTensorArena(convert_fn, select_fn).transform(data) + + +def send_cpu_data_to_device( + datas: Any, + device: Union[str, torch.device], + input_sharding: Optional[ShardingSpec] = None) -> ToXlaTensorArena: + + def convert_fn(tensors): + devices = [str(device)] * len(tensors) + shardings = None + if input_sharding: + shardings = [input_sharding.xla_spec(t) for t in tensors] + xtensors = torch_xla._XLAC._xla_tensors_from_aten(tensors, devices, + shardings) + return xtensors + + def select_fn(v): + return type(v) == torch.Tensor and v.device.type == 'cpu' + + if type(datas) is torch.Tensor: + datas = [datas] + return ToXlaTensorArena(convert_fn, select_fn).transform(datas) + + +def xla_rendezvous(payload: bytes = b'', + ordinals: Optional[List[int]] = None, + tag: Optional[str] = None) -> List[bytes]: + """Share `payload` with all replicas in `ordinals`. + + `tag` is ignored except for logging. + + Uses XLA collective communication to communicate between replicas, so this + will sync the graph (`xm.mark_step`). + + Args: + tag: Name of this rendezvous operation. + payload: Payload to share with other replicas. + ordinals: List of replicas participating in rendezvous. + Returns: + List of bytes from other replicas. 
+ """ + if ordinals and len(ordinals) != runtime.global_device_count(): + raise ValueError('Only global rendezvous is supported') + + if not isinstance(payload, bytes): + raise TypeError('`payload` must be bytes, not {}'.format(type(payload))) + + # Finish all execution of previous graphs to avoid recompilation + mark_step() + + device = xla_device() + + data = torch.tensor(list(payload), dtype=torch.uint8) + size = torch.tensor([data.shape[0]], dtype=torch.int, device=device) + + if tag: + logging.info(f"Joining rendezvous '{tag}'...") + + sizes = all_gather(size) + + max_size = torch.max(sizes) + mark_step() + + # If all payloads are empty, return immediately to avoid more CPU transfers + if max_size.item() < 1: + return [b'' for _ in range(sizes.size()[0])] + + padded_data = torch.nn.functional.pad(data, ( + 0, + max_size.item() - size.item(), + )).to(xla_device()) + raw_data = all_gather(padded_data) + data_list = torch.split(raw_data, max_size) + + payloads = [d[:sz] for d, sz in zip(data_list, sizes.cpu())] + mark_step() + + return [bytes(p.cpu().tolist()) for p in payloads] + + +
[docs]def rendezvous(tag: str, + payload: bytes = b'', + replicas: List[int] = []) -> List[bytes]: + """Waits for all the mesh clients to reach the named rendezvous. + + Note: PJRT does not support the XRT mesh server, so this is effectively an + alias to `xla_rendezvous`. + + Args: + tag (string): The name of the rendezvous to join. + payload (bytes, optional): The payload to be sent to the rendezvous. + replicas (list, int): The replica ordinals taking part of the rendezvous. + Empty means all replicas in the mesh. + Default: [] + + Returns: + The payloads exchanged by all the other cores, with the payload of core + ordinal `i` at position `i` in the returned tuple. + + Example: + + >>> import torch_xla.core.xla_model as xm + >>> xm.rendezvous('example') + """ + return xla_rendezvous(payload, replicas or None, tag=tag)
+ + +def do_on_ordinals( + target: Callable[..., Any], + data: Union[Tuple, Any] = (), + ordinals: Union[List[int], Set[int], int] = (0,) +) -> Optional[Any]: + """Runs a function only on a given set of ordinals. + + Args: + target (callable): The function to be run on `ordinals`. + data: Any input data for the `target` function which contains tensors. All + the XLA tensors used by the `target` function must be passed in this + argument. Every other data used by the function can be captured by the + Python interpreter as usual. + Default: () + ordinals (list, int): The list/set of ordinals where the `target` function + should run. + Default: (0,) + + Returns: + In the ordinals that ran the `target` function, the function return value, + otherwise `None`. + """ + running = runtime.global_ordinal() in ordinals + cpu_data = _maybe_convert_to_cpu(data, convert=running) + if running: + result = target(*cpu_data) + else: + result = None + rendezvous('torch_xla.core.xla_model.do_on_ordinals') + return result + + +
[docs]def mesh_reduce(tag: str, data, + reduce_fn: Callable[..., Any]) -> Union[Any, ToXlaTensorArena]: + """Performs an out-of-graph client mesh reduction. + + Args: + tag (string): The name of the rendezvous to join. + data: The data to be reduced. The `reduce_fn` callable will receive a list + with the copies of the same data coming from all the mesh client processes + (one per core). + reduce_fn (callable): A function which receives a list of `data`-like + objects and returns the reduced result. + + Returns: + The reduced value. + + Example: + + >>> import torch_xla.core.xla_model as xm + >>> import numpy as np + >>> accuracy = xm.mesh_reduce('test_accuracy', accuracy, np.mean) + """ + cpu_data = _maybe_convert_to_cpu(data) + bio = io.BytesIO() + torch.save(cpu_data, bio) + xdata = rendezvous(tag, bio.getvalue()) + xldata = [] + for xd in xdata: + xbio = io.BytesIO(xd) + xldata.append(torch.load(xbio)) + return reduce_fn(xldata) if xldata else cpu_data
+ + +
[docs]def set_rng_state(seed: int, device: Optional[str] = None): + """Sets the random number generator state. + + Args: + seed (integer): The state to be set. + device (string, optional): The device where the RNG state needs to be set. + If missing the default device seed will be set. + """ + if device is None: + device = torch_xla._XLAC._xla_get_default_device() + torch_xla._XLAC._xla_set_rng_seed(seed, str(device) if device else '')
+ + +
[docs]def get_rng_state(device: Optional[str] = None) -> int: + """Gets the current running random number generator state. + + Args: + device (string, optional): The device whose RNG state needs to be retrieved. + If missing the default device seed will be set. + + Returns: + The RNG state, as integer. + """ + if device is None: + device = torch_xla._XLAC._xla_get_default_device() + return torch_xla._XLAC._xla_get_rng_seed(str(device) if device else '')
+ + +@contextlib.contextmanager +def fork_rng(device: Optional[str] = None, enabled: bool = True): + """ + Forks the RNG, so that when you return, the RNG is reset to the state that it was previously in. + Args: + device (string, optional): The device where the RNG state needs to be set. If missing the default device seed will be set. + enabled (bool): if ``False``, the RNG is not forked. This is a convenience argument for easily disabling the context manager without having to delete it and unindent your Python code under it. + """ + if not enabled: + yield + return + + if device is None: + device = torch_xla._XLAC._xla_get_default_device() + xla_rng_state = get_rng_state(device=device) + + try: + yield + finally: + set_rng_state(xla_rng_state, device=device) + + +class MemoryInfo(TypedDict): + bytes_used: str + bytes_limit: int + + +
[docs]def get_memory_info(device: Optional[torch.device] = None) -> MemoryInfo: + """Retrieves the device memory usage. + + Args: + device: Optional[torch.device] The device whose memory information are requested. + If not passed will use the default device. + + Returns: + MemoryInfo dict with memory usage for the given device. + + Example: + + >>> xm.get_memory_info() + {'bytes_used': 290816, 'bytes_limit': 34088157184} + """ + if device == None: + device = xla_device() + return torch_xla._XLAC._xla_memory_info(str(device))
+ + +def optimization_barrier_(tensors: List[torch.Tensor]): + """Blocks xla compiler from moving computations across this barrier. The common + use case would be blocking xla common-subexpression elimination pass from undoing + the gradient checkpointing. + + Args: + tensors (List[torch.Tensor]): List of `torch.Tensor` to add barrier to. + """ + torch_xla._XLAC._xla_optimization_barrier_(tensors) + + +def broadcast_master_param(model: torch.nn.Module) -> None: + """ + Broadcast the model parameters from master process to other processes + """ + parameters_and_buffers = list( + itertools.chain(model.parameters(), model.buffers())) + collective_broadcast(parameters_and_buffers) + mark_step() +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/debug/metrics.html b/release/2.5/_modules/torch_xla/debug/metrics.html new file mode 100644 index 00000000000..97360be23c7 --- /dev/null +++ b/release/2.5/_modules/torch_xla/debug/metrics.html @@ -0,0 +1,792 @@ + + + + + + + + + + + + torch_xla.debug.metrics — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.debug.metrics

+import torch_xla
+
+
+
[docs]def counter_names(): + """Retrieves all the currently active counter names.""" + return torch_xla._XLAC._xla_counter_names()
+ + +
[docs]def counter_value(name): + """Returns the value of an active counter. + + Args: + name (string): The name of the counter whose value needs to be retrieved. + + Returns: + The counter value as integer. + """ + return torch_xla._XLAC._xla_counter_value(name)
+ + +def clear_counters(): + """Clear the value of all counters. + """ + return torch_xla._XLAC._clear_xla_counters() + + +
[docs]def metric_names(): + """Retrieves all the currently active metric names.""" + return torch_xla._XLAC._xla_metric_names()
+ + +
[docs]def metric_data(name): + """Returns the data of an active metric. + + Args: + name (string): The name of the metric whose data needs to be retrieved. + + Returns: + The metric data, which is a tuple of (TOTAL_SAMPLES, ACCUMULATOR, SAMPLES). + The `TOTAL_SAMPLES` is the total number of samples which have been posted to + the metric. A metric retains only a given number of samples (in a circular + buffer). + The `ACCUMULATOR` is the sum of the samples over `TOTAL_SAMPLES`. + The `SAMPLES` is a list of (TIME, VALUE) tuples. + """ + return torch_xla._XLAC._xla_metric_data(name)
+ + +def clear_metrics(): + """Clear the value of all metrics. + """ + return torch_xla._XLAC._clear_xla_metrics() + + +def clear_all(): + """Clear the value of all metrics and all counters. + """ + clear_metrics() + clear_counters() + + +
[docs]def metrics_report(): + """Retrieves a string containing the full metrics and counters report.""" + return torch_xla._XLAC._xla_metrics_report()
+ + +
[docs]def short_metrics_report(counter_names: list = None, metric_names: list = None): + """Retrieves a string containing the full metrics and counters report. + + Args: + counter_names (list): The list of counter names whose data needs to be printed. + metric_names (list): The list of metric names whose data needs to be printed. + """ + if not counter_names: + counter_names = ['CachedCompile', 'MarkStep', 'DynamoSyncInputExecuteTime'] + if not metric_names: + metric_names = [ + 'CompileTime', 'ExecuteTime', 'ExecuteReplicatedTime', + 'TransferToDeviceTime', 'TransferFromDeviceTime' + ] + return torch_xla._XLAC._short_xla_metrics_report(counter_names, metric_names)
+ + +def executed_fallback_ops(): + """Retrieves a list of operations that were run in fallback mode.""" + return torch_xla._XLAC._get_executed_fallback_ops() +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/distributed/parallel_loader.html b/release/2.5/_modules/torch_xla/distributed/parallel_loader.html new file mode 100644 index 00000000000..37bc6ba182b --- /dev/null +++ b/release/2.5/_modules/torch_xla/distributed/parallel_loader.html @@ -0,0 +1,973 @@ + + + + + + + + + + + + torch_xla.distributed.parallel_loader — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.distributed.parallel_loader

+import itertools
+import queue
+import threading
+import torch
+import torch_xla
+import torch_xla.debug.profiler as xp
+import torch_xla.utils.keyd_queue as kq
+import torch_xla.utils.utils as xu
+import torch_xla.core.xla_model as xm
+
+
+class PerDeviceQueue(object):
+
+  def __init__(self, device, loader_prefetch_size, device_prefetch_size):
+    self.device = device
+    self.cpu_loader_queue = kq.Queue(maxsize=loader_prefetch_size)
+    self.queue = kq.Queue(maxsize=device_prefetch_size)
+    self.close_queue_count = itertools.count()
+
+
+class PerDeviceLoader(object):
+
+  def __init__(self, loader, device):
+    self._loader = loader
+    self._device = device
+    self._mark_step_batch_count = loader.batches_per_execution - 1
+    self._batches_yielded = 0
+
+  def __iter__(self):
+    return self
+
+  def __next__(self):
+    return self.next()
+
+  def __len__(self):
+    return self._loader.per_device_samples()
+
+  def next(self):
+    if xp.get_tracer_marked_step():
+      xp.set_tracer_marked_step(False)
+      self._batches_yielded += 1
+    else:
+      if self._mark_step_batch_count <= self._batches_yielded:
+        self._batches_yielded = 0
+        xm.mark_step()
+      else:
+        self._batches_yielded += 1
+
+    item = self._loader.next_item(self._device)
+    if item is None:
+      if not self._loader._exception_queue.empty():
+        raise self._loader._exception_queue.get()
+      xm.mark_step()
+      raise StopIteration
+    return item
+
+
+class ParallelLoader(object):
+  """Wraps an existing PyTorch DataLoader with background data upload.
+
+  Args:
+    cpu_loader (:class:`torch.utils.data.DataLoader`): The PyTorch DataLoader to be
+      wrapped.
+    devices (`torch.device`...): The list of devices where the data has to be
+      sent. The i-th sample returned by the `loader` will be sent to `devices[i
+      % len(devices)]`.
+    batchdim (int, optional): The dimension which is holding the batch size.
+      Default: 0
+    loader_prefetch_size (int, optional): The max capacity of the queue used by
+      the thread which is reading samples from the `loader`, to be processed by
+      the worker threads which upload data to the devices.
+      Default: 16
+    device_prefetch_size (int, optional): The max size of the per-device queues,
+      where the worker threads deposit tensors which have already been sent to
+      devices.
+      Default: 8
+    host_to_device_transfer_threads (int, optional): The number of threads that
+      work in parallel to transfer data from loader queue to device queue.
+      Default: 1
+    input_sharding (ShardingSpec, Dict(str, ShardingSpec), optional): Sharding
+      spec to apply to compatible input tensors after loading.
+      Default: None
+  """
+
+  def __init__(self,
+               cpu_loader,
+               devices,
+               batchdim=0,
+               batches_per_execution=1,
+               loader_prefetch_size=16,
+               device_prefetch_size=8,
+               host_to_device_transfer_threads=1,
+               input_sharding=None):
+    self._cpu_loader = cpu_loader
+    self._devices = [torch.device(x) for x in devices]
+    self._batchdim = batchdim
+    self._batches_per_execution = batches_per_execution
+    self._done = False
+    self._queues = dict()
+    self._exception_queue = queue.Queue()
+    self._input_sharding = input_sharding
+    for device in self._devices:
+      self._queues[device] = PerDeviceQueue(device, loader_prefetch_size,
+                                            device_prefetch_size)
+    thread = threading.Thread(target=self._loader_worker)
+    thread.daemon = True
+    thread.start()
+    for dqueue in self._queues.values():
+      for i in range(host_to_device_transfer_threads):
+        thread = threading.Thread(
+            target=self._worker,
+            args=(
+                dqueue,
+                host_to_device_transfer_threads,
+            ))
+        thread.daemon = True
+        thread.start()
+
+  def per_device_loader(self, device):
+    """Retrieves the loader iterator object for the given device.
+
+    Args:
+      device (`torch.device`): The device whole loader is being requested.
+
+    Returns:
+      The loader iterator object for the `device`. This is not a
+      `torch.utils.data.DataLoader` interface, but a Python iterator which
+      returns the same tensor data structure as returned by the wrapped
+      `torch.utils.data.DataLoader`, but residing on XLA devices.
+    """
+    return PerDeviceLoader(self, torch.device(device))
+
+  def per_device_samples(self):
+    return len(self._loader) // len(self._devices)
+
+  def next_item(self, device):
+    dqueue = self._queues[device]
+    return dqueue.queue.get()
+
+  def close(self):
+    self._done = True
+    for dqueue in self._queues.values():
+      dqueue.queue.close()
+      dqueue.cpu_loader_queue.close()
+
+  @property
+  def batches_per_execution(self):
+    return self._batches_per_execution
+
+  def _loader_worker(self):
+    queues = list(self._queues.values())
+    data_iter = enumerate(self._cpu_loader)
+    batch = []
+    while not self._done:
+      try:
+        _, data = next(data_iter)
+      except StopIteration:
+        break
+      batch.append(data)
+      if len(batch) == len(self._devices):
+        for queue_no, device_batch in enumerate(batch):
+          queues[queue_no].cpu_loader_queue.put(device_batch)
+        batch = []
+    for dqueue in queues:
+      dqueue.cpu_loader_queue.close_write()
+
+  def _get_batch(self, dqueue):
+    batch = []
+    while len(batch) < dqueue.queue.max_size():
+      item = dqueue.cpu_loader_queue.get()
+      if item is None:
+        break
+      batch.append(item)
+    return batch
+
+  def send_cpu_data_to_device(self, batches, device):
+    """Move batch to device.
+    Args:
+      batch -> List(torch.Tensor), List(Dict(str: torch.Tensor)): Input batch
+        present in the cpu memory
+      device: TPU device where the batch should be moved
+    
+    Returns:
+      result -> List(torch.Tensor), Dict(str: torch.Tensor): Returns a dict if the
+        input batch is a dict. Otherwise, returns a list of torch.Tensor.
+    """
+    result = None
+    if isinstance(self._input_sharding, dict):
+      if not isinstance(batches[0], dict):
+        raise ValueError(
+            f"input batch should be a dict when input sharding is a dict.")
+      result = []
+      for batch in batches:
+        xla_batch = {}
+        missing_keys = []
+        for key, tensor in batch.items():
+          assert type(tensor) == torch.Tensor
+          sharding_spec = None
+          if self._input_sharding:
+            if key not in self._input_sharding:
+              missing_keys.append(key)
+              continue
+            sharding_spec = self._input_sharding[key]
+
+          # xla_tensor is a list of tensors.
+          xla_tensor = xm.send_cpu_data_to_device(tensor, device, sharding_spec)
+          xla_batch[key] = xla_tensor[0]
+        if len(missing_keys) != 0:
+          # Returning exception as raising in the dataloading thread doesn't surface the problem in the main thread.
+          raise KeyError(
+              f"Keys: {missing_keys} are missing from input_sharding.")
+        result.append(xla_batch)
+    else:
+      result = xm.send_cpu_data_to_device(batches, device, self._input_sharding)
+    return result
+
+  def _worker(self, dqueue, host_to_device_transfer_threads):
+    device = torch.device(dqueue.device)
+    while True:
+      batch = self._get_batch(dqueue)
+      if not batch:
+        break
+      try:
+        batch = self.send_cpu_data_to_device(batch, device)
+      except Exception as e:
+        # _worker is being run in a daemon thread, raise the error
+        # will not work. Put the error in an error queue instead.
+        self._exception_queue.put(e)
+        break
+      for data in batch:
+        dqueue.queue.put(data)
+    close_queue_count = next(dqueue.close_queue_count)
+    if close_queue_count == host_to_device_transfer_threads - 1:
+      dqueue.queue.close_write()
+
+
+
[docs]class MpDeviceLoader(object): + """Wraps an existing PyTorch DataLoader with background data upload. + + This class should only be using with multi-processing data parallelism. It will wrap + the dataloader passed in with ParallelLoader and return the per_device_loader for the + current device. + + Args: + loader (:class:`torch.utils.data.DataLoader`): The PyTorch DataLoader to be + wrapped. + device (`torch.device`...): The device where the data has to be sent. + kwargs: Named arguments for the `ParallelLoader` constructor. + + Example: + + >>> device = torch_xla.device() + >>> train_device_loader = MpDeviceLoader(train_loader, device) + """ + + def __init__(self, loader, device, **kwargs): + self._loader = loader + self._device = device + self._parallel_loader_kwargs = kwargs + + def __iter__(self): + parallel_loader = ParallelLoader(self._loader, [self._device], + **self._parallel_loader_kwargs) + return parallel_loader.per_device_loader(self._device) + + def __len__(self): + return len(self._loader)
+
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/distributed/spmd/xla_sharding.html b/release/2.5/_modules/torch_xla/distributed/spmd/xla_sharding.html new file mode 100644 index 00000000000..9af28ad60b2 --- /dev/null +++ b/release/2.5/_modules/torch_xla/distributed/spmd/xla_sharding.html @@ -0,0 +1,1491 @@ + + + + + + + + + + + + torch_xla.distributed.spmd.xla_sharding — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.distributed.spmd.xla_sharding

+import os
+from collections import OrderedDict, defaultdict
+from dataclasses import dataclass, field
+import torch
+import torch_xla
+import torch_xla.core.xla_model as xm
+import torch_xla._internal.utils as _utils
+from torch_xla.distributed.spmd import XLAShardedTensor, XLAShard
+import torch_xla.runtime as xr
+
+import numpy as np
+import functools
+import itertools
+from typing import Tuple, Union, List, Sequence, Any, Optional, Set
+from enum import IntEnum
+
+
+
[docs]class Mesh: + """Describe the logical XLA device topology mesh and the underlying resources. + + Args: + device_ids (Union[np.ndarray, List]): A raveled list of devices (IDs) in a custom order. The list is reshaped + to an `mesh_shape` array, filling the elements using C-like index order. + + mesh_shape (Tuple[int, ...]): A int tuple describing the logical topology shape + of the device mesh, and each element describes the number of devices in + the corresponding axis. + + axis_names (Tuple[str, ...]): A sequence of resource axis names to be assigned to the dimensions + of the `devices` argument. Its length should match the rank of `devices`. + + Example: + + >>> mesh_shape = (4, 2) + >>> num_devices = len(xm.get_xla_supported_devices()) + >>> device_ids = np.array(range(num_devices)) + >>> mesh = Mesh(device_ids, mesh_shape, ('x', 'y')) + >>> mesh.get_logical_mesh() + >>> array([[0, 1], + [2, 3], + [4, 5], + [6, 7]]) + >>> mesh.shape() + OrderedDict([('x', 4), ('y', 2)]) + """ + + device_ids: np.ndarray + mesh_shape: Tuple[int, ...] + axis_names: Tuple[str, ...] + + def __init__(self, + device_ids: Union[np.ndarray, List], + mesh_shape: Tuple[int, ...], + axis_names: Tuple[str, ...] 
= None): + if not isinstance(device_ids, np.ndarray): + device_ids = np.array(device_ids) + assert (axis_names is None) or (len(mesh_shape) == len(axis_names)) + assert axis_names is None or (len(set(axis_names)) == len(axis_names)) + assert (len(device_ids) == np.prod(mesh_shape)) + assert len(device_ids) == len(np.unique(device_ids)) + self.device_ids = device_ids + self.mesh_shape = mesh_shape + self.axis_names = axis_names + assert all(d < self.size() for d in device_ids) + + def size(self): + return np.prod(self.mesh_shape) + + def shape(self): + if self.axis_names is None: + return OrderedDict( + (dim, size) for dim, size in enumerate(self.mesh_shape)) + return OrderedDict( + (name, size) for name, size in zip(self.axis_names, self.mesh_shape)) + + def get_logical_mesh(self): + return self.device_ids.reshape(self.mesh_shape) + + def get_axis_name_idx(self, name: str) -> int: + if name not in self.axis_names: + return None + return self.axis_names.index(name) + + @functools.lru_cache(maxsize=None) + def _get_op_sharding_args(self, partition_spec: Tuple): + partition_spec = _translate_named_partition_spec(self, partition_spec) + flat_specs = np.hstack([d for d in partition_spec]) + specs = [d for d in flat_specs if d is not None] + assert all(d >= 0 and d < len(self.mesh_shape) for d in specs), \ + f"partition_spec ({partition_spec}) contains out of bound index into mesh_shape." + assert len(specs) == len(np.unique(specs)), \ + f"Each device mesh dimension should appear at most once in partition_spec {partition_spec}." 
+ + tile_assignment = _get_tile_assignment(self, partition_spec) + if len(tile_assignment.shape) > len(partition_spec): + # Use partial replication for sharding a tensor over a higher-rank mesh + sharding_type = ShardingType.PARTIAL + else: + sharding_type = _get_sharding_type(partition_spec, self.size()) + replicate_dims = {i for i, d in enumerate(partition_spec) if d is None} + group_assignment, replication_groups = _get_group_assignment( + sharding_type, tile_assignment, len(partition_spec), replicate_dims) + + tile_assignment = tile_assignment.tolist() + sharding_type = int(sharding_type) + return tile_assignment, group_assignment, replication_groups, sharding_type + + @functools.lru_cache(maxsize=None) + def get_op_sharding(self, + partition_spec: Tuple) -> torch_xla._XLAC.OpSharding: + """ + Return the OpSharding for the given partition spec. This is an expensive + operation as the mesh grows, so the value is cached for reuse. + """ + # For scalar tensors, it can only be replicated. + # We have made sure len(t.shape) == len(partition_spec) + # in mark_sharding API. + if len(partition_spec) == 0: + return torch_xla._XLAC.OpSharding([], [], [], ShardingType.REPLICATED) + + tile_assignment, group_assignment, replication_groups, sharding_type = self._get_op_sharding_args( + partition_spec) + return torch_xla._XLAC.OpSharding(tile_assignment, group_assignment, + replication_groups, sharding_type)
+ + +_GLOBAL_MESH: Mesh = None + + +
[docs]def set_global_mesh(mesh: Mesh): + """ + Set the global mesh that can be used for the current process. + + Args: + mesh: (Mesh) Mesh object that will be the global mesh. + + Example: + + >>> import torch_xla.distributed.spmd as xs + >>> mesh = xs.get_1d_mesh("data") + >>> xs.set_global_mesh(mesh) + """ + global _GLOBAL_MESH + _GLOBAL_MESH = mesh
+ + +
[docs]def get_global_mesh() -> Optional[Mesh]: + """ + Get the global mesh for the current process. + + Returns: + mesh: (Optional[Mesh]) Mesh object if global mesh is set, otherwise return None. + + Example: + + >>> import torch_xla.distributed.spmd as xs + >>> xs.get_global_mesh() + """ + global _GLOBAL_MESH + return _GLOBAL_MESH
+ + +
[docs]def get_1d_mesh(axis_name: Optional[str] = None) -> Mesh: + """ + Helper function to return the mesh with all devices in one dimension. + + Args: + axis_name: (Optional[str]) optional string to represent the axis name of the mesh + + Returns: + Mesh: Mesh object + + Example: + + >>> # This example is assuming 1 TPU v4-8 + >>> import torch_xla.distributed.spmd as xs + >>> mesh = xs.get_1d_mesh("data") + >>> print(mesh.mesh_shape) + (4,) + >>> print(mesh.axis_names) + ('data',) + """ + num_devices = xr.global_runtime_device_count() + mesh_shape = (num_devices,) + device_ids = np.array(range(num_devices)) + if axis_name == None: + return Mesh(device_ids, mesh_shape) + else: + return Mesh(device_ids, mesh_shape, (axis_name,))
+ + +# HybridDevice class has been inspired from jax's mesh_utils: https://github.com/google/jax/blob/fc5960f2b8b7a0ef74dbae4e27c5c08ff1564cff/jax/experimental/mesh_utils.py#L4ƒ +
[docs]class HybridMesh(Mesh): + """Creates a hybrid device mesh of devices connected with ICI and DCN networks. + The shape of logical mesh should be ordered by increasing network-intensity + e.g. [replica, data, model] where mdl has the most network communication + requirements. + + Args: + ici_mesh_shape: shape of the logical mesh for inner connected devices. + dcn_mesh_shape: shape of logical mesh for outer connected devices. + + Example: + + >>> # This example is assuming 2 slices of v4-8. + >>> ici_mesh_shape = (1, 4, 1) # (data, fsdp, tensor) + >>> dcn_mesh_shape = (2, 1, 1) + >>> mesh = HybridMesh(ici_mesh_shape, dcn_mesh_shape, ('data','fsdp','tensor')) + >>> print(mesh.shape()) + >>> >> OrderedDict([('data', 2), ('fsdp', 4), ('tensor', 1)]) + """ + ici_mesh_shape: Tuple[int, ...] + dcn_mesh_shape: Tuple[int, ...] + + def __init__(self, + *, + ici_mesh_shape: Tuple[int, ...], + dcn_mesh_shape: Tuple[int, ...] = None, + axis_names: Tuple[str, ...] = None): + if dcn_mesh_shape == None: + dcn_mesh_shape = tuple([1] * len(ici_mesh_shape)) + assert len(ici_mesh_shape) == len(dcn_mesh_shape) + mesh_shape = tuple([x * y for x, y in zip(ici_mesh_shape, dcn_mesh_shape)]) + self.device_attributes = xr.global_runtime_device_attributes() + self.device_attributes.sort( + key=lambda attr: _utils.parse_xla_device(attr['name'])[1]) + + if 'slice_index' in self.device_attributes[0] and np.prod( + dcn_mesh_shape) == 1: + raise ValueError('Provide dcn_mesh_shape to create a mesh for multislice') + if 'slice_index' not in self.device_attributes[0] and np.prod( + dcn_mesh_shape) > 1: + raise ValueError('Invalid dcn_mesh_shape for single slice mesh') + self.ici_mesh_shape = ici_mesh_shape + self.dcn_mesh_shape = dcn_mesh_shape + if np.prod(dcn_mesh_shape) > 1 and 'slice_index' in self.device_attributes[ + 0]: # multislice + mesh = self._create_hybrid_device_mesh(self.ici_mesh_shape, + self.dcn_mesh_shape) + else: + mesh = self._create_device_mesh(self.ici_mesh_shape) + 
device_ids = mesh.flatten() + super().__init__(device_ids, mesh_shape, axis_names) + + # This is imported from JAX: https://github.com/google/jax/blob/main/jax/experimental/mesh_utils.py#L172 + def _get_physical_tpu_mesh(self, devices: Sequence[int]) -> np.ndarray: + r"""Rearrange TPU devices in a slice into a physical mesh. + + Args: + devices: A list of device logical ordinals in a TPU slice. + + Returns: + A np.ndarray of device logical ordinals with shape [global_x, global_y, global_z]. On + v2 and v3, global_z is instead cores_per_chip (i.e., 2). + """ + assert xm.xla_device_hw(xm.xla_device()) == 'TPU' + # coords is a 3-dims tuple representing the device in physical mesh + device_coords = [self.device_attributes[d]['coords'] for d in devices] + dims = tuple(d + 1 for d in max(device_coords)) + out = np.empty(dims, dtype=int) + for coords, d in zip(device_coords, devices): + out[coords[0], coords[1], coords[2]] = d + return out + + # This is imported from JAX: https://github.com/google/jax/blob/main/jax/experimental/mesh_utils.py#L64. + def _create_device_mesh_for_nd_torus( + self, physical_mesh: np.ndarray, + mesh_shape: Sequence[int]) -> Tuple[np.ndarray, List[Tuple[int, ...]]]: + """Assigns logical parallelism axes to physical axes of an N-D torus network. + + Given logical parallelism axes with sizes in `mesh_shape` and devices in an + N-dimensional torus network represented by `physical_mesh`, maps each logical + axis to one or more physical axes. Prefer to map more-performance-sensitive + logical axes to larger numbers of physical axes to maximize the bandwidth + available to them. Also prefer to assign logical axes to multiple physical + axes of the same size (e.g., a 2D square) rather than multiple physical axes + of different sizes when possible. + + Note that this routine will never split a physical axis over more than one + logical axis (which would reduce total usable bandwidth but may sometimes be + desired anyway). 
As a result, it will error out in cases where this is + necessary to produce a valid mapping. + + Let's use a concrete example to explain the concepts and considerations. + + As an example, suppose the logical mesh is [data, model], for data and model + parallelism respectively. Also suppose that data parallelism is less + performance sensitive than model parallelism. Consider a 3D TPU pod slice of + shape 4x4x16, represented by a physical mesh of shape (4, 4, 16). + + A TPU pod slice has equal bandwidth along all axes with wraparound links, but + a 2D plane of size 4x4 may have faster XLA collective implementations than a + non-square plane or a 1D subgroup. If the mesh_shape is [16, 16], we may want + the more performance sensitive `model` axis to be mapped to the 4x4 XY plane. + + Args: + physical_mesh: a np.ndarray of devices in the shape of the N-D torus + physical topology. + mesh_shape: shape of the logical mesh (size of the various logical + parallelism axes), with axes ordered by increasing network intensity. + + Returns: + An np.ndarray of devices in the shape of the logical mesh (mesh_shape), with + each logical parallelism axis mapped to one or more physical mesh axes. + The axis assignment (a list of length num_logical_axes, whose elements + are tuples representing physical axis indices). + """ + # Remaining physical axes to be assigned to logical axes. + assignable_physical_mesh = list(physical_mesh.shape) + # Map each logical axis to a subset of physical axes. + assignment: List[Tuple[int, ...]] = [() for _ in mesh_shape] + # Assign logical axes from highest network intensity to lowest. + # `mesh_shape` is assumed to ordered by lowest network intensity first, so + # reverse it first. + # Assigns devices to 2D or 3D logical mesh. + for logical_axis_index, logical_axis_size in reversed( + list(enumerate(mesh_shape))): + for num_axes in range(3, 0, -1): + # map a combination of devices in physical axes to the logical axis. 
+ axes = itertools.combinations(assignable_physical_mesh, num_axes) + indices = itertools.combinations( + range(len(assignable_physical_mesh)), num_axes) + for c_axes, c_indices in zip(axes, indices): + if np.prod(c_axes) == logical_axis_size: + assignment[logical_axis_index] = c_indices + # Zero the assigned physical axes. + assignable_physical_mesh = [ + 0 if i in c_indices else v + for i, v in enumerate(assignable_physical_mesh) + ] + break + if assignment[logical_axis_index]: + # We already found an assignment from one candidate above. + break + else: + # If the num_axes for loop did not break, i.e. none of the candidates work + # goto here with this while-else construct. + if logical_axis_size > 1: + raise NotImplementedError( + 'Failed to find assignment for logical_axis_index' + f' {logical_axis_index} of size {logical_axis_size} with remaining' + f' assignable mesh {assignable_physical_mesh}. The size of each' + ' axis in your logical mesh must be equal to the product of' + ' some subset of the physical mesh axis sizes. E.g logical mesh (4,' + ' 16) is compatible with physical mesh 4x4x4 since 4=4 and 16=4x4.' + ) + # Flatten the assignment + transpose: List[int] = [] + for x in assignment: + for y in x: + transpose.append(int(y)) + return physical_mesh.transpose(transpose).reshape(mesh_shape), assignment + + def _create_device_mesh(self, + mesh_shape: Sequence[int], + devices: Sequence[Any] = None) -> Sequence[int]: + """Creates a performant device mesh. + + Args: + mesh_shape: shape of logical mesh, ordered by increasing network-intensity + e.g. [replica, data, mdl] where mdl has the most network communication + requirements. + devices: optionally, the devices to construct a mesh for. + + Returns: + A np.ndarray of devices with mesh_shape as its shape. 
+ """ + + if devices is None: + devices = np.arange(xr.global_runtime_device_count()) + if np.prod(mesh_shape) != len(devices): + raise ValueError( + f'Number of devices {len(devices)} must equal the product ' + f'of mesh_shape {mesh_shape}') + physical_mesh = self._get_physical_tpu_mesh(devices) + device_mesh, assignment = self._create_device_mesh_for_nd_torus( + physical_mesh, mesh_shape) + return device_mesh + + # This is imported from JAX: https://github.com/google/jax/blob/main/jax/experimental/mesh_utils.py#L288. + def _create_hybrid_device_mesh( + self, ici_mesh_shape: Sequence[int], + dcn_mesh_shape: Sequence[int]) -> Sequence[int]: + """Creates a device mesh for hybrid (e.g., ICI and DCN) parallelism. + + Args: + ici_mesh_shape: shape of the logical mesh for the faster/inner network, ordered + by increasing network intensity, e.g. [replica, data, mdl] where mdl has + the most network communication requirements. + dcn_mesh_shape: shape of the logical mesh for the slower/outer network, + in the same order as mesh_shape. + + Returns: + A np.ndarray of device logical ordinal with ici_mesh_shape * dcn_mesh_shape as its shape + that can be fed into HybridMesh for hybrid parallelism. + """ + granule_dict = defaultdict(list) + for d, dev in enumerate(self.device_attributes): + granule_dict[dev['slice_index']].append(d) + # sorts devices based on slice_index. + granules = list(granule_dict[key] for key in sorted(granule_dict.keys())) + if np.prod(dcn_mesh_shape) != len(granules): + raise ValueError( + f'Number of slices {len(granules)} must equal the product of ' + f'dcn_mesh_shape {dcn_mesh_shape}') + # creates a seperate internal mesh for each slice. 
+ per_granule_meshes = [ + self._create_device_mesh(ici_mesh_shape, granule) + for granule in granules + ] + granule_mesh = np.arange(len(granules)).reshape(dcn_mesh_shape) + blocks = np.vectorize( + lambda i: per_granule_meshes[i], otypes=[object])( + granule_mesh) + device_mesh = np.block(blocks.tolist()) + return device_mesh
+ + +class ShardingType(IntEnum): + # ShardingType enum ID maps to OpSharidng.Type (https://shorturl.at/pvAJX) + REPLICATED = 0 + MAXIMAL = 1 + TUPLE = 2 + TILED = 3 + MANUAL = 4 + PARTIAL = 5 + UNKNOWN = 6 # implicit replication. TODO(yeounoh) wait for auto-sharding support + + +def _get_sharding_type(partition_spec: Tuple[Union[int, None]], + num_devices: int) -> ShardingType: + sharding_type = ShardingType.TILED + if num_devices == 1: + sharding_type = ShardingType.MAXIMAL + elif all(d is None for d in partition_spec): + sharding_type = ShardingType.REPLICATED + elif any(d is None for d in partition_spec): + sharding_type = ShardingType.PARTIAL + return sharding_type + + +def _get_tile_assignment( + mesh: Mesh, partition_spec: Tuple[Union[Tuple[int], int, + None]]) -> np.ndarray: + """ + Permute the given mesh to create the tile assignment based on the partition + spec. Returns the tiling assignment as a numpy ndarray. + + If the input partition_spec combines multiple logical mesh axes over a single + tensor axis, the resulting tiling assignment will combine the specified axes + into a single axis. + """ + # Flatten the partition spec and ensure that it is fully specified over the + # mesh for permutation. + tiled_dims = [x for x in partition_spec if x is not None] + permutation = np.hstack(tiled_dims).tolist() if tiled_dims else [] + missing_axes = sorted(set(range(len(mesh.shape()))) - set(permutation)) + tile_assignment = mesh.get_logical_mesh().transpose(permutation + + missing_axes) + + # For any tuples in the partition_spec, the grouped axes will be adjacent + # after the permutation. Combine these dimensions into a single axis. + for i, spec in enumerate(tiled_dims): + if isinstance(spec, tuple): + shape = tile_assignment.shape + tile_assignment = tile_assignment.reshape(shape[:i] + (-1,) + + shape[i + len(spec):]) + + return tile_assignment + + +# Produce group assignment for partial replication. Partial replication tiles +# groups (a.k.a. 
sub-groups) where the shards are fully replicated within each +# sub-group. `replication_groups` is a list of groups as lists, where each group +# contains the participating device IDs. `group_assignment` describes the group +# placement and the overall mesh, where each element is the group ID. +# The tile_assignment should be the result of `_get_tile_assignment` so that all +# tiled dimensions are in the first axes and replicated dimensions are in the +# remaining axes. +def _get_group_assignment(sharding_type: ShardingType, + tile_assignment: np.ndarray, tensor_rank: int, + replicate_dims: Set[int]) -> Tuple[List, List]: + group_assignment = list() + replication_groups = list() + if sharding_type is ShardingType.PARTIAL: + # Shard across groups and replicate within subgroups; replicated dims + # will be used to group replication devices. + tile_shape = tile_assignment.shape + # When creating the tile assignment, the mesh is permuted so that the first + # few axes are used for tiling. + tile_dims = range(tensor_rank - len(replicate_dims)) + group_list = [tile_assignment] + for d in tile_dims: + _group_list = list() + for group_members in group_list: + _group_list += np.split(group_members, tile_shape[d], d) + group_list = _group_list + replication_groups = [group.flatten().tolist() for group in group_list] + + mesh_axis = itertools.count() + group_tile_shape = [ + 1 if d in replicate_dims else tile_shape[next(mesh_axis)] + for d in range(tensor_rank) + ] + group_assignment = np.arange(len(replication_groups)).reshape( + tuple(group_tile_shape)).tolist() + return group_assignment, replication_groups + + +def _translate_named_partition_spec(mesh: Mesh, partition_spec: Tuple): + _partition_spec = list() + for p in partition_spec: + if type(p) is tuple: + assert not any(type(x) is tuple + for x in p), 'Partition spec cannot contain nested tuples' + _partition_spec.append(_translate_named_partition_spec(mesh, p)) + elif (p is None) or (type(p) is int): + 
_partition_spec.append(p) + elif type(p) is str: + idx = mesh.get_axis_name_idx(p) + if idx is None: + raise ValueError(f"Axis name {p} is not defined in the given mesh") + _partition_spec.append(idx) + else: + raise ValueError( + f"Spec type {type(p)} is not supported in partition spec") + return tuple(_partition_spec) + + +def _mark_manual_sharding( + t: Union[torch.Tensor, XLAShardedTensor]) -> XLAShardedTensor: + """ + This API is meant to be paired with the upcoming pause_spmd&resume_spmd APIs. + Don't use it alone. + """ + manual_sharding = torch_xla._XLAC.OpSharding([], [], [], ShardingType.MANUAL) + torch_xla._XLAC._mark_manual_sharding( + unwrap_sharded_tensor(t), manual_sharding) + return wrap_as_sharded_tensor(t) + + +def enable_manual_sharding(t: Union[torch.Tensor, XLAShardedTensor], + partition_spec: Tuple[Union[Tuple, int, str, None]], + *, + mesh: Mesh = None) -> XLAShardedTensor: + """ + This API enables manual sharding for the given tensor. Manual sharding disables SPMD sharding proporgation and auto + partition for the given tensor and all subsequential tensors that produced by an op that uses the given tensor as + input, and therefore allows the user to manually call collectives for the tensor and subsequential tensors. It + requires the user to provide the partition spec to shard the tensor before enabling the manual sharding. To be noted, + the leaf tensors need to pass to disable_manual_sharding before ending the graph. + """ + mesh = get_global_mesh() if mesh is None else mesh + t = mark_sharding(unwrap_sharded_tensor(t), mesh, partition_spec) + t = torch_xla._XLAC._spmd_full_to_shard_shape(unwrap_sharded_tensor(t)) + return wrap_as_sharded_tensor(t) + + +def disable_manual_sharding(t: Union[torch.Tensor, XLAShardedTensor], + partition_spec: Tuple[Union[Tuple, int, str, None]], + full_shape: torch.Size, + *, + mesh: Mesh = None) -> XLAShardedTensor: + """ + This API disables manual sharding for the given tensor. 
The partition_spec and full_shape are used to construct the + output tensor as if the input tensor has not been manual sharded. + """ + mesh = get_global_mesh() if mesh is None else mesh + t = _mark_manual_sharding(unwrap_sharded_tensor(t)) + t = torch_xla._XLAC._spmd_shard_to_full_shape( + unwrap_sharded_tensor(t), mesh.get_op_sharding(partition_spec), + full_shape, t.dtype) + return wrap_as_sharded_tensor(t) + + +
[docs]def mark_sharding( + t: Union[torch.Tensor, XLAShardedTensor], mesh: Mesh, + partition_spec: Tuple[Union[Tuple, int, str, None]]) -> XLAShardedTensor: + """ + Annotates the tensor provided with XLA partition spec. Internally, + it annotates the corresponding XLATensor as sharded for the XLA SpmdPartitioner pass. + + Args: + t (Union[torch.Tensor, XLAShardedTensor]): input tensor to be annotated with partition_spec. + + mesh (Mesh): describes the logical XLA device topology and the underlying device IDs. + + partition_spec (Tuple[Tuple, int, str, None]): A tuple of device_mesh dimension index or + `None`. Each index is an int, str if the mesh axis is named, or tuple of int or str. + This specifies how each input rank is sharded (index to mesh_shape) or replicated (None). + When a tuple is specified, the corresponding input tensor axis will be sharded along all + logical axes in the tuple. Note that the order the mesh axes are specified in the tuple + will impact the resulting sharding. + + dynamo_custom_op (bool): if set to True, it calls the dynamo custom op variant of mark_sharding + to make itself recognizeable and traceable by dynamo. + + Example: + + >>> import torch_xla.runtime as xr + >>> import torch_xla.distributed.spmd as xs + >>> mesh_shape = (4, 2) + >>> num_devices = xr.global_runtime_device_count() + >>> device_ids = np.array(range(num_devices)) + >>> mesh = Mesh(device_ids, mesh_shape, ('x', 'y')) + >>> input = torch.randn(8, 32).to(xm.xla_device()) + >>> xs.mark_sharding(input, mesh, (0, None)) # 4-way data parallel + >>> linear = nn.Linear(32, 10).to(xm.xla_device()) + >>> xs.mark_sharding(linear.weight, mesh, (None, 1)) # 2-way model parallel + """ + num_devices = xr.global_runtime_device_count() + assert num_devices > 0, "This requires XLA supported device(s)." + assert mesh.size() == num_devices, \ + f"{mesh.mesh_shape} is not mappable over {num_devices} devices." 
+ # We only allow fully specified `partition_spec` to be applicable, as opposed + # to filling in the unspecified replicated dims. Fully specified `partiion_spec` + # should be of the same rank as `t`. This is to support partial replication + # where the group assignment may vary with different input ranks. + assert len(t.shape) == len(partition_spec), \ + f"Partition spec length ({len(partition_spec)}) should be equal to the input rank ({len(t.shape)})." + + op_sharding = mesh.get_op_sharding(partition_spec) + annotate_func = torch_xla._XLAC._xla_mark_sharding + annotate_func(unwrap_sharded_tensor(t), op_sharding) + return wrap_as_sharded_tensor(t)
+ + +
[docs]def clear_sharding(t: Union[torch.Tensor, XLAShardedTensor]) -> torch.Tensor: + """ + Clear sharding annotation from the input tensor and return a `cpu` casted tensor. This + is a in place operation but will also return the same torch.Tensor back. + + Args: + t (Union[torch.Tensor, XLAShardedTensor]): Tensor that we want to clear the sharding + + Return: + t (torch.Tensor): tensor that without sharding. + + Example: + + >>> import torch_xla.distributed.spmd as xs + >>> torch_xla.runtime.use_spmd() + >>> t1 = torch.randn(8,8).to(torch_xla.device()) + >>> mesh = xs.get_1d_mesh() + >>> xs.mark_sharding(t1, mesh, (0, None)) + >>> xs.clear_sharding(t1) + """ + torch_xla._XLAC._xla_clear_sharding(unwrap_sharded_tensor(t)) + if isinstance(t, XLAShardedTensor): + return t.global_tensor + return t
+ + +def wrap_as_sharded_tensor( + t: Union[torch.Tensor, XLAShardedTensor]) -> XLAShardedTensor: + if not isinstance(t, XLAShardedTensor): + return XLAShardedTensor(t) + return t + + +def unwrap_sharded_tensor( + t: Union[torch.Tensor, XLAShardedTensor]) -> torch.Tensor: + if isinstance(t, XLAShardedTensor): + return t.global_tensor + return t + + +def wrap_if_sharded(x: Any) -> Any: + """ + If the input is a sharded tensor, return an XLAShardedTensor wrapping it. + Otherwise, returns the input. + """ + if (isinstance(x, torch.Tensor) and not isinstance(x, XLAShardedTensor) and + x.device.type == 'xla' and + torch_xla._XLAC._get_xla_sharding_type(x) is not None): + return XLAShardedTensor(x) + return x + + +@dataclass +class ShardingSpec: + mesh: Mesh + partition_spec: Tuple[Union[int, None]] + minibatch: Optional[bool] = False + + # Derived fields + _tile_assignment: List[int] = field(init=False) + _group_assignment: List[int] = field(init=False) + _replication_groups: List[int] = field(init=False) + _sharding_type: ShardingType = field(init=False) + + def __post_init__(self): + mesh = self.mesh + partition_spec = _translate_named_partition_spec(mesh, self.partition_spec) + tile_assignment = _get_tile_assignment(mesh, partition_spec) + self._tile_assignment = tile_assignment.tolist() + self._sharding_type = _get_sharding_type(partition_spec, + xr.global_runtime_device_count()) + replicate_dims = {i for i, d in enumerate(partition_spec) if d is None} + self._group_assignment, self._replication_groups = _get_group_assignment( + self._sharding_type, tile_assignment, len(partition_spec), + replicate_dims) + + def xla_spec(self, t: torch.Tensor) -> Union['XlaShardingSpec', None]: + """ + Create an XlaShardingSpec for the given tensor. If the tensor is + incompatible with the ShardingSpec, returns None. 
+ """ + if not self.can_apply(t): + return None + return torch_xla._XLAC.XlaShardingSpec(t, self._tile_assignment, + self._group_assignment, + self._replication_groups, + int(self._sharding_type), + self.minibatch) + + def can_apply(self, t: torch.Tensor) -> bool: + """ + Test whether the ShardingSpec is compatible with the given torch.Tensor. + """ + return len(t.shape) == len(self.partition_spec) + + def apply(self, t: torch.Tensor): + # TODO(yeounoh) use virtual device interface when available. + assert (t.device == xm.xla_device()) + mark_sharding(t, self.mesh, self.partition_spec) + + +class XLAPatchedLinear(torch.autograd.Function): + """ + A patched version of `torch.nn.functional.linear` that uses einsum instead + of torch.matmul which will flatten the tensors to 2D and collide the sharded + dimensions. The torch.matmul default behavior makes it very hard for XLA compiler + to propagate the sharding annotation. + + TODO (alanwaketan): Let's patch it on the dispatcher level. + """ + + @staticmethod + def forward(ctx, input, weight, bias=None): + # bias is an optional argument + ctx.save_for_backward(input, weight, bias) + with torch.no_grad(): + product = torch.einsum('...n,mn->...m', input, weight) + if bias is None: + return product + return product + bias + + @staticmethod + def backward(ctx, grad_output): + input, weight, bias = ctx.saved_tensors + grad_input = grad_weight = grad_bias = None + + if ctx.needs_input_grad[0]: + grad_input = torch.einsum('...m,mn->...n', grad_output, weight) + if ctx.needs_input_grad[1]: + grad_weight = torch.einsum('...m,...n->mn', grad_output, input) + if bias is not None and ctx.needs_input_grad[2]: + grad_bias = torch.einsum('...m->m', grad_output) + + return grad_input, grad_weight, grad_bias + + +def xla_patched_nn_linear_forward(m, input): + return XLAPatchedLinear.apply(input, m.weight, m.bias) + + +def apply_backward_optimization_barrier(m: torch.nn.Module): + """ + Register a full backward hook that apply an 
optimization barrier to the given module. + This will prevent the XLA compiler from fusing the module's backward pass with others. + It's useful to prevent gigantic buffers being allocated to synchronize the gradients. + """ + + def optimization_barrier(module, grad_input, grad_output): + from torch_xla.utils.checkpoint import CheckpointFunction + gradients = [] + for param in module.parameters(): + if param.grad != None: + gradients.append(param.grad) + xm.optimization_barrier_( + CheckpointFunction._extract_tensors_from_list(gradients + + list(grad_input))) + + m.register_full_backward_hook(optimization_barrier) +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/distributed/xla_multiprocessing.html b/release/2.5/_modules/torch_xla/distributed/xla_multiprocessing.html new file mode 100644 index 00000000000..5e1cd5f551e --- /dev/null +++ b/release/2.5/_modules/torch_xla/distributed/xla_multiprocessing.html @@ -0,0 +1,825 @@ + + + + + + + + + + + + torch_xla.distributed.xla_multiprocessing — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.distributed.xla_multiprocessing

+import torch.multiprocessing
+from torch_xla import runtime as xr
+from torch_xla._internal import pjrt
+
+
+
[docs]def spawn(fn, + args=(), + nprocs=None, + join=True, + daemon=False, + start_method='spawn'): + """Enables multi processing based replication. + + Args: + fn (callable): The function to be called for each device which takes part of + the replication. The function will be called with a first argument being + the global index of the process within the replication, followed by the + arguments passed in `args`. + args (tuple): The arguments for `fn`. + Default: Empty tuple + nprocs (int): The number of processes/devices for the replication. At the + moment, if specified, can be either 1 or the maximum number of devices. + join (bool): Whether the call should block waiting for the completion of the + processes which have being spawned. + Default: True + daemon (bool): Whether the processes being spawned should have the `daemon` + flag set (see Python multi-processing API). + Default: False + start_method (string): The Python `multiprocessing` process creation method. + Default: `spawn` + + Returns: + The same object returned by the `torch.multiprocessing.spawn` API. If + `nprocs` is 1 the `fn` function will be called directly, and the API will + return None. + """ + return pjrt.spawn(fn, nprocs, start_method, args)
+ + +class MpModelWrapper(object): + """Wraps a model to minimize host memory usage when `fork` method is used. + + This class should be used together with the `spawn(..., start_method='fork')` + API to minimize the use of host memory. + Instead of creating models on each multiprocessing process, hence replicating + the model's initial host memory, the model is created once at global scope, + and then moved into each device inside the `spawn()` target function. + Example:: + + WRAPPED_MODEL = xmp.MpModelWrapper(MyNetwork()) + + def _mp_fn(index, ...): + device = xm.xla_device() + model = WRAPPED_MODEL.to(device) + ... + + torch_xla.launch(_mp_fn, ..., start_method='fork') + + This method has two advantages. First it uses only one copy of the memory + pages to host the original model weights, and second it serializes the move + of the wrapped model into each device, by lowering the load onto the system + memory during the process. + """ + + def __init__(self, model): + """Creates a new `MpModelWrapper` object. + + Args: + model (torch.nn.Module): The model to be wrapped. Should be on PyTorch CPU + device (which is the default when creating new models). + """ + self._model = model + self._lock = torch.multiprocessing.Lock() + + def to(self, device): + """Retrieves the model moved onto the specified device. + + Args: + device (torch.device): The device where the model should be moved onto. + Returns: + The model on the specified device. + """ + with self._lock: + self._model.to(device) + return self._model + + +class MpSerialExecutor(object): + """Utility to run a function in a serialized fashion among multi-core processes. + + Example:: + + # At global scope. + SERIAL_EXEC = xmp.MpSerialExecutor() + + def load_dataset(path): + return maybe_download_and_load(path) + + def _mp_fn(index, ...): + # Avoid all cores downloading the same data with the serial executor. + dataset = SERIAL_EXEC.run(lambda: load_dataset('/tmp/mnist-data')) + ... 
+ + torch_xla.launch(_mp_fn, ...) + """ + + def __init__(self): + self._lock = torch.multiprocessing.Lock() + + def run(self, fn): + """Runs the provided function serialized WRT each per-core process. + + Args: + fn (callable): The function to run in a serialized fashion. + Returns: + The `fn` return value. + """ + with self._lock: + return fn() +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/experimental/eager.html b/release/2.5/_modules/torch_xla/experimental/eager.html new file mode 100644 index 00000000000..93583db3b15 --- /dev/null +++ b/release/2.5/_modules/torch_xla/experimental/eager.html @@ -0,0 +1,746 @@ + + + + + + + + + + + + torch_xla.experimental.eager — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.experimental.eager

+import functools
+from contextlib import contextmanager
+
+import torch_xla
+import logging
+
+
+
[docs]def eager_mode(enable: bool): + """Configure torch_xla's default executation mode. + + Under eager mode only functions that was `torch_xla.compile`d will be + traced and compiled. Other torch ops will be executed eagerly. + """ + torch_xla._XLAC._set_use_eager_mode(enable)
+ + +def is_eager_mode() -> bool: + """Return True if torch_xla is currently under eager mode + """ + return torch_xla._XLAC._get_use_eager_mode() + + +@contextmanager +def eager_mode_context(enable: bool): + """Context manager to enable/disable the eager mode. + """ + saved_eager_mode = is_eager_mode() + eager_mode(enable) + try: + yield saved_eager_mode + finally: + eager_mode(saved_eager_mode) + + +def compile(func): + # can's use deprecated wrapper at import time due to circular dependency + logging.warning( + 'torch_xla.experimental.compile is deprecated. Use torch_xla.compile instead.' + ) + return torch_xla.compile(func) +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/runtime.html b/release/2.5/_modules/torch_xla/runtime.html new file mode 100644 index 00000000000..a74c48b578c --- /dev/null +++ b/release/2.5/_modules/torch_xla/runtime.html @@ -0,0 +1,1003 @@ + + + + + + + + + + + + torch_xla.runtime — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.runtime

+import functools
+import logging
+import os
+import warnings
+from typing import Dict, List, Optional, TypeVar
+
+import torch
+import torch.cuda
+import torch_xla
+import torch_xla.core.xla_env_vars as xenv
+import torch_xla.core.xla_model as xm
+import torch_xla.utils.utils as xu
+import torch_xla._internal.utils as _utils
+import torch_xla._internal.tpu as tpu
+from torch_xla.experimental import plugins
+from torch_xla import runtime
+
+R = TypeVar('R')
+FN = TypeVar('FN')
+
+# Note [Dynamo WORLD_SIEZ and ORDINAL]
+# Belows are workaround to cache the ordinal and world_size such that
+# Dynamo won't do graph breaks when runtime.world_size() and runtime.global_ordinal() are called.
+_WORLD_SIZE = None
+_ORDINAL = None
+
+
+def _init_world_size_ordinal():
+  global _WORLD_SIZE, _ORDINAL
+
+  # Dynamo doesn't support XRT or multithreaded runtime. See Note [V3-8 Threading]
+  if runtime.addressable_device_count() > 1:
+    return
+
+  if _WORLD_SIZE is None:
+    _WORLD_SIZE = runtime.world_size()
+    _ORDINAL = runtime.global_ordinal()
+
+
+def set_device_type(pjrt_device: str) -> None:
+  """Sets the current PjRt device type.
+
+  Must be run before using any XLA devices.
+
+  Args:
+    pjrt_device: 'TPU' or 'CPU'
+  """
+  if torch_xla._XLAC._xla_runtime_is_initialized() and os.environ.get(
+      xenv.PJRT_DEVICE) != pjrt_device:
+    raise RuntimeError(
+        "Can't change device type after XLA runtime is initialized")
+
+  os.environ[xenv.PJRT_DEVICE] = pjrt_device
+
+
+def _maybe_select_default_device():
+  if xu.getenv_as(xenv.PJRT_SELECT_DEFAULT_DEVICE, str,
+                  '1') == '0' or xenv.PJRT_DEVICE in os.environ:
+    return
+
+  # Check for libtpu _and_ the TPU device
+  if torch_xla._found_libtpu and tpu.num_available_chips() > 0:
+    logging.warning('libtpu.so and TPU device found. Setting PJRT_DEVICE=TPU.')
+    os.environ[xenv.PJRT_DEVICE] = 'TPU'
+  elif xu.getenv_as(xenv.GPU_NUM_DEVICES, int, 0) > 0:
+    logging.warning('GPU_NUM_DEVICES is set. Setting PJRT_DEVICE=CUDA')
+    os.environ[xenv.PJRT_DEVICE] = 'CUDA'
+  elif torch.cuda.is_available() and torch.cuda.device_count() > 0:
+    num_devices_str = str(torch.cuda.device_count())
+    logging.warning(
+        'Found CUDA without GPU_NUM_DEVICES. Defaulting to PJRT_DEVICE=CUDA with GPU_NUM_DEVICES='
+        + num_devices_str)
+    os.environ[xenv.PJRT_DEVICE] = 'CUDA'
+    os.environ[xenv.GPU_NUM_DEVICES] = num_devices_str
+  elif torch_xla._found_libneuronxla:
+    logging.warning('Found libneuronpjrt.so. Setting PJRT_DEVICE=NEURON.')
+    os.environ[xenv.PJRT_DEVICE] = 'NEURON'
+  else:
+    logging.warning('Defaulting to PJRT_DEVICE=CPU')
+    os.environ[xenv.PJRT_DEVICE] = 'CPU'
+
+
+
[docs]def device_type() -> Optional[str]: + """Returns the current PjRt device type. + + Selects a default device if none has been configured + + Returns: + A string representation of the device. + """ + pjrt_device = xu.getenv_as(xenv.PJRT_DEVICE, str) + return pjrt_device.split('_')[0] if pjrt_device else pjrt_device
+ + +def is_bf16_supported(): + """Returns whether torch.bfloat16 is supported on this environment. + """ + try: + torch.tensor([1.], dtype=torch.bfloat16, device=xm.xla_device()) + return True + except Exception as e: + return False + + +def xla_device(n: Optional[int] = None, + devkind: Optional[str] = None) -> torch.device: + """Returns an XLA device. + + Args: + n: Index of XLA device within visibible devices. If not set, use local + ordinal (default 0) to select an addressable device. + devkind: Type of device to return. Should match `device_type()`. + + Returns: + A `torch.device` representing an XLA device. + """ + if n is None: + return torch.device(torch_xla._XLAC._xla_get_default_device()) + + devices = xm.get_xla_supported_devices(devkind=devkind) + if n > len(devices): + raise IndexError('Device index {} out of range in {}'.format(n, devices)) + + device = devices[n] + torch_xla._XLAC._xla_set_default_device(device) + return torch.device(device) + + +
[docs]def local_process_count() -> int: + """Returns the number of processes running on this host.""" + return xu.getenv_as(xenv.PJRT_LOCAL_PROCESS_COUNT, int, defval=1)
+ + +
[docs]def global_device_count() -> int: + """Returns the total number of devices across all processes/hosts.""" + return len(torch_xla._XLAC._xla_get_all_devices())
+ + +
[docs]def world_size() -> int: + """Returns the total number of processes participating in the job.""" + global _WORLD_SIZE + if _WORLD_SIZE is not None: + return _WORLD_SIZE + if torch_xla._XLAC._xla_get_replication_devices_count() == 0: + _WORLD_SIZE = 1 + else: + _WORLD_SIZE = global_device_count() + return _WORLD_SIZE
+ + +
[docs]def local_device_count() -> int: + """Returns the total number of devices on this host. + + Assumes each process has the same number of addressable devices. + """ + return local_process_count() * addressable_device_count()
+ + +
[docs]def addressable_device_count() -> int: + """Returns the number of devices visible to this process.""" + return torch_xla._XLAC._xla_num_devices()
+ + +
[docs]def global_ordinal() -> int: + """Returns global ordinal of this thread within all processes. + + Global ordinal is in range [0, global_device_count). Global ordinals are not + guaranteed to have any predictable relationship to the TPU worker ID nor are + they guaranteed to be contiguous on each host.""" + global _ORDINAL + if _ORDINAL is not None: + return _ORDINAL + return torch_xla._XLAC._xla_get_default_device_ordinal()
+ + +
[docs]def local_ordinal() -> int: + """Returns local ordinal of this thread within this host. + + Local ordinal is in range [0, local_device_count).""" + local_rank = xu.getenv_as(xenv.PJRT_LOCAL_PROCESS_RANK, int, 0) + devices_per_process = addressable_device_count() + return local_rank * devices_per_process + xla_device().index
+ + +def process_index() -> int: + return torch_xla._XLAC._xla_get_process_index() + + +def process_count() -> int: + return torch_xla._XLAC._xla_get_num_processes() + + +def host_index() -> int: + if plugins.using_dynamic_plugins(): + return plugins.default().host_index() + elif device_type() == 'TPU': + return tpu.worker_id() + + # TODO: Update this when we support multi-host GPU + return 0 + + +# API below will be used to query physcial device attribute. +def runtime_device_attributes(device: str) -> Dict[str, object]: + return torch_xla._XLAC._xla_get_device_attributes(device) + + +def global_runtime_device_attributes() -> List[Dict[str, object]]: + return torch_xla._XLAC._xla_get_all_device_attributes() + + +
[docs]@functools.lru_cache() +def global_runtime_device_count() -> int: + """Returns the total number of runtime devices across all processes/hosts, especially useful for SPMD.""" + return len(torch_xla._XLAC._xla_get_all_runtime_devices())
+ + +def addressable_runtime_device_count() -> int: + """Returns the number of devices visible to this process.""" + return torch_xla._XLAC._xla_num_runtime_devices() + + +# TODO(yeounoh) introduce SPMD configuration. +
[docs]def use_spmd(auto: Optional[bool] = False): + """API to enable SPMD mode. This is a recommended way to enable SPMD. + + This forces SPMD mode if some tensors are already initialized on non-SPMD + devices. This means that those tensors would be replicated across the devices. + + Args: + auto (bool): Whether to enable the auto-sharding. Read + https://github.com/pytorch/xla/blob/master/docs/spmd_advanced.md#auto-sharding + for more detail + """ + if os.environ.get("XLA_USE_SPMD") is not None: + warnings.warn("XLA_USE_SPMD is being deprecated. " + "Use torch_xla.runtime.use_spmd() " + "without setting XLA_USE_SPMD env-var.") + + if torch_xla._XLAC._xla_get_spmd_config_is_locked( + ) and not xu.check_env_flag("XLA_USE_SPMD"): + warnings.warn( + "Replicating tensors already initialized on non-virtual XLA device for SPMD " + "to force SPMD mode. This is one-time overhead to setup, and to minimize such, " + "please set SPMD mode before initializting tensors " + "(i.e., call use_spmd() in the beginning of the program).") + torch_xla._XLAC._xla_force_spmd_device() + xm.wait_device_ops() + + # TODO(yeounoh) we can drop envvar in the future + os.environ["XLA_USE_SPMD"] = "1" + if auto: + torch_xla._XLAC._xla_set_auto_sharding() + os.environ["XLA_AUTO_SPMD"] = "1" + + if device_type() == 'NEURON': + # In case of Neuron, reset the initialization environment to accommodate SPMD. + try: + from torch_neuronx.initialization import initialize + + initialize() + except ImportError: + pass
+ + +
[docs]def is_spmd(): + """Returns if SPMD is set for execution.""" + # TODO(yeounoh) replace this when we fully deprecate the flag. + return xu.check_env_flag('XLA_USE_SPMD')
+ + +
[docs]def get_master_ip() -> str: + """Retrieve the master worker IP for the runtime. This calls into + backend-specific discovery APIs. + + Returns: + master worker's IP address as a string.""" + if device_type() == 'TPU': + return tpu.discover_master_worker_ip() + raise RuntimeError(f'IP discovery not supported for device: {device_type()}')
+ + +
[docs]def initialize_cache(path: str, readonly: bool = False): + """Initializes the persistent compilation cache. This API must be called + before any computations have been performed. + + Args: + path (str): The path at which to store the persistent cache. + readonly (bool): Whether or not this worker should have write access to the cache. + """ + assert not torch_xla._XLAC._xla_computation_cache_is_initialized( + ), "Computation cache has already been initialized" + + # TODO(jonbolin): Consider moving away from environment variables to control + # the cache. + os.environ['XLA_PERSISTENT_CACHE_PATH'] = path + os.environ['XLA_PERSISTENT_CACHE_READ_ONLY'] = '1' if readonly else '0'
+
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_modules/torch_xla/torch_xla.html b/release/2.5/_modules/torch_xla/torch_xla.html new file mode 100644 index 00000000000..7875958cc0e --- /dev/null +++ b/release/2.5/_modules/torch_xla/torch_xla.html @@ -0,0 +1,939 @@ + + + + + + + + + + + + torch_xla.torch_xla — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +

Source code for torch_xla.torch_xla

+import sys
+import collections
+import contextlib
+import functools
+import uuid
+from typing import Any, Callable, List, Optional, Tuple
+import weakref
+
+import torch
+import torch.distributed as dist
+import torch_xla
+import torch_xla.core.xla_model as xm
+import torch_xla.core.xla_env_vars as xenv
+import torch_xla.distributed.xla_multiprocessing as xmp
+import torch_xla.runtime as xr
+import torch_xla.utils.utils as xu
+
+
+
[docs]def device(index: int = None) -> torch.device: + """Returns a given instance of an XLA device. + + If SPMD enables, returns a virtual device that wraps all devices available + to this process. + + Args: + index: index of the XLA device to be returned. Corresponds to index in + `torch_xla.devices()`. + + Returns: + An XLA `torch.device`. + """ + + return xm.xla_device(index)
+ + +
[docs]def devices() -> List[torch.device]: + """Returns all devices available in the current process. + + Returns: + A list of XLA `torch.devices`. + """ + + return [torch.device(d) for d in xm.get_xla_supported_devices()]
+ + +def real_devices() -> List[str]: + """Returns local XLA device types and indices. + + Returns: + A list strings representing the XLA devices available in the current + process, e.g. `['TPU:0', 'TPU:1', ...]`. + """ + + return torch_xla._XLAC._xla_real_devices() + + +
[docs]def device_count() -> int: + """Returns number of addressable devices in the current process.""" + return len(real_devices())
+ + +
[docs]def sync(wait: bool = False): + """Launches all pending graph operations. + + Args: + wait (bool): whether to block the current process until the execution finished. + + """ + torch_xla._XLAC._xla_step_marker( + torch_xla._XLAC._xla_get_default_device(), + [], + wait=wait, + ) + devctx = xm._run_step_closures() + torch_xla._XLAC._set_all_reduce_token(devctx.device, None)
+ + +def step(): + """Wraps code that should be dispatched to the runtime. + + Experimental: `xla.step` is still a work in progress. Some code that currently + works with `xla.step` but does not follow best practices will become errors in + future releases. See https://github.com/pytorch/xla/issues/6751 for context. + """ + return compile() + + +# Keeps track of the alive functions. This allow us to remove session entries in the +# C++ side for functions that are no longer alive. +_compiled_id_to_functions_ref = weakref.WeakValueDictionary() + + +
[docs]def compile( + f: Optional[Callable] = None, + full_graph: Optional[bool] = False, + name: Optional[str] = None, + num_different_graphs_allowed: Optional[int] = None, +): + """ + Optimizes given model/function using torch_xla's LazyTensor tracing mode. + PyTorch/XLA will trace the given function with given inputs and then generate + graphs to represent the pytorch operations happens within this function. This + graph will be compiled by the XLA and executed on the accelerator(decided by the + tensor's device). Eager mode will be disabled for the compiled region of the funciton. + + Args: + model (Callable): Module/function to optimize, if not passed this function will + act as a context manager. + full_graph (Optional[bool]): Whether this compile should generate a single graph. If set to True + and multiple graphs will be generated torch_xla will throw an error with debug info + and exit. + name (Optional[name]): Name of the compiled program. The name of the function `f` will be used + if not specified. This name will be used in the `PT_XLA_DEBUG` messages as well as HLO/IR dump + file. + num_different_graphs_allowed (Optional[int]): number of different traced graphs of the given + model/function that we are allowed to have. An error will be raised in case this limit + is exceeded. + + Example:: + + # usage 1 + @torch_xla.compile() + def foo(x): + return torch.sin(x) + torch.cos(x) + + def foo2(x): + return torch.sin(x) + torch.cos(x) + # usage 2 + compiled_foo2 = torch_xla.compile(foo2) + + # usage 3 + with torch_xla.compile(): + res = foo2(x) + """ + if name is None and f is not None: + if hasattr(f, '__name__'): + name = f.__name__ + elif hasattr(f, '__str__'): + name = f.__str__() + + if f is not None: + current_id = f"{name}_{id(f)}" + else: + current_id = str(uuid.uuid4()) + + # Check whether the function/module that corresponds with current_id is still alive. 
If it's not, + # we can remove it from the session's map in the C++ side, so we can start a fresh session. + # + # This solves the issue where there are 2 different local-scoped functions with the same name. + # Since they are local-scoped, they might end-up with the same id. And, since they have the same + # name, their current_id will be the same, even though they are different functions. + # + # This issue was observed when running test_dynamic_shape_detector.py. + if current_id not in _compiled_id_to_functions_ref: + torch_xla._XLAC._dynamic_shape_detector_remove_session(current_id) + + if f is not None: + _compiled_id_to_functions_ref[current_id] = f + + def _clear_pending_ops_before_compile(): + sync() + + @contextlib.contextmanager + def _compile(): + saved_eager_mode_status = torch_xla._XLAC._get_use_eager_mode() + saved_allow_execution = torch_xla._XLAC._get_allow_execution() + saved_current_graph_name = torch_xla._XLAC._get_current_graph_name() + torch_xla._XLAC._set_use_eager_mode(False) + if name is not None: + torch_xla._XLAC._set_current_graph_name(name + '_clear_pending') + # Clear pending operations + _clear_pending_ops_before_compile() + + if name is not None: + torch_xla._XLAC._set_current_graph_name(name) + + # if full_graph sets to true execution can not happen before the sync below + torch_xla._XLAC._set_allow_execution(not full_graph) + + if num_different_graphs_allowed is not None: + torch_xla._XLAC._dynamic_shape_detector_set_max_num_different_graphs_allowed( + num_different_graphs_allowed) + torch_xla._XLAC._dynamic_shape_detector_start_session(current_id) + + try: + yield + finally: + torch_xla._XLAC._set_allow_execution(saved_allow_execution) + if num_different_graphs_allowed is not None: + torch_xla._XLAC._dynamic_shape_detector_end_session() + # Collect the traced graph after running the target function and + # execute the graph. 
+ sync() + torch_xla._XLAC._set_use_eager_mode(saved_eager_mode_status) + torch_xla._XLAC._set_current_graph_name(saved_current_graph_name) + + return _compile() if f is None else _compile()(f)
+ + +
[docs]def manual_seed(seed, device=None): + """Set the seed for generating random numbers for the current XLA device. + + Args: + seed (integer): The state to be set. + device (torch.device, optional): The device where the RNG state needs to be set. + If missing the default device seed will be set. + """ + xm.set_rng_state(seed, device)
+ + +# TODO(wcromar): Update args to type ParamSpec. +def launch( + fn: Callable, + args: Tuple = (), + start_method: str = 'spawn', + debug_single_process: bool = False, +): + """ Entry to launch multiprocess. + + Raises: + NotImplementedError: SPMD is not supported yet. + """ + if xr.is_spmd(): + # TODO(piz): SPMD is specified differently from mp. Skip for now. + raise NotImplementedError( + 'launch function does not support SPMD at this time') + + nprocs = 1 if debug_single_process else None + + if dist.is_torchelastic_launched(): + fn(xu.getenv_as(xenv.LOCAL_RANK, int), *args) + else: + xmp.spawn(fn, args=args, nprocs=nprocs, start_method=start_method) +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/_sources/debug.rst.txt b/release/2.5/_sources/debug.rst.txt new file mode 100644 index 00000000000..7c6a6eee671 --- /dev/null +++ b/release/2.5/_sources/debug.rst.txt @@ -0,0 +1 @@ +.. mdinclude:: ../../TROUBLESHOOTING.md \ No newline at end of file diff --git a/release/2.5/_sources/eager_mode.rst.txt b/release/2.5/_sources/eager_mode.rst.txt new file mode 100644 index 00000000000..05e7d359e1d --- /dev/null +++ b/release/2.5/_sources/eager_mode.rst.txt @@ -0,0 +1 @@ +.. mdinclude:: ../eager.md \ No newline at end of file diff --git a/release/2.5/_sources/gpu.rst.txt b/release/2.5/_sources/gpu.rst.txt new file mode 100644 index 00000000000..79d8385467a --- /dev/null +++ b/release/2.5/_sources/gpu.rst.txt @@ -0,0 +1 @@ +.. mdinclude:: ../gpu.md \ No newline at end of file diff --git a/release/2.5/_sources/index.rst.txt b/release/2.5/_sources/index.rst.txt new file mode 100644 index 00000000000..edd6bcc5372 --- /dev/null +++ b/release/2.5/_sources/index.rst.txt @@ -0,0 +1,108 @@ +:github_url: https://github.com/pytorch/xla + +PyTorch/XLA documentation +=================================== +PyTorch/XLA is a Python package that uses the XLA deep learning compiler to connect the PyTorch deep learning framework and Cloud TPUs. + +.. toctree:: + :hidden: + + self + +.. toctree:: + :glob: + :maxdepth: 1 + :caption: Docs + + * + +.. mdinclude:: ../../API_GUIDE.md + +PyTorch/XLA API +================================== + +torch_xla +---------------------------------- +.. automodule:: torch_xla +.. autofunction:: device +.. autofunction:: devices +.. autofunction:: device_count +.. autofunction:: sync +.. autofunction:: compile +.. autofunction:: manual_seed + +runtime +---------------------------------- +.. automodule:: torch_xla.runtime +.. autofunction:: device_type +.. autofunction:: local_process_count +.. autofunction:: local_device_count +.. autofunction:: addressable_device_count +.. 
autofunction:: global_device_count +.. autofunction:: global_runtime_device_count +.. autofunction:: world_size +.. autofunction:: global_ordinal +.. autofunction:: local_ordinal +.. autofunction:: get_master_ip +.. autofunction:: use_spmd +.. autofunction:: is_spmd +.. autofunction:: initialize_cache + + +xla_model +---------------------------------- + +.. automodule:: torch_xla.core.xla_model +.. autofunction:: xla_device +.. autofunction:: xla_device_hw +.. autofunction:: is_master_ordinal +.. autofunction:: all_reduce +.. autofunction:: all_gather +.. autofunction:: all_to_all +.. autofunction:: add_step_closure +.. autofunction:: wait_device_ops +.. autofunction:: optimizer_step +.. autofunction:: save +.. autofunction:: rendezvous +.. autofunction:: mesh_reduce +.. autofunction:: set_rng_state +.. autofunction:: get_rng_state +.. autofunction:: get_memory_info +.. autofunction:: get_stablehlo +.. autofunction:: get_stablehlo_bytecode + +distributed +---------------------------------- + +.. automodule:: torch_xla.distributed.parallel_loader +.. autoclass:: MpDeviceLoader + +.. automodule:: torch_xla.distributed.xla_multiprocessing +.. autofunction:: spawn + +spmd +---------------------------------- +.. automodule:: torch_xla.distributed.spmd +.. autofunction:: mark_sharding +.. autofunction:: clear_sharding +.. autofunction:: set_global_mesh +.. autofunction:: get_global_mesh +.. autofunction:: get_1d_mesh +.. autoclass:: Mesh +.. autoclass:: HybridMesh + +experimental +---------------------------------- +.. automodule:: torch_xla.experimental +.. autofunction:: eager_mode + +debug +---------------------------------- + +.. automodule:: torch_xla.debug.metrics +.. autofunction:: metrics_report +.. autofunction:: short_metrics_report +.. autofunction:: counter_names +.. autofunction:: counter_value +.. autofunction:: metric_names +.. 
autofunction:: metric_data diff --git a/release/2.5/_sources/multi_process_distributed.rst.txt b/release/2.5/_sources/multi_process_distributed.rst.txt new file mode 100644 index 00000000000..f8f25e5c05a --- /dev/null +++ b/release/2.5/_sources/multi_process_distributed.rst.txt @@ -0,0 +1,2 @@ +.. mdinclude:: ../ddp.md +.. mdinclude:: ../fsdp.md \ No newline at end of file diff --git a/release/2.5/_sources/notes/source_of_recompilation.md.txt b/release/2.5/_sources/notes/source_of_recompilation.md.txt new file mode 100644 index 00000000000..bb253cb5fc4 --- /dev/null +++ b/release/2.5/_sources/notes/source_of_recompilation.md.txt @@ -0,0 +1,176 @@ +# Source of recompilations in torch_xla + +## Let’s first start with some facts/constraints: + +1. Graph compilations in XLA are pretty expensive. +2. XLA handles static shape only. In other words, even for the same IR graph, XLA recompiles when input shape changes. +3. Recompilations hurts torch_xla perf a lot when it happens, and it’s hard to understand and debug from a normal python user POV. + +Often when recompilation happens we say we just need dynamic shape support and then rest assured that when dynamic shape is supported in the future, all the recompilations will be magically gone. But this is not true, XLA now has pretty good bounded dynamic shapes coverage already, but we still see recompilations and they are expected. + +***This doc aims to provide a detailed explanation of a few common sources of recompilations, and what do we need to get rid of them. It will mainly focus on explaining the problem to beginners without any context. To make it easy to understand, the “solutions” proposed here may rely on impractical assumptions.* ** + +## #1. From input dataset. + +Yes it’s pretty common that input dataset contains examples with different shapes, e.g. sentences with varying length or images with different sizes. Without normalization, it’ll cause recompilation for every new input shape. 
+ +Tensorflow graph mode users are more used to do padding/bucketization (`tf.pad`) to normalize input shapes to one or a few buckets. But this is kinda anti-pattern for PyTorch eager frontend users (which is the same user lazy tensor frontend is trying to target) since different input shapes just doesn’t matter for eager CPU/CUDA backend. + +**Proposed workaround:** okay now let’s say we can work around this problem by teaching our users to do padding/bucketization (it’s hard in practice :P). What’s next? + +## #2. From operator output + +There are certain operators semantically are data-dependent and produce dynamic shape outputs: e.g. `torch.nonzero` returns indices of nonzero elements in its input tensor. So even your input tensors to this operator always have the same shape, it might produce different shape outputs and cause recompilations. + +### 2.1 Bounded dynamic shape can fix the case when you use the tensor with dynamic shape as a Tensor, without querying its real dimension. + +**Proposed workaround:** let’s say now XLA supports bounded dynamic shape for all operators, is it good enough? + +* by bounded dynamic shape it means we can pad the tensor to a theoretical max, trading more memory usage for less recompilation/faster speed. + +Well, sort of. 
Let’s see the following example: + + +``` +a = torch.tensor([1, 2, 0, 1, 3], device='xla') +b = torch.nonzero(a) +c = b * 2 +d = c + 1 +print(torch_xla._XLAC._get_xla_tensors_text([d])) +``` + +In the example above every node below `b` in the graph (namely `c, d` and everything depend on them) will have dynamic shape, it’s pretty obvious that `b` has dynamic shape in dimension 0 as shown below: + + +``` + %9 = (s64[<=5,1]{1,0}, s64[]) aten::nonzero(%8), num_outputs=2 # b + %10 = s64[5,1]{1,0} aten::mul(%9.0, %3) # c + %11 = s64[5,1]{1,0} aten::add(%10, %2), ROOT=0 # d +``` + +Although it's not shown directly in the graph, `c & d` indeed also have dynamic shape (in other words, [5, 1] is just padded shape and it's masked). + +``` +print(torch_xla._XLAC._get_xla_tensor_dimension_size(d, 0)) # prints 4 instead of 5 +``` + +You can see that in this case as long as the input tensor `a` has shape `[5]` we only compile the graph once. Bounded dynamic shape support helped! + +### 2.2 what if real dimension is queried on a tensor with dynamic shape? + +This is actually pretty commonly used since not all PyTorch computation are done in the form of Tensors. + +For example, `tensor.size()` in PyTorch returns a tuple of ints instead of a Tensor of dtype=int. When `tensor` is a dynamic shape tensor, this op basically forces XLA to cut the graph and evaluate so that we can return the correct scalar (otherwise it’ll just return the padded shape which is wrong). + +What’s made it worse is that many PyTorch takes scalar inputs as well. After you do `s = tensor.size(0)` and use `s` in other operators it also becomes a dynamic source. In this case we probably know how to pad it and its upper bound, but we cannot do it since it’s not even a Tensor! + + +``` + a = torch.tensor([1, 2, 0, 1, 3], device='xla') + b = torch.nonzero(a) + s = a.size(0) # evaluation happens! nit: we use size() for simplicity, the actual API is _get_xla_tensor_dimension_size. 
+ c = torch.rand(s, device='xla') # c can be of any shape between [0, 5] which causes more recompilations! + d = c + 1 +``` + +So this one is actually hard to solve without PyTorch frontend’s help. What do we need? + +In short, we need a Tensor world! + +For example, + +* `tensor.size()` should return a Tensor so that it can be a Tensor with dynamic shape and kept in the graph without early evaluation. +* Tensor accessor, e.g. for 2D tensor, `tensor[0][0]` now returns a value but this need to return a tensor as well. +* Implicitly this means all operators currently taking int/float/double as input need a Tensor overload as well. THIS IS A BIG ASK as it can easily explode our operator set. + * It’s easier if we can make scalar to Tensor conversion really cheap so that we can only care about the Tensor overload. + * In practice not all ops takes scalars from previous computation, so we’ve been adding Tensor variants by ad-hoc requests. + * This is also a common ask from tracing base approaches I think. + +Okay now that we assume every op in PyTorch has a Tensor verison we need, are we done? + +## #3. From control flow + +No! We actually only solved the problem without data dependent control flow... + +See the example below: + +``` +if x[0][0] == 3: + bla +else: + blabla +``` + +Even if `x[0][0]` was a Tensor, we need to execute/materialize its value for python interpreter to proceed. And different branch choices in multiple control flows combined means we have a lot of graph to compile as well! + +For now we just have no way to fix this. To fix it we need to lower the control flow from python to graph! Without too much thinking in implementation we can do this in two ways: + +* ask users to explicitly use a control flow op instead of python if/else/while/for. This is currently supported as [customized API in torch_xla](https://github.com/pytorch/xla/blob/master/torch_xla/core/xla_builder.py#L563-L574) but not widely adopted in users’ code. 
(python users are used to if/else/for and it’s hard to switch them to a uglier API unless there’s a huge perf win). +* parse python source. code to get the control flow statement automatically. This is like Torchscript and somehow merge the torchscripted graph into the lazily trace graph properly (including shape info etc). I haven’t thought through the steps of how to implement this indeed :P + +But either solution above requires non-trivial amount of effort, either on user side or on the framework side. That’s why we currently just take the hit of early evaluation & multiple compilations as a short term solution given the bandwidth we have. + +Okay so now we assume that also have control flow lowered in the graph automagically, are we gold? + +YES! Now you have your whole computation represented in a graph of Tensor operations, including control flow so that compilers can now consume and do their smart tricks! But tbh at this point your program is no longer very PyTorch-y. + + +## Conclusion: + +There’re actually multiple sources of recompilation and bounded dynamic shape support cannot solve all of them. The proposed workarounds in this doc are definitely sometimes impractical, and there might be better ways to fix each source properly that I’m totally unaware of. But I hope as we keep smashing our way to an ideal lazy tensor stack in this doc, it’s now easier for you understand what’re the remaining blockers ahead of us. + + +## Appendix: + +1. NNC uses symbolic shapes, does that help? + +Yes but partially. By having symbolic shape, your compilation optimization no longer requires concrete shape values. In other words your generated kernel are more general than XLA’s static shape ones. + +And which exactly problem does it help? + +It helps with cases like #1 and #2.1. + + +``` +shape [3, 5] -> add -> transpose -> ... -> mul +shape [6, 2] -> add -> transpose -> ... -> mul + +# with symbolic shape +shape [x, y] -> add -> transpose -> ... 
-> mul +``` + +With symbolic shape your generated kernel doesn’t recompile as XLA does with static shapes. + +XLA solves this problem in the other way, by using padding/bucketization (for #1) and bounded dynamic shape (for #2.1). + +Brian Hirsh(@bdhirsh) asked some really good questions in the comment, moving here to make them more visible: + +2. Is it worth sticking a TORCH_WARN in the XLA kernels of ops that produce data-dependent output shapes? + +Yea torch_warn is useful in telling users "hey your program won't run blazing fast". But for these data dependent ops, there isn't an easy rewrite for them unless users change the logic in their model. (another example is torch.unique()) + +3. How ops like nonzero impact our ability to devirtualize sizes()? If we want to devirtualize sizes(), we’ll need to be able to eagerly compute sizes for each op - won’t that mean we’re forced to evaluate the graph every time we hit an op like nonzero? Vs. right now, it sounds like we don't actually force an evaluation when a user calls nonzero()? + +Yea great question! So in the current form it’s not a hard blocker since size() on XLA Tensors doesn’t carry source of truth size information. As shown in the example, the source of truth lives in IRValue and can be retrieved by `_get_xla_tensor_dimension_size` only. So if we decide to devirtualize size it’ll just enforce this discrepancy. + +As a followup if we have `size()` return Tensor instead of values as mentioned in the proposed workarounds above. In that case size() won’t be able to devirtualize since it becomes an operator (taking in Tensor and produce Tensor, have different implementations for different backends.) + +4. If I, e.g. call `torch.add(input, 1)` in a loop, where input varies in size from 1-1000, normally we would have to compile 1000 different graphs - but with dynamic shapes, it sounds like XLA will internally be able to generate a single graph where it says “use this graph if the input size is <=1000”. 
My question is: is “dynamic shape” a property of just the graph? Or of both the graph and the input. I.e. if my code were instead calling `x = torch.add(input, 1); x.sizes()` in a loop, does x have a dynamic shape at this point, meaning we’d need to run the graph to get the sizes? Or are we able to make it an eagerly computed property even in the presence of graphs with dynamic shapes. + +Yea in this case you'll compile 1000 different graphs. Dynamic shapes means its input has dynamic dimension in it. So when you query `x.sizes()` (currently need use get_dimention_size to get the correct size) it'll trigger *execution* (since the size didn't change it doesn't trigger recompilation). Without the line accessing size, it won't trigger any recompilation/execution when input has dynamic dimension. + +5. Would an alternative of making control flow available in the graph be just to come up with a way to ensure that XLA graphs don't include control flow? i.e. if we have a model with a single conditional in the middle, then get XLA to produce 3 graphs: 1 for everything before the conditional, 1 for the if branch, and 1 for the else branch. That would mean you don't get the exponential blowup of new graphs for every combination of paths taken, but (a) the graphs are smaller and provide fewer optimization opportunities, and (b) it would probably be pretty non-trivial to get XLA to recognize where a conditional path is taken. + +Great point! So if we could break them up into smaller graphs it's indeed feasible. But in practice this pattern is annoying: + +``` +y = +x = y + 2 +if x[0] == 2 : + z = y +1 +else: + z = y - 1 +``` + +Note you'll evaluate x using a subgraph when you hit control flow, but there might be previous variable included in the branch computation as well (like` y` is just one node smaller than x, but it wasn't materizalized when you evaluate `x`). So you're actually evaluating 1 small graph and two big graphs for this example. 
And with more control flow involved, y could get updated in multiple branches, which still produces different combos of large graphs.
+ * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. 
+ */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/release/2.5/_static/basic.css b/release/2.5/_static/basic.css new file mode 100644 index 00000000000..eeb0519a69b --- /dev/null +++ b/release/2.5/_static/basic.css @@ -0,0 +1,899 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} 
+ +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid 
#ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + 
+.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + 
border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, 
.sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} +dl.footnote > dt, +dl.citation > dt { + float: left; + margin-right: 0.5em; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} +dl.field-list > dt:after { + content: ":"; +} + + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; 
+ font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + 
-webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/release/2.5/_static/css/pytorch_theme.css b/release/2.5/_static/css/pytorch_theme.css new file mode 100644 index 00000000000..0e54497643c --- /dev/null +++ b/release/2.5/_static/css/pytorch_theme.css @@ -0,0 +1,118 @@ +body { + font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; +} + +/* Default header fonts are ugly */ +h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption { + font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; +} + +/* Use white for docs background */ +.wy-side-nav-search { + background-color: #fff; +} + 
+.wy-nav-content-wrap, .wy-menu li.current > a { + background-color: #fff; +} + +@media screen and (min-width: 1400px) { + .wy-nav-content-wrap { + background-color: rgba(0, 0, 0, 0.0470588); + } + + .wy-nav-content { + background-color: #fff; + } +} + +/* Fixes for mobile */ +.wy-nav-top { + background-color: #fff; + background-image: url('../img/pytorch-logo-dark.svg'); + background-repeat: no-repeat; + background-position: center; + padding: 0; + margin: 0.4045em 0.809em; + color: #333; +} + +.wy-nav-top > a { + display: none; +} + +@media screen and (max-width: 768px) { + .wy-side-nav-search>a img.logo { + height: 60px; + } +} + +/* This is needed to ensure that logo above search scales properly */ +.wy-side-nav-search a { + display: block; +} + +/* This ensures that multiple constructors will remain in separate lines. */ +.rst-content dl:not(.docutils) dt { + display: table; +} + +/* Use our red for literals (it's very similar to the original color) */ +.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { + color: #F05732; +} + +.rst-content tt.xref, a .rst-content tt, .rst-content tt.xref, +.rst-content code.xref, a .rst-content tt, a .rst-content code { + color: #404040; +} + +/* Change link colors (except for the menu) */ + +a { + color: #F05732; +} + +a:hover { + color: #F05732; +} + + +a:visited { + color: #D44D2C; +} + +.wy-menu a { + color: #b3b3b3; +} + +.wy-menu a:hover { + color: #b3b3b3; +} + +/* Default footer text is quite big */ +footer { + font-size: 80%; +} + +footer .rst-footer-buttons { + font-size: 125%; /* revert footer settings - 1/80% = 125% */ +} + +footer p { + font-size: 100%; +} + +/* For hidden headers that appear in TOC tree */ +/* see http://stackoverflow.com/a/32363545/3343043 */ +.rst-content .hidden-section { + display: none; +} + +nav .hidden-section { + display: inherit; +} + +.wy-side-nav-search>div.version { + color: #000; +} diff --git a/release/2.5/_static/css/theme.css 
b/release/2.5/_static/css/theme.css new file mode 100644 index 00000000000..c04db2d2c84 --- /dev/null +++ b/release/2.5/_static/css/theme.css @@ -0,0 +1,12594 @@ +@charset "UTF-8"; +/*! + * Bootstrap v4.0.0 (https://getbootstrap.com) + * Copyright 2011-2018 The Bootstrap Authors + * Copyright 2011-2018 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ +:root { + --blue: #007bff; + --indigo: #6610f2; + --purple: #6f42c1; + --pink: #e83e8c; + --red: #dc3545; + --orange: #fd7e14; + --yellow: #ffc107; + --green: #28a745; + --teal: #20c997; + --cyan: #17a2b8; + --white: #fff; + --gray: #6c757d; + --gray-dark: #343a40; + --primary: #007bff; + --secondary: #6c757d; + --success: #28a745; + --info: #17a2b8; + --warning: #ffc107; + --danger: #dc3545; + --light: #f8f9fa; + --dark: #343a40; + --breakpoint-xs: 0; + --breakpoint-sm: 576px; + --breakpoint-md: 768px; + --breakpoint-lg: 992px; + --breakpoint-xl: 1200px; + --font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + --font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +*, +*::before, +*::after { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +html { + font-family: sans-serif; + line-height: 1.15; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; + -ms-overflow-style: scrollbar; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); +} + +@-ms-viewport { + width: device-width; +} +article, aside, dialog, figcaption, figure, footer, header, hgroup, main, nav, section { + display: block; +} + +body { + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #212529; + text-align: left; 
+ background-color: #fff; +} + +[tabindex="-1"]:focus { + outline: 0 !important; +} + +hr { + -webkit-box-sizing: content-box; + box-sizing: content-box; + height: 0; + overflow: visible; +} + +h1, h2, h3, h4, h5, h6 { + margin-top: 0; + margin-bottom: 0.5rem; +} + +p { + margin-top: 0; + margin-bottom: 1rem; +} + +abbr[title], +abbr[data-original-title] { + text-decoration: underline; + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; + cursor: help; + border-bottom: 0; +} + +address { + margin-bottom: 1rem; + font-style: normal; + line-height: inherit; +} + +ol, +ul, +dl { + margin-top: 0; + margin-bottom: 1rem; +} + +ol ol, +ul ul, +ol ul, +ul ol { + margin-bottom: 0; +} + +dt { + font-weight: 700; +} + +dd { + margin-bottom: .5rem; + margin-left: 0; +} + +blockquote { + margin: 0 0 1rem; +} + +dfn { + font-style: italic; +} + +b, +strong { + font-weight: bolder; +} + +small { + font-size: 80%; +} + +sub, +sup { + position: relative; + font-size: 75%; + line-height: 0; + vertical-align: baseline; +} + +sub { + bottom: -.25em; +} + +sup { + top: -.5em; +} + +a { + color: #007bff; + text-decoration: none; + background-color: transparent; + -webkit-text-decoration-skip: objects; +} +a:hover { + color: #0056b3; + text-decoration: underline; +} + +a:not([href]):not([tabindex]) { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus { + color: inherit; + text-decoration: none; +} +a:not([href]):not([tabindex]):focus { + outline: 0; +} + +pre, +code, +kbd, +samp { + font-family: monospace, monospace; + font-size: 1em; +} + +pre { + margin-top: 0; + margin-bottom: 1rem; + overflow: auto; + -ms-overflow-style: scrollbar; +} + +figure { + margin: 0 0 1rem; +} + +img { + vertical-align: middle; + border-style: none; +} + +svg:not(:root) { + overflow: hidden; +} + +table { + border-collapse: collapse; +} + +caption { + padding-top: 0.75rem; + padding-bottom: 0.75rem; + color: 
#6c757d; + text-align: left; + caption-side: bottom; +} + +th { + text-align: inherit; +} + +label { + display: inline-block; + margin-bottom: .5rem; +} + +button { + border-radius: 0; +} + +button:focus { + outline: 1px dotted; + outline: 5px auto -webkit-focus-ring-color; +} + +input, +button, +select, +optgroup, +textarea { + margin: 0; + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +button, +input { + overflow: visible; +} + +button, +select { + text-transform: none; +} + +button, +html [type="button"], +[type="reset"], +[type="submit"] { + -webkit-appearance: button; +} + +button::-moz-focus-inner, +[type="button"]::-moz-focus-inner, +[type="reset"]::-moz-focus-inner, +[type="submit"]::-moz-focus-inner { + padding: 0; + border-style: none; +} + +input[type="radio"], +input[type="checkbox"] { + -webkit-box-sizing: border-box; + box-sizing: border-box; + padding: 0; +} + +input[type="date"], +input[type="time"], +input[type="datetime-local"], +input[type="month"] { + -webkit-appearance: listbox; +} + +textarea { + overflow: auto; + resize: vertical; +} + +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; +} + +legend { + display: block; + width: 100%; + max-width: 100%; + padding: 0; + margin-bottom: .5rem; + font-size: 1.5rem; + line-height: inherit; + color: inherit; + white-space: normal; +} + +progress { + vertical-align: baseline; +} + +[type="number"]::-webkit-inner-spin-button, +[type="number"]::-webkit-outer-spin-button { + height: auto; +} + +[type="search"] { + outline-offset: -2px; + -webkit-appearance: none; +} + +[type="search"]::-webkit-search-cancel-button, +[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} + +::-webkit-file-upload-button { + font: inherit; + -webkit-appearance: button; +} + +output { + display: inline-block; +} + +summary { + display: list-item; + cursor: pointer; +} + +template { + display: none; +} + +[hidden] { + display: none !important; +} + +h1, h2, h3, 
h4, h5, h6, +.h1, .h2, .h3, .h4, .h5, .h6 { + margin-bottom: 0.5rem; + font-family: inherit; + font-weight: 500; + line-height: 1.2; + color: inherit; +} + +h1, .h1 { + font-size: 2.5rem; +} + +h2, .h2 { + font-size: 2rem; +} + +h3, .h3 { + font-size: 1.75rem; +} + +h4, .h4 { + font-size: 1.5rem; +} + +h5, .h5 { + font-size: 1.25rem; +} + +h6, .h6 { + font-size: 1rem; +} + +.lead { + font-size: 1.25rem; + font-weight: 300; +} + +.display-1 { + font-size: 6rem; + font-weight: 300; + line-height: 1.2; +} + +.display-2 { + font-size: 5.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-3 { + font-size: 4.5rem; + font-weight: 300; + line-height: 1.2; +} + +.display-4 { + font-size: 3.5rem; + font-weight: 300; + line-height: 1.2; +} + +hr { + margin-top: 1rem; + margin-bottom: 1rem; + border: 0; + border-top: 1px solid rgba(0, 0, 0, 0.1); +} + +small, +.small { + font-size: 80%; + font-weight: 400; +} + +mark, +.mark { + padding: 0.2em; + background-color: #fcf8e3; +} + +.list-unstyled { + padding-left: 0; + list-style: none; +} + +.list-inline { + padding-left: 0; + list-style: none; +} + +.list-inline-item { + display: inline-block; +} +.list-inline-item:not(:last-child) { + margin-right: 0.5rem; +} + +.initialism { + font-size: 90%; + text-transform: uppercase; +} + +.blockquote { + margin-bottom: 1rem; + font-size: 1.25rem; +} + +.blockquote-footer { + display: block; + font-size: 80%; + color: #6c757d; +} +.blockquote-footer::before { + content: "\2014 \00A0"; +} + +.img-fluid { + max-width: 100%; + height: auto; +} + +.img-thumbnail { + padding: 0.25rem; + background-color: #fff; + border: 1px solid #dee2e6; + border-radius: 0.25rem; + max-width: 100%; + height: auto; +} + +.figure { + display: inline-block; +} + +.figure-img { + margin-bottom: 0.5rem; + line-height: 1; +} + +.figure-caption { + font-size: 90%; + color: #6c757d; +} + +code, +kbd, +pre, +samp { + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", 
monospace; +} + +code { + font-size: 87.5%; + color: #e83e8c; + word-break: break-word; +} +a > code { + color: inherit; +} + +kbd { + padding: 0.2rem 0.4rem; + font-size: 87.5%; + color: #fff; + background-color: #212529; + border-radius: 0.2rem; +} +kbd kbd { + padding: 0; + font-size: 100%; + font-weight: 700; +} + +pre { + display: block; + font-size: 87.5%; + color: #212529; +} +pre code { + font-size: inherit; + color: inherit; + word-break: normal; +} + +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; +} + +.container { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} +@media (min-width: 576px) { + .container { + max-width: 540px; + } +} +@media (min-width: 768px) { + .container { + max-width: 720px; + } +} +@media (min-width: 992px) { + .container { + max-width: 960px; + } +} +@media (min-width: 1200px) { + .container { + max-width: 1140px; + } +} + +.container-fluid { + width: 100%; + padding-right: 15px; + padding-left: 15px; + margin-right: auto; + margin-left: auto; +} + +.row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -15px; + margin-left: -15px; +} + +.no-gutters { + margin-right: 0; + margin-left: 0; +} +.no-gutters > .col, +.no-gutters > [class*="col-"] { + padding-right: 0; + padding-left: 0; +} + +.col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col, +.col-auto, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm, +.col-sm-auto, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md, +.col-md-auto, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg, +.col-lg-auto, .col-xl-1, 
.col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl, +.col-xl-auto { + position: relative; + width: 100%; + min-height: 1px; + padding-right: 15px; + padding-left: 15px; +} + +.col { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; +} + +.col-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; +} + +.col-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; +} + +.col-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; +} + +.col-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; +} + +.col-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; +} + +.col-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; +} + +.col-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; +} + +.col-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; +} + +.col-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; +} + +.col-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; +} + +.col-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; +} + +.col-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; +} + +.col-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; +} + +.order-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; +} + 
+.order-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; +} + +.order-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; +} + +.order-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; +} + +.order-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; +} + +.order-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; +} + +.order-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; +} + +.order-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; +} + +.order-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; +} + +.order-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; +} + +.order-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; +} + +.order-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; +} + +.order-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; +} + +.order-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; +} + +.order-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; +} + +.offset-1 { + margin-left: 8.3333333333%; +} + +.offset-2 { + margin-left: 16.6666666667%; +} + +.offset-3 { + margin-left: 25%; +} + +.offset-4 { + margin-left: 33.3333333333%; +} + +.offset-5 { + margin-left: 41.6666666667%; +} + +.offset-6 { + margin-left: 50%; +} + +.offset-7 { + margin-left: 58.3333333333%; +} + +.offset-8 { + margin-left: 66.6666666667%; +} + +.offset-9 { + margin-left: 75%; +} + +.offset-10 { + margin-left: 83.3333333333%; +} + +.offset-11 { + margin-left: 91.6666666667%; +} + +@media (min-width: 576px) { + .col-sm { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-sm-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-sm-1 { + 
-webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-sm-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-sm-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-sm-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-sm-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-sm-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-sm-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-sm-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-sm-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-sm-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-sm-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-sm-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-sm-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-sm-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-sm-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-sm-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-sm-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-sm-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-sm-4 { + 
-webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-sm-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-sm-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-sm-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-sm-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-sm-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-sm-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-sm-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-sm-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-sm-0 { + margin-left: 0; + } + + .offset-sm-1 { + margin-left: 8.3333333333%; + } + + .offset-sm-2 { + margin-left: 16.6666666667%; + } + + .offset-sm-3 { + margin-left: 25%; + } + + .offset-sm-4 { + margin-left: 33.3333333333%; + } + + .offset-sm-5 { + margin-left: 41.6666666667%; + } + + .offset-sm-6 { + margin-left: 50%; + } + + .offset-sm-7 { + margin-left: 58.3333333333%; + } + + .offset-sm-8 { + margin-left: 66.6666666667%; + } + + .offset-sm-9 { + margin-left: 75%; + } + + .offset-sm-10 { + margin-left: 83.3333333333%; + } + + .offset-sm-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 768px) { + .col-md { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-md-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-md-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-md-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-md-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + 
flex: 0 0 25%; + max-width: 25%; + } + + .col-md-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-md-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-md-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-md-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-md-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-md-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-md-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-md-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-md-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-md-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-md-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-md-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-md-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-md-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-md-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-md-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-md-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-md-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-md-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + 
order: 7; + } + + .order-md-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-md-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-md-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-md-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-md-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-md-0 { + margin-left: 0; + } + + .offset-md-1 { + margin-left: 8.3333333333%; + } + + .offset-md-2 { + margin-left: 16.6666666667%; + } + + .offset-md-3 { + margin-left: 25%; + } + + .offset-md-4 { + margin-left: 33.3333333333%; + } + + .offset-md-5 { + margin-left: 41.6666666667%; + } + + .offset-md-6 { + margin-left: 50%; + } + + .offset-md-7 { + margin-left: 58.3333333333%; + } + + .offset-md-8 { + margin-left: 66.6666666667%; + } + + .offset-md-9 { + margin-left: 75%; + } + + .offset-md-10 { + margin-left: 83.3333333333%; + } + + .offset-md-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 992px) { + .col-lg { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-lg-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-lg-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-lg-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-lg-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-lg-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-lg-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-lg-6 
{ + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-lg-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-lg-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 66.6666666667%; + max-width: 66.6666666667%; + } + + .col-lg-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-lg-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-lg-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-lg-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-lg-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-lg-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-lg-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-lg-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-lg-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-lg-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-lg-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-lg-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-lg-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-lg-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-lg-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-lg-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-lg-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-lg-11 { + 
-webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-lg-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-lg-0 { + margin-left: 0; + } + + .offset-lg-1 { + margin-left: 8.3333333333%; + } + + .offset-lg-2 { + margin-left: 16.6666666667%; + } + + .offset-lg-3 { + margin-left: 25%; + } + + .offset-lg-4 { + margin-left: 33.3333333333%; + } + + .offset-lg-5 { + margin-left: 41.6666666667%; + } + + .offset-lg-6 { + margin-left: 50%; + } + + .offset-lg-7 { + margin-left: 58.3333333333%; + } + + .offset-lg-8 { + margin-left: 66.6666666667%; + } + + .offset-lg-9 { + margin-left: 75%; + } + + .offset-lg-10 { + margin-left: 83.3333333333%; + } + + .offset-lg-11 { + margin-left: 91.6666666667%; + } +} +@media (min-width: 1200px) { + .col-xl { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + max-width: 100%; + } + + .col-xl-auto { + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + width: auto; + max-width: none; + } + + .col-xl-1 { + -webkit-box-flex: 0; + -ms-flex: 0 0 8.3333333333%; + flex: 0 0 8.3333333333%; + max-width: 8.3333333333%; + } + + .col-xl-2 { + -webkit-box-flex: 0; + -ms-flex: 0 0 16.6666666667%; + flex: 0 0 16.6666666667%; + max-width: 16.6666666667%; + } + + .col-xl-3 { + -webkit-box-flex: 0; + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } + + .col-xl-4 { + -webkit-box-flex: 0; + -ms-flex: 0 0 33.3333333333%; + flex: 0 0 33.3333333333%; + max-width: 33.3333333333%; + } + + .col-xl-5 { + -webkit-box-flex: 0; + -ms-flex: 0 0 41.6666666667%; + flex: 0 0 41.6666666667%; + max-width: 41.6666666667%; + } + + .col-xl-6 { + -webkit-box-flex: 0; + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + + .col-xl-7 { + -webkit-box-flex: 0; + -ms-flex: 0 0 58.3333333333%; + flex: 0 0 58.3333333333%; + max-width: 58.3333333333%; + } + + .col-xl-8 { + -webkit-box-flex: 0; + -ms-flex: 0 0 66.6666666667%; + flex: 0 0 
66.6666666667%; + max-width: 66.6666666667%; + } + + .col-xl-9 { + -webkit-box-flex: 0; + -ms-flex: 0 0 75%; + flex: 0 0 75%; + max-width: 75%; + } + + .col-xl-10 { + -webkit-box-flex: 0; + -ms-flex: 0 0 83.3333333333%; + flex: 0 0 83.3333333333%; + max-width: 83.3333333333%; + } + + .col-xl-11 { + -webkit-box-flex: 0; + -ms-flex: 0 0 91.6666666667%; + flex: 0 0 91.6666666667%; + max-width: 91.6666666667%; + } + + .col-xl-12 { + -webkit-box-flex: 0; + -ms-flex: 0 0 100%; + flex: 0 0 100%; + max-width: 100%; + } + + .order-xl-first { + -webkit-box-ordinal-group: 0; + -ms-flex-order: -1; + order: -1; + } + + .order-xl-last { + -webkit-box-ordinal-group: 14; + -ms-flex-order: 13; + order: 13; + } + + .order-xl-0 { + -webkit-box-ordinal-group: 1; + -ms-flex-order: 0; + order: 0; + } + + .order-xl-1 { + -webkit-box-ordinal-group: 2; + -ms-flex-order: 1; + order: 1; + } + + .order-xl-2 { + -webkit-box-ordinal-group: 3; + -ms-flex-order: 2; + order: 2; + } + + .order-xl-3 { + -webkit-box-ordinal-group: 4; + -ms-flex-order: 3; + order: 3; + } + + .order-xl-4 { + -webkit-box-ordinal-group: 5; + -ms-flex-order: 4; + order: 4; + } + + .order-xl-5 { + -webkit-box-ordinal-group: 6; + -ms-flex-order: 5; + order: 5; + } + + .order-xl-6 { + -webkit-box-ordinal-group: 7; + -ms-flex-order: 6; + order: 6; + } + + .order-xl-7 { + -webkit-box-ordinal-group: 8; + -ms-flex-order: 7; + order: 7; + } + + .order-xl-8 { + -webkit-box-ordinal-group: 9; + -ms-flex-order: 8; + order: 8; + } + + .order-xl-9 { + -webkit-box-ordinal-group: 10; + -ms-flex-order: 9; + order: 9; + } + + .order-xl-10 { + -webkit-box-ordinal-group: 11; + -ms-flex-order: 10; + order: 10; + } + + .order-xl-11 { + -webkit-box-ordinal-group: 12; + -ms-flex-order: 11; + order: 11; + } + + .order-xl-12 { + -webkit-box-ordinal-group: 13; + -ms-flex-order: 12; + order: 12; + } + + .offset-xl-0 { + margin-left: 0; + } + + .offset-xl-1 { + margin-left: 8.3333333333%; + } + + .offset-xl-2 { + margin-left: 16.6666666667%; + } + + 
.offset-xl-3 { + margin-left: 25%; + } + + .offset-xl-4 { + margin-left: 33.3333333333%; + } + + .offset-xl-5 { + margin-left: 41.6666666667%; + } + + .offset-xl-6 { + margin-left: 50%; + } + + .offset-xl-7 { + margin-left: 58.3333333333%; + } + + .offset-xl-8 { + margin-left: 66.6666666667%; + } + + .offset-xl-9 { + margin-left: 75%; + } + + .offset-xl-10 { + margin-left: 83.3333333333%; + } + + .offset-xl-11 { + margin-left: 91.6666666667%; + } +} +.table { + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + background-color: transparent; +} +.table th, +.table td { + padding: 0.75rem; + vertical-align: top; + border-top: 1px solid #dee2e6; +} +.table thead th { + vertical-align: bottom; + border-bottom: 2px solid #dee2e6; +} +.table tbody + tbody { + border-top: 2px solid #dee2e6; +} +.table .table { + background-color: #fff; +} + +.table-sm th, +.table-sm td { + padding: 0.3rem; +} + +.table-bordered { + border: 1px solid #dee2e6; +} +.table-bordered th, +.table-bordered td { + border: 1px solid #dee2e6; +} +.table-bordered thead th, +.table-bordered thead td { + border-bottom-width: 2px; +} + +.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(0, 0, 0, 0.05); +} + +.table-hover tbody tr:hover { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-primary, +.table-primary > th, +.table-primary > td { + background-color: #b8daff; +} + +.table-hover .table-primary:hover { + background-color: #9fcdff; +} +.table-hover .table-primary:hover > td, +.table-hover .table-primary:hover > th { + background-color: #9fcdff; +} + +.table-secondary, +.table-secondary > th, +.table-secondary > td { + background-color: #d6d8db; +} + +.table-hover .table-secondary:hover { + background-color: #c8cbcf; +} +.table-hover .table-secondary:hover > td, +.table-hover .table-secondary:hover > th { + background-color: #c8cbcf; +} + +.table-success, +.table-success > th, +.table-success > td { + background-color: #c3e6cb; +} + +.table-hover .table-success:hover { 
+ background-color: #b1dfbb; +} +.table-hover .table-success:hover > td, +.table-hover .table-success:hover > th { + background-color: #b1dfbb; +} + +.table-info, +.table-info > th, +.table-info > td { + background-color: #bee5eb; +} + +.table-hover .table-info:hover { + background-color: #abdde5; +} +.table-hover .table-info:hover > td, +.table-hover .table-info:hover > th { + background-color: #abdde5; +} + +.table-warning, +.table-warning > th, +.table-warning > td { + background-color: #ffeeba; +} + +.table-hover .table-warning:hover { + background-color: #ffe8a1; +} +.table-hover .table-warning:hover > td, +.table-hover .table-warning:hover > th { + background-color: #ffe8a1; +} + +.table-danger, +.table-danger > th, +.table-danger > td { + background-color: #f5c6cb; +} + +.table-hover .table-danger:hover { + background-color: #f1b0b7; +} +.table-hover .table-danger:hover > td, +.table-hover .table-danger:hover > th { + background-color: #f1b0b7; +} + +.table-light, +.table-light > th, +.table-light > td { + background-color: #fdfdfe; +} + +.table-hover .table-light:hover { + background-color: #ececf6; +} +.table-hover .table-light:hover > td, +.table-hover .table-light:hover > th { + background-color: #ececf6; +} + +.table-dark, +.table-dark > th, +.table-dark > td { + background-color: #c6c8ca; +} + +.table-hover .table-dark:hover { + background-color: #b9bbbe; +} +.table-hover .table-dark:hover > td, +.table-hover .table-dark:hover > th { + background-color: #b9bbbe; +} + +.table-active, +.table-active > th, +.table-active > td { + background-color: rgba(0, 0, 0, 0.075); +} + +.table-hover .table-active:hover { + background-color: rgba(0, 0, 0, 0.075); +} +.table-hover .table-active:hover > td, +.table-hover .table-active:hover > th { + background-color: rgba(0, 0, 0, 0.075); +} + +.table .thead-dark th { + color: #fff; + background-color: #212529; + border-color: #32383e; +} +.table .thead-light th { + color: #495057; + background-color: #e9ecef; + 
border-color: #dee2e6; +} + +.table-dark { + color: #fff; + background-color: #212529; +} +.table-dark th, +.table-dark td, +.table-dark thead th { + border-color: #32383e; +} +.table-dark.table-bordered { + border: 0; +} +.table-dark.table-striped tbody tr:nth-of-type(odd) { + background-color: rgba(255, 255, 255, 0.05); +} +.table-dark.table-hover tbody tr:hover { + background-color: rgba(255, 255, 255, 0.075); +} + +@media (max-width: 575.98px) { + .table-responsive-sm { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-sm > .table-bordered { + border: 0; + } +} +@media (max-width: 767.98px) { + .table-responsive-md { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-md > .table-bordered { + border: 0; + } +} +@media (max-width: 991.98px) { + .table-responsive-lg { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-lg > .table-bordered { + border: 0; + } +} +@media (max-width: 1199.98px) { + .table-responsive-xl { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; + } + .table-responsive-xl > .table-bordered { + border: 0; + } +} +.table-responsive { + display: block; + width: 100%; + overflow-x: auto; + -webkit-overflow-scrolling: touch; + -ms-overflow-style: -ms-autohiding-scrollbar; +} +.table-responsive > .table-bordered { + border: 0; +} + +.form-control { + display: block; + width: 100%; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + background-clip: padding-box; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-transition: border-color 0.15s ease-in-out, 
-webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.form-control::-ms-expand { + background-color: transparent; + border: 0; +} +.form-control:focus { + color: #495057; + background-color: #fff; + border-color: #80bdff; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.form-control::-webkit-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::-moz-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::-ms-input-placeholder { + color: #6c757d; + opacity: 1; +} +.form-control::placeholder { + color: #6c757d; + opacity: 1; +} +.form-control:disabled, .form-control[readonly] { + background-color: #e9ecef; + opacity: 1; +} + +select.form-control:not([size]):not([multiple]) { + height: calc(2.25rem + 2px); +} +select.form-control:focus::-ms-value { + color: #495057; + background-color: #fff; +} + +.form-control-file, +.form-control-range { + display: block; + width: 100%; +} + +.col-form-label { + padding-top: calc(0.375rem + 1px); + padding-bottom: calc(0.375rem + 1px); + margin-bottom: 0; + font-size: inherit; + line-height: 1.5; +} + +.col-form-label-lg { + padding-top: calc(0.5rem + 1px); + padding-bottom: calc(0.5rem + 1px); + font-size: 1.25rem; + line-height: 1.5; +} + +.col-form-label-sm { + padding-top: calc(0.25rem + 1px); + padding-bottom: calc(0.25rem + 1px); + font-size: 0.875rem; + line-height: 1.5; +} + +.form-control-plaintext { + display: block; + width: 100%; + padding-top: 0.375rem; + padding-bottom: 0.375rem; + margin-bottom: 0; + line-height: 1.5; + background-color: transparent; + border: solid transparent; + 
border-width: 1px 0; +} +.form-control-plaintext.form-control-sm, .input-group-sm > .form-control-plaintext.form-control, +.input-group-sm > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-sm > .input-group-prepend > .form-control-plaintext.btn, +.input-group-sm > .input-group-append > .form-control-plaintext.btn, .form-control-plaintext.form-control-lg, .input-group-lg > .form-control-plaintext.form-control, +.input-group-lg > .input-group-prepend > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-append > .form-control-plaintext.input-group-text, +.input-group-lg > .input-group-prepend > .form-control-plaintext.btn, +.input-group-lg > .input-group-append > .form-control-plaintext.btn { + padding-right: 0; + padding-left: 0; +} + +.form-control-sm, .input-group-sm > .form-control, +.input-group-sm > .input-group-prepend > .input-group-text, +.input-group-sm > .input-group-append > .input-group-text, +.input-group-sm > .input-group-prepend > .btn, +.input-group-sm > .input-group-append > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +select.form-control-sm:not([size]):not([multiple]), .input-group-sm > select.form-control:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-sm > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-sm > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(1.8125rem + 2px); +} + +.form-control-lg, .input-group-lg > .form-control, +.input-group-lg > .input-group-prepend > .input-group-text, +.input-group-lg > .input-group-append > .input-group-text, +.input-group-lg > .input-group-prepend > .btn, +.input-group-lg > 
.input-group-append > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +select.form-control-lg:not([size]):not([multiple]), .input-group-lg > select.form-control:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.input-group-text:not([size]):not([multiple]), +.input-group-lg > .input-group-prepend > select.btn:not([size]):not([multiple]), +.input-group-lg > .input-group-append > select.btn:not([size]):not([multiple]) { + height: calc(2.875rem + 2px); +} + +.form-group { + margin-bottom: 1rem; +} + +.form-text { + display: block; + margin-top: 0.25rem; +} + +.form-row { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + margin-right: -5px; + margin-left: -5px; +} +.form-row > .col, +.form-row > [class*="col-"] { + padding-right: 5px; + padding-left: 5px; +} + +.form-check { + position: relative; + display: block; + padding-left: 1.25rem; +} + +.form-check-input { + position: absolute; + margin-top: 0.3rem; + margin-left: -1.25rem; +} +.form-check-input:disabled ~ .form-check-label { + color: #6c757d; +} + +.form-check-label { + margin-bottom: 0; +} + +.form-check-inline { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 0; + margin-right: 0.75rem; +} +.form-check-inline .form-check-input { + position: static; + margin-top: 0; + margin-right: 0.3125rem; + margin-left: 0; +} + +.valid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #28a745; +} + +.valid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(40, 
167, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:valid, .form-control.is-valid, +.was-validated .custom-select:valid, +.custom-select.is-valid { + border-color: #28a745; +} +.was-validated .form-control:valid:focus, .form-control.is-valid:focus, +.was-validated .custom-select:valid:focus, +.custom-select.is-valid:focus { + border-color: #28a745; + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} +.was-validated .form-control:valid ~ .valid-feedback, +.was-validated .form-control:valid ~ .valid-tooltip, .form-control.is-valid ~ .valid-feedback, +.form-control.is-valid ~ .valid-tooltip, +.was-validated .custom-select:valid ~ .valid-feedback, +.was-validated .custom-select:valid ~ .valid-tooltip, +.custom-select.is-valid ~ .valid-feedback, +.custom-select.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label { + color: #28a745; +} +.was-validated .form-check-input:valid ~ .valid-feedback, +.was-validated .form-check-input:valid ~ .valid-tooltip, .form-check-input.is-valid ~ .valid-feedback, +.form-check-input.is-valid ~ .valid-tooltip { + display: block; +} + +.was-validated .custom-control-input:valid ~ .custom-control-label, .custom-control-input.is-valid ~ .custom-control-label { + color: #28a745; +} +.was-validated .custom-control-input:valid ~ .custom-control-label::before, .custom-control-input.is-valid ~ .custom-control-label::before { + background-color: #71dd8a; +} +.was-validated .custom-control-input:valid ~ .valid-feedback, +.was-validated .custom-control-input:valid ~ .valid-tooltip, .custom-control-input.is-valid ~ .valid-feedback, +.custom-control-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before, .custom-control-input.is-valid:checked ~ .custom-control-label::before { + 
background-color: #34ce57; +} +.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before, .custom-control-input.is-valid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.was-validated .custom-file-input:valid ~ .custom-file-label, .custom-file-input.is-valid ~ .custom-file-label { + border-color: #28a745; +} +.was-validated .custom-file-input:valid ~ .custom-file-label::before, .custom-file-input.is-valid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:valid ~ .valid-feedback, +.was-validated .custom-file-input:valid ~ .valid-tooltip, .custom-file-input.is-valid ~ .valid-feedback, +.custom-file-input.is-valid ~ .valid-tooltip { + display: block; +} +.was-validated .custom-file-input:valid:focus ~ .custom-file-label, .custom-file-input.is-valid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); +} + +.invalid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 80%; + color: #dc3545; +} + +.invalid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: .5rem; + margin-top: .1rem; + font-size: .875rem; + line-height: 1; + color: #fff; + background-color: rgba(220, 53, 69, 0.8); + border-radius: .2rem; +} + +.was-validated .form-control:invalid, .form-control.is-invalid, +.was-validated .custom-select:invalid, +.custom-select.is-invalid { + border-color: #dc3545; +} +.was-validated .form-control:invalid:focus, .form-control.is-invalid:focus, +.was-validated .custom-select:invalid:focus, +.custom-select.is-invalid:focus { + border-color: #dc3545; + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} +.was-validated .form-control:invalid ~ 
.invalid-feedback, +.was-validated .form-control:invalid ~ .invalid-tooltip, .form-control.is-invalid ~ .invalid-feedback, +.form-control.is-invalid ~ .invalid-tooltip, +.was-validated .custom-select:invalid ~ .invalid-feedback, +.was-validated .custom-select:invalid ~ .invalid-tooltip, +.custom-select.is-invalid ~ .invalid-feedback, +.custom-select.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label { + color: #dc3545; +} +.was-validated .form-check-input:invalid ~ .invalid-feedback, +.was-validated .form-check-input:invalid ~ .invalid-tooltip, .form-check-input.is-invalid ~ .invalid-feedback, +.form-check-input.is-invalid ~ .invalid-tooltip { + display: block; +} + +.was-validated .custom-control-input:invalid ~ .custom-control-label, .custom-control-input.is-invalid ~ .custom-control-label { + color: #dc3545; +} +.was-validated .custom-control-input:invalid ~ .custom-control-label::before, .custom-control-input.is-invalid ~ .custom-control-label::before { + background-color: #efa2a9; +} +.was-validated .custom-control-input:invalid ~ .invalid-feedback, +.was-validated .custom-control-input:invalid ~ .invalid-tooltip, .custom-control-input.is-invalid ~ .invalid-feedback, +.custom-control-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before, .custom-control-input.is-invalid:checked ~ .custom-control-label::before { + background-color: #e4606d; +} +.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before, .custom-control-input.is-invalid:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.was-validated .custom-file-input:invalid ~ .custom-file-label, .custom-file-input.is-invalid ~ .custom-file-label 
{ + border-color: #dc3545; +} +.was-validated .custom-file-input:invalid ~ .custom-file-label::before, .custom-file-input.is-invalid ~ .custom-file-label::before { + border-color: inherit; +} +.was-validated .custom-file-input:invalid ~ .invalid-feedback, +.was-validated .custom-file-input:invalid ~ .invalid-tooltip, .custom-file-input.is-invalid ~ .invalid-feedback, +.custom-file-input.is-invalid ~ .invalid-tooltip { + display: block; +} +.was-validated .custom-file-input:invalid:focus ~ .custom-file-label, .custom-file-input.is-invalid:focus ~ .custom-file-label { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); +} + +.form-inline { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.form-inline .form-check { + width: 100%; +} +@media (min-width: 576px) { + .form-inline label { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + margin-bottom: 0; + } + .form-inline .form-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 0; + -ms-flex: 0 0 auto; + flex: 0 0 auto; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin-bottom: 0; + } + .form-inline .form-control { + display: inline-block; + width: auto; + vertical-align: middle; + } + .form-inline .form-control-plaintext { + display: inline-block; + } + .form-inline .input-group { + width: auto; + } + .form-inline .form-check { + display: -webkit-box; + display: -ms-flexbox; + display: 
flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: auto; + padding-left: 0; + } + .form-inline .form-check-input { + position: relative; + margin-top: 0; + margin-right: 0.25rem; + margin-left: 0; + } + .form-inline .custom-control { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + } + .form-inline .custom-control-label { + margin-bottom: 0; + } +} + +.btn { + display: inline-block; + font-weight: 400; + text-align: center; + white-space: nowrap; + vertical-align: middle; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid transparent; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + border-radius: 0.25rem; + -webkit-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out; +} +.btn:hover, .btn:focus { + text-decoration: none; +} +.btn:focus, .btn.focus { + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.btn.disabled, .btn:disabled { + opacity: 0.65; +} +.btn:not(:disabled):not(.disabled) { + cursor: pointer; +} +.btn:not(:disabled):not(.disabled):active, .btn:not(:disabled):not(.disabled).active { + background-image: none; +} + 
+a.btn.disabled, +fieldset:disabled a.btn { + pointer-events: none; +} + +.btn-primary { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:hover { + color: #fff; + background-color: #0069d9; + border-color: #0062cc; +} +.btn-primary:focus, .btn-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} +.btn-primary.disabled, .btn-primary:disabled { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, .show > .btn-primary.dropdown-toggle { + color: #fff; + background-color: #0062cc; + border-color: #005cbf; +} +.btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-secondary { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:hover { + color: #fff; + background-color: #5a6268; + border-color: #545b62; +} +.btn-secondary:focus, .btn-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-secondary.disabled, .btn-secondary:disabled { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, .show > .btn-secondary.dropdown-toggle { + color: #fff; + background-color: #545b62; + border-color: #4e555b; +} +.btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + 
+.btn-success { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:hover { + color: #fff; + background-color: #218838; + border-color: #1e7e34; +} +.btn-success:focus, .btn-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-success.disabled, .btn-success:disabled { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-success:not(:disabled):not(.disabled):active, .btn-success:not(:disabled):not(.disabled).active, .show > .btn-success.dropdown-toggle { + color: #fff; + background-color: #1e7e34; + border-color: #1c7430; +} +.btn-success:not(:disabled):not(.disabled):active:focus, .btn-success:not(:disabled):not(.disabled).active:focus, .show > .btn-success.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-info { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-info:hover { + color: #fff; + background-color: #138496; + border-color: #117a8b; +} +.btn-info:focus, .btn-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-info.disabled, .btn-info:disabled { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-info:not(:disabled):not(.disabled):active, .btn-info:not(:disabled):not(.disabled).active, .show > .btn-info.dropdown-toggle { + color: #fff; + background-color: #117a8b; + border-color: #10707f; +} +.btn-info:not(:disabled):not(.disabled):active:focus, .btn-info:not(:disabled):not(.disabled).active:focus, .show > .btn-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-warning { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:hover { + color: #212529; + 
background-color: #e0a800; + border-color: #d39e00; +} +.btn-warning:focus, .btn-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-warning.disabled, .btn-warning:disabled { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-warning:not(:disabled):not(.disabled):active, .btn-warning:not(:disabled):not(.disabled).active, .show > .btn-warning.dropdown-toggle { + color: #212529; + background-color: #d39e00; + border-color: #c69500; +} +.btn-warning:not(:disabled):not(.disabled):active:focus, .btn-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-danger { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:hover { + color: #fff; + background-color: #c82333; + border-color: #bd2130; +} +.btn-danger:focus, .btn-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-danger.disabled, .btn-danger:disabled { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-danger:not(:disabled):not(.disabled):active, .btn-danger:not(:disabled):not(.disabled).active, .show > .btn-danger.dropdown-toggle { + color: #fff; + background-color: #bd2130; + border-color: #b21f2d; +} +.btn-danger:not(:disabled):not(.disabled):active:focus, .btn-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-light { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:hover { + color: #212529; + background-color: #e2e6ea; + border-color: #dae0e5; +} +.btn-light:focus, .btn-light.focus { + 
-webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-light.disabled, .btn-light:disabled { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-light:not(:disabled):not(.disabled):active, .btn-light:not(:disabled):not(.disabled).active, .show > .btn-light.dropdown-toggle { + color: #212529; + background-color: #dae0e5; + border-color: #d3d9df; +} +.btn-light:not(:disabled):not(.disabled):active:focus, .btn-light:not(:disabled):not(.disabled).active:focus, .show > .btn-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-dark { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-dark:hover { + color: #fff; + background-color: #23272b; + border-color: #1d2124; +} +.btn-dark:focus, .btn-dark.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-dark.disabled, .btn-dark:disabled { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-dark:not(:disabled):not(.disabled):active, .btn-dark:not(:disabled):not(.disabled).active, .show > .btn-dark.dropdown-toggle { + color: #fff; + background-color: #1d2124; + border-color: #171a1d; +} +.btn-dark:not(:disabled):not(.disabled):active:focus, .btn-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-outline-primary { + color: #007bff; + background-color: transparent; + background-image: none; + border-color: #007bff; +} +.btn-outline-primary:hover { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:focus, .btn-outline-primary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem 
rgba(0, 123, 255, 0.5); +} +.btn-outline-primary.disabled, .btn-outline-primary:disabled { + color: #007bff; + background-color: transparent; +} +.btn-outline-primary:not(:disabled):not(.disabled):active, .btn-outline-primary:not(:disabled):not(.disabled).active, .show > .btn-outline-primary.dropdown-toggle { + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.btn-outline-primary:not(:disabled):not(.disabled):active:focus, .btn-outline-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-primary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.5); +} + +.btn-outline-secondary { + color: #6c757d; + background-color: transparent; + background-image: none; + border-color: #6c757d; +} +.btn-outline-secondary:hover { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:focus, .btn-outline-secondary.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} +.btn-outline-secondary.disabled, .btn-outline-secondary:disabled { + color: #6c757d; + background-color: transparent; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active, .btn-outline-secondary:not(:disabled):not(.disabled).active, .show > .btn-outline-secondary.dropdown-toggle { + color: #fff; + background-color: #6c757d; + border-color: #6c757d; +} +.btn-outline-secondary:not(:disabled):not(.disabled):active:focus, .btn-outline-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-secondary.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); + box-shadow: 0 0 0 0.2rem rgba(108, 117, 125, 0.5); +} + +.btn-outline-success { + color: #28a745; + background-color: transparent; + background-image: none; + border-color: #28a745; +} +.btn-outline-success:hover { + color: #fff; + background-color: #28a745; + border-color: #28a745; 
+} +.btn-outline-success:focus, .btn-outline-success.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} +.btn-outline-success.disabled, .btn-outline-success:disabled { + color: #28a745; + background-color: transparent; +} +.btn-outline-success:not(:disabled):not(.disabled):active, .btn-outline-success:not(:disabled):not(.disabled).active, .show > .btn-outline-success.dropdown-toggle { + color: #fff; + background-color: #28a745; + border-color: #28a745; +} +.btn-outline-success:not(:disabled):not(.disabled):active:focus, .btn-outline-success:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-success.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); +} + +.btn-outline-info { + color: #17a2b8; + background-color: transparent; + background-image: none; + border-color: #17a2b8; +} +.btn-outline-info:hover { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:focus, .btn-outline-info.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} +.btn-outline-info.disabled, .btn-outline-info:disabled { + color: #17a2b8; + background-color: transparent; +} +.btn-outline-info:not(:disabled):not(.disabled):active, .btn-outline-info:not(:disabled):not(.disabled).active, .show > .btn-outline-info.dropdown-toggle { + color: #fff; + background-color: #17a2b8; + border-color: #17a2b8; +} +.btn-outline-info:not(:disabled):not(.disabled):active:focus, .btn-outline-info:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-info.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); + box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); +} + +.btn-outline-warning { + color: #ffc107; + background-color: transparent; + background-image: none; + border-color: #ffc107; +} 
+.btn-outline-warning:hover { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:focus, .btn-outline-warning.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} +.btn-outline-warning.disabled, .btn-outline-warning:disabled { + color: #ffc107; + background-color: transparent; +} +.btn-outline-warning:not(:disabled):not(.disabled):active, .btn-outline-warning:not(:disabled):not(.disabled).active, .show > .btn-outline-warning.dropdown-toggle { + color: #212529; + background-color: #ffc107; + border-color: #ffc107; +} +.btn-outline-warning:not(:disabled):not(.disabled):active:focus, .btn-outline-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-warning.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); + box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); +} + +.btn-outline-danger { + color: #dc3545; + background-color: transparent; + background-image: none; + border-color: #dc3545; +} +.btn-outline-danger:hover { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:focus, .btn-outline-danger.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} +.btn-outline-danger.disabled, .btn-outline-danger:disabled { + color: #dc3545; + background-color: transparent; +} +.btn-outline-danger:not(:disabled):not(.disabled):active, .btn-outline-danger:not(:disabled):not(.disabled).active, .show > .btn-outline-danger.dropdown-toggle { + color: #fff; + background-color: #dc3545; + border-color: #dc3545; +} +.btn-outline-danger:not(:disabled):not(.disabled):active:focus, .btn-outline-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-danger.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); + box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); +} + +.btn-outline-light 
{ + color: #f8f9fa; + background-color: transparent; + background-image: none; + border-color: #f8f9fa; +} +.btn-outline-light:hover { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:focus, .btn-outline-light.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} +.btn-outline-light.disabled, .btn-outline-light:disabled { + color: #f8f9fa; + background-color: transparent; +} +.btn-outline-light:not(:disabled):not(.disabled):active, .btn-outline-light:not(:disabled):not(.disabled).active, .show > .btn-outline-light.dropdown-toggle { + color: #212529; + background-color: #f8f9fa; + border-color: #f8f9fa; +} +.btn-outline-light:not(:disabled):not(.disabled):active:focus, .btn-outline-light:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-light.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); + box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); +} + +.btn-outline-dark { + color: #343a40; + background-color: transparent; + background-image: none; + border-color: #343a40; +} +.btn-outline-dark:hover { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:focus, .btn-outline-dark.focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} +.btn-outline-dark.disabled, .btn-outline-dark:disabled { + color: #343a40; + background-color: transparent; +} +.btn-outline-dark:not(:disabled):not(.disabled):active, .btn-outline-dark:not(:disabled):not(.disabled).active, .show > .btn-outline-dark.dropdown-toggle { + color: #fff; + background-color: #343a40; + border-color: #343a40; +} +.btn-outline-dark:not(:disabled):not(.disabled):active:focus, .btn-outline-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-dark.dropdown-toggle:focus { + -webkit-box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); + 
box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); +} + +.btn-link { + font-weight: 400; + color: #007bff; + background-color: transparent; +} +.btn-link:hover { + color: #0056b3; + text-decoration: underline; + background-color: transparent; + border-color: transparent; +} +.btn-link:focus, .btn-link.focus { + text-decoration: underline; + border-color: transparent; + -webkit-box-shadow: none; + box-shadow: none; +} +.btn-link:disabled, .btn-link.disabled { + color: #6c757d; +} + +.btn-lg, .btn-group-lg > .btn { + padding: 0.5rem 1rem; + font-size: 1.25rem; + line-height: 1.5; + border-radius: 0.3rem; +} + +.btn-sm, .btn-group-sm > .btn { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; +} + +.btn-block { + display: block; + width: 100%; +} +.btn-block + .btn-block { + margin-top: 0.5rem; +} + +input[type="submit"].btn-block, +input[type="reset"].btn-block, +input[type="button"].btn-block { + width: 100%; +} + +.fade { + opacity: 0; + -webkit-transition: opacity 0.15s linear; + transition: opacity 0.15s linear; +} +.fade.show { + opacity: 1; +} + +.collapse { + display: none; +} +.collapse.show { + display: block; +} + +tr.collapse.show { + display: table-row; +} + +tbody.collapse.show { + display: table-row-group; +} + +.collapsing { + position: relative; + height: 0; + overflow: hidden; + -webkit-transition: height 0.35s ease; + transition: height 0.35s ease; +} + +.dropup, +.dropdown { + position: relative; +} + +.dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid; + border-right: 0.3em solid transparent; + border-bottom: 0; + border-left: 0.3em solid transparent; +} +.dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + margin: 0.125rem 0 0; + 
font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.dropup .dropdown-menu { + margin-top: 0; + margin-bottom: 0.125rem; +} +.dropup .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0; + border-right: 0.3em solid transparent; + border-bottom: 0.3em solid; + border-left: 0.3em solid transparent; +} +.dropup .dropdown-toggle:empty::after { + margin-left: 0; +} + +.dropright .dropdown-menu { + margin-top: 0; + margin-left: 0.125rem; +} +.dropright .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-bottom: 0.3em solid transparent; + border-left: 0.3em solid; +} +.dropright .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropright .dropdown-toggle::after { + vertical-align: 0; +} + +.dropleft .dropdown-menu { + margin-top: 0; + margin-right: 0.125rem; +} +.dropleft .dropdown-toggle::after { + display: inline-block; + width: 0; + height: 0; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; +} +.dropleft .dropdown-toggle::after { + display: none; +} +.dropleft .dropdown-toggle::before { + display: inline-block; + width: 0; + height: 0; + margin-right: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0.3em solid; + border-bottom: 0.3em solid transparent; +} +.dropleft .dropdown-toggle:empty::after { + margin-left: 0; +} +.dropleft .dropdown-toggle::before { + vertical-align: 0; +} + +.dropdown-divider { + height: 0; + margin: 0.5rem 0; + overflow: hidden; + border-top: 1px solid #e9ecef; +} + +.dropdown-item { + display: block; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 
400; + color: #212529; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; +} +.dropdown-item:hover, .dropdown-item:focus { + color: #16181b; + text-decoration: none; + background-color: #f8f9fa; +} +.dropdown-item.active, .dropdown-item:active { + color: #fff; + text-decoration: none; + background-color: #007bff; +} +.dropdown-item.disabled, .dropdown-item:disabled { + color: #6c757d; + background-color: transparent; +} + +.dropdown-menu.show { + display: block; +} + +.dropdown-header { + display: block; + padding: 0.5rem 1.5rem; + margin-bottom: 0; + font-size: 0.875rem; + color: #6c757d; + white-space: nowrap; +} + +.btn-group, +.btn-group-vertical { + position: relative; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + vertical-align: middle; +} +.btn-group > .btn, +.btn-group-vertical > .btn { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; +} +.btn-group > .btn:hover, +.btn-group-vertical > .btn:hover { + z-index: 1; +} +.btn-group > .btn:focus, .btn-group > .btn:active, .btn-group > .btn.active, +.btn-group-vertical > .btn:focus, +.btn-group-vertical > .btn:active, +.btn-group-vertical > .btn.active { + z-index: 1; +} +.btn-group .btn + .btn, +.btn-group .btn + .btn-group, +.btn-group .btn-group + .btn, +.btn-group .btn-group + .btn-group, +.btn-group-vertical .btn + .btn, +.btn-group-vertical .btn + .btn-group, +.btn-group-vertical .btn-group + .btn, +.btn-group-vertical .btn-group + .btn-group { + margin-left: -1px; +} + +.btn-toolbar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.btn-toolbar .input-group { + width: auto; +} + +.btn-group > .btn:first-child { + margin-left: 0; +} +.btn-group > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group > .btn-group:not(:last-child) > .btn { + 
border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.btn-group > .btn:not(:first-child), +.btn-group > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.dropdown-toggle-split { + padding-right: 0.5625rem; + padding-left: 0.5625rem; +} +.dropdown-toggle-split::after { + margin-left: 0; +} + +.btn-sm + .dropdown-toggle-split, .btn-group-sm > .btn + .dropdown-toggle-split { + padding-right: 0.375rem; + padding-left: 0.375rem; +} + +.btn-lg + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split { + padding-right: 0.75rem; + padding-left: 0.75rem; +} + +.btn-group-vertical { + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; +} +.btn-group-vertical .btn, +.btn-group-vertical .btn-group { + width: 100%; +} +.btn-group-vertical > .btn + .btn, +.btn-group-vertical > .btn + .btn-group, +.btn-group-vertical > .btn-group + .btn, +.btn-group-vertical > .btn-group + .btn-group { + margin-top: -1px; + margin-left: 0; +} +.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle), +.btn-group-vertical > .btn-group:not(:last-child) > .btn { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; +} +.btn-group-vertical > .btn:not(:first-child), +.btn-group-vertical > .btn-group:not(:first-child) > .btn { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.btn-group-toggle > .btn, +.btn-group-toggle > .btn-group > .btn { + margin-bottom: 0; +} +.btn-group-toggle > .btn input[type="radio"], +.btn-group-toggle > .btn input[type="checkbox"], +.btn-group-toggle > .btn-group > .btn input[type="radio"], +.btn-group-toggle > .btn-group > .btn input[type="checkbox"] { + position: absolute; + clip: rect(0, 0, 0, 0); + pointer-events: none; +} + 
+.input-group { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: stretch; + -ms-flex-align: stretch; + align-items: stretch; + width: 100%; +} +.input-group > .form-control, +.input-group > .custom-select, +.input-group > .custom-file { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + width: 1%; + margin-bottom: 0; +} +.input-group > .form-control:focus, +.input-group > .custom-select:focus, +.input-group > .custom-file:focus { + z-index: 3; +} +.input-group > .form-control + .form-control, +.input-group > .form-control + .custom-select, +.input-group > .form-control + .custom-file, +.input-group > .custom-select + .form-control, +.input-group > .custom-select + .custom-select, +.input-group > .custom-select + .custom-file, +.input-group > .custom-file + .form-control, +.input-group > .custom-file + .custom-select, +.input-group > .custom-file + .custom-file { + margin-left: -1px; +} +.input-group > .form-control:not(:last-child), +.input-group > .custom-select:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .form-control:not(:first-child), +.input-group > .custom-select:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} +.input-group > .custom-file { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.input-group > .custom-file:not(:last-child) .custom-file-label, .input-group > .custom-file:not(:last-child) .custom-file-label::before { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} +.input-group > .custom-file:not(:first-child) .custom-file-label, .input-group > .custom-file:not(:first-child) .custom-file-label::before { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.input-group-prepend, +.input-group-append 
{ + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +.input-group-prepend .btn, +.input-group-append .btn { + position: relative; + z-index: 2; +} +.input-group-prepend .btn + .btn, +.input-group-prepend .btn + .input-group-text, +.input-group-prepend .input-group-text + .input-group-text, +.input-group-prepend .input-group-text + .btn, +.input-group-append .btn + .btn, +.input-group-append .btn + .input-group-text, +.input-group-append .input-group-text + .input-group-text, +.input-group-append .input-group-text + .btn { + margin-left: -1px; +} + +.input-group-prepend { + margin-right: -1px; +} + +.input-group-append { + margin-left: -1px; +} + +.input-group-text { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding: 0.375rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: #495057; + text-align: center; + white-space: nowrap; + background-color: #e9ecef; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.input-group-text input[type="radio"], +.input-group-text input[type="checkbox"] { + margin-top: 0; +} + +.input-group > .input-group-prepend > .btn, +.input-group > .input-group-prepend > .input-group-text, +.input-group > .input-group-append:not(:last-child) > .btn, +.input-group > .input-group-append:not(:last-child) > .input-group-text, +.input-group > .input-group-append:last-child > .btn:not(:last-child):not(.dropdown-toggle), +.input-group > .input-group-append:last-child > .input-group-text:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; +} + +.input-group > .input-group-append > .btn, +.input-group > .input-group-append > .input-group-text, +.input-group > .input-group-prepend:not(:first-child) > .btn, +.input-group > .input-group-prepend:not(:first-child) > .input-group-text, +.input-group > .input-group-prepend:first-child > 
.btn:not(:first-child), +.input-group > .input-group-prepend:first-child > .input-group-text:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; +} + +.custom-control { + position: relative; + display: block; + min-height: 1.5rem; + padding-left: 1.5rem; +} + +.custom-control-inline { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + margin-right: 1rem; +} + +.custom-control-input { + position: absolute; + z-index: -1; + opacity: 0; +} +.custom-control-input:checked ~ .custom-control-label::before { + color: #fff; + background-color: #007bff; +} +.custom-control-input:focus ~ .custom-control-label::before { + -webkit-box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-control-input:active ~ .custom-control-label::before { + color: #fff; + background-color: #b3d7ff; +} +.custom-control-input:disabled ~ .custom-control-label { + color: #6c757d; +} +.custom-control-input:disabled ~ .custom-control-label::before { + background-color: #e9ecef; +} + +.custom-control-label { + margin-bottom: 0; +} +.custom-control-label::before { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + pointer-events: none; + content: ""; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + background-color: #dee2e6; +} +.custom-control-label::after { + position: absolute; + top: 0.25rem; + left: 0; + display: block; + width: 1rem; + height: 1rem; + content: ""; + background-repeat: no-repeat; + background-position: center center; + background-size: 50% 50%; +} + +.custom-checkbox .custom-control-label::before { + border-radius: 0.25rem; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after { + 
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E"); +} +.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} +.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-radio .custom-control-label::before { + border-radius: 50%; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::before { + background-color: #007bff; +} +.custom-radio .custom-control-input:checked ~ .custom-control-label::after { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E"); +} +.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before { + background-color: rgba(0, 123, 255, 0.5); +} + +.custom-select { + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + padding: 0.375rem 1.75rem 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + vertical-align: middle; + background: #fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center; + background-size: 8px 10px; + border: 1px solid #ced4da; + border-radius: 0.25rem; + -webkit-appearance: none; + 
-moz-appearance: none; + appearance: none; +} +.custom-select:focus { + border-color: #80bdff; + outline: 0; + -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); + box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(128, 189, 255, 0.5); +} +.custom-select:focus::-ms-value { + color: #495057; + background-color: #fff; +} +.custom-select[multiple], .custom-select[size]:not([size="1"]) { + height: auto; + padding-right: 0.75rem; + background-image: none; +} +.custom-select:disabled { + color: #6c757d; + background-color: #e9ecef; +} +.custom-select::-ms-expand { + opacity: 0; +} + +.custom-select-sm { + height: calc(1.8125rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 75%; +} + +.custom-select-lg { + height: calc(2.875rem + 2px); + padding-top: 0.375rem; + padding-bottom: 0.375rem; + font-size: 125%; +} + +.custom-file { + position: relative; + display: inline-block; + width: 100%; + height: calc(2.25rem + 2px); + margin-bottom: 0; +} + +.custom-file-input { + position: relative; + z-index: 2; + width: 100%; + height: calc(2.25rem + 2px); + margin: 0; + opacity: 0; +} +.custom-file-input:focus ~ .custom-file-control { + border-color: #80bdff; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.custom-file-input:focus ~ .custom-file-control::before { + border-color: #80bdff; +} +.custom-file-input:lang(en) ~ .custom-file-label::after { + content: "Browse"; +} + +.custom-file-label { + position: absolute; + top: 0; + right: 0; + left: 0; + z-index: 1; + height: calc(2.25rem + 2px); + padding: 0.375rem 0.75rem; + line-height: 1.5; + color: #495057; + background-color: #fff; + border: 1px solid #ced4da; + border-radius: 0.25rem; +} +.custom-file-label::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + z-index: 3; + display: block; + height: calc(calc(2.25rem + 2px) - 1px * 2); + padding: 0.375rem 0.75rem; + 
line-height: 1.5; + color: #495057; + content: "Browse"; + background-color: #e9ecef; + border-left: 1px solid #ced4da; + border-radius: 0 0.25rem 0.25rem 0; +} + +.nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} + +.nav-link { + display: block; + padding: 0.5rem 1rem; +} +.nav-link:hover, .nav-link:focus { + text-decoration: none; +} +.nav-link.disabled { + color: #6c757d; +} + +.nav-tabs { + border-bottom: 1px solid #dee2e6; +} +.nav-tabs .nav-item { + margin-bottom: -1px; +} +.nav-tabs .nav-link { + border: 1px solid transparent; + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus { + border-color: #e9ecef #e9ecef #dee2e6; +} +.nav-tabs .nav-link.disabled { + color: #6c757d; + background-color: transparent; + border-color: transparent; +} +.nav-tabs .nav-link.active, +.nav-tabs .nav-item.show .nav-link { + color: #495057; + background-color: #fff; + border-color: #dee2e6 #dee2e6 #fff; +} +.nav-tabs .dropdown-menu { + margin-top: -1px; + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.nav-pills .nav-link { + border-radius: 0.25rem; +} +.nav-pills .nav-link.active, +.nav-pills .show > .nav-link { + color: #fff; + background-color: #007bff; +} + +.nav-fill .nav-item { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + text-align: center; +} + +.nav-justified .nav-item { + -ms-flex-preferred-size: 0; + flex-basis: 0; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + text-align: center; +} + +.tab-content > .tab-pane { + display: none; +} +.tab-content > .active { + display: block; +} + +.navbar { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: 
justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 0.5rem 1rem; +} +.navbar > .container, +.navbar > .container-fluid { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; +} + +.navbar-brand { + display: inline-block; + padding-top: 0.3125rem; + padding-bottom: 0.3125rem; + margin-right: 1rem; + font-size: 1.25rem; + line-height: inherit; + white-space: nowrap; +} +.navbar-brand:hover, .navbar-brand:focus { + text-decoration: none; +} + +.navbar-nav { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + list-style: none; +} +.navbar-nav .nav-link { + padding-right: 0; + padding-left: 0; +} +.navbar-nav .dropdown-menu { + position: static; + float: none; +} + +.navbar-text { + display: inline-block; + padding-top: 0.5rem; + padding-bottom: 0.5rem; +} + +.navbar-collapse { + -ms-flex-preferred-size: 100%; + flex-basis: 100%; + -webkit-box-flex: 1; + -ms-flex-positive: 1; + flex-grow: 1; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} + +.navbar-toggler { + padding: 0.25rem 0.75rem; + font-size: 1.25rem; + line-height: 1; + background-color: transparent; + border: 1px solid transparent; + border-radius: 0.25rem; +} +.navbar-toggler:hover, .navbar-toggler:focus { + text-decoration: none; +} +.navbar-toggler:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.navbar-toggler-icon { + display: inline-block; + width: 1.5em; + height: 1.5em; + vertical-align: middle; + content: ""; + background: no-repeat center center; + background-size: 100% 100%; +} + +@media (max-width: 575.98px) { + .navbar-expand-sm > 
.container, + .navbar-expand-sm > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 576px) { + .navbar-expand-sm { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-sm .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-sm .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-sm .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-sm .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-sm > .container, + .navbar-expand-sm > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-sm .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-sm .navbar-toggler { + display: none; + } + .navbar-expand-sm .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 767.98px) { + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 768px) { + .navbar-expand-md { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-md .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-md .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-md .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-md .navbar-nav .nav-link { + 
padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-md > .container, + .navbar-expand-md > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-md .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-md .navbar-toggler { + display: none; + } + .navbar-expand-md .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 991.98px) { + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 992px) { + .navbar-expand-lg { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-lg .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-lg .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-lg .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-lg .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-lg > .container, + .navbar-expand-lg > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-lg .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-lg .navbar-toggler { + display: none; + } + .navbar-expand-lg .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +@media (max-width: 1199.98px) { + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + padding-right: 0; + padding-left: 0; + } +} +@media (min-width: 1200px) { + .navbar-expand-xl { + 
-webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; + } + .navbar-expand-xl .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; + } + .navbar-expand-xl .navbar-nav .dropdown-menu { + position: absolute; + } + .navbar-expand-xl .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; + } + .navbar-expand-xl .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; + } + .navbar-expand-xl > .container, + .navbar-expand-xl > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; + } + .navbar-expand-xl .navbar-collapse { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; + } + .navbar-expand-xl .navbar-toggler { + display: none; + } + .navbar-expand-xl .dropup .dropdown-menu { + top: auto; + bottom: 100%; + } +} +.navbar-expand { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row nowrap; + flex-flow: row nowrap; + -webkit-box-pack: start; + -ms-flex-pack: start; + justify-content: flex-start; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + padding-right: 0; + padding-left: 0; +} +.navbar-expand .navbar-nav { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-direction: row; + flex-direction: row; +} +.navbar-expand .navbar-nav .dropdown-menu { + position: absolute; +} +.navbar-expand .navbar-nav .dropdown-menu-right { + right: 0; + left: auto; +} +.navbar-expand .navbar-nav .nav-link { + padding-right: 0.5rem; + padding-left: 0.5rem; +} +.navbar-expand > .container, +.navbar-expand > .container-fluid { + -ms-flex-wrap: nowrap; + flex-wrap: nowrap; +} +.navbar-expand .navbar-collapse { + display: -webkit-box !important; + display: 
-ms-flexbox !important; + display: flex !important; + -ms-flex-preferred-size: auto; + flex-basis: auto; +} +.navbar-expand .navbar-toggler { + display: none; +} +.navbar-expand .dropup .dropdown-menu { + top: auto; + bottom: 100%; +} + +.navbar-light .navbar-brand { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-brand:hover, .navbar-light .navbar-brand:focus { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-nav .nav-link { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-nav .nav-link:hover, .navbar-light .navbar-nav .nav-link:focus { + color: rgba(0, 0, 0, 0.7); +} +.navbar-light .navbar-nav .nav-link.disabled { + color: rgba(0, 0, 0, 0.3); +} +.navbar-light .navbar-nav .show > .nav-link, +.navbar-light .navbar-nav .active > .nav-link, +.navbar-light .navbar-nav .nav-link.show, +.navbar-light .navbar-nav .nav-link.active { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-toggler { + color: rgba(0, 0, 0, 0.5); + border-color: rgba(0, 0, 0, 0.1); +} +.navbar-light .navbar-toggler-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-light .navbar-text { + color: rgba(0, 0, 0, 0.5); +} +.navbar-light .navbar-text a { + color: rgba(0, 0, 0, 0.9); +} +.navbar-light .navbar-text a:hover, .navbar-light .navbar-text a:focus { + color: rgba(0, 0, 0, 0.9); +} + +.navbar-dark .navbar-brand { + color: #fff; +} +.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus { + color: #fff; +} +.navbar-dark .navbar-nav .nav-link { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-nav .nav-link:hover, .navbar-dark .navbar-nav .nav-link:focus { + color: rgba(255, 255, 255, 0.75); +} +.navbar-dark .navbar-nav .nav-link.disabled { + color: rgba(255, 255, 255, 0.25); +} +.navbar-dark .navbar-nav .show > .nav-link, 
+.navbar-dark .navbar-nav .active > .nav-link, +.navbar-dark .navbar-nav .nav-link.show, +.navbar-dark .navbar-nav .nav-link.active { + color: #fff; +} +.navbar-dark .navbar-toggler { + color: rgba(255, 255, 255, 0.5); + border-color: rgba(255, 255, 255, 0.1); +} +.navbar-dark .navbar-toggler-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); +} +.navbar-dark .navbar-text { + color: rgba(255, 255, 255, 0.5); +} +.navbar-dark .navbar-text a { + color: #fff; +} +.navbar-dark .navbar-text a:hover, .navbar-dark .navbar-text a:focus { + color: #fff; +} + +.card { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + min-width: 0; + word-wrap: break-word; + background-color: #fff; + background-clip: border-box; + border: 1px solid rgba(0, 0, 0, 0.125); + border-radius: 0.25rem; +} +.card > hr { + margin-right: 0; + margin-left: 0; +} +.card > .list-group:first-child .list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.card > .list-group:last-child .list-group-item:last-child { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.card-body { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1.25rem; +} + +.card-title { + margin-bottom: 0.75rem; +} + +.card-subtitle { + margin-top: -0.375rem; + margin-bottom: 0; +} + +.card-text:last-child { + margin-bottom: 0; +} + +.card-link:hover { + text-decoration: none; +} +.card-link + .card-link { + margin-left: 1.25rem; +} + +.card-header { + padding: 0.75rem 1.25rem; + margin-bottom: 0; + background-color: rgba(0, 0, 0, 0.03); + border-bottom: 
1px solid rgba(0, 0, 0, 0.125); +} +.card-header:first-child { + border-radius: calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0; +} +.card-header + .list-group .list-group-item:first-child { + border-top: 0; +} + +.card-footer { + padding: 0.75rem 1.25rem; + background-color: rgba(0, 0, 0, 0.03); + border-top: 1px solid rgba(0, 0, 0, 0.125); +} +.card-footer:last-child { + border-radius: 0 0 calc(0.25rem - 1px) calc(0.25rem - 1px); +} + +.card-header-tabs { + margin-right: -0.625rem; + margin-bottom: -0.75rem; + margin-left: -0.625rem; + border-bottom: 0; +} + +.card-header-pills { + margin-right: -0.625rem; + margin-left: -0.625rem; +} + +.card-img-overlay { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + padding: 1.25rem; +} + +.card-img { + width: 100%; + border-radius: calc(0.25rem - 1px); +} + +.card-img-top { + width: 100%; + border-top-left-radius: calc(0.25rem - 1px); + border-top-right-radius: calc(0.25rem - 1px); +} + +.card-img-bottom { + width: 100%; + border-bottom-right-radius: calc(0.25rem - 1px); + border-bottom-left-radius: calc(0.25rem - 1px); +} + +.card-deck { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-deck .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-deck { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + margin-right: -15px; + margin-left: -15px; + } + .card-deck .card { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-flex: 1; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + margin-right: 15px; + margin-bottom: 0; + margin-left: 15px; + } +} + +.card-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: 
vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; +} +.card-group > .card { + margin-bottom: 15px; +} +@media (min-width: 576px) { + .card-group { + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + } + .card-group > .card { + -webkit-box-flex: 1; + -ms-flex: 1 0 0%; + flex: 1 0 0%; + margin-bottom: 0; + } + .card-group > .card + .card { + margin-left: 0; + border-left: 0; + } + .card-group > .card:first-child { + border-top-right-radius: 0; + border-bottom-right-radius: 0; + } + .card-group > .card:first-child .card-img-top, + .card-group > .card:first-child .card-header { + border-top-right-radius: 0; + } + .card-group > .card:first-child .card-img-bottom, + .card-group > .card:first-child .card-footer { + border-bottom-right-radius: 0; + } + .card-group > .card:last-child { + border-top-left-radius: 0; + border-bottom-left-radius: 0; + } + .card-group > .card:last-child .card-img-top, + .card-group > .card:last-child .card-header { + border-top-left-radius: 0; + } + .card-group > .card:last-child .card-img-bottom, + .card-group > .card:last-child .card-footer { + border-bottom-left-radius: 0; + } + .card-group > .card:only-child { + border-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-top, + .card-group > .card:only-child .card-header { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; + } + .card-group > .card:only-child .card-img-bottom, + .card-group > .card:only-child .card-footer { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) { + border-radius: 0; + } + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-top, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom, + .card-group > 
.card:not(:first-child):not(:last-child):not(:only-child) .card-header, + .card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-footer { + border-radius: 0; + } +} + +.card-columns .card { + margin-bottom: 0.75rem; +} +@media (min-width: 576px) { + .card-columns { + -webkit-column-count: 3; + -moz-column-count: 3; + column-count: 3; + -webkit-column-gap: 1.25rem; + -moz-column-gap: 1.25rem; + column-gap: 1.25rem; + } + .card-columns .card { + display: inline-block; + width: 100%; + } +} + +.breadcrumb { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; + padding: 0.75rem 1rem; + margin-bottom: 1rem; + list-style: none; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.breadcrumb-item + .breadcrumb-item::before { + display: inline-block; + padding-right: 0.5rem; + padding-left: 0.5rem; + color: #6c757d; + content: "/"; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: underline; +} +.breadcrumb-item + .breadcrumb-item:hover::before { + text-decoration: none; +} +.breadcrumb-item.active { + color: #6c757d; +} + +.pagination { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + padding-left: 0; + list-style: none; + border-radius: 0.25rem; +} + +.page-link { + position: relative; + display: block; + padding: 0.5rem 0.75rem; + margin-left: -1px; + line-height: 1.25; + color: #007bff; + background-color: #fff; + border: 1px solid #dee2e6; +} +.page-link:hover { + color: #0056b3; + text-decoration: none; + background-color: #e9ecef; + border-color: #dee2e6; +} +.page-link:focus { + z-index: 2; + outline: 0; + -webkit-box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} +.page-link:not(:disabled):not(.disabled) { + cursor: pointer; +} + +.page-item:first-child .page-link { + margin-left: 0; + border-top-left-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.page-item:last-child 
.page-link { + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; +} +.page-item.active .page-link { + z-index: 1; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} +.page-item.disabled .page-link { + color: #6c757d; + pointer-events: none; + cursor: auto; + background-color: #fff; + border-color: #dee2e6; +} + +.pagination-lg .page-link { + padding: 0.75rem 1.5rem; + font-size: 1.25rem; + line-height: 1.5; +} +.pagination-lg .page-item:first-child .page-link { + border-top-left-radius: 0.3rem; + border-bottom-left-radius: 0.3rem; +} +.pagination-lg .page-item:last-child .page-link { + border-top-right-radius: 0.3rem; + border-bottom-right-radius: 0.3rem; +} + +.pagination-sm .page-link { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + line-height: 1.5; +} +.pagination-sm .page-item:first-child .page-link { + border-top-left-radius: 0.2rem; + border-bottom-left-radius: 0.2rem; +} +.pagination-sm .page-item:last-child .page-link { + border-top-right-radius: 0.2rem; + border-bottom-right-radius: 0.2rem; +} + +.badge { + display: inline-block; + padding: 0.25em 0.4em; + font-size: 75%; + font-weight: 700; + line-height: 1; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: 0.25rem; +} +.badge:empty { + display: none; +} + +.btn .badge { + position: relative; + top: -1px; +} + +.badge-pill { + padding-right: 0.6em; + padding-left: 0.6em; + border-radius: 10rem; +} + +.badge-primary { + color: #fff; + background-color: #007bff; +} +.badge-primary[href]:hover, .badge-primary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #0062cc; +} + +.badge-secondary { + color: #fff; + background-color: #6c757d; +} +.badge-secondary[href]:hover, .badge-secondary[href]:focus { + color: #fff; + text-decoration: none; + background-color: #545b62; +} + +.badge-success { + color: #fff; + background-color: #28a745; +} +.badge-success[href]:hover, .badge-success[href]:focus { + 
color: #fff; + text-decoration: none; + background-color: #1e7e34; +} + +.badge-info { + color: #fff; + background-color: #17a2b8; +} +.badge-info[href]:hover, .badge-info[href]:focus { + color: #fff; + text-decoration: none; + background-color: #117a8b; +} + +.badge-warning { + color: #212529; + background-color: #ffc107; +} +.badge-warning[href]:hover, .badge-warning[href]:focus { + color: #212529; + text-decoration: none; + background-color: #d39e00; +} + +.badge-danger { + color: #fff; + background-color: #dc3545; +} +.badge-danger[href]:hover, .badge-danger[href]:focus { + color: #fff; + text-decoration: none; + background-color: #bd2130; +} + +.badge-light { + color: #212529; + background-color: #f8f9fa; +} +.badge-light[href]:hover, .badge-light[href]:focus { + color: #212529; + text-decoration: none; + background-color: #dae0e5; +} + +.badge-dark { + color: #fff; + background-color: #343a40; +} +.badge-dark[href]:hover, .badge-dark[href]:focus { + color: #fff; + text-decoration: none; + background-color: #1d2124; +} + +.jumbotron { + padding: 2rem 1rem; + margin-bottom: 2rem; + background-color: #e9ecef; + border-radius: 0.3rem; +} +@media (min-width: 576px) { + .jumbotron { + padding: 4rem 2rem; + } +} + +.jumbotron-fluid { + padding-right: 0; + padding-left: 0; + border-radius: 0; +} + +.alert { + position: relative; + padding: 0.75rem 1.25rem; + margin-bottom: 1rem; + border: 1px solid transparent; + border-radius: 0.25rem; +} + +.alert-heading { + color: inherit; +} + +.alert-link { + font-weight: 700; +} + +.alert-dismissible { + padding-right: 4rem; +} +.alert-dismissible .close { + position: absolute; + top: 0; + right: 0; + padding: 0.75rem 1.25rem; + color: inherit; +} + +.alert-primary { + color: #004085; + background-color: #cce5ff; + border-color: #b8daff; +} +.alert-primary hr { + border-top-color: #9fcdff; +} +.alert-primary .alert-link { + color: #002752; +} + +.alert-secondary { + color: #383d41; + background-color: #e2e3e5; + border-color: 
#d6d8db; +} +.alert-secondary hr { + border-top-color: #c8cbcf; +} +.alert-secondary .alert-link { + color: #202326; +} + +.alert-success { + color: #155724; + background-color: #d4edda; + border-color: #c3e6cb; +} +.alert-success hr { + border-top-color: #b1dfbb; +} +.alert-success .alert-link { + color: #0b2e13; +} + +.alert-info { + color: #0c5460; + background-color: #d1ecf1; + border-color: #bee5eb; +} +.alert-info hr { + border-top-color: #abdde5; +} +.alert-info .alert-link { + color: #062c33; +} + +.alert-warning { + color: #856404; + background-color: #fff3cd; + border-color: #ffeeba; +} +.alert-warning hr { + border-top-color: #ffe8a1; +} +.alert-warning .alert-link { + color: #533f03; +} + +.alert-danger { + color: #721c24; + background-color: #f8d7da; + border-color: #f5c6cb; +} +.alert-danger hr { + border-top-color: #f1b0b7; +} +.alert-danger .alert-link { + color: #491217; +} + +.alert-light { + color: #818182; + background-color: #fefefe; + border-color: #fdfdfe; +} +.alert-light hr { + border-top-color: #ececf6; +} +.alert-light .alert-link { + color: #686868; +} + +.alert-dark { + color: #1b1e21; + background-color: #d6d8d9; + border-color: #c6c8ca; +} +.alert-dark hr { + border-top-color: #b9bbbe; +} +.alert-dark .alert-link { + color: #040505; +} + +@-webkit-keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} + +@keyframes progress-bar-stripes { + from { + background-position: 1rem 0; + } + to { + background-position: 0 0; + } +} +.progress { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 1rem; + overflow: hidden; + font-size: 0.75rem; + background-color: #e9ecef; + border-radius: 0.25rem; +} + +.progress-bar { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-pack: center; + -ms-flex-pack: center; + 
justify-content: center; + color: #fff; + text-align: center; + background-color: #007bff; + -webkit-transition: width 0.6s ease; + transition: width 0.6s ease; +} + +.progress-bar-striped { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; +} + +.progress-bar-animated { + -webkit-animation: progress-bar-stripes 1s linear infinite; + animation: progress-bar-stripes 1s linear infinite; +} + +.media { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; +} + +.media-body { + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1; +} + +.list-group { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; +} + +.list-group-item-action { + width: 100%; + color: #495057; + text-align: inherit; +} +.list-group-item-action:hover, .list-group-item-action:focus { + color: #495057; + text-decoration: none; + background-color: #f8f9fa; +} +.list-group-item-action:active { + color: #212529; + background-color: #e9ecef; +} + +.list-group-item { + position: relative; + display: block; + padding: 0.75rem 1.25rem; + margin-bottom: -1px; + background-color: #fff; + border: 1px solid rgba(0, 0, 0, 0.125); +} +.list-group-item:first-child { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} +.list-group-item:last-child { + margin-bottom: 0; + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} +.list-group-item:hover, .list-group-item:focus { + z-index: 1; + text-decoration: none; +} +.list-group-item.disabled, .list-group-item:disabled { + color: #6c757d; + background-color: #fff; +} +.list-group-item.active { 
+ z-index: 2; + color: #fff; + background-color: #007bff; + border-color: #007bff; +} + +.list-group-flush .list-group-item { + border-right: 0; + border-left: 0; + border-radius: 0; +} +.list-group-flush:first-child .list-group-item:first-child { + border-top: 0; +} +.list-group-flush:last-child .list-group-item:last-child { + border-bottom: 0; +} + +.list-group-item-primary { + color: #004085; + background-color: #b8daff; +} +.list-group-item-primary.list-group-item-action:hover, .list-group-item-primary.list-group-item-action:focus { + color: #004085; + background-color: #9fcdff; +} +.list-group-item-primary.list-group-item-action.active { + color: #fff; + background-color: #004085; + border-color: #004085; +} + +.list-group-item-secondary { + color: #383d41; + background-color: #d6d8db; +} +.list-group-item-secondary.list-group-item-action:hover, .list-group-item-secondary.list-group-item-action:focus { + color: #383d41; + background-color: #c8cbcf; +} +.list-group-item-secondary.list-group-item-action.active { + color: #fff; + background-color: #383d41; + border-color: #383d41; +} + +.list-group-item-success { + color: #155724; + background-color: #c3e6cb; +} +.list-group-item-success.list-group-item-action:hover, .list-group-item-success.list-group-item-action:focus { + color: #155724; + background-color: #b1dfbb; +} +.list-group-item-success.list-group-item-action.active { + color: #fff; + background-color: #155724; + border-color: #155724; +} + +.list-group-item-info { + color: #0c5460; + background-color: #bee5eb; +} +.list-group-item-info.list-group-item-action:hover, .list-group-item-info.list-group-item-action:focus { + color: #0c5460; + background-color: #abdde5; +} +.list-group-item-info.list-group-item-action.active { + color: #fff; + background-color: #0c5460; + border-color: #0c5460; +} + +.list-group-item-warning { + color: #856404; + background-color: #ffeeba; +} +.list-group-item-warning.list-group-item-action:hover, 
.list-group-item-warning.list-group-item-action:focus { + color: #856404; + background-color: #ffe8a1; +} +.list-group-item-warning.list-group-item-action.active { + color: #fff; + background-color: #856404; + border-color: #856404; +} + +.list-group-item-danger { + color: #721c24; + background-color: #f5c6cb; +} +.list-group-item-danger.list-group-item-action:hover, .list-group-item-danger.list-group-item-action:focus { + color: #721c24; + background-color: #f1b0b7; +} +.list-group-item-danger.list-group-item-action.active { + color: #fff; + background-color: #721c24; + border-color: #721c24; +} + +.list-group-item-light { + color: #818182; + background-color: #fdfdfe; +} +.list-group-item-light.list-group-item-action:hover, .list-group-item-light.list-group-item-action:focus { + color: #818182; + background-color: #ececf6; +} +.list-group-item-light.list-group-item-action.active { + color: #fff; + background-color: #818182; + border-color: #818182; +} + +.list-group-item-dark { + color: #1b1e21; + background-color: #c6c8ca; +} +.list-group-item-dark.list-group-item-action:hover, .list-group-item-dark.list-group-item-action:focus { + color: #1b1e21; + background-color: #b9bbbe; +} +.list-group-item-dark.list-group-item-action.active { + color: #fff; + background-color: #1b1e21; + border-color: #1b1e21; +} + +.close { + float: right; + font-size: 1.5rem; + font-weight: 700; + line-height: 1; + color: #000; + text-shadow: 0 1px 0 #fff; + opacity: .5; +} +.close:hover, .close:focus { + color: #000; + text-decoration: none; + opacity: .75; +} +.close:not(:disabled):not(.disabled) { + cursor: pointer; +} + +button.close { + padding: 0; + background-color: transparent; + border: 0; + -webkit-appearance: none; +} + +.modal-open { + overflow: hidden; +} + +.modal { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1050; + display: none; + overflow: hidden; + outline: 0; +} +.modal-open .modal { + overflow-x: hidden; + overflow-y: auto; +} + 
+.modal-dialog { + position: relative; + width: auto; + margin: 0.5rem; + pointer-events: none; +} +.modal.fade .modal-dialog { + -webkit-transition: -webkit-transform 0.3s ease-out; + transition: -webkit-transform 0.3s ease-out; + transition: transform 0.3s ease-out; + transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out; + -webkit-transform: translate(0, -25%); + transform: translate(0, -25%); +} +.modal.show .modal-dialog { + -webkit-transform: translate(0, 0); + transform: translate(0, 0); +} + +.modal-dialog-centered { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + min-height: calc(100% - (0.5rem * 2)); +} + +.modal-content { + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + width: 100%; + pointer-events: auto; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 0.3rem; + outline: 0; +} + +.modal-backdrop { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1040; + background-color: #000; +} +.modal-backdrop.fade { + opacity: 0; +} +.modal-backdrop.show { + opacity: 0.5; +} + +.modal-header { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: start; + -ms-flex-align: start; + align-items: flex-start; + -webkit-box-pack: justify; + -ms-flex-pack: justify; + justify-content: space-between; + padding: 1rem; + border-bottom: 1px solid #e9ecef; + border-top-left-radius: 0.3rem; + border-top-right-radius: 0.3rem; +} +.modal-header .close { + padding: 1rem; + margin: -1rem -1rem -1rem auto; +} + +.modal-title { + margin-bottom: 0; + line-height: 1.5; +} + +.modal-body { + position: relative; + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: 1rem; +} + 
+.modal-footer { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + padding: 1rem; + border-top: 1px solid #e9ecef; +} +.modal-footer > :not(:first-child) { + margin-left: .25rem; +} +.modal-footer > :not(:last-child) { + margin-right: .25rem; +} + +.modal-scrollbar-measure { + position: absolute; + top: -9999px; + width: 50px; + height: 50px; + overflow: scroll; +} + +@media (min-width: 576px) { + .modal-dialog { + max-width: 500px; + margin: 1.75rem auto; + } + + .modal-dialog-centered { + min-height: calc(100% - (1.75rem * 2)); + } + + .modal-sm { + max-width: 300px; + } +} +@media (min-width: 992px) { + .modal-lg { + max-width: 800px; + } +} +.tooltip { + position: absolute; + z-index: 1070; + display: block; + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + opacity: 0; +} +.tooltip.show { + opacity: 0.9; +} +.tooltip .arrow { + position: absolute; + display: block; + width: 0.8rem; + height: 0.4rem; +} +.tooltip .arrow::before { + position: absolute; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-tooltip-top, .bs-tooltip-auto[x-placement^="top"] { + padding: 0.4rem 0; +} +.bs-tooltip-top .arrow, .bs-tooltip-auto[x-placement^="top"] .arrow { + bottom: 0; +} +.bs-tooltip-top .arrow::before, .bs-tooltip-auto[x-placement^="top"] .arrow::before { + top: 0; + border-width: 0.4rem 0.4rem 0; + border-top-color: 
#000; +} + +.bs-tooltip-right, .bs-tooltip-auto[x-placement^="right"] { + padding: 0 0.4rem; +} +.bs-tooltip-right .arrow, .bs-tooltip-auto[x-placement^="right"] .arrow { + left: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-right .arrow::before, .bs-tooltip-auto[x-placement^="right"] .arrow::before { + right: 0; + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: #000; +} + +.bs-tooltip-bottom, .bs-tooltip-auto[x-placement^="bottom"] { + padding: 0.4rem 0; +} +.bs-tooltip-bottom .arrow, .bs-tooltip-auto[x-placement^="bottom"] .arrow { + top: 0; +} +.bs-tooltip-bottom .arrow::before, .bs-tooltip-auto[x-placement^="bottom"] .arrow::before { + bottom: 0; + border-width: 0 0.4rem 0.4rem; + border-bottom-color: #000; +} + +.bs-tooltip-left, .bs-tooltip-auto[x-placement^="left"] { + padding: 0 0.4rem; +} +.bs-tooltip-left .arrow, .bs-tooltip-auto[x-placement^="left"] .arrow { + right: 0; + width: 0.4rem; + height: 0.8rem; +} +.bs-tooltip-left .arrow::before, .bs-tooltip-auto[x-placement^="left"] .arrow::before { + left: 0; + border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: #000; +} + +.tooltip-inner { + max-width: 200px; + padding: 0.25rem 0.5rem; + color: #fff; + text-align: center; + background-color: #000; + border-radius: 0.25rem; +} + +.popover { + position: absolute; + top: 0; + left: 0; + z-index: 1060; + display: block; + max-width: 276px; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + word-spacing: normal; + white-space: normal; + line-break: auto; + font-size: 0.875rem; + word-wrap: break-word; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + 
border-radius: 0.3rem; +} +.popover .arrow { + position: absolute; + display: block; + width: 1rem; + height: 0.5rem; + margin: 0 0.3rem; +} +.popover .arrow::before, .popover .arrow::after { + position: absolute; + display: block; + content: ""; + border-color: transparent; + border-style: solid; +} + +.bs-popover-top, .bs-popover-auto[x-placement^="top"] { + margin-bottom: 0.5rem; +} +.bs-popover-top .arrow, .bs-popover-auto[x-placement^="top"] .arrow { + bottom: calc((0.5rem + 1px) * -1); +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before, +.bs-popover-top .arrow::after, +.bs-popover-auto[x-placement^="top"] .arrow::after { + border-width: 0.5rem 0.5rem 0; +} +.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before { + bottom: 0; + border-top-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-top .arrow::after, .bs-popover-auto[x-placement^="top"] .arrow::after { + bottom: 1px; + border-top-color: #fff; +} + +.bs-popover-right, .bs-popover-auto[x-placement^="right"] { + margin-left: 0.5rem; +} +.bs-popover-right .arrow, .bs-popover-auto[x-placement^="right"] .arrow { + left: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before, +.bs-popover-right .arrow::after, +.bs-popover-auto[x-placement^="right"] .arrow::after { + border-width: 0.5rem 0.5rem 0.5rem 0; +} +.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before { + left: 0; + border-right-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-right .arrow::after, .bs-popover-auto[x-placement^="right"] .arrow::after { + left: 1px; + border-right-color: #fff; +} + +.bs-popover-bottom, .bs-popover-auto[x-placement^="bottom"] { + margin-top: 0.5rem; +} +.bs-popover-bottom .arrow, .bs-popover-auto[x-placement^="bottom"] .arrow { + top: calc((0.5rem + 1px) * -1); +} +.bs-popover-bottom .arrow::before, 
.bs-popover-auto[x-placement^="bottom"] .arrow::before, +.bs-popover-bottom .arrow::after, +.bs-popover-auto[x-placement^="bottom"] .arrow::after { + border-width: 0 0.5rem 0.5rem 0.5rem; +} +.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before { + top: 0; + border-bottom-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-bottom .arrow::after, .bs-popover-auto[x-placement^="bottom"] .arrow::after { + top: 1px; + border-bottom-color: #fff; +} +.bs-popover-bottom .popover-header::before, .bs-popover-auto[x-placement^="bottom"] .popover-header::before { + position: absolute; + top: 0; + left: 50%; + display: block; + width: 1rem; + margin-left: -0.5rem; + content: ""; + border-bottom: 1px solid #f7f7f7; +} + +.bs-popover-left, .bs-popover-auto[x-placement^="left"] { + margin-right: 0.5rem; +} +.bs-popover-left .arrow, .bs-popover-auto[x-placement^="left"] .arrow { + right: calc((0.5rem + 1px) * -1); + width: 0.5rem; + height: 1rem; + margin: 0.3rem 0; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before, +.bs-popover-left .arrow::after, +.bs-popover-auto[x-placement^="left"] .arrow::after { + border-width: 0.5rem 0 0.5rem 0.5rem; +} +.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before { + right: 0; + border-left-color: rgba(0, 0, 0, 0.25); +} +.bs-popover-left .arrow::after, .bs-popover-auto[x-placement^="left"] .arrow::after { + right: 1px; + border-left-color: #fff; +} + +.popover-header { + padding: 0.5rem 0.75rem; + margin-bottom: 0; + font-size: 1rem; + color: inherit; + background-color: #f7f7f7; + border-bottom: 1px solid #ebebeb; + border-top-left-radius: calc(0.3rem - 1px); + border-top-right-radius: calc(0.3rem - 1px); +} +.popover-header:empty { + display: none; +} + +.popover-body { + padding: 0.5rem 0.75rem; + color: #212529; +} + +.carousel { + position: relative; +} + +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; +} + 
+.carousel-item { + position: relative; + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + width: 100%; + -webkit-transition: -webkit-transform 0.6s ease; + transition: -webkit-transform 0.6s ease; + transition: transform 0.6s ease; + transition: transform 0.6s ease, -webkit-transform 0.6s ease; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-perspective: 1000px; + perspective: 1000px; +} + +.carousel-item.active, +.carousel-item-next, +.carousel-item-prev { + display: block; +} + +.carousel-item-next, +.carousel-item-prev { + position: absolute; + top: 0; +} + +.carousel-item-next.carousel-item-left, +.carousel-item-prev.carousel-item-right { + -webkit-transform: translateX(0); + transform: translateX(0); +} +@supports (transform-style: preserve-3d) { + .carousel-item-next.carousel-item-left, + .carousel-item-prev.carousel-item-right { + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); + } +} + +.carousel-item-next, +.active.carousel-item-right { + -webkit-transform: translateX(100%); + transform: translateX(100%); +} +@supports (transform-style: preserve-3d) { + .carousel-item-next, + .active.carousel-item-right { + -webkit-transform: translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0); + } +} + +.carousel-item-prev, +.active.carousel-item-left { + -webkit-transform: translateX(-100%); + transform: translateX(-100%); +} +@supports (transform-style: preserve-3d) { + .carousel-item-prev, + .active.carousel-item-left { + -webkit-transform: translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0); + } +} + +.carousel-control-prev, +.carousel-control-next { + position: absolute; + top: 0; + bottom: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + width: 15%; + color: #fff; 
+ text-align: center; + opacity: 0.5; +} +.carousel-control-prev:hover, .carousel-control-prev:focus, +.carousel-control-next:hover, +.carousel-control-next:focus { + color: #fff; + text-decoration: none; + outline: 0; + opacity: .9; +} + +.carousel-control-prev { + left: 0; +} + +.carousel-control-next { + right: 0; +} + +.carousel-control-prev-icon, +.carousel-control-next-icon { + display: inline-block; + width: 20px; + height: 20px; + background: transparent no-repeat center center; + background-size: 100% 100%; +} + +.carousel-control-prev-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E"); +} + +.carousel-control-next-icon { + background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E"); +} + +.carousel-indicators { + position: absolute; + right: 0; + bottom: 10px; + left: 0; + z-index: 15; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-pack: center; + -ms-flex-pack: center; + justify-content: center; + padding-left: 0; + margin-right: 15%; + margin-left: 15%; + list-style: none; +} +.carousel-indicators li { + position: relative; + -webkit-box-flex: 0; + -ms-flex: 0 1 auto; + flex: 0 1 auto; + width: 30px; + height: 3px; + margin-right: 3px; + margin-left: 3px; + text-indent: -999px; + background-color: rgba(255, 255, 255, 0.5); +} +.carousel-indicators li::before { + position: absolute; + top: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators li::after { + position: absolute; + bottom: -10px; + left: 0; + display: inline-block; + width: 100%; + height: 10px; + content: ""; +} +.carousel-indicators .active { + background-color: #fff; +} + 
+.carousel-caption { + position: absolute; + right: 15%; + bottom: 20px; + left: 15%; + z-index: 10; + padding-top: 20px; + padding-bottom: 20px; + color: #fff; + text-align: center; +} + +.align-baseline { + vertical-align: baseline !important; +} + +.align-top { + vertical-align: top !important; +} + +.align-middle { + vertical-align: middle !important; +} + +.align-bottom { + vertical-align: bottom !important; +} + +.align-text-bottom { + vertical-align: text-bottom !important; +} + +.align-text-top { + vertical-align: text-top !important; +} + +.bg-primary { + background-color: #007bff !important; +} + +a.bg-primary:hover, a.bg-primary:focus, +button.bg-primary:hover, +button.bg-primary:focus { + background-color: #0062cc !important; +} + +.bg-secondary { + background-color: #6c757d !important; +} + +a.bg-secondary:hover, a.bg-secondary:focus, +button.bg-secondary:hover, +button.bg-secondary:focus { + background-color: #545b62 !important; +} + +.bg-success { + background-color: #28a745 !important; +} + +a.bg-success:hover, a.bg-success:focus, +button.bg-success:hover, +button.bg-success:focus { + background-color: #1e7e34 !important; +} + +.bg-info { + background-color: #17a2b8 !important; +} + +a.bg-info:hover, a.bg-info:focus, +button.bg-info:hover, +button.bg-info:focus { + background-color: #117a8b !important; +} + +.bg-warning { + background-color: #ffc107 !important; +} + +a.bg-warning:hover, a.bg-warning:focus, +button.bg-warning:hover, +button.bg-warning:focus { + background-color: #d39e00 !important; +} + +.bg-danger { + background-color: #dc3545 !important; +} + +a.bg-danger:hover, a.bg-danger:focus, +button.bg-danger:hover, +button.bg-danger:focus { + background-color: #bd2130 !important; +} + +.bg-light { + background-color: #f8f9fa !important; +} + +a.bg-light:hover, a.bg-light:focus, +button.bg-light:hover, +button.bg-light:focus { + background-color: #dae0e5 !important; +} + +.bg-dark { + background-color: #343a40 !important; +} + 
+a.bg-dark:hover, a.bg-dark:focus, +button.bg-dark:hover, +button.bg-dark:focus { + background-color: #1d2124 !important; +} + +.bg-white { + background-color: #fff !important; +} + +.bg-transparent { + background-color: transparent !important; +} + +.border { + border: 1px solid #dee2e6 !important; +} + +.border-top { + border-top: 1px solid #dee2e6 !important; +} + +.border-right { + border-right: 1px solid #dee2e6 !important; +} + +.border-bottom { + border-bottom: 1px solid #dee2e6 !important; +} + +.border-left { + border-left: 1px solid #dee2e6 !important; +} + +.border-0 { + border: 0 !important; +} + +.border-top-0 { + border-top: 0 !important; +} + +.border-right-0 { + border-right: 0 !important; +} + +.border-bottom-0 { + border-bottom: 0 !important; +} + +.border-left-0 { + border-left: 0 !important; +} + +.border-primary { + border-color: #007bff !important; +} + +.border-secondary { + border-color: #6c757d !important; +} + +.border-success { + border-color: #28a745 !important; +} + +.border-info { + border-color: #17a2b8 !important; +} + +.border-warning { + border-color: #ffc107 !important; +} + +.border-danger { + border-color: #dc3545 !important; +} + +.border-light { + border-color: #f8f9fa !important; +} + +.border-dark { + border-color: #343a40 !important; +} + +.border-white { + border-color: #fff !important; +} + +.rounded { + border-radius: 0.25rem !important; +} + +.rounded-top { + border-top-left-radius: 0.25rem !important; + border-top-right-radius: 0.25rem !important; +} + +.rounded-right { + border-top-right-radius: 0.25rem !important; + border-bottom-right-radius: 0.25rem !important; +} + +.rounded-bottom { + border-bottom-right-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-left { + border-top-left-radius: 0.25rem !important; + border-bottom-left-radius: 0.25rem !important; +} + +.rounded-circle { + border-radius: 50% !important; +} + +.rounded-0 { + border-radius: 0 !important; +} + 
+.clearfix::after { + display: block; + clear: both; + content: ""; +} + +.d-none { + display: none !important; +} + +.d-inline { + display: inline !important; +} + +.d-inline-block { + display: inline-block !important; +} + +.d-block { + display: block !important; +} + +.d-table { + display: table !important; +} + +.d-table-row { + display: table-row !important; +} + +.d-table-cell { + display: table-cell !important; +} + +.d-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; +} + +.d-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; +} + +@media (min-width: 576px) { + .d-sm-none { + display: none !important; + } + + .d-sm-inline { + display: inline !important; + } + + .d-sm-inline-block { + display: inline-block !important; + } + + .d-sm-block { + display: block !important; + } + + .d-sm-table { + display: table !important; + } + + .d-sm-table-row { + display: table-row !important; + } + + .d-sm-table-cell { + display: table-cell !important; + } + + .d-sm-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-sm-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 768px) { + .d-md-none { + display: none !important; + } + + .d-md-inline { + display: inline !important; + } + + .d-md-inline-block { + display: inline-block !important; + } + + .d-md-block { + display: block !important; + } + + .d-md-table { + display: table !important; + } + + .d-md-table-row { + display: table-row !important; + } + + .d-md-table-cell { + display: table-cell !important; + } + + .d-md-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-md-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox 
!important; + display: inline-flex !important; + } +} +@media (min-width: 992px) { + .d-lg-none { + display: none !important; + } + + .d-lg-inline { + display: inline !important; + } + + .d-lg-inline-block { + display: inline-block !important; + } + + .d-lg-block { + display: block !important; + } + + .d-lg-table { + display: table !important; + } + + .d-lg-table-row { + display: table-row !important; + } + + .d-lg-table-cell { + display: table-cell !important; + } + + .d-lg-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-lg-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media (min-width: 1200px) { + .d-xl-none { + display: none !important; + } + + .d-xl-inline { + display: inline !important; + } + + .d-xl-inline-block { + display: inline-block !important; + } + + .d-xl-block { + display: block !important; + } + + .d-xl-table { + display: table !important; + } + + .d-xl-table-row { + display: table-row !important; + } + + .d-xl-table-cell { + display: table-cell !important; + } + + .d-xl-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-xl-inline-flex { + display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +@media print { + .d-print-none { + display: none !important; + } + + .d-print-inline { + display: inline !important; + } + + .d-print-inline-block { + display: inline-block !important; + } + + .d-print-block { + display: block !important; + } + + .d-print-table { + display: table !important; + } + + .d-print-table-row { + display: table-row !important; + } + + .d-print-table-cell { + display: table-cell !important; + } + + .d-print-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; + } + + .d-print-inline-flex { + 
display: -webkit-inline-box !important; + display: -ms-inline-flexbox !important; + display: inline-flex !important; + } +} +.embed-responsive { + position: relative; + display: block; + width: 100%; + padding: 0; + overflow: hidden; +} +.embed-responsive::before { + display: block; + content: ""; +} +.embed-responsive .embed-responsive-item, +.embed-responsive iframe, +.embed-responsive embed, +.embed-responsive object, +.embed-responsive video { + position: absolute; + top: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; + border: 0; +} + +.embed-responsive-21by9::before { + padding-top: 42.8571428571%; +} + +.embed-responsive-16by9::before { + padding-top: 56.25%; +} + +.embed-responsive-4by3::before { + padding-top: 75%; +} + +.embed-responsive-1by1::before { + padding-top: 100%; +} + +.flex-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; +} + +.flex-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; +} + +.flex-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; +} + +.flex-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; +} + +.flex-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; +} + +.flex-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; +} + +.flex-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; +} + +.justify-content-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start 
!important; +} + +.justify-content-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; +} + +.justify-content-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; +} + +.justify-content-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; +} + +.justify-content-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; +} + +.align-items-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; +} + +.align-items-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; +} + +.align-items-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; +} + +.align-items-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; +} + +.align-items-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; +} + +.align-content-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; +} + +.align-content-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; +} + +.align-content-center { + -ms-flex-line-pack: center !important; + align-content: center !important; +} + +.align-content-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; +} + +.align-content-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; +} + +.align-content-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; +} + +.align-self-auto { + -ms-flex-item-align: auto !important; + 
align-self: auto !important; +} + +.align-self-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; +} + +.align-self-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; +} + +.align-self-center { + -ms-flex-item-align: center !important; + align-self: center !important; +} + +.align-self-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; +} + +.align-self-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; +} + +@media (min-width: 576px) { + .flex-sm-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-sm-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-sm-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-sm-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-sm-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-sm-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-sm-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-sm-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-sm-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-sm-center { 
+ -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-sm-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-sm-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-sm-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-sm-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-sm-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-sm-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-sm-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-sm-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-sm-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-sm-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-sm-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-sm-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-sm-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-sm-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-sm-start { + -ms-flex-item-align: start !important; + align-self: 
flex-start !important; + } + + .align-self-sm-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-sm-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-sm-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-sm-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 768px) { + .flex-md-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-md-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-md-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-md-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-md-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-md-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-md-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-md-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-md-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-md-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + 
justify-content: center !important; + } + + .justify-content-md-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-md-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-md-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-md-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-md-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-md-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-md-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-md-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-md-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-md-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-md-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-md-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-md-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-md-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-md-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-md-end { + -ms-flex-item-align: 
end !important; + align-self: flex-end !important; + } + + .align-self-md-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-md-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-md-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 992px) { + .flex-lg-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-lg-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-lg-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-lg-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-lg-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-lg-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-lg-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-lg-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-lg-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-lg-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-lg-between { + 
-webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; + } + + .justify-content-lg-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-lg-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-lg-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-lg-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-lg-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-lg-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-lg-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-lg-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-lg-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-lg-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-lg-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-lg-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-lg-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-lg-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-lg-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + 
.align-self-lg-center { + -ms-flex-item-align: center !important; + align-self: center !important; + } + + .align-self-lg-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-lg-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +@media (min-width: 1200px) { + .flex-xl-row { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: row !important; + flex-direction: row !important; + } + + .flex-xl-column { + -webkit-box-orient: vertical !important; + -webkit-box-direction: normal !important; + -ms-flex-direction: column !important; + flex-direction: column !important; + } + + .flex-xl-row-reverse { + -webkit-box-orient: horizontal !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: row-reverse !important; + flex-direction: row-reverse !important; + } + + .flex-xl-column-reverse { + -webkit-box-orient: vertical !important; + -webkit-box-direction: reverse !important; + -ms-flex-direction: column-reverse !important; + flex-direction: column-reverse !important; + } + + .flex-xl-wrap { + -ms-flex-wrap: wrap !important; + flex-wrap: wrap !important; + } + + .flex-xl-nowrap { + -ms-flex-wrap: nowrap !important; + flex-wrap: nowrap !important; + } + + .flex-xl-wrap-reverse { + -ms-flex-wrap: wrap-reverse !important; + flex-wrap: wrap-reverse !important; + } + + .justify-content-xl-start { + -webkit-box-pack: start !important; + -ms-flex-pack: start !important; + justify-content: flex-start !important; + } + + .justify-content-xl-end { + -webkit-box-pack: end !important; + -ms-flex-pack: end !important; + justify-content: flex-end !important; + } + + .justify-content-xl-center { + -webkit-box-pack: center !important; + -ms-flex-pack: center !important; + justify-content: center !important; + } + + .justify-content-xl-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify 
!important; + justify-content: space-between !important; + } + + .justify-content-xl-around { + -ms-flex-pack: distribute !important; + justify-content: space-around !important; + } + + .align-items-xl-start { + -webkit-box-align: start !important; + -ms-flex-align: start !important; + align-items: flex-start !important; + } + + .align-items-xl-end { + -webkit-box-align: end !important; + -ms-flex-align: end !important; + align-items: flex-end !important; + } + + .align-items-xl-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; + } + + .align-items-xl-baseline { + -webkit-box-align: baseline !important; + -ms-flex-align: baseline !important; + align-items: baseline !important; + } + + .align-items-xl-stretch { + -webkit-box-align: stretch !important; + -ms-flex-align: stretch !important; + align-items: stretch !important; + } + + .align-content-xl-start { + -ms-flex-line-pack: start !important; + align-content: flex-start !important; + } + + .align-content-xl-end { + -ms-flex-line-pack: end !important; + align-content: flex-end !important; + } + + .align-content-xl-center { + -ms-flex-line-pack: center !important; + align-content: center !important; + } + + .align-content-xl-between { + -ms-flex-line-pack: justify !important; + align-content: space-between !important; + } + + .align-content-xl-around { + -ms-flex-line-pack: distribute !important; + align-content: space-around !important; + } + + .align-content-xl-stretch { + -ms-flex-line-pack: stretch !important; + align-content: stretch !important; + } + + .align-self-xl-auto { + -ms-flex-item-align: auto !important; + align-self: auto !important; + } + + .align-self-xl-start { + -ms-flex-item-align: start !important; + align-self: flex-start !important; + } + + .align-self-xl-end { + -ms-flex-item-align: end !important; + align-self: flex-end !important; + } + + .align-self-xl-center { + -ms-flex-item-align: center !important; + align-self: 
center !important; + } + + .align-self-xl-baseline { + -ms-flex-item-align: baseline !important; + align-self: baseline !important; + } + + .align-self-xl-stretch { + -ms-flex-item-align: stretch !important; + align-self: stretch !important; + } +} +.float-left { + float: left !important; +} + +.float-right { + float: right !important; +} + +.float-none { + float: none !important; +} + +@media (min-width: 576px) { + .float-sm-left { + float: left !important; + } + + .float-sm-right { + float: right !important; + } + + .float-sm-none { + float: none !important; + } +} +@media (min-width: 768px) { + .float-md-left { + float: left !important; + } + + .float-md-right { + float: right !important; + } + + .float-md-none { + float: none !important; + } +} +@media (min-width: 992px) { + .float-lg-left { + float: left !important; + } + + .float-lg-right { + float: right !important; + } + + .float-lg-none { + float: none !important; + } +} +@media (min-width: 1200px) { + .float-xl-left { + float: left !important; + } + + .float-xl-right { + float: right !important; + } + + .float-xl-none { + float: none !important; + } +} +.position-static { + position: static !important; +} + +.position-relative { + position: relative !important; +} + +.position-absolute { + position: absolute !important; +} + +.position-fixed { + position: fixed !important; +} + +.position-sticky { + position: sticky !important; +} + +.fixed-top { + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: 1030; +} + +.fixed-bottom { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 1030; +} + +@supports (position: sticky) { + .sticky-top { + position: sticky; + top: 0; + z-index: 1020; + } +} + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + -webkit-clip-path: inset(50%); + clip-path: inset(50%); + border: 0; +} + +.sr-only-focusable:active, .sr-only-focusable:focus { + position: static; 
+ width: auto; + height: auto; + overflow: visible; + clip: auto; + white-space: normal; + -webkit-clip-path: none; + clip-path: none; +} + +.w-25 { + width: 25% !important; +} + +.w-50 { + width: 50% !important; +} + +.w-75 { + width: 75% !important; +} + +.w-100 { + width: 100% !important; +} + +.h-25 { + height: 25% !important; +} + +.h-50 { + height: 50% !important; +} + +.h-75 { + height: 75% !important; +} + +.h-100 { + height: 100% !important; +} + +.mw-100 { + max-width: 100% !important; +} + +.mh-100 { + max-height: 100% !important; +} + +.m-0 { + margin: 0 !important; +} + +.mt-0, +.my-0 { + margin-top: 0 !important; +} + +.mr-0, +.mx-0 { + margin-right: 0 !important; +} + +.mb-0, +.my-0 { + margin-bottom: 0 !important; +} + +.ml-0, +.mx-0 { + margin-left: 0 !important; +} + +.m-1 { + margin: 0.25rem !important; +} + +.mt-1, +.my-1 { + margin-top: 0.25rem !important; +} + +.mr-1, +.mx-1 { + margin-right: 0.25rem !important; +} + +.mb-1, +.my-1 { + margin-bottom: 0.25rem !important; +} + +.ml-1, +.mx-1 { + margin-left: 0.25rem !important; +} + +.m-2 { + margin: 0.5rem !important; +} + +.mt-2, +.my-2 { + margin-top: 0.5rem !important; +} + +.mr-2, +.mx-2 { + margin-right: 0.5rem !important; +} + +.mb-2, +.my-2 { + margin-bottom: 0.5rem !important; +} + +.ml-2, +.mx-2 { + margin-left: 0.5rem !important; +} + +.m-3 { + margin: 1rem !important; +} + +.mt-3, +.my-3 { + margin-top: 1rem !important; +} + +.mr-3, +.mx-3 { + margin-right: 1rem !important; +} + +.mb-3, +.my-3 { + margin-bottom: 1rem !important; +} + +.ml-3, +.mx-3 { + margin-left: 1rem !important; +} + +.m-4 { + margin: 1.5rem !important; +} + +.mt-4, +.my-4 { + margin-top: 1.5rem !important; +} + +.mr-4, +.mx-4 { + margin-right: 1.5rem !important; +} + +.mb-4, +.my-4 { + margin-bottom: 1.5rem !important; +} + +.ml-4, +.mx-4 { + margin-left: 1.5rem !important; +} + +.m-5 { + margin: 3rem !important; +} + +.mt-5, +.my-5 { + margin-top: 3rem !important; +} + +.mr-5, +.mx-5 { + margin-right: 3rem 
!important; +} + +.mb-5, +.my-5 { + margin-bottom: 3rem !important; +} + +.ml-5, +.mx-5 { + margin-left: 3rem !important; +} + +.p-0 { + padding: 0 !important; +} + +.pt-0, +.py-0 { + padding-top: 0 !important; +} + +.pr-0, +.px-0 { + padding-right: 0 !important; +} + +.pb-0, +.py-0 { + padding-bottom: 0 !important; +} + +.pl-0, +.px-0 { + padding-left: 0 !important; +} + +.p-1 { + padding: 0.25rem !important; +} + +.pt-1, +.py-1 { + padding-top: 0.25rem !important; +} + +.pr-1, +.px-1 { + padding-right: 0.25rem !important; +} + +.pb-1, +.py-1 { + padding-bottom: 0.25rem !important; +} + +.pl-1, +.px-1 { + padding-left: 0.25rem !important; +} + +.p-2 { + padding: 0.5rem !important; +} + +.pt-2, +.py-2 { + padding-top: 0.5rem !important; +} + +.pr-2, +.px-2 { + padding-right: 0.5rem !important; +} + +.pb-2, +.py-2 { + padding-bottom: 0.5rem !important; +} + +.pl-2, +.px-2 { + padding-left: 0.5rem !important; +} + +.p-3 { + padding: 1rem !important; +} + +.pt-3, +.py-3 { + padding-top: 1rem !important; +} + +.pr-3, +.px-3 { + padding-right: 1rem !important; +} + +.pb-3, +.py-3 { + padding-bottom: 1rem !important; +} + +.pl-3, +.px-3 { + padding-left: 1rem !important; +} + +.p-4 { + padding: 1.5rem !important; +} + +.pt-4, +.py-4 { + padding-top: 1.5rem !important; +} + +.pr-4, +.px-4 { + padding-right: 1.5rem !important; +} + +.pb-4, +.py-4 { + padding-bottom: 1.5rem !important; +} + +.pl-4, +.px-4 { + padding-left: 1.5rem !important; +} + +.p-5 { + padding: 3rem !important; +} + +.pt-5, +.py-5 { + padding-top: 3rem !important; +} + +.pr-5, +.px-5 { + padding-right: 3rem !important; +} + +.pb-5, +.py-5 { + padding-bottom: 3rem !important; +} + +.pl-5, +.px-5 { + padding-left: 3rem !important; +} + +.m-auto { + margin: auto !important; +} + +.mt-auto, +.my-auto { + margin-top: auto !important; +} + +.mr-auto, +.mx-auto { + margin-right: auto !important; +} + +.mb-auto, +.my-auto { + margin-bottom: auto !important; +} + +.ml-auto, +.mx-auto { + margin-left: auto 
!important; +} + +@media (min-width: 576px) { + .m-sm-0 { + margin: 0 !important; + } + + .mt-sm-0, + .my-sm-0 { + margin-top: 0 !important; + } + + .mr-sm-0, + .mx-sm-0 { + margin-right: 0 !important; + } + + .mb-sm-0, + .my-sm-0 { + margin-bottom: 0 !important; + } + + .ml-sm-0, + .mx-sm-0 { + margin-left: 0 !important; + } + + .m-sm-1 { + margin: 0.25rem !important; + } + + .mt-sm-1, + .my-sm-1 { + margin-top: 0.25rem !important; + } + + .mr-sm-1, + .mx-sm-1 { + margin-right: 0.25rem !important; + } + + .mb-sm-1, + .my-sm-1 { + margin-bottom: 0.25rem !important; + } + + .ml-sm-1, + .mx-sm-1 { + margin-left: 0.25rem !important; + } + + .m-sm-2 { + margin: 0.5rem !important; + } + + .mt-sm-2, + .my-sm-2 { + margin-top: 0.5rem !important; + } + + .mr-sm-2, + .mx-sm-2 { + margin-right: 0.5rem !important; + } + + .mb-sm-2, + .my-sm-2 { + margin-bottom: 0.5rem !important; + } + + .ml-sm-2, + .mx-sm-2 { + margin-left: 0.5rem !important; + } + + .m-sm-3 { + margin: 1rem !important; + } + + .mt-sm-3, + .my-sm-3 { + margin-top: 1rem !important; + } + + .mr-sm-3, + .mx-sm-3 { + margin-right: 1rem !important; + } + + .mb-sm-3, + .my-sm-3 { + margin-bottom: 1rem !important; + } + + .ml-sm-3, + .mx-sm-3 { + margin-left: 1rem !important; + } + + .m-sm-4 { + margin: 1.5rem !important; + } + + .mt-sm-4, + .my-sm-4 { + margin-top: 1.5rem !important; + } + + .mr-sm-4, + .mx-sm-4 { + margin-right: 1.5rem !important; + } + + .mb-sm-4, + .my-sm-4 { + margin-bottom: 1.5rem !important; + } + + .ml-sm-4, + .mx-sm-4 { + margin-left: 1.5rem !important; + } + + .m-sm-5 { + margin: 3rem !important; + } + + .mt-sm-5, + .my-sm-5 { + margin-top: 3rem !important; + } + + .mr-sm-5, + .mx-sm-5 { + margin-right: 3rem !important; + } + + .mb-sm-5, + .my-sm-5 { + margin-bottom: 3rem !important; + } + + .ml-sm-5, + .mx-sm-5 { + margin-left: 3rem !important; + } + + .p-sm-0 { + padding: 0 !important; + } + + .pt-sm-0, + .py-sm-0 { + padding-top: 0 !important; + } + + .pr-sm-0, + .px-sm-0 { + 
padding-right: 0 !important; + } + + .pb-sm-0, + .py-sm-0 { + padding-bottom: 0 !important; + } + + .pl-sm-0, + .px-sm-0 { + padding-left: 0 !important; + } + + .p-sm-1 { + padding: 0.25rem !important; + } + + .pt-sm-1, + .py-sm-1 { + padding-top: 0.25rem !important; + } + + .pr-sm-1, + .px-sm-1 { + padding-right: 0.25rem !important; + } + + .pb-sm-1, + .py-sm-1 { + padding-bottom: 0.25rem !important; + } + + .pl-sm-1, + .px-sm-1 { + padding-left: 0.25rem !important; + } + + .p-sm-2 { + padding: 0.5rem !important; + } + + .pt-sm-2, + .py-sm-2 { + padding-top: 0.5rem !important; + } + + .pr-sm-2, + .px-sm-2 { + padding-right: 0.5rem !important; + } + + .pb-sm-2, + .py-sm-2 { + padding-bottom: 0.5rem !important; + } + + .pl-sm-2, + .px-sm-2 { + padding-left: 0.5rem !important; + } + + .p-sm-3 { + padding: 1rem !important; + } + + .pt-sm-3, + .py-sm-3 { + padding-top: 1rem !important; + } + + .pr-sm-3, + .px-sm-3 { + padding-right: 1rem !important; + } + + .pb-sm-3, + .py-sm-3 { + padding-bottom: 1rem !important; + } + + .pl-sm-3, + .px-sm-3 { + padding-left: 1rem !important; + } + + .p-sm-4 { + padding: 1.5rem !important; + } + + .pt-sm-4, + .py-sm-4 { + padding-top: 1.5rem !important; + } + + .pr-sm-4, + .px-sm-4 { + padding-right: 1.5rem !important; + } + + .pb-sm-4, + .py-sm-4 { + padding-bottom: 1.5rem !important; + } + + .pl-sm-4, + .px-sm-4 { + padding-left: 1.5rem !important; + } + + .p-sm-5 { + padding: 3rem !important; + } + + .pt-sm-5, + .py-sm-5 { + padding-top: 3rem !important; + } + + .pr-sm-5, + .px-sm-5 { + padding-right: 3rem !important; + } + + .pb-sm-5, + .py-sm-5 { + padding-bottom: 3rem !important; + } + + .pl-sm-5, + .px-sm-5 { + padding-left: 3rem !important; + } + + .m-sm-auto { + margin: auto !important; + } + + .mt-sm-auto, + .my-sm-auto { + margin-top: auto !important; + } + + .mr-sm-auto, + .mx-sm-auto { + margin-right: auto !important; + } + + .mb-sm-auto, + .my-sm-auto { + margin-bottom: auto !important; + } + + .ml-sm-auto, + .mx-sm-auto 
{ + margin-left: auto !important; + } +} +@media (min-width: 768px) { + .m-md-0 { + margin: 0 !important; + } + + .mt-md-0, + .my-md-0 { + margin-top: 0 !important; + } + + .mr-md-0, + .mx-md-0 { + margin-right: 0 !important; + } + + .mb-md-0, + .my-md-0 { + margin-bottom: 0 !important; + } + + .ml-md-0, + .mx-md-0 { + margin-left: 0 !important; + } + + .m-md-1 { + margin: 0.25rem !important; + } + + .mt-md-1, + .my-md-1 { + margin-top: 0.25rem !important; + } + + .mr-md-1, + .mx-md-1 { + margin-right: 0.25rem !important; + } + + .mb-md-1, + .my-md-1 { + margin-bottom: 0.25rem !important; + } + + .ml-md-1, + .mx-md-1 { + margin-left: 0.25rem !important; + } + + .m-md-2 { + margin: 0.5rem !important; + } + + .mt-md-2, + .my-md-2 { + margin-top: 0.5rem !important; + } + + .mr-md-2, + .mx-md-2 { + margin-right: 0.5rem !important; + } + + .mb-md-2, + .my-md-2 { + margin-bottom: 0.5rem !important; + } + + .ml-md-2, + .mx-md-2 { + margin-left: 0.5rem !important; + } + + .m-md-3 { + margin: 1rem !important; + } + + .mt-md-3, + .my-md-3 { + margin-top: 1rem !important; + } + + .mr-md-3, + .mx-md-3 { + margin-right: 1rem !important; + } + + .mb-md-3, + .my-md-3 { + margin-bottom: 1rem !important; + } + + .ml-md-3, + .mx-md-3 { + margin-left: 1rem !important; + } + + .m-md-4 { + margin: 1.5rem !important; + } + + .mt-md-4, + .my-md-4 { + margin-top: 1.5rem !important; + } + + .mr-md-4, + .mx-md-4 { + margin-right: 1.5rem !important; + } + + .mb-md-4, + .my-md-4 { + margin-bottom: 1.5rem !important; + } + + .ml-md-4, + .mx-md-4 { + margin-left: 1.5rem !important; + } + + .m-md-5 { + margin: 3rem !important; + } + + .mt-md-5, + .my-md-5 { + margin-top: 3rem !important; + } + + .mr-md-5, + .mx-md-5 { + margin-right: 3rem !important; + } + + .mb-md-5, + .my-md-5 { + margin-bottom: 3rem !important; + } + + .ml-md-5, + .mx-md-5 { + margin-left: 3rem !important; + } + + .p-md-0 { + padding: 0 !important; + } + + .pt-md-0, + .py-md-0 { + padding-top: 0 !important; + } + + .pr-md-0, 
+ .px-md-0 { + padding-right: 0 !important; + } + + .pb-md-0, + .py-md-0 { + padding-bottom: 0 !important; + } + + .pl-md-0, + .px-md-0 { + padding-left: 0 !important; + } + + .p-md-1 { + padding: 0.25rem !important; + } + + .pt-md-1, + .py-md-1 { + padding-top: 0.25rem !important; + } + + .pr-md-1, + .px-md-1 { + padding-right: 0.25rem !important; + } + + .pb-md-1, + .py-md-1 { + padding-bottom: 0.25rem !important; + } + + .pl-md-1, + .px-md-1 { + padding-left: 0.25rem !important; + } + + .p-md-2 { + padding: 0.5rem !important; + } + + .pt-md-2, + .py-md-2 { + padding-top: 0.5rem !important; + } + + .pr-md-2, + .px-md-2 { + padding-right: 0.5rem !important; + } + + .pb-md-2, + .py-md-2 { + padding-bottom: 0.5rem !important; + } + + .pl-md-2, + .px-md-2 { + padding-left: 0.5rem !important; + } + + .p-md-3 { + padding: 1rem !important; + } + + .pt-md-3, + .py-md-3 { + padding-top: 1rem !important; + } + + .pr-md-3, + .px-md-3 { + padding-right: 1rem !important; + } + + .pb-md-3, + .py-md-3 { + padding-bottom: 1rem !important; + } + + .pl-md-3, + .px-md-3 { + padding-left: 1rem !important; + } + + .p-md-4 { + padding: 1.5rem !important; + } + + .pt-md-4, + .py-md-4 { + padding-top: 1.5rem !important; + } + + .pr-md-4, + .px-md-4 { + padding-right: 1.5rem !important; + } + + .pb-md-4, + .py-md-4 { + padding-bottom: 1.5rem !important; + } + + .pl-md-4, + .px-md-4 { + padding-left: 1.5rem !important; + } + + .p-md-5 { + padding: 3rem !important; + } + + .pt-md-5, + .py-md-5 { + padding-top: 3rem !important; + } + + .pr-md-5, + .px-md-5 { + padding-right: 3rem !important; + } + + .pb-md-5, + .py-md-5 { + padding-bottom: 3rem !important; + } + + .pl-md-5, + .px-md-5 { + padding-left: 3rem !important; + } + + .m-md-auto { + margin: auto !important; + } + + .mt-md-auto, + .my-md-auto { + margin-top: auto !important; + } + + .mr-md-auto, + .mx-md-auto { + margin-right: auto !important; + } + + .mb-md-auto, + .my-md-auto { + margin-bottom: auto !important; + } + + 
.ml-md-auto, + .mx-md-auto { + margin-left: auto !important; + } +} +@media (min-width: 992px) { + .m-lg-0 { + margin: 0 !important; + } + + .mt-lg-0, + .my-lg-0 { + margin-top: 0 !important; + } + + .mr-lg-0, + .mx-lg-0 { + margin-right: 0 !important; + } + + .mb-lg-0, + .my-lg-0 { + margin-bottom: 0 !important; + } + + .ml-lg-0, + .mx-lg-0 { + margin-left: 0 !important; + } + + .m-lg-1 { + margin: 0.25rem !important; + } + + .mt-lg-1, + .my-lg-1 { + margin-top: 0.25rem !important; + } + + .mr-lg-1, + .mx-lg-1 { + margin-right: 0.25rem !important; + } + + .mb-lg-1, + .my-lg-1 { + margin-bottom: 0.25rem !important; + } + + .ml-lg-1, + .mx-lg-1 { + margin-left: 0.25rem !important; + } + + .m-lg-2 { + margin: 0.5rem !important; + } + + .mt-lg-2, + .my-lg-2 { + margin-top: 0.5rem !important; + } + + .mr-lg-2, + .mx-lg-2 { + margin-right: 0.5rem !important; + } + + .mb-lg-2, + .my-lg-2 { + margin-bottom: 0.5rem !important; + } + + .ml-lg-2, + .mx-lg-2 { + margin-left: 0.5rem !important; + } + + .m-lg-3 { + margin: 1rem !important; + } + + .mt-lg-3, + .my-lg-3 { + margin-top: 1rem !important; + } + + .mr-lg-3, + .mx-lg-3 { + margin-right: 1rem !important; + } + + .mb-lg-3, + .my-lg-3 { + margin-bottom: 1rem !important; + } + + .ml-lg-3, + .mx-lg-3 { + margin-left: 1rem !important; + } + + .m-lg-4 { + margin: 1.5rem !important; + } + + .mt-lg-4, + .my-lg-4 { + margin-top: 1.5rem !important; + } + + .mr-lg-4, + .mx-lg-4 { + margin-right: 1.5rem !important; + } + + .mb-lg-4, + .my-lg-4 { + margin-bottom: 1.5rem !important; + } + + .ml-lg-4, + .mx-lg-4 { + margin-left: 1.5rem !important; + } + + .m-lg-5 { + margin: 3rem !important; + } + + .mt-lg-5, + .my-lg-5 { + margin-top: 3rem !important; + } + + .mr-lg-5, + .mx-lg-5 { + margin-right: 3rem !important; + } + + .mb-lg-5, + .my-lg-5 { + margin-bottom: 3rem !important; + } + + .ml-lg-5, + .mx-lg-5 { + margin-left: 3rem !important; + } + + .p-lg-0 { + padding: 0 !important; + } + + .pt-lg-0, + .py-lg-0 { + padding-top: 0 
!important; + } + + .pr-lg-0, + .px-lg-0 { + padding-right: 0 !important; + } + + .pb-lg-0, + .py-lg-0 { + padding-bottom: 0 !important; + } + + .pl-lg-0, + .px-lg-0 { + padding-left: 0 !important; + } + + .p-lg-1 { + padding: 0.25rem !important; + } + + .pt-lg-1, + .py-lg-1 { + padding-top: 0.25rem !important; + } + + .pr-lg-1, + .px-lg-1 { + padding-right: 0.25rem !important; + } + + .pb-lg-1, + .py-lg-1 { + padding-bottom: 0.25rem !important; + } + + .pl-lg-1, + .px-lg-1 { + padding-left: 0.25rem !important; + } + + .p-lg-2 { + padding: 0.5rem !important; + } + + .pt-lg-2, + .py-lg-2 { + padding-top: 0.5rem !important; + } + + .pr-lg-2, + .px-lg-2 { + padding-right: 0.5rem !important; + } + + .pb-lg-2, + .py-lg-2 { + padding-bottom: 0.5rem !important; + } + + .pl-lg-2, + .px-lg-2 { + padding-left: 0.5rem !important; + } + + .p-lg-3 { + padding: 1rem !important; + } + + .pt-lg-3, + .py-lg-3 { + padding-top: 1rem !important; + } + + .pr-lg-3, + .px-lg-3 { + padding-right: 1rem !important; + } + + .pb-lg-3, + .py-lg-3 { + padding-bottom: 1rem !important; + } + + .pl-lg-3, + .px-lg-3 { + padding-left: 1rem !important; + } + + .p-lg-4 { + padding: 1.5rem !important; + } + + .pt-lg-4, + .py-lg-4 { + padding-top: 1.5rem !important; + } + + .pr-lg-4, + .px-lg-4 { + padding-right: 1.5rem !important; + } + + .pb-lg-4, + .py-lg-4 { + padding-bottom: 1.5rem !important; + } + + .pl-lg-4, + .px-lg-4 { + padding-left: 1.5rem !important; + } + + .p-lg-5 { + padding: 3rem !important; + } + + .pt-lg-5, + .py-lg-5 { + padding-top: 3rem !important; + } + + .pr-lg-5, + .px-lg-5 { + padding-right: 3rem !important; + } + + .pb-lg-5, + .py-lg-5 { + padding-bottom: 3rem !important; + } + + .pl-lg-5, + .px-lg-5 { + padding-left: 3rem !important; + } + + .m-lg-auto { + margin: auto !important; + } + + .mt-lg-auto, + .my-lg-auto { + margin-top: auto !important; + } + + .mr-lg-auto, + .mx-lg-auto { + margin-right: auto !important; + } + + .mb-lg-auto, + .my-lg-auto { + margin-bottom: auto 
!important; + } + + .ml-lg-auto, + .mx-lg-auto { + margin-left: auto !important; + } +} +@media (min-width: 1200px) { + .m-xl-0 { + margin: 0 !important; + } + + .mt-xl-0, + .my-xl-0 { + margin-top: 0 !important; + } + + .mr-xl-0, + .mx-xl-0 { + margin-right: 0 !important; + } + + .mb-xl-0, + .my-xl-0 { + margin-bottom: 0 !important; + } + + .ml-xl-0, + .mx-xl-0 { + margin-left: 0 !important; + } + + .m-xl-1 { + margin: 0.25rem !important; + } + + .mt-xl-1, + .my-xl-1 { + margin-top: 0.25rem !important; + } + + .mr-xl-1, + .mx-xl-1 { + margin-right: 0.25rem !important; + } + + .mb-xl-1, + .my-xl-1 { + margin-bottom: 0.25rem !important; + } + + .ml-xl-1, + .mx-xl-1 { + margin-left: 0.25rem !important; + } + + .m-xl-2 { + margin: 0.5rem !important; + } + + .mt-xl-2, + .my-xl-2 { + margin-top: 0.5rem !important; + } + + .mr-xl-2, + .mx-xl-2 { + margin-right: 0.5rem !important; + } + + .mb-xl-2, + .my-xl-2 { + margin-bottom: 0.5rem !important; + } + + .ml-xl-2, + .mx-xl-2 { + margin-left: 0.5rem !important; + } + + .m-xl-3 { + margin: 1rem !important; + } + + .mt-xl-3, + .my-xl-3 { + margin-top: 1rem !important; + } + + .mr-xl-3, + .mx-xl-3 { + margin-right: 1rem !important; + } + + .mb-xl-3, + .my-xl-3 { + margin-bottom: 1rem !important; + } + + .ml-xl-3, + .mx-xl-3 { + margin-left: 1rem !important; + } + + .m-xl-4 { + margin: 1.5rem !important; + } + + .mt-xl-4, + .my-xl-4 { + margin-top: 1.5rem !important; + } + + .mr-xl-4, + .mx-xl-4 { + margin-right: 1.5rem !important; + } + + .mb-xl-4, + .my-xl-4 { + margin-bottom: 1.5rem !important; + } + + .ml-xl-4, + .mx-xl-4 { + margin-left: 1.5rem !important; + } + + .m-xl-5 { + margin: 3rem !important; + } + + .mt-xl-5, + .my-xl-5 { + margin-top: 3rem !important; + } + + .mr-xl-5, + .mx-xl-5 { + margin-right: 3rem !important; + } + + .mb-xl-5, + .my-xl-5 { + margin-bottom: 3rem !important; + } + + .ml-xl-5, + .mx-xl-5 { + margin-left: 3rem !important; + } + + .p-xl-0 { + padding: 0 !important; + } + + .pt-xl-0, + .py-xl-0 { 
+ padding-top: 0 !important; + } + + .pr-xl-0, + .px-xl-0 { + padding-right: 0 !important; + } + + .pb-xl-0, + .py-xl-0 { + padding-bottom: 0 !important; + } + + .pl-xl-0, + .px-xl-0 { + padding-left: 0 !important; + } + + .p-xl-1 { + padding: 0.25rem !important; + } + + .pt-xl-1, + .py-xl-1 { + padding-top: 0.25rem !important; + } + + .pr-xl-1, + .px-xl-1 { + padding-right: 0.25rem !important; + } + + .pb-xl-1, + .py-xl-1 { + padding-bottom: 0.25rem !important; + } + + .pl-xl-1, + .px-xl-1 { + padding-left: 0.25rem !important; + } + + .p-xl-2 { + padding: 0.5rem !important; + } + + .pt-xl-2, + .py-xl-2 { + padding-top: 0.5rem !important; + } + + .pr-xl-2, + .px-xl-2 { + padding-right: 0.5rem !important; + } + + .pb-xl-2, + .py-xl-2 { + padding-bottom: 0.5rem !important; + } + + .pl-xl-2, + .px-xl-2 { + padding-left: 0.5rem !important; + } + + .p-xl-3 { + padding: 1rem !important; + } + + .pt-xl-3, + .py-xl-3 { + padding-top: 1rem !important; + } + + .pr-xl-3, + .px-xl-3 { + padding-right: 1rem !important; + } + + .pb-xl-3, + .py-xl-3 { + padding-bottom: 1rem !important; + } + + .pl-xl-3, + .px-xl-3 { + padding-left: 1rem !important; + } + + .p-xl-4 { + padding: 1.5rem !important; + } + + .pt-xl-4, + .py-xl-4 { + padding-top: 1.5rem !important; + } + + .pr-xl-4, + .px-xl-4 { + padding-right: 1.5rem !important; + } + + .pb-xl-4, + .py-xl-4 { + padding-bottom: 1.5rem !important; + } + + .pl-xl-4, + .px-xl-4 { + padding-left: 1.5rem !important; + } + + .p-xl-5 { + padding: 3rem !important; + } + + .pt-xl-5, + .py-xl-5 { + padding-top: 3rem !important; + } + + .pr-xl-5, + .px-xl-5 { + padding-right: 3rem !important; + } + + .pb-xl-5, + .py-xl-5 { + padding-bottom: 3rem !important; + } + + .pl-xl-5, + .px-xl-5 { + padding-left: 3rem !important; + } + + .m-xl-auto { + margin: auto !important; + } + + .mt-xl-auto, + .my-xl-auto { + margin-top: auto !important; + } + + .mr-xl-auto, + .mx-xl-auto { + margin-right: auto !important; + } + + .mb-xl-auto, + .my-xl-auto { + 
margin-bottom: auto !important; + } + + .ml-xl-auto, + .mx-xl-auto { + margin-left: auto !important; + } +} +.text-justify { + text-align: justify !important; +} + +.text-nowrap { + white-space: nowrap !important; +} + +.text-truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.text-left { + text-align: left !important; +} + +.text-right { + text-align: right !important; +} + +.text-center { + text-align: center !important; +} + +@media (min-width: 576px) { + .text-sm-left { + text-align: left !important; + } + + .text-sm-right { + text-align: right !important; + } + + .text-sm-center { + text-align: center !important; + } +} +@media (min-width: 768px) { + .text-md-left { + text-align: left !important; + } + + .text-md-right { + text-align: right !important; + } + + .text-md-center { + text-align: center !important; + } +} +@media (min-width: 992px) { + .text-lg-left { + text-align: left !important; + } + + .text-lg-right { + text-align: right !important; + } + + .text-lg-center { + text-align: center !important; + } +} +@media (min-width: 1200px) { + .text-xl-left { + text-align: left !important; + } + + .text-xl-right { + text-align: right !important; + } + + .text-xl-center { + text-align: center !important; + } +} +.text-lowercase { + text-transform: lowercase !important; +} + +.text-uppercase { + text-transform: uppercase !important; +} + +.text-capitalize { + text-transform: capitalize !important; +} + +.font-weight-light { + font-weight: 300 !important; +} + +.font-weight-normal { + font-weight: 400 !important; +} + +.font-weight-bold { + font-weight: 700 !important; +} + +.font-italic { + font-style: italic !important; +} + +.text-white { + color: #fff !important; +} + +.text-primary { + color: #007bff !important; +} + +a.text-primary:hover, a.text-primary:focus { + color: #0062cc !important; +} + +.text-secondary { + color: #6c757d !important; +} + +a.text-secondary:hover, a.text-secondary:focus { + color: #545b62 
!important; +} + +.text-success { + color: #28a745 !important; +} + +a.text-success:hover, a.text-success:focus { + color: #1e7e34 !important; +} + +.text-info { + color: #17a2b8 !important; +} + +a.text-info:hover, a.text-info:focus { + color: #117a8b !important; +} + +.text-warning { + color: #ffc107 !important; +} + +a.text-warning:hover, a.text-warning:focus { + color: #d39e00 !important; +} + +.text-danger { + color: #dc3545 !important; +} + +a.text-danger:hover, a.text-danger:focus { + color: #bd2130 !important; +} + +.text-light { + color: #f8f9fa !important; +} + +a.text-light:hover, a.text-light:focus { + color: #dae0e5 !important; +} + +.text-dark { + color: #343a40 !important; +} + +a.text-dark:hover, a.text-dark:focus { + color: #1d2124 !important; +} + +.text-muted { + color: #6c757d !important; +} + +.text-hide { + font: 0/0 a; + color: transparent; + text-shadow: none; + background-color: transparent; + border: 0; +} + +.visible { + visibility: visible !important; +} + +.invisible { + visibility: hidden !important; +} + +@media print { + *, + *::before, + *::after { + text-shadow: none !important; + -webkit-box-shadow: none !important; + box-shadow: none !important; + } + + a:not(.btn) { + text-decoration: underline; + } + + abbr[title]::after { + content: " (" attr(title) ")"; + } + + pre { + white-space: pre-wrap !important; + } + + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; + } + + thead { + display: table-header-group; + } + + tr, + img { + page-break-inside: avoid; + } + + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + + h2, + h3 { + page-break-after: avoid; + } + + @page { + size: a3; + } + body { + min-width: 992px !important; + } + + .container { + min-width: 992px !important; + } + + .navbar { + display: none; + } + + .badge { + border: 1px solid #000; + } + + .table { + border-collapse: collapse !important; + } + .table td, + .table th { + background-color: #fff !important; + } + + .table-bordered th, + 
.table-bordered td { + border: 1px solid #ddd !important; + } +} +/*Github syntax highlighting theme via Rouge*/ +.highlight table td { + padding: 5px; +} + +.highlight table pre { + margin: 0; +} + +.highlight .cm { + color: #999988; + font-style: italic; +} + +.highlight .cp { + color: #999999; + font-weight: bold; +} + +.highlight .c1 { + color: #999988; + font-style: italic; +} + +.highlight .cs { + color: #999999; + font-weight: bold; + font-style: italic; +} + +.highlight .c, .highlight .cd { + color: #999988; + font-style: italic; +} + +.highlight .err { + color: #a61717; + background-color: #e3d2d2; +} + +.highlight .gd { + color: #000000; + background-color: #ffdddd; +} + +.highlight .ge { + color: #000000; + font-style: italic; +} + +.highlight .gr { + color: #aa0000; +} + +.highlight .gh { + color: #999999; +} + +.highlight .gi { + color: #000000; + background-color: #ddffdd; +} + +.highlight .go { + color: #888888; +} + +.highlight .gp { + color: #555555; +} + +.highlight .gs { + font-weight: bold; +} + +.highlight .gu { + color: #aaaaaa; +} + +.highlight .gt { + color: #aa0000; +} + +.highlight .kc { + color: #000000; + font-weight: bold; +} + +.highlight .kd { + color: #000000; + font-weight: bold; +} + +.highlight .kn { + color: #000000; + font-weight: bold; +} + +.highlight .kp { + color: #000000; + font-weight: bold; +} + +.highlight .kr { + color: #000000; + font-weight: bold; +} + +.highlight .kt { + color: #445588; + font-weight: bold; +} + +.highlight .k, .highlight .kv { + color: #000000; + font-weight: bold; +} + +.highlight .mf { + color: #009999; +} + +.highlight .mh { + color: #009999; +} + +.highlight .il { + color: #009999; +} + +.highlight .mi { + color: #009999; +} + +.highlight .mo { + color: #009999; +} + +.highlight .m, .highlight .mb, .highlight .mx { + color: #009999; +} + +.highlight .sb { + color: #d14; +} + +.highlight .sc { + color: #d14; +} + +.highlight .sd { + color: #d14; +} + +.highlight .s2 { + color: #d14; +} + 
+.highlight .se { + color: #d14; +} + +.highlight .sh { + color: #d14; +} + +.highlight .si { + color: #d14; +} + +.highlight .sx { + color: #d14; +} + +.highlight .sr { + color: #009926; +} + +.highlight .s1 { + color: #d14; +} + +.highlight .ss { + color: #990073; +} + +.highlight .s { + color: #d14; +} + +.highlight .na { + color: #008080; +} + +.highlight .bp { + color: #525252; +} + +.highlight .nb { + color: #0086B3; +} + +.highlight .nc { + color: #445588; + font-weight: bold; +} + +.highlight .no { + color: #008080; +} + +.highlight .nd { + color: #3c5d5d; + font-weight: bold; +} + +.highlight .ni { + color: #800080; +} + +.highlight .ne { + color: #990000; + font-weight: bold; +} + +.highlight .nf { + color: #990000; + font-weight: bold; +} + +.highlight .nl { + color: #990000; + font-weight: bold; +} + +.highlight .nn { + color: #555555; +} + +.highlight .nt { + color: #000080; +} + +.highlight .vc { + color: #008080; +} + +.highlight .vg { + color: #008080; +} + +.highlight .vi { + color: #008080; +} + +.highlight .nv { + color: #008080; +} + +.highlight .ow { + color: #000000; + font-weight: bold; +} + +.highlight .o { + color: #000000; + font-weight: bold; +} + +.highlight .n { + color: #000000; + font-weight: bold; +} + +.highlight .p { + color: #000000; + font-weight: bold; +} + +.highlight .w { + color: #bbbbbb; +} + +.highlight { + background-color: #f8f8f8; +} + +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-bold.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 700; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-bold-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: normal; + src: 
url("../fonts/FreightSans/freight-sans-medium.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 500; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-medium-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-light.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 100; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-light-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 400; + font-style: italic; + src: url("../fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book-italic.woff") format("woff"); +} +@font-face { + font-family: FreightSans; + font-weight: 400; + font-style: normal; + src: url("../fonts/FreightSans/freight-sans-book.woff2") format("woff2"), url("../fonts/FreightSans/freight-sans-book.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 600; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-SemiBold"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 500; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Medium"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 400; + 
font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Regular"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff") format("woff"); +} +@font-face { + font-family: IBMPlexMono; + font-weight: 300; + font-style: normal; + unicode-range: u+0020-007f; + src: local("IBMPlexMono-Light"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff2") format("woff2"), url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff") format("woff"); +} +html { + position: relative; + min-height: 100%; + font-size: 12px; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +* { + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +body { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +a:link, +a:visited, +a:hover { + text-decoration: none; + color: #e44c2c; +} + +a.with-right-arrow, .btn.with-right-arrow { + padding-right: 1.375rem; + position: relative; + background-image: url("../images/chevron-right-orange.svg"); + background-size: 6px 13px; + background-position: center right 5px; + background-repeat: no-repeat; +} +@media screen and (min-width: 768px) { + a.with-right-arrow, .btn.with-right-arrow { + background-size: 8px 14px; + background-position: center right 12px; + padding-right: 2rem; + } +} + +::-webkit-input-placeholder { + color: #e44c2c; +} + +::-moz-placeholder { + color: #e44c2c; +} + +:-ms-input-placeholder { + color: #e44c2c; +} + +:-moz-placeholder { + color: #e44c2c; +} + +.email-subscribe-form input.email { + color: #e44c2c; + border: none; + border-bottom: 1px solid #939393; + width: 100%; + background-color: transparent; + outline: none; + font-size: 1.125rem; + letter-spacing: 0.25px; + line-height: 2.25rem; +} +.email-subscribe-form input[type="submit"] { + position: absolute; + right: 0; + top: 10px; + height: 15px; + width: 15px; + background-image: url("../images/arrow-right-with-tail.svg"); + 
background-color: transparent; + background-repeat: no-repeat; + background-size: 15px 15px; + background-position: center center; + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + border: 0; +} + +.email-subscribe-form-fields-wrapper { + position: relative; +} + +.anchorjs-link { + color: #6c6c6d !important; +} +@media screen and (min-width: 768px) { + .anchorjs-link:hover { + color: inherit; + text-decoration: none !important; + } +} + +.pytorch-article #table-of-contents { + display: none; +} + +code, kbd, pre, samp { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} +code span, kbd span, pre span, samp span { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; +} + +pre { + padding: 1.125rem; +} +pre code { + font-size: 0.875rem; +} +pre.highlight { + background-color: #f3f4f7; + line-height: 1.3125rem; +} + +code.highlighter-rouge { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 6px; +} + +a:link code.highlighter-rouge, +a:visited code.highlighter-rouge, +a:hover code.highlighter-rouge { + color: #4974D1; +} +a:link.has-code, +a:visited.has-code, +a:hover.has-code { + color: #4974D1; +} + +p code, +h1 code, +h2 code, +h3 code, +h4 code, +h5 code, +h6 code { + font-size: 78.5%; +} + +pre { + white-space: pre-wrap; + white-space: -moz-pre-wrap; + white-space: -pre-wrap; + white-space: -o-pre-wrap; + word-wrap: break-word; +} + +.header-holder { + height: 68px; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + left: 0; + margin-left: auto; + margin-right: auto; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 9999; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; +} +@media screen and (min-width: 1200px) { + .header-holder { + height: 90px; + } +} + +.header-container { + position: 
relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.header-container:before, .header-container:after { + content: ""; + display: table; +} +.header-container:after { + clear: both; +} +.header-container { + *zoom: 1; +} +@media screen and (min-width: 1200px) { + .header-container { + display: block; + } +} + +.header-logo { + height: 23px; + width: 93px; + background-image: url("../images/logo.svg"); + background-repeat: no-repeat; + background-size: 93px 23px; + display: block; + float: left; + z-index: 10; +} +@media screen and (min-width: 1200px) { + .header-logo { + background-size: 108px 27px; + position: absolute; + height: 27px; + width: 108px; + top: 4px; + float: none; + } +} + +.main-menu-open-button { + background-image: url("../images/icon-menu-dots.svg"); + background-position: center center; + background-size: 25px 7px; + background-repeat: no-repeat; + width: 25px; + height: 17px; + position: absolute; + right: 0; + top: 4px; +} +@media screen and (min-width: 1200px) { + .main-menu-open-button { + display: none; + } +} + +.header-holder .main-menu { + display: none; +} +@media screen and (min-width: 1200px) { + .header-holder .main-menu { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} +.header-holder .main-menu ul { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + margin: 0; +} +.header-holder .main-menu ul li { + display: inline-block; + margin-right: 34px; + position: relative; + color: #262626; +} + +.header-holder .main-menu ul li a:hover { + color: #262626; +} + +.header-holder .main-menu ul li.active:after { + content: "•"; + bottom: -24px; + color: #262626; + font-size: 
1.375rem; + left: 0; + position: absolute; + right: 0; + text-align: center; +} + + +.header-holder .main-menu ul li.active a { + color: #262626; +} +.header-holder .main-menu ul li.docs-active:after { + content: "•"; + bottom: -24px; + color: #e44c2c; + font-size: 1.375rem; + left: -24px; + position: absolute; + right: 0; + text-align: center; +} +.header-holder .main-menu ul li:last-of-type { + margin-right: 0; +} +.header-holder .main-menu ul li a { + color: #ffffff; + font-size: 1.2rem; + letter-spacing: 0; + line-height: 2.125rem; + text-align: center; + text-decoration: none; +} +@media screen and (min-width: 1200px) { + .header-holder .main-menu ul li a:hover { + color: #e44c2c; + } +} + +.mobile-main-menu { + display: none; +} +.mobile-main-menu.open { + background-color: #262626; + display: block; + height: 100%; + left: 0; + margin-left: auto; + margin-right: auto; + min-height: 100%; + position: fixed; + right: 0; + top: 0; + width: 100%; + z-index: 99999; +} + +.mobile-main-menu .container-fluid { + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 68px; + position: relative; +} +.mobile-main-menu .container-fluid:before, .mobile-main-menu .container-fluid:after { + content: ""; + display: table; +} +.mobile-main-menu .container-fluid:after { + clear: both; +} +.mobile-main-menu .container-fluid { + *zoom: 1; +} + +.mobile-main-menu.open ul { + list-style-type: none; + padding: 0; +} +.mobile-main-menu.open ul li a, .mobile-main-menu.open .resources-mobile-menu-title { + font-size: 2rem; + color: #ffffff; + letter-spacing: 0; + line-height: 4rem; + text-decoration: none; +} +.mobile-main-menu.open ul li.active a { + color: #e44c2c; +} + +.main-menu-close-button { + background-image: url("../images/icon-close.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 24px 24px; + height: 24px; + position: absolute; + 
right: 0; + width: 24px; + top: -4px; +} + +.mobile-main-menu-header-container { + position: relative; +} + +.mobile-main-menu-links-container { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + padding-left: 2.8125rem; + height: 90vh; + margin-top: -25px; + padding-top: 50%; + overflow-y: scroll; +} +.mobile-main-menu-links-container .main-menu { + height: 100vh; +} + +.mobile-main-menu-links-container ul.resources-mobile-menu-items li { + padding-left: 15px; +} + +.mobile-main-menu-links-container ul.resources-mobile-menu-items li a { + font-size: 18px; + line-height: 34px; +} + +.site-footer { + padding: 2.5rem 0; + width: 100%; + background: #000000; + background-size: 100%; + margin-left: 0; + margin-right: 0; + position: relative; + z-index: 201; +} +@media screen and (min-width: 768px) { + .site-footer { + padding: 5rem 0; + } +} + +.site-footer p { + color: #ffffff; +} +.site-footer ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 0; +} + +.site-footer ul li { + font-size: 1.125rem; + line-height: 2rem; + color: #A0A0A1; + padding-bottom: 0.375rem; +} +.site-footer ul li.list-title { + padding-bottom: 0.75rem; + color: #ffffff; +} +.site-footer a:link, +.site-footer a:visited { + color: inherit; +} +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #e44c2c; + } +} + +.site-footer .privacy-policy { + background: #000000; + display: flex; + border-bottom: 1px solid white; + padding-bottom: 10px; +} + +.site-footer .privacy-policy-links { + background: #000000; + display: flex; + padding-top: 1rem; + padding-right: 1rem; + display: inline-flex; + color: white; +} + +.site-footer .footer-links-wrapper { + display: flex; + flex-wrap: wrap; + border-bottom: 1px solid white; + padding-bottom: 1rem; +} + +.site-footer .copyright { + padding-top: 1rem; + padding-right: 1rem; + display: inline-flex; + color: white; +} + +.site-footer 
.copyright p { + color: white; +} + +.site-footer .copyright a { + color: red; +} + +.docs-tutorials-resources { + background-color: #262626; + color: #ffffff; + padding-top: 2.5rem; + padding-bottom: 2.5rem; + position: relative; + z-index: 201; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources { + padding-top: 5rem; + padding-bottom: 5rem; + } +} +.docs-tutorials-resources p { + color: #929292; + font-size: 1.125rem; +} +.docs-tutorials-resources h2 { + font-size: 1.5rem; + letter-spacing: -0.25px; + text-transform: none; + margin-bottom: 0.25rem; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources h2 { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources .col-md-4 { + margin-bottom: 2rem; + text-align: center; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources .col-md-4 { + margin-bottom: 0; + } +} +.docs-tutorials-resources .with-right-arrow { + margin-left: 12px; +} +.docs-tutorials-resources .with-right-arrow:hover { + background-image: url("../images/chevron-right-white.svg"); +} +.docs-tutorials-resources p { + font-size: 1rem; + line-height: 1.5rem; + letter-spacing: 0.22px; + color: #939393; + margin-bottom: 0; +} +@media screen and (min-width: 768px) { + .docs-tutorials-resources p { + margin-bottom: 1.25rem; + } +} +.docs-tutorials-resources a { + font-size: 1.125rem; + color: #e44c2c; +} +.docs-tutorials-resources a:hover { + color: #ffffff; +} + +.footer-container { + position: relative; +} + +@media screen and (min-width: 768px) { + .footer-logo-wrapper { + position: absolute; + top: 0; + left: 30px; + } +} + +.footer-logo { + background-image: url("../images/logo-icon.svg"); + background-position: center; + background-repeat: no-repeat; + background-size: 20px 24px; + display: block; + height: 24px; + margin-bottom: 2.8125rem; + width: 20px; +} +@media screen and (min-width: 768px) { + .footer-logo { + background-size: 29px 36px; + height: 36px; + margin-bottom: 0; + margin-bottom: 
0; + width: 29px; + } +} + +.footer-links-wrapper { + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -ms-flex-wrap: wrap; + flex-wrap: wrap; +} +@media screen and (min-width: 768px) { + .footer-links-wrapper { + -ms-flex-wrap: initial; + flex-wrap: initial; + -webkit-box-pack: end; + -ms-flex-pack: end; + justify-content: flex-end; + } +} + +.footer-links-col { + margin-bottom: 3.75rem; + width: 50%; +} +@media screen and (min-width: 768px) { + .footer-links-col { + margin-bottom: 0; + width: 14%; + margin-right: 23px; + } + .footer-links-col.follow-us-col { + width: 18%; + margin-right: 0; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .footer-links-col { + width: 18%; + margin-right: 30px; + } +} + +.footer-social-icons { + margin: 8.5625rem 0 2.5rem 0; +} +.footer-social-icons a { + height: 32px; + width: 32px; + display: inline-block; + background-color: #CCCDD1; + border-radius: 50%; + margin-right: 5px; +} +.footer-social-icons a.facebook { + background-image: url("../images/logo-facebook-dark.svg"); + background-position: center center; + background-size: 9px 18px; + background-repeat: no-repeat; +} +.footer-social-icons a.twitter { + background-image: url("../images/logo-twitter-dark.svg"); + background-position: center center; + background-size: 17px 17px; + background-repeat: no-repeat; +} +.footer-social-icons a.youtube { + background-image: url("../images/logo-youtube-dark.svg"); + background-position: center center; + background-repeat: no-repeat; +} + +.site-footer .mc-field-group { + margin-top: -2px; +} + +article.pytorch-article { + max-width: 920px; + margin: 0 auto; +} +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + margin: 1.375rem 0; + color: #262626; +} +article.pytorch-article h2 { + font-size: 1.625rem; + letter-spacing: 1.33px; + line-height: 2rem; + text-transform: none; +} +article.pytorch-article h3 { + 
font-size: 1.5rem; + letter-spacing: -0.25px; + line-height: 1.875rem; + text-transform: none; +} +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-size: 1.125rem; + letter-spacing: -0.19px; + line-height: 1.875rem; +} +article.pytorch-article p { + margin-bottom: 1.125rem; +} +article.pytorch-article p, +article.pytorch-article ul li, +article.pytorch-article ol li, +article.pytorch-article dl dt, +article.pytorch-article dl dd, +article.pytorch-article blockquote { + font-size: 1rem; + color: #262626; + letter-spacing: 0.01px; + font-weight: 500; +} +article.pytorch-article table { + margin-bottom: 2.5rem; + width: 100%; +} +article.pytorch-article table thead { + border-bottom: 1px solid #cacaca; +} +article.pytorch-article table th { + padding: 0.625rem; + color: #262626; +} +article.pytorch-article table td { + padding: 0.3125rem; +} +article.pytorch-article table tr th:first-of-type, +article.pytorch-article table tr td:first-of-type { + padding-left: 0; +} +article.pytorch-article table.docutils.field-list th.field-name { + padding: 0.3125rem; + padding-left: 0; +} +article.pytorch-article table.docutils.field-list td.field-body { + padding: 0.3125rem; +} +article.pytorch-article table.docutils.field-list td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article ul, +article.pytorch-article ol { + margin: 1.5rem 0 3.125rem 0; +} +@media screen and (min-width: 768px) { + article.pytorch-article ul, + article.pytorch-article ol { + padding-left: 6.25rem; + } +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin-bottom: 0.625rem; +} +article.pytorch-article dl { + margin-bottom: 1.5rem; +} +article.pytorch-article dl dt { + margin-bottom: 0.75rem; +} +article.pytorch-article pre { + margin-bottom: 2.5rem; +} +article.pytorch-article hr { + margin-top: 4.6875rem; + margin-bottom: 4.6875rem; +} +article.pytorch-article blockquote { + margin: 0 auto; + margin-bottom: 2.5rem; + 
width: 65%; +} +article.pytorch-article img { + width: 100%; +} + +html { + height: 100%; +} +@media screen and (min-width: 768px) { + html { + font-size: 16px; + } +} + +body { + background: #ffffff; + height: 100%; + margin: 0; +} +body.no-scroll { + height: 100%; + overflow: hidden; +} + +p { + margin-top: 0; + margin-bottom: 1.125rem; +} +p a:link, +p a:visited, +p a:hover { + color: #e44c2c; + text-decoration: none; +} +@media screen and (min-width: 768px) { + p a:hover { + text-decoration: underline; + } +} +p a:link, +p a:visited, +p a:hover { + color: #ee4c2c; +} + +.wy-breadcrumbs li a { + color: #ee4c2c; +} + +ul.pytorch-breadcrumbs { + padding-left: 0; + list-style-type: none; +} +ul.pytorch-breadcrumbs li { + display: inline-block; + font-size: 0.875rem; +} +ul.pytorch-breadcrumbs a { + color: #ee4c2c; + text-decoration: none; +} + +.table-of-contents-link-wrapper { + display: block; + margin-top: 0; + padding: 1.25rem 1.875rem; + background-color: #f3f4f7; + position: relative; + color: #262626; + font-size: 1.25rem; +} +.table-of-contents-link-wrapper.is-open .toggle-table-of-contents { + -webkit-transform: rotate(180deg); + transform: rotate(180deg); +} +@media screen and (min-width: 1200px) { + .table-of-contents-link-wrapper { + display: none; + } +} + +.toggle-table-of-contents { + background-image: url("../images/chevron-down-grey.svg"); + background-position: center center; + background-repeat: no-repeat; + background-size: 18px 18px; + height: 100%; + position: absolute; + right: 21px; + width: 30px; + top: 0; +} + +.tutorials-header .header-logo { + background-image: url("../images/logo-dark.svg"); +} +.tutorials-header .main-menu ul li a { + color: #262626; +} + +.tutorials-header .main-menu ul li a:hover { + color: #262626; +} + +.tutorials-header .main-menu .no-dropdown { + display: block; +} + +.tutorials-header .main-menu .no-dropdown a { + color: #262626; +} + +.tutorials-header .main-menu .no-dropdown a:hover { + color: #262626 
!important; + border-bottom: 2px solid #262626; +} + +.tutorials-header .main-menu-open-button { + background-image: url("../images/icon-menu-dots-dark.svg"); +} + +.rst-content footer .rating-hr.hr-top { + margin-bottom: -0.0625rem; +} +.rst-content footer .rating-hr.hr-bottom { + margin-top: -0.0625rem; +} +.rst-content footer .rating-container { + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; + font-size: 1.125rem; +} +.rst-content footer .rating-container .rating-prompt, .rst-content footer .rating-container .was-helpful-thank-you { + padding: 0.625rem 1.25rem 0.625rem 1.25rem; +} +.rst-content footer .rating-container .was-helpful-thank-you { + display: none; +} +.rst-content footer .rating-container .rating-prompt.yes-link, .rst-content footer .rating-container .rating-prompt.no-link { + color: #e44c2c; + cursor: pointer; +} +.rst-content footer .rating-container .rating-prompt.yes-link:hover, .rst-content footer .rating-container .rating-prompt.no-link:hover { + background-color: #e44c2c; + color: #ffffff; +} +.rst-content footer .rating-container .stars-outer { + display: inline-block; + position: relative; + font-family: FontAwesome; + padding: 0.625rem 1.25rem 0.625rem 1.25rem; +} +.rst-content footer .rating-container .stars-outer i { + cursor: pointer; +} +.rst-content footer .rating-container .stars-outer .star-fill { + color: #ee4c2c; +} +.rst-content footer div[role="contentinfo"] { + padding-top: 2.5rem; +} +.rst-content footer div[role="contentinfo"] p { + margin-bottom: 0; +} + +h1 { + font-size: 2rem; + letter-spacing: 1.78px; + line-height: 2.5rem; + margin: 1.375rem 0; +} + +span.pre { + color: #6c6c6d; + background-color: #f3f4f7; + padding: 2px 0px; +} + +pre { + padding: 1.375rem; +} + +.highlight .c1 { + color: #6c6c6d; +} + +.headerlink { + display: none !important; +} + +a:link.has-code, +a:hover.has-code, +a:visited.has-code { + color: #4974D1; +} +a:link.has-code span, +a:hover.has-code span, 
+a:visited.has-code span { + color: #4974D1; +} + +article.pytorch-article ul, +article.pytorch-article ol { + padding-left: 1.875rem; + margin: 0; +} +article.pytorch-article ul li, +article.pytorch-article ol li { + margin: 0; + line-height: 1.75rem; +} +article.pytorch-article ul p, +article.pytorch-article ol p { + line-height: 1.75rem; + margin-bottom: 0; +} +article.pytorch-article ul ul, +article.pytorch-article ul ol, +article.pytorch-article ol ul, +article.pytorch-article ol ol { + margin: 0; +} +article.pytorch-article h1 { + font-weight: 600; + word-wrap: break-word; +} +article.pytorch-article h2, +article.pytorch-article h3, +article.pytorch-article h4, +article.pytorch-article h5, +article.pytorch-article h6 { + font-weight: normal; +} +article.pytorch-article h1 a, +article.pytorch-article h2 a, +article.pytorch-article h3 a, +article.pytorch-article h4 a, +article.pytorch-article h5 a, +article.pytorch-article h6 a { + color: #262626; +} +article.pytorch-article p.caption { + margin-top: 1.25rem; +} + +article.pytorch-article .section:first-of-type h1:first-of-type { + margin-top: 0; +} + +article.pytorch-article .sphx-glr-thumbcontainer { + margin: 0; + border: 1px solid #d6d7d8; + border-radius: 0; + width: 45%; + text-align: center; + margin-bottom: 5%; +} +@media screen and (max-width: 1200px) { + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(odd) { + margin-left: 0; + margin-right: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer:nth-child(even) { + margin-right: 0; + margin-left: 2.5%; + } + article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 40%; + } +} +@media screen and (min-width: 1201px) { + article.pytorch-article .sphx-glr-thumbcontainer { + margin-right: 3%; + margin-bottom: 3%; + width: 30%; + } +} +article.pytorch-article .sphx-glr-thumbcontainer .caption-text a { + font-size: 1rem; + color: #262626; + letter-spacing: 0; + line-height: 1.5rem; + text-decoration: none; +} 
+article.pytorch-article .sphx-glr-thumbcontainer:hover { + -webkit-box-shadow: none; + box-shadow: none; + border-bottom-color: #ffffff; +} +article.pytorch-article .sphx-glr-thumbcontainer:hover .figure:before { + bottom: 100%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure { + width: 80%; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure:before { + content: ""; + display: block; + position: absolute; + top: 0; + bottom: 35%; + left: 0; + right: 0; + background: #8A94B3; + opacity: 0.10; +} +article.pytorch-article .sphx-glr-thumbcontainer .figure a.reference.internal { + text-align: left; +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .sphx-glr-thumbcontainer:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + article.pytorch-article .sphx-glr-thumbcontainer:after { + background-color: #ee4c2c; + } +} + +article.pytorch-article .section :not(dt) > code { + color: #262626; + border-top: solid 2px #ffffff; + background-color: #ffffff; + border-bottom: solid 2px #ffffff; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .section :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .function dt, article.pytorch-article .attribute dt, article.pytorch-article .class .attribute dt, article.pytorch-article .class dt { + position: relative; + background: #f3f4f7; + padding: 0.5rem; + border-left: 3px solid #ee4c2c; + word-wrap: break-word; + padding-right: 100px; +} + +article.pytorch-article .class dt.field-odd, article.pytorch-article .class dt.field-even, +article.pytorch-article .function dt.field-odd, 
article.pytorch-article .function dt.field-even, +article.pytorch-article .attribute dt.field-odd, article.pytorch-article .attribute dt.field-even, +article.pytorch-article .class .attribute dt.field-odd,article.pytorch-article .class .attribute dt.field-even, +article.pytorch-article .class .method dt.field-odd, article.pytorch-article .class .staticmethod dt.field-odd, +article.pytorch-article .class .method dt.field-even, article.pytorch-article .class .staticmethod dt.field-even +{ + background: none; + padding-right: -20px; + border-top: none; + border-left: none; + padding-left: 0.0rem; + padding-top: 0.0rem; + padding-bottom: 0.0rem; + font-weight: 700; +} + +article.pytorch-article .function dt.sig { + position: relative; + background: #f3f4f7; + padding: 0.8rem; + border-left: 3px solid #ee4c2c; + word-wrap: break-word; + padding-right: 100px; + font-weight: 500; +} + +article.pytorch-article .function dt { + background: #ffffff; + padding-right: -20px; + border-left: none; + border-top: none; + padding-left: 0.2rem; + padding-top: 0.0rem; + padding-bottom: 0.0rem; + font-weight: 700; +} + +article.pytorch-article .class dl.py.property dt.sig { + border-left: 3px solid #ee4c2c; + border-top: none; + padding-left: 0.2rem; +} + +article.pytorch-article .function dt em.property, article.pytorch-article .attribute dt em.property, article.pytorch-article .class dt em.property { + font-family: inherit; +} +article.pytorch-article .function dt em, article.pytorch-article .attribute dt em, article.pytorch-article .class .attribute dt em, article.pytorch-article .class dt em, article.pytorch-article .function dt .sig-paren, article.pytorch-article .attribute dt .sig-paren, article.pytorch-article .class dt .sig-paren { + font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; + font-size: 87.5%; +} +article.pytorch-article .function dt a, article.pytorch-article .attribute dt a, article.pytorch-article .class 
.attribute dt a, article.pytorch-article .class dt a { + right: 30px; + padding-right: 0; + top: 50%; + -webkit-transform: perspective(1px) translateY(-50%); + transform: perspective(1px) translateY(-50%); +} +article.pytorch-article .function dt:hover .viewcode-link, article.pytorch-article .attribute dt:hover .viewcode-link, article.pytorch-article .class dt:hover .viewcode-link { + color: #ee4c2c; +} +article.pytorch-article .function .anchorjs-link, article.pytorch-article .attribute .anchorjs-link, article.pytorch-article .class .anchorjs-link { + display: inline; + position: absolute; + right: 8px; + font-size: 1.5625rem !important; + padding-left: 0; +} +article.pytorch-article .function dt > code, article.pytorch-article .attribute dt > code, article.pytorch-article .class .attribute dt > code, article.pytorch-article .class dt > code { + color: #262626; + border-top: solid 2px #f3f4f7; + background-color: #f3f4f7; + border-bottom: solid 2px #f3f4f7; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; +} +article.pytorch-article .function .viewcode-link, article.pytorch-article .attribute .viewcode-link, article.pytorch-article .class .viewcode-link { + padding-left: 0.6rem; + position: absolute; + font-size: 0.875rem; + color: #979797; + letter-spacing: 0; + line-height: 1.5rem; + text-transform: uppercase; +} +article.pytorch-article .function dd, article.pytorch-article .attribute dd, article.pytorch-article .class .attribute dd, article.pytorch-article .class dd { + padding-left: 3.75rem; +} +article.pytorch-article .function dd p, article.pytorch-article .attribute dd p, article.pytorch-article .class .attribute dd p, article.pytorch-article .class dd p { + color: #262626; +} +article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + white-space: nowrap; + color: #262626; + width: 20%; +} +@media 
screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr th.field-name, article.pytorch-article .attribute table tbody tr th.field-name, article.pytorch-article .class table tbody tr th.field-name { + width: 15%; + } +} +article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding: 0.625rem; + width: 80%; + color: #262626; +} +@media screen and (min-width: 768px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + width: 85%; + } +} +@media screen and (min-width: 1600px) { + article.pytorch-article .function table tbody tr td.field-body, article.pytorch-article .attribute table tbody tr td.field-body, article.pytorch-article .class table tbody tr td.field-body { + padding-left: 1.25rem; + } +} +article.pytorch-article .function table tbody tr td.field-body p, article.pytorch-article .attribute table tbody tr td.field-body p, article.pytorch-article .class table tbody tr td.field-body p { + padding-left: 0px; +} +article.pytorch-article .function table tbody tr td.field-body p:last-of-type, article.pytorch-article .attribute table tbody tr td.field-body p:last-of-type, article.pytorch-article .class table tbody tr td.field-body p:last-of-type { + margin-bottom: 0; +} +article.pytorch-article .function table tbody tr td.field-body ol, article.pytorch-article .attribute table tbody tr td.field-body ol, article.pytorch-article .class table tbody tr td.field-body ol, article.pytorch-article .function table tbody tr td.field-body ul, article.pytorch-article .attribute table tbody tr td.field-body ul, article.pytorch-article .class table tbody tr td.field-body ul { + padding-left: 1rem; + padding-bottom: 0; +} +article.pytorch-article .function 
table.docutils.field-list, article.pytorch-article .attribute table.docutils.field-list, article.pytorch-article .class table.docutils.field-list { + margin-bottom: 0.75rem; +} +article.pytorch-article .attribute .has-code { + float: none; +} +article.pytorch-article .class dt { + border-left: none; + border-top: 3px solid #ee4c2c; + padding-left: 4em; +} +article.pytorch-article .class dt em.property { + position: absolute; + left: 0.5rem; +} +article.pytorch-article .class dd .docutils dt { + padding-left: 0.5rem; +} +article.pytorch-article .class em.property { + text-transform: uppercase; + font-style: normal; + color: #ee4c2c; + font-size: 1rem; + letter-spacing: 0; + padding-right: 0.75rem; +} +article.pytorch-article .class dl dt em.property { + position: static; + left: 0; + padding-right: 0; +} + +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + border-left: 3px solid #ee4c2c; + border-top: none; +} + +article.pytorch-article .class .method dt, +article.pytorch-article .class .staticmethod dt { + padding-left: 0.5rem; +} +article.pytorch-article .class .attribute dt { + border-top: none; +} +article.pytorch-article .class .attribute dt em.property { + position: relative; + left: 0; +} +article.pytorch-article table { + table-layout: fixed; +} + +div.deprecated p { + display: inline; +} +div.deprecated, +div.versionchanged { + margin-top: 0.5rem; + padding: 0.5rem; + margin-bottom: 0.5rem; + border: none; +} +div.versionadded { + margin: 1rem 0; +} +div.deprecated p:last-child, +div.versionchanged p:last-child, +div.versionadded p:last-child { + margin-bottom: 0 +} +div.deprecated { + color: #b94a48; + background-color: #fdede9; +} +div.versionchanged { + background-color: #fffbe8; +} + +article.pytorch-article .note, +article.pytorch-article .warning, +article.pytorch-article .tip, +article.pytorch-article .seealso, +article.pytorch-article .hint, +article.pytorch-article .important, +article.pytorch-article 
.caution, +article.pytorch-article .danger, +article.pytorch-article .attention, +article.pytorch-article .error { + background: #f3f4f7; + margin-top: 1.875rem; + margin-bottom: 1.125rem; +} +article.pytorch-article .note .admonition-title, +article.pytorch-article .warning .admonition-title, +article.pytorch-article .tip .admonition-title, +article.pytorch-article .seealso .admonition-title, +article.pytorch-article .hint .admonition-title, +article.pytorch-article .important .admonition-title, +article.pytorch-article .caution .admonition-title, +article.pytorch-article .danger .admonition-title, +article.pytorch-article .attention .admonition-title, +article.pytorch-article .error .admonition-title { + color: #ffffff; + letter-spacing: 1px; + text-transform: uppercase; + margin-bottom: 1.125rem; + padding: 3px 0 3px 1.375rem; + position: relative; + font-size: 0.875rem; +} +article.pytorch-article .note .admonition-title:before, +article.pytorch-article .warning .admonition-title:before, +article.pytorch-article .tip .admonition-title:before, +article.pytorch-article .seealso .admonition-title:before, +article.pytorch-article .hint .admonition-title:before, +article.pytorch-article .important .admonition-title:before, +article.pytorch-article .caution .admonition-title:before, +article.pytorch-article .danger .admonition-title:before, +article.pytorch-article .attention .admonition-title:before, +article.pytorch-article .error .admonition-title:before { + content: "\2022"; + position: absolute; + left: 9px; + color: #ffffff; + top: 2px; +} +article.pytorch-article .note p:nth-child(n + 2), +article.pytorch-article .warning p:nth-child(n + 2), +article.pytorch-article .tip p:nth-child(n + 2), +article.pytorch-article .seealso p:nth-child(n + 2), +article.pytorch-article .hint p:nth-child(n + 2), +article.pytorch-article .important p:nth-child(n + 2), +article.pytorch-article .caution p:nth-child(n + 2), +article.pytorch-article .danger p:nth-child(n + 2), 
+article.pytorch-article .attention p:nth-child(n + 2), +article.pytorch-article .error p:nth-child(n + 2) { + padding: 0 1.375rem; +} +article.pytorch-article .note table, +article.pytorch-article .warning table, +article.pytorch-article .tip table, +article.pytorch-article .seealso table, +article.pytorch-article .hint table, +article.pytorch-article .important table, +article.pytorch-article .caution table, +article.pytorch-article .danger table, +article.pytorch-article .attention table, +article.pytorch-article .error table { + margin: 0 2rem; + width: auto; +} +article.pytorch-article .note :not(dt) > code, +article.pytorch-article .warning :not(dt) > code, +article.pytorch-article .tip :not(dt) > code, +article.pytorch-article .seealso :not(dt) > code, +article.pytorch-article .hint :not(dt) > code, +article.pytorch-article .important :not(dt) > code, +article.pytorch-article .caution :not(dt) > code, +article.pytorch-article .danger :not(dt) > code, +article.pytorch-article .attention :not(dt) > code, +article.pytorch-article .error :not(dt) > code { + border-top: solid 2px #ffffff; + background-color: #ffffff; + border-bottom: solid 2px #ffffff; + padding: 0px 3px; + -webkit-box-decoration-break: clone; + box-decoration-break: clone; + outline: 1px solid #e9e9e9; +} +article.pytorch-article .note :not(dt) > code .pre, +article.pytorch-article .warning :not(dt) > code .pre, +article.pytorch-article .tip :not(dt) > code .pre, +article.pytorch-article .seealso :not(dt) > code .pre, +article.pytorch-article .hint :not(dt) > code .pre, +article.pytorch-article .important :not(dt) > code .pre, +article.pytorch-article .caution :not(dt) > code .pre, +article.pytorch-article .danger :not(dt) > code .pre, +article.pytorch-article .attention :not(dt) > code .pre, +article.pytorch-article .error :not(dt) > code .pre { + outline: 0px; + padding: 0px; +} +article.pytorch-article .note pre, +article.pytorch-article .warning pre, +article.pytorch-article .tip pre, 
+article.pytorch-article .seealso pre, +article.pytorch-article .hint pre, +article.pytorch-article .important pre, +article.pytorch-article .caution pre, +article.pytorch-article .danger pre, +article.pytorch-article .attention pre, +article.pytorch-article .error pre { + margin-bottom: 0; +} +article.pytorch-article .note .highlight, +article.pytorch-article .warning .highlight, +article.pytorch-article .tip .highlight, +article.pytorch-article .seealso .highlight, +article.pytorch-article .hint .highlight, +article.pytorch-article .important .highlight, +article.pytorch-article .caution .highlight, +article.pytorch-article .danger .highlight, +article.pytorch-article .attention .highlight, +article.pytorch-article .error .highlight { + margin: 0 2rem 1.125rem 2rem; +} +article.pytorch-article .note ul, +article.pytorch-article .note ol, +article.pytorch-article .warning ul, +article.pytorch-article .warning ol, +article.pytorch-article .tip ul, +article.pytorch-article .tip ol, +article.pytorch-article .seealso ul, +article.pytorch-article .seealso ol, +article.pytorch-article .hint ul, +article.pytorch-article .hint ol, +article.pytorch-article .important ul, +article.pytorch-article .important ol, +article.pytorch-article .caution ul, +article.pytorch-article .caution ol, +article.pytorch-article .danger ul, +article.pytorch-article .danger ol, +article.pytorch-article .attention ul, +article.pytorch-article .attention ol, +article.pytorch-article .error ul, +article.pytorch-article .error ol { + padding-left: 3.25rem; +} +article.pytorch-article .note ul li, +article.pytorch-article .note ol li, +article.pytorch-article .warning ul li, +article.pytorch-article .warning ol li, +article.pytorch-article .tip ul li, +article.pytorch-article .tip ol li, +article.pytorch-article .seealso ul li, +article.pytorch-article .seealso ol li, +article.pytorch-article .hint ul li, +article.pytorch-article .hint ol li, +article.pytorch-article .important ul li, 
+article.pytorch-article .important ol li, +article.pytorch-article .caution ul li, +article.pytorch-article .caution ol li, +article.pytorch-article .danger ul li, +article.pytorch-article .danger ol li, +article.pytorch-article .attention ul li, +article.pytorch-article .attention ol li, +article.pytorch-article .error ul li, +article.pytorch-article .error ol li { + color: #262626; +} +article.pytorch-article .note p, +article.pytorch-article .warning p, +article.pytorch-article .tip p, +article.pytorch-article .seealso p, +article.pytorch-article .hint p, +article.pytorch-article .important p, +article.pytorch-article .caution p, +article.pytorch-article .danger p, +article.pytorch-article .attention p, +article.pytorch-article .error p { + margin-top: 1.125rem; +} +article.pytorch-article .note .admonition-title { + background: #54c7ec; +} +article.pytorch-article .warning .admonition-title { + background: #e94f3b; +} +article.pytorch-article .tip .admonition-title { + background: #6bcebb; +} +article.pytorch-article .seealso .admonition-title { + background: #6bcebb; +} +article.pytorch-article .hint .admonition-title { + background: #a2cdde; +} +article.pytorch-article .important .admonition-title { + background: #5890ff; +} +article.pytorch-article .caution .admonition-title { + background: #f7923a; +} +article.pytorch-article .danger .admonition-title { + background: #db2c49; +} +article.pytorch-article .attention .admonition-title { + background: #f5a623; +} +article.pytorch-article .error .admonition-title { + background: #cc2f90; +} +article.pytorch-article .sphx-glr-download-link-note.admonition.note, +article.pytorch-article .reference.download.internal, article.pytorch-article .sphx-glr-signature { + display: none; +} +article.pytorch-article .admonition > p:last-of-type { + margin-bottom: 0; + padding-bottom: 1.125rem !important; +} + +.pytorch-article div.sphx-glr-download a { + background-color: #f3f4f7; + background-image: 
url("../images/arrow-down-orange.svg"); + background-repeat: no-repeat; + background-position: left 10px center; + background-size: 15px 15px; + border-radius: 0; + border: none; + display: block; + text-align: left; + padding: 0.9375rem 3.125rem; + position: relative; + margin: 1.25rem auto; +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + .pytorch-article div.sphx-glr-download a:hover:after { + width: 100%; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a:after { + background-color: #ee4c2c; + } +} +@media screen and (min-width: 768px) { + .pytorch-article div.sphx-glr-download a { + background-position: left 20px center; + } +} +.pytorch-article div.sphx-glr-download a:hover { + -webkit-box-shadow: none; + box-shadow: none; + text-decoration: none; + background-image: url("../images/arrow-down-orange.svg"); + background-color: #f3f4f7; +} +.pytorch-article div.sphx-glr-download a span.pre { + background-color: transparent; + font-size: 1.125rem; + padding: 0; + color: #262626; +} +.pytorch-article div.sphx-glr-download a code, .pytorch-article div.sphx-glr-download a kbd, .pytorch-article div.sphx-glr-download a pre, .pytorch-article div.sphx-glr-download a samp, .pytorch-article div.sphx-glr-download a span.pre { + font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; +} + +.pytorch-article p.sphx-glr-script-out { + margin-bottom: 1.125rem; +} + +.pytorch-article div.sphx-glr-script-out { + margin-bottom: 2.5rem; +} +.pytorch-article div.sphx-glr-script-out .highlight { + margin-left: 0; + margin-top: 0; +} +.pytorch-article div.sphx-glr-script-out .highlight pre { + background-color: #fdede9; + padding: 1.5625rem; + color: #837b79; +} 
+.pytorch-article div.sphx-glr-script-out + p { + margin-top: unset; +} + +article.pytorch-article .wy-table-responsive table { + border: none; + border-color: #ffffff !important; + table-layout: fixed; +} +article.pytorch-article .wy-table-responsive table thead tr { + border-bottom: 2px solid #6c6c6d; +} +article.pytorch-article .wy-table-responsive table thead th { + line-height: 1.75rem; + padding-left: 0.9375rem; + padding-right: 0.9375rem; +} +article.pytorch-article .wy-table-responsive table tbody .row-odd { + background-color: #f3f4f7; +} +article.pytorch-article .wy-table-responsive table tbody td { + color: #6c6c6d; + white-space: normal; + padding: 0.9375rem; + font-size: 1rem; + line-height: 1.375rem; +} +article.pytorch-article .wy-table-responsive table tbody td .pre { + background: #ffffff; + color: #ee4c2c; + font-size: 87.5%; +} +article.pytorch-article .wy-table-responsive table tbody td code { + font-size: 87.5%; +} + +a[rel~="prev"], a[rel~="next"] { + padding: 0.375rem 0 0 0; +} + +img.next-page, +img.previous-page { + width: 8px; + height: 10px; + position: relative; + top: -1px; +} + +img.previous-page { + -webkit-transform: scaleX(-1); + transform: scaleX(-1); +} + +.rst-footer-buttons { + margin-top: 1.875rem; + margin-bottom: 1.875rem; +} +.rst-footer-buttons .btn:focus, +.rst-footer-buttons .btn.focus { + -webkit-box-shadow: none; + box-shadow: none; +} + +article.pytorch-article blockquote { + margin-left: 3.75rem; + color: #6c6c6d; +} + +article.pytorch-article .caption { + color: #6c6c6d; + letter-spacing: 0.25px; + line-height: 2.125rem; +} + +article.pytorch-article .math { + color: #262626; + width: auto; + text-align: center; +} +article.pytorch-article .math img { + width: auto; +} + +.pytorch-breadcrumbs-wrapper { + width: 100%; +} +@media screen and (min-width: 1201px) { + .pytorch-breadcrumbs-wrapper { + float: left; + margin-left: 3%; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-breadcrumbs-wrapper 
{ + width: 850px; + margin-left: 1.875rem; + } +} +.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside { + float: right; +} +.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside .fa.fa-github { + margin-top: 5px; + display: block; +} + +.pytorch-article .container { + padding-left: 0; + padding-right: 0; + max-width: none; +} + +a:link, +a:visited, +a:hover { + color: #ee4c2c; +} + +::-webkit-input-placeholder { + color: #ee4c2c; +} + +::-moz-placeholder { + color: #ee4c2c; +} + +:-ms-input-placeholder { + color: #ee4c2c; +} + +:-moz-placeholder { + color: #ee4c2c; +} + +@media screen and (min-width: 768px) { + .site-footer a:hover { + color: #ee4c2c; + } +} + +.docs-tutorials-resources a { + color: #ee4c2c; +} + +.header-holder { + position: relative; + z-index: 201; +} + +.header-holder .main-menu ul li.active:after { + color: #ee4c2c; +} +.header-holder .main-menu ul li.active a { + color: #ee4c2c; +} +@media screen and (min-width: 1200px) { + .header-holder .main-menu ul li a:hover { + color: #ee4c2c; + } +} + +.mobile-main-menu.open ul li.active a { + color: #ee4c2c; +} + +.version { + padding-bottom: 1rem; +} + +.pytorch-call-to-action-links { + padding-top: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 0; + } +} +@media (min-width: 1200px) and (max-width: 1239px) { + .pytorch-call-to-action-links { + padding-top: 2.5rem; + } +} +.pytorch-call-to-action-links #tutorial-type { + display: none; +} +.pytorch-call-to-action-links .call-to-action-img, .pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1.375rem; + width: 1.375rem; + margin-right: 10px; +} +.pytorch-call-to-action-links .call-to-action-notebook-img { + height: 1rem; +} +.pytorch-call-to-action-links a { + padding-right: 1.25rem; + color: #000000; 
+ cursor: pointer; +} +.pytorch-call-to-action-links a:hover { + color: #e44c2c; +} +.pytorch-call-to-action-links a .call-to-action-desktop-view { + display: none; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-desktop-view { + display: block; + } +} +.pytorch-call-to-action-links a .call-to-action-mobile-view { + display: block; +} +@media screen and (min-width: 768px) { + .pytorch-call-to-action-links a .call-to-action-mobile-view { + display: none; + } +} +.pytorch-call-to-action-links a #google-colab-link, .pytorch-call-to-action-links a #download-notebook-link, +.pytorch-call-to-action-links a #github-view-link { + padding-bottom: 0.625rem; + border-bottom: 1px solid #f3f4f7; + padding-right: 2.5rem; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; +} +.pytorch-call-to-action-links a #google-colab-link:hover, .pytorch-call-to-action-links a #download-notebook-link:hover, +.pytorch-call-to-action-links a #github-view-link:hover { + border-bottom-color: #e44c2c; + color: #e44c2c; +} + +#tutorial-cards-container #tutorial-cards { + width: 100%; +} +#tutorial-cards-container .tutorials-nav { + padding-left: 0; + padding-right: 0; + padding-bottom: 0; +} +#tutorial-cards-container .tutorials-hr { + margin-top: 1rem; + margin-bottom: 1rem; +} +#tutorial-cards-container .card.tutorials-card { + border-radius: 0; + border-color: #f3f4f7; + height: 98px; + margin-bottom: 1.25rem; + margin-bottom: 1.875rem; + overflow: scroll; + background-color: #f3f4f7; + cursor: pointer; +} +@media screen and (min-width: 1240px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card { + height: 200px; + overflow: scroll; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image { + 
position: absolute; + top: 0px; + right: 0px; + height: 96px; + width: 96px; + opacity: 0.5; +} +#tutorial-cards-container .card.tutorials-card .tutorials-image img { + height: 100%; + width: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 25%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .tutorials-image { + height: 100%; + width: 198px; + } +} +#tutorial-cards-container .card.tutorials-card .tutorials-image:before { + content: ''; + position: absolute; + top: 0; + left: 0; + bottom: 0; + right: 0; + z-index: 1; + background: #000000; + opacity: .075; +} +#tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + display: -webkit-inline-box; + display: -ms-inline-flexbox; + display: inline-flex; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 75%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card .card-title-container { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card .card-title-container h4 { + margin-bottom: 1.125rem; + margin-top: 0; + font-size: 1.5rem; +} +#tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + font-size: 0.9375rem; + line-height: 1.5rem; + margin-bottom: 0; + color: #6c6c6d; + font-weight: 400; + width: 70%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 75%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card p.card-summary, #tutorial-cards-container .card.tutorials-card p.tags { + width: 70%; + } +} +#tutorial-cards-container .card.tutorials-card p.tags { + 
margin-top: 30px; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; +} +#tutorial-cards-container .card.tutorials-card h4 { + color: #262626; + margin-bottom: 1.125rem; +} +#tutorial-cards-container .card.tutorials-card a { + height: 100%; +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 190px; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + #tutorial-cards-container .card.tutorials-card a { + min-height: 234px; + } +} +@media screen and (min-width: 768px) { + #tutorial-cards-container .card.tutorials-card:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + #tutorial-cards-container .card.tutorials-card:hover:after { + width: 100%; + } +} +#tutorial-cards-container .card.tutorials-card:hover { + background-color: #ffffff; + border: 1px solid #e2e2e2; + border-bottom: none; +} +#tutorial-cards-container .card.tutorials-card:hover p.card-summary { + color: #262626; +} +#tutorial-cards-container .card.tutorials-card:hover .tutorials-image { + opacity: unset; +} +#tutorial-cards-container .tutorial-tags-container { + width: 75%; +} +#tutorial-cards-container .tutorial-tags-container.active { + width: 0; +} +#tutorial-cards-container .tutorial-filter-menu ul { + list-style-type: none; + padding-left: 1.25rem; +} +#tutorial-cards-container .tutorial-filter-menu ul li { + padding-right: 1.25rem; + word-break: break-all; +} +#tutorial-cards-container .tutorial-filter-menu ul li a { + color: #979797; +} +#tutorial-cards-container .tutorial-filter-menu ul li a:hover { + color: #e44c2c; +} +#tutorial-cards-container .tutorial-filter { + cursor: pointer; +} +#tutorial-cards-container .filter-btn { + color: #979797; + border: 1px solid #979797; + display: inline-block; + text-align: center; + white-space: 
nowrap; + vertical-align: middle; + padding: 0.375rem 0.75rem; + font-size: 1rem; + line-height: 1.5; + margin-bottom: 5px; +} +#tutorial-cards-container .filter-btn:hover { + border: 1px solid #e44c2c; + color: #e44c2c; +} +#tutorial-cards-container .filter-btn.selected { + background-color: #e44c2c; + border: 1px solid #e44c2c; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected { + background-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .all-tag-selected:hover { + border-color: #979797; + color: #ffffff; +} +#tutorial-cards-container .pagination .page { + border: 1px solid #dee2e6; + padding: 0.5rem 0.75rem; +} +#tutorial-cards-container .pagination .active .page { + background-color: #dee2e6; +} + +article.pytorch-article .tutorials-callout-container { + padding-bottom: 50px; +} +article.pytorch-article .tutorials-callout-container .col-md-6 { + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container { + padding: 10px 0px 30px 0px; + padding-bottom: 10px; +} +article.pytorch-article .tutorials-callout-container .text-container .body-paragraph { + color: #666666; + font-weight: 300; + font-size: 1.125rem; + line-height: 1.875rem; +} +article.pytorch-article .tutorials-callout-container .btn.callout-button { + font-size: 1.125rem; + border-radius: 0; + border: none; + background-color: #f3f4f7; + color: #6c6c6d; + font-weight: 400; + position: relative; + letter-spacing: 0.25px; +} +@media screen and (min-width: 768px) { + article.pytorch-article .tutorials-callout-container .btn.callout-button:after { + content: ""; + display: block; + width: 0; + height: 1px; + position: absolute; + bottom: 0; + left: 0; + background-color: #e44c2c; + -webkit-transition: width .250s ease-in-out; + transition: width .250s ease-in-out; + } + article.pytorch-article .tutorials-callout-container .btn.callout-button:hover:after { + width: 100%; + } +} +article.pytorch-article .tutorials-callout-container 
.btn.callout-button a { + color: inherit; +} + +.pytorch-container { + margin: 0 auto; + padding: 0 1.875rem; + width: auto; + position: relative; +} +@media screen and (min-width: 1201px) { + .pytorch-container { + padding: 0; + } +} +@media screen and (min-width: 1200px) { + .pytorch-container { + margin-left: 25%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-container { + margin-left: 350px; + } +} +.pytorch-container:before, .pytorch-container:after { + content: ""; + display: table; +} +.pytorch-container:after { + clear: both; +} +.pytorch-container { + *zoom: 1; +} + +.pytorch-content-wrap { + background-color: #ffffff; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + position: relative; + padding-top: 0; +} +.pytorch-content-wrap:before, .pytorch-content-wrap:after { + content: ""; + display: table; +} +.pytorch-content-wrap:after { + clear: both; +} +.pytorch-content-wrap { + *zoom: 1; +} +@media screen and (min-width: 1201px) { + .pytorch-content-wrap { + padding-top: 45px; + float: left; + width: 100%; + display: block; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-wrap { + width: 100%; + } +} + +.pytorch-content { + background: #ffffff; + width: 100%; + max-width: 700px; + position: relative; +} + +.pytorch-content-left { + min-height: 100vh; + margin-top: 2.5rem; + width: 100%; +} +@media screen and (min-width: 1201px) { + .pytorch-content-left { + margin-top: 0; + margin-left: 3%; + width: 75%; + float: left; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-left { + width: 850px; + margin-left: 30px; + } +} +.pytorch-content-left .main-content { + padding-top: 0.9375rem; +} +.pytorch-content-left .main-content ul.simple { + padding-bottom: 1.25rem; +} +.pytorch-content-left .main-content .note:nth-child(1), .pytorch-content-left .main-content .warning:nth-child(1) { + margin-top: 0; +} + +.pytorch-content-right { + display: none; + position: relative; + overflow-x: hidden; + 
overflow-y: hidden; +} +@media screen and (min-width: 1201px) { + .pytorch-content-right { + display: block; + margin-left: 0; + width: 19%; + float: left; + height: 100%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-content-right { + width: 280px; + } +} + +@media screen and (min-width: 1201px) { + .pytorch-side-scroll { + position: relative; + overflow-x: hidden; + overflow-n: scroll; + height: 100%; + } +} + +.pytorch-menu-vertical { + padding: 1.25rem 1.875rem 2.5rem 1.875rem; +} +@media screen and (min-width: 1201px) { + .pytorch-menu-vertical { + display: block; + padding-top: 0; + padding-right: 13.5%; + padding-bottom: 5.625rem; + } +} +@media screen and (min-width: 1600px) { + .pytorch-menu-vertical { + padding-left: 0; + padding-right: 1.5625rem; + } +} + +.pytorch-left-menu { + display: none; + background-color: #f3f4f7; + color: #262626; + overflow: scroll; +} +@media screen and (min-width: 1201px) { + .pytorch-left-menu { + display: block; + overflow-x: hidden; + overflow-y: hidden; + padding-bottom: 110px; + padding: 0 1.875rem 0 0; + width: 25%; + z-index: 200; + float: left; + } + .pytorch-left-menu.make-fixed { + position: fixed; + top: 0; + bottom: 0; + left: 0; + float: none; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu { + padding: 0 0 0 1.875rem; + width: 350px; + } +} + +.expand-menu, .hide-menu { + color: #6c6c6d; + padding-left: 10px; + cursor: pointer; +} + +.collapse { + display: none; +} + +.left-nav-top-caption { + padding-top: 1rem; +} + +.pytorch-left-menu p.caption { + color: #262626; + display: block; + font-size: 1rem; + line-height: 1.375rem; + margin-bottom: 1rem; + text-transform: none; + white-space: normal; +} + +.pytorch-left-menu-search { + margin-bottom: 2.5rem; +} +@media screen and (min-width: 1201px) { + .pytorch-left-menu-search { + margin: 1.25rem 0.625rem 1.875rem 0; + } +} + +.pytorch-left-menu-search ::-webkit-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search 
::-moz-placeholder { + color: #262626; +} +.pytorch-left-menu-search :-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::-ms-input-placeholder { + color: #262626; +} +.pytorch-left-menu-search ::placeholder { + color: #262626; +} + +.pytorch-left-menu-search :focus::-webkit-input-placeholder { + color: transparent; +} +.pytorch-left-menu-search :focus::-moz-placeholder { + color: transparent; +} +.pytorch-left-menu-search :focus:-ms-input-placeholder { + color: transparent; +} +.pytorch-left-menu-search :focus::-ms-input-placeholder { + color: transparent; +} +.pytorch-left-menu-search :focus::placeholder { + color: transparent; +} + +.pytorch-left-menu-search input[type=text] { + border-radius: 0; + padding: 0.5rem 0.75rem; + border-color: #ffffff; + color: #262626; + border-style: solid; + font-size: 1rem; + width: 100%; + background-color: #f3f4f7; + background-image: url("../images/search-icon.svg"); + background-repeat: no-repeat; + background-size: 18px 18px; + background-position: 12px 10px; + padding-left: 40px; + background-color: #ffffff; +} +.pytorch-left-menu-search input[type=text]:focus { + outline: 0; +} + +@media screen and (min-width: 1201px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 120%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-left-menu .pytorch-side-scroll { + width: 340px; + } +} + +.pytorch-right-menu { + min-height: 100px; + overflow-x: hidden; + overflow-y: hidden; + left: 0; + z-index: 200; + padding-top: 0; + position: relative; +} +@media screen and (min-width: 1201px) { + .pytorch-right-menu { + width: 100%; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 83.5%; + width: 14%; + } + .pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu { + left: 0; + width: 380px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1230px; + } + 
.pytorch-right-menu.scrolling-absolute { + position: absolute; + left: 0; + } +} + +.pytorch-left-menu ul, +.pytorch-right-menu ul { + list-style-type: none; + padding-left: 0; + margin-bottom: 2.5rem; +} +.pytorch-left-menu > ul, +.pytorch-right-menu > ul { + margin-bottom: 2.5rem; +} +.pytorch-left-menu a:link, +.pytorch-left-menu a:visited, +.pytorch-left-menu a:hover, +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + color: #6c6c6d; + font-size: 0.875rem; + line-height: 1rem; + padding: 0; + text-decoration: none; +} +.pytorch-left-menu a:link.reference.internal, +.pytorch-left-menu a:visited.reference.internal, +.pytorch-left-menu a:hover.reference.internal, +.pytorch-right-menu a:link.reference.internal, +.pytorch-right-menu a:visited.reference.internal, +.pytorch-right-menu a:hover.reference.internal { + margin-bottom: 0.3125rem; + position: relative; +} +.pytorch-left-menu li code, +.pytorch-right-menu li code { + border: none; + background: inherit; + color: inherit; + padding-left: 0; + padding-right: 0; +} +.pytorch-left-menu li span.toctree-expand, +.pytorch-right-menu li span.toctree-expand { + display: block; + float: left; + margin-left: -1.2em; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.on a, .pytorch-left-menu li.current > a, +.pytorch-right-menu li.on a, +.pytorch-right-menu li.current > a { + position: relative; + border: none; +} +.pytorch-left-menu li.on a span.toctree-expand, .pytorch-left-menu li.current > a span.toctree-expand, +.pytorch-right-menu li.on a span.toctree-expand, +.pytorch-right-menu li.current > a span.toctree-expand { + display: block; + font-size: 0.8em; + line-height: 1.6em; +} +.pytorch-left-menu li.toctree-l1.current > a, +.pytorch-right-menu li.toctree-l1.current > a { + color: #ee4c2c; +} +.pytorch-left-menu li.toctree-l1.current > a:before, +.pytorch-right-menu li.toctree-l1.current > a:before { + content: "\2022"; + display: inline-block; + position: 
absolute; + left: -15px; + top: -10%; + font-size: 1.375rem; + color: #ee4c2c; +} +@media screen and (min-width: 1201px) { + .pytorch-left-menu li.toctree-l1.current > a:before, + .pytorch-right-menu li.toctree-l1.current > a:before { + left: -20px; + } +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2 > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3 > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2 > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > ul { + display: none; +} +.pytorch-left-menu li.toctree-l1.current li.toctree-l2.current > ul, .pytorch-left-menu li.toctree-l2.current li.toctree-l3.current > ul, +.pytorch-right-menu li.toctree-l1.current li.toctree-l2.current > ul, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3.current > ul { + display: block; +} +.pytorch-left-menu li.toctree-l2.current li.toctree-l3 > a, +.pytorch-right-menu li.toctree-l2.current li.toctree-l3 > a { + display: block; +} +.pytorch-left-menu li.toctree-l3, +.pytorch-right-menu li.toctree-l3 { + font-size: 0.9em; +} +.pytorch-left-menu li.toctree-l3.current li.toctree-l4 > a, +.pytorch-right-menu li.toctree-l3.current li.toctree-l4 > a { + display: block; +} +.pytorch-left-menu li.toctree-l4, +.pytorch-right-menu li.toctree-l4 { + font-size: 0.9em; +} +.pytorch-left-menu li.current ul, +.pytorch-right-menu li.current ul { + display: block; +} +.pytorch-left-menu li ul, +.pytorch-right-menu li ul { + margin-bottom: 0; + display: none; +} +.pytorch-left-menu li ul li a, +.pytorch-right-menu li ul li a { + margin-bottom: 0; +} +.pytorch-left-menu a, +.pytorch-right-menu a { + display: inline-block; + position: relative; +} +.pytorch-left-menu a:hover, +.pytorch-right-menu a:hover { + cursor: pointer; +} +.pytorch-left-menu a:active, +.pytorch-right-menu a:active { + cursor: pointer; +} + +.pytorch-left-menu ul { + padding-left: 0; +} + +.pytorch-right-menu a:link, +.pytorch-right-menu a:visited, +.pytorch-right-menu a:hover { + 
color: #6c6c6d; +} +.pytorch-right-menu a:link span.pre, +.pytorch-right-menu a:visited span.pre, +.pytorch-right-menu a:hover span.pre { + color: #6c6c6d; +} +.pytorch-right-menu a.reference.internal.expanded:before { + content: "-"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu a.reference.internal.not-expanded:before { + content: "+"; + font-family: monospace; + position: absolute; + left: -12px; +} +.pytorch-right-menu li.active > a { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a span.pre, .pytorch-right-menu li.active > a:before { + color: #ee4c2c; +} +.pytorch-right-menu li.active > a:after { + content: "\2022"; + color: #e44c2c; + display: inline-block; + font-size: 1.375rem; + left: -17px; + position: absolute; + top: 1px; +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > ul > li { + margin-bottom: 0; +} +.pytorch-right-menu ul ul { + padding-left: 0; +} +.pytorch-right-menu ul ul li { + padding-left: 0px; +} +.pytorch-right-menu ul ul li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu ul ul li ul { + display: none; + padding-left: 10px; +} +.pytorch-right-menu ul ul li li a.reference.internal { + padding-left: 0; +} +.pytorch-right-menu li ul { + display: block; +} + +.pytorch-right-menu .pytorch-side-scroll { + padding-top: 20px; +} +@media screen and (min-width: 1201px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 120%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll { + width: 400px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 10%; + padding-right: 10%; + margin-bottom: 0; +} +@media screen and (min-width: 1600px) { + .pytorch-right-menu .pytorch-side-scroll > ul { + padding-left: 25px; + } +} +.pytorch-right-menu .pytorch-side-scroll > ul > li > a.reference.internal { + color: #262626; + font-weight: 500; +} +.pytorch-right-menu .pytorch-side-scroll ul li { + position: relative; +} + 
+#pytorch-right-menu .side-scroll-highlight { + color: #ee4c2c; +} + +.header-container { + max-width: none; + margin-top: 4px; +} +@media screen and (min-width: 1201px) { + .header-container { + margin-top: 0; + } +} +@media screen and (min-width: 1600px) { + .header-container { + margin-top: 0; + } +} + +.container-fluid.header-holder { + padding-right: 0; + padding-left: 0; +} + +.header-holder .container { + max-width: none; + padding-right: 1.875rem; + padding-left: 1.875rem; +} +@media screen and (min-width: 1201px) { + .header-holder .container { + padding-right: 1.875rem; + padding-left: 1.875rem; + } +} + +.header-holder .main-menu { + -webkit-box-pack: unset; + -ms-flex-pack: unset; + justify-content: unset; + position: relative; +} + +.header-holder .main-menu .github-icon { + background-image: url("../images/pytorch-github.svg"); + color: white; + display: block; + width: 33px; + height: 33px; + position: relative; + background-size: 23px 23px; + background-position: 5px 4px; + background-repeat: no-repeat; + border-radius: 25px; +} + +.header-holder .main-menu .github-icon:hover { + background-color:#88888833; +} + +@media screen (min-width: 1201px) { + .header-holder .main-menu .github-icon { + background-image: url("../images/pytorch-github.svg"); + color: white; + display: block; + width: 33px; + height: 33px; + position: relative; + background-size: 23px 23px; + background-position: 5px 4px; + background-repeat: no-repeat; + border-radius: 25px; + } +} + +@media screen (min-width: 1201px) { + .header-holder .main-menu .github-icon:hover { + background-color:#88888833; + } +} + +.header-holder .main-menu .search-icon { + background-image: url("../images/search-icon-black.svg"); + color: transparent; + display: block; + width: 33px; + height: 33px; + position: relative; + background-size: 21px 21px; + background-position: 6px 5px; + background-repeat: no-repeat; + border-radius: 25px; + cursor pointer +} + +.header-holder .main-menu 
.search-icon:hover { + background-color:#88888833; +} + + +@media screen and (min-width: 1201px) { + .header-holder .main-menu ul { + padding-left: 0; + margin-left: 26%; + } +} +@media screen and (min-width: 1600px) { + .header-holder .main-menu ul { + padding-left: 38px; + margin-left: 310px; + } +} + +.pytorch-page-level-bar { + display: none; + -webkit-box-align: center; + -ms-flex-align: center; + align-items: center; + background-color: #ffffff; + border-bottom: 1px solid #e2e2e2; + width: 100%; + z-index: 201; +} +@media screen and (min-width: 1201px) { + .pytorch-page-level-bar { + left: 0; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + height: 45px; + padding-left: 0; + width: 100%; + position: absolute; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + position: fixed; + top: 0; + left: 25%; + padding-left: 0; + right: 0; + width: 75%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-page-level-bar { + left: 0; + right: 0; + width: auto; + z-index: 1; + } + .pytorch-page-level-bar.left-menu-is-fixed { + left: 350px; + right: 0; + width: auto; + } +} +.pytorch-page-level-bar ul, .pytorch-page-level-bar li { + margin: 0; +} + +.pytorch-shortcuts-wrapper { + display: none; +} +@media screen and (min-width: 1201px) { + .pytorch-shortcuts-wrapper { + font-size: 0.875rem; + float: left; + margin-left: 2%; + } +} +@media screen and (min-width: 1600px) { + .pytorch-shortcuts-wrapper { + margin-left: 1.875rem; + } +} + +.cookie-banner-wrapper { + display: none; +} +.cookie-banner-wrapper .container { + padding-left: 1.875rem; + padding-right: 1.875rem; + max-width: 1240px; +} +.cookie-banner-wrapper.is-visible { + display: block; + position: fixed; + bottom: 0; + background-color: #f3f4f7; + min-height: 100px; + width: 100%; + z-index: 401; + border-top: 3px solid #ededee; +} +.cookie-banner-wrapper .gdpr-notice { + color: #6c6c6d; + margin-top: 1.5625rem; + text-align: left; + max-width: 1440px; +} +@media screen and 
(min-width: 768px) { + .cookie-banner-wrapper .gdpr-notice { + width: 77%; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .gdpr-notice { + width: inherit; + } +} +.cookie-banner-wrapper .gdpr-notice .cookie-policy-link { + color: #343434; +} +.cookie-banner-wrapper .close-button { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background: transparent; + border: 1px solid #f3f4f7; + height: 1.3125rem; + position: absolute; + bottom: 42px; + right: 0; + top: 0; + cursor: pointer; + outline: none; +} +@media screen and (min-width: 768px) { + .cookie-banner-wrapper .close-button { + right: 20%; + top: inherit; + } +} +@media (min-width: 768px) and (max-width: 1239px) { + .cookie-banner-wrapper .close-button { + right: 0; + top: 0; + } +} + +.main-menu ul li .resources-dropdown a { + cursor: default; +} +.main-menu ul li .dropdown-menu { + border-radius: 0; + padding: 0; +} +.main-menu ul li .dropdown-menu .dropdown-item { + color: #6c6c6d; + border-bottom: 1px solid #e2e2e2; +} +.main-menu ul li .dropdown-menu .dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.main-menu ul li .dropdown-menu .dropdown-item:hover { + background-color: #e44c2c; +} +.main-menu ul li .dropdown-menu .dropdown-item p { + font-size: 1rem; + color: #979797; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover { + color: #ffffff; +} +.main-menu ul li .dropdown-menu a.dropdown-item:hover p { + color: #ffffff; +} + +.resources-dropdown-menu { + left: -75px; + width: 300px; + display: none; + position: absolute; + z-index: 1000; + display: none; + float: left; + min-width: 10rem; + padding: 0.5rem 0; + font-size: 1rem; + color: #212529; + text-align: left; + list-style: none; + background-color: #ffffff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.15); + border-radius: 0.25rem; +} + +.resources-dropdown:hover .resources-dropdown-menu { + display: block; +} + +.main-menu ul li 
.resources-dropdown-menu { + border-radius: 0; + padding: 0; +} +.main-menu ul li.active:hover .resources-dropdown-menu { + display: block; +} + +.main-menu ul li .resources-dropdown-menu .dropdown-item { + color: #262626; + border-bottom: 1px solid #e2e2e2; +} + +.resources-dropdown .with-down-orange-arrow { + padding-right: 2rem; + position: relative; + background: url("../images/chevron-down-orange.svg"); + background-size: 14px 18px; + background-position: top 7px right 10px; + background-repeat: no-repeat; +} + +.with-down-arrow { + padding-right: 2rem; + position: relative; + background-image: url("../images/chevron-down-black.svg"); + background-size: 14px 18px; + background-position: top 7px right 10px; + background-repeat: no-repeat; + +} + + +.with-down-arrow a { + color: #262626; +} + +.main-menu ul li a.with-down-arrow:hover { + color: #262626; +} + +.with-down-arrow:hover { + background-repeat: no-repeat; +} + +.header-holder .main-menu ul li .resources-dropdown .doc-dropdown-option { + padding-top: 1rem; +} + +.header-holder .main-menu ul li a.nav-dropdown-item { + display: block; + font-size: 1rem; + line-height: 1.3125rem; + width: 100%; + padding: 0.25rem 1.5rem; + clear: both; + font-weight: 400; + color: #757575; + text-align: left; + background-color: transparent; + border-bottom: 1px solid #e2e2e2; + cursor: pointer; +} + +.header-holder .main-menu ul li a.nav-dropdown-item p { + margin-bottom: .5rem; +} + +.header-holder .main-menu ul li a.nav-dropdown-item:last-of-type { + border-bottom-color: transparent; +} +.header-holder .main-menu ul li a.nav-dropdown-item:hover { + background-color: #e44c2c; + color: white; +} +.header-holder .main-menu ul li a.nav-dropdown-item .dropdown-title { + font-size: 1.125rem; + color: #212529; + letter-spacing: 0; + line-height: 34px; +} + +.header-holder .main-menu ul li a.nav-dropdown-item:hover .dropdown-title { + background-color: #e44c2c; + color: white; +} + +/*# sourceMappingURL=theme.css.map */ diff 
--git a/release/2.5/_static/doctools.js b/release/2.5/_static/doctools.js new file mode 100644 index 00000000000..527b876ca63 --- /dev/null +++ b/release/2.5/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + 
event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/release/2.5/_static/documentation_options.js b/release/2.5/_static/documentation_options.js new file mode 100644 index 00000000000..ac1f83182f2 --- /dev/null +++ b/release/2.5/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: 'master', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/release/2.5/_static/file.png b/release/2.5/_static/file.png new file mode 100644 index 00000000000..a858a410e4f Binary files /dev/null and b/release/2.5/_static/file.png differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff new file mode 100644 index 00000000000..e317248423c Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 new file mode 100644 index 00000000000..cec2dc94fbb 
Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff new file mode 100644 index 00000000000..de46625edfc Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff2 new file mode 100644 index 00000000000..dc05cd82bc4 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-bold.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff new file mode 100644 index 00000000000..a50e5038a40 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff2 new file mode 100644 index 00000000000..fe284db6614 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-book-italic.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff new file mode 100644 index 00000000000..6ab8775f00b Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff2 new file mode 100644 index 00000000000..2688739f1f0 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-book.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff 
b/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff new file mode 100644 index 00000000000..beda58d4e21 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff2 new file mode 100644 index 00000000000..e2fa0134b1a Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-light-italic.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff new file mode 100644 index 00000000000..226a0bf8358 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff2 new file mode 100644 index 00000000000..6d8ff2c045b Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-light.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff new file mode 100644 index 00000000000..a42115d63b3 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 new file mode 100644 index 00000000000..16a7713a451 Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff b/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff new file mode 100644 index 00000000000..5ea34539c6f Binary files /dev/null and 
b/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff differ diff --git a/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff2 b/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff2 new file mode 100644 index 00000000000..c58b6a528bb Binary files /dev/null and b/release/2.5/_static/fonts/FreightSans/freight-sans-medium.woff2 differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff new file mode 100644 index 00000000000..cf37a5c50bd Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 new file mode 100644 index 00000000000..955a6eab5bb Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff new file mode 100644 index 00000000000..fc65a679c22 Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 new file mode 100644 index 00000000000..c352e40e34a Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff new file mode 100644 index 00000000000..7d63d89f24b Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 new file mode 100644 index 
00000000000..d0d7ded9079 Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff new file mode 100644 index 00000000000..1da7753cf28 Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff differ diff --git a/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 new file mode 100644 index 00000000000..79dffdb85f7 Binary files /dev/null and b/release/2.5/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 differ diff --git a/release/2.5/_static/images/arrow-down-orange.svg b/release/2.5/_static/images/arrow-down-orange.svg new file mode 100644 index 00000000000..e9d8e9ecf24 --- /dev/null +++ b/release/2.5/_static/images/arrow-down-orange.svg @@ -0,0 +1,19 @@ + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/release/2.5/_static/images/arrow-right-with-tail.svg b/release/2.5/_static/images/arrow-right-with-tail.svg new file mode 100644 index 00000000000..5843588fca6 --- /dev/null +++ b/release/2.5/_static/images/arrow-right-with-tail.svg @@ -0,0 +1,19 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + \ No newline at end of file diff --git a/release/2.5/_static/images/chevron-down-black.svg b/release/2.5/_static/images/chevron-down-black.svg new file mode 100644 index 00000000000..097bc076ecf --- /dev/null +++ b/release/2.5/_static/images/chevron-down-black.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. 
+ + + + + + + + + + + + + diff --git a/release/2.5/_static/images/chevron-down-grey.svg b/release/2.5/_static/images/chevron-down-grey.svg new file mode 100644 index 00000000000..82d6514f250 --- /dev/null +++ b/release/2.5/_static/images/chevron-down-grey.svg @@ -0,0 +1,18 @@ + + + + +Created with Sketch. + + + + + + + + + + + + diff --git a/release/2.5/_static/images/chevron-down-orange.svg b/release/2.5/_static/images/chevron-down-orange.svg new file mode 100644 index 00000000000..fd79a57854c --- /dev/null +++ b/release/2.5/_static/images/chevron-down-orange.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/chevron-down-white.svg b/release/2.5/_static/images/chevron-down-white.svg new file mode 100644 index 00000000000..e6c94e27b64 --- /dev/null +++ b/release/2.5/_static/images/chevron-down-white.svg @@ -0,0 +1,16 @@ + + + Created with Sketch. + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/chevron-right-orange.svg b/release/2.5/_static/images/chevron-right-orange.svg new file mode 100644 index 00000000000..7033fc93bf4 --- /dev/null +++ b/release/2.5/_static/images/chevron-right-orange.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. + + + + + + + + + + diff --git a/release/2.5/_static/images/chevron-right-white.svg b/release/2.5/_static/images/chevron-right-white.svg new file mode 100644 index 00000000000..dd9e77f2616 --- /dev/null +++ b/release/2.5/_static/images/chevron-right-white.svg @@ -0,0 +1,17 @@ + + + + +Page 1 +Created with Sketch. 
+ + + + + + + + + + \ No newline at end of file diff --git a/release/2.5/_static/images/home-footer-background.jpg b/release/2.5/_static/images/home-footer-background.jpg new file mode 100644 index 00000000000..b307bb57f48 Binary files /dev/null and b/release/2.5/_static/images/home-footer-background.jpg differ diff --git a/release/2.5/_static/images/icon-close.svg b/release/2.5/_static/images/icon-close.svg new file mode 100644 index 00000000000..348964e79f7 --- /dev/null +++ b/release/2.5/_static/images/icon-close.svg @@ -0,0 +1,21 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/release/2.5/_static/images/icon-menu-dots-dark.svg b/release/2.5/_static/images/icon-menu-dots-dark.svg new file mode 100644 index 00000000000..fa2ad044b3f --- /dev/null +++ b/release/2.5/_static/images/icon-menu-dots-dark.svg @@ -0,0 +1,42 @@ + + + + Page 1 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/release/2.5/_static/images/logo-dark.svg b/release/2.5/_static/images/logo-dark.svg new file mode 100644 index 00000000000..9b4c1a56ac6 --- /dev/null +++ b/release/2.5/_static/images/logo-dark.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/logo-facebook-dark.svg b/release/2.5/_static/images/logo-facebook-dark.svg new file mode 100644 index 00000000000..cff17915c4f --- /dev/null +++ b/release/2.5/_static/images/logo-facebook-dark.svg @@ -0,0 +1,8 @@ + + + + + + diff --git a/release/2.5/_static/images/logo-icon.svg b/release/2.5/_static/images/logo-icon.svg new file mode 100644 index 00000000000..575f6823e47 --- /dev/null +++ b/release/2.5/_static/images/logo-icon.svg @@ -0,0 +1,12 @@ + + + + + + + + + diff --git a/release/2.5/_static/images/logo-twitter-dark.svg b/release/2.5/_static/images/logo-twitter-dark.svg new file mode 100644 index 00000000000..1572570f88c --- /dev/null +++ 
b/release/2.5/_static/images/logo-twitter-dark.svg @@ -0,0 +1,16 @@ + + + + + + + + diff --git a/release/2.5/_static/images/logo-youtube-dark.svg b/release/2.5/_static/images/logo-youtube-dark.svg new file mode 100644 index 00000000000..e3cfedd79d1 --- /dev/null +++ b/release/2.5/_static/images/logo-youtube-dark.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/logo.svg b/release/2.5/_static/images/logo.svg new file mode 100644 index 00000000000..f8d44b98425 --- /dev/null +++ b/release/2.5/_static/images/logo.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/pytorch-colab.svg b/release/2.5/_static/images/pytorch-colab.svg new file mode 100644 index 00000000000..2ab15e2f307 --- /dev/null +++ b/release/2.5/_static/images/pytorch-colab.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + diff --git a/release/2.5/_static/images/pytorch-download.svg b/release/2.5/_static/images/pytorch-download.svg new file mode 100644 index 00000000000..cc37d638e92 --- /dev/null +++ b/release/2.5/_static/images/pytorch-download.svg @@ -0,0 +1,10 @@ + + + + + + diff --git a/release/2.5/_static/images/pytorch-github.svg b/release/2.5/_static/images/pytorch-github.svg new file mode 100644 index 00000000000..2c2570da1de --- /dev/null +++ b/release/2.5/_static/images/pytorch-github.svg @@ -0,0 +1,15 @@ + + + + + + diff --git a/release/2.5/_static/images/pytorch-x.svg b/release/2.5/_static/images/pytorch-x.svg new file mode 100644 index 00000000000..74856ea9fda --- /dev/null +++ b/release/2.5/_static/images/pytorch-x.svg @@ -0,0 +1,10 @@ + + + + + + + diff --git a/release/2.5/_static/images/search-icon.svg b/release/2.5/_static/images/search-icon.svg new file mode 100644 index 00000000000..ebb0df86773 --- /dev/null +++ b/release/2.5/_static/images/search-icon.svg @@ -0,0 +1,19 @@ + + + + Created with Sketch. 
+ + + + + + + + + + + + + + + diff --git a/release/2.5/_static/images/view-page-source-icon.svg b/release/2.5/_static/images/view-page-source-icon.svg new file mode 100644 index 00000000000..6f5bbe0748f --- /dev/null +++ b/release/2.5/_static/images/view-page-source-icon.svg @@ -0,0 +1,13 @@ + + + + + + + + + + diff --git a/release/2.5/_static/img/IRgraph_markstep.png b/release/2.5/_static/img/IRgraph_markstep.png new file mode 100644 index 00000000000..2a9ad5ce54f Binary files /dev/null and b/release/2.5/_static/img/IRgraph_markstep.png differ diff --git a/release/2.5/_static/img/IRgraph_no_markstep.png b/release/2.5/_static/img/IRgraph_no_markstep.png new file mode 100644 index 00000000000..282d3907104 Binary files /dev/null and b/release/2.5/_static/img/IRgraph_no_markstep.png differ diff --git a/release/2.5/_static/img/ci_test_dependency.png b/release/2.5/_static/img/ci_test_dependency.png new file mode 100644 index 00000000000..e4b2c397ba0 Binary files /dev/null and b/release/2.5/_static/img/ci_test_dependency.png differ diff --git a/release/2.5/_static/img/ci_test_dependency_gpu.png b/release/2.5/_static/img/ci_test_dependency_gpu.png new file mode 100644 index 00000000000..68cd77ec90c Binary files /dev/null and b/release/2.5/_static/img/ci_test_dependency_gpu.png differ diff --git a/release/2.5/_static/img/ddp_md_mnist_with_real_data.png b/release/2.5/_static/img/ddp_md_mnist_with_real_data.png new file mode 100644 index 00000000000..f83c5182be6 Binary files /dev/null and b/release/2.5/_static/img/ddp_md_mnist_with_real_data.png differ diff --git a/release/2.5/_static/img/dynamic_shape_mlp_perf.png b/release/2.5/_static/img/dynamic_shape_mlp_perf.png new file mode 100644 index 00000000000..109008991f1 Binary files /dev/null and b/release/2.5/_static/img/dynamic_shape_mlp_perf.png differ diff --git a/release/2.5/_static/img/gpt2_2b_step_time_vs_batch.png b/release/2.5/_static/img/gpt2_2b_step_time_vs_batch.png new file mode 100644 index 
00000000000..aafa90d6d93 Binary files /dev/null and b/release/2.5/_static/img/gpt2_2b_step_time_vs_batch.png differ diff --git a/release/2.5/_static/img/gpt2_v4_8_mfu_batch.png b/release/2.5/_static/img/gpt2_v4_8_mfu_batch.png new file mode 100644 index 00000000000..0247e85b3c1 Binary files /dev/null and b/release/2.5/_static/img/gpt2_v4_8_mfu_batch.png differ diff --git a/release/2.5/_static/img/image-1.png b/release/2.5/_static/img/image-1.png new file mode 100644 index 00000000000..1eddfc654c5 Binary files /dev/null and b/release/2.5/_static/img/image-1.png differ diff --git a/release/2.5/_static/img/image-2.png b/release/2.5/_static/img/image-2.png new file mode 100644 index 00000000000..1349a8cfda1 Binary files /dev/null and b/release/2.5/_static/img/image-2.png differ diff --git a/release/2.5/_static/img/image-3.png b/release/2.5/_static/img/image-3.png new file mode 100644 index 00000000000..fc3d1112ef5 Binary files /dev/null and b/release/2.5/_static/img/image-3.png differ diff --git a/release/2.5/_static/img/image-4.png b/release/2.5/_static/img/image-4.png new file mode 100644 index 00000000000..0d27d0bd4d4 Binary files /dev/null and b/release/2.5/_static/img/image-4.png differ diff --git a/release/2.5/_static/img/image.png b/release/2.5/_static/img/image.png new file mode 100644 index 00000000000..bf049acdbbc Binary files /dev/null and b/release/2.5/_static/img/image.png differ diff --git a/release/2.5/_static/img/llama2_2b_bsz128.png b/release/2.5/_static/img/llama2_2b_bsz128.png new file mode 100644 index 00000000000..ddf28875a79 Binary files /dev/null and b/release/2.5/_static/img/llama2_2b_bsz128.png differ diff --git a/release/2.5/_static/img/mesh_spmd2.png b/release/2.5/_static/img/mesh_spmd2.png new file mode 100644 index 00000000000..cd7bf793711 Binary files /dev/null and b/release/2.5/_static/img/mesh_spmd2.png differ diff --git a/release/2.5/_static/img/perf_auto_vs_manual.png b/release/2.5/_static/img/perf_auto_vs_manual.png new file mode 
100644 index 00000000000..4ef5f18c3b2 Binary files /dev/null and b/release/2.5/_static/img/perf_auto_vs_manual.png differ diff --git a/release/2.5/_static/img/pytorch-logo-dark.svg b/release/2.5/_static/img/pytorch-logo-dark.svg new file mode 100644 index 00000000000..717a3ce942f --- /dev/null +++ b/release/2.5/_static/img/pytorch-logo-dark.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + diff --git a/release/2.5/_static/img/pytorchXLA_flow.svg b/release/2.5/_static/img/pytorchXLA_flow.svg new file mode 100644 index 00000000000..3812141ce48 --- /dev/null +++ b/release/2.5/_static/img/pytorchXLA_flow.svg @@ -0,0 +1 @@ + diff --git a/release/2.5/_static/img/spmd_debug_1.png b/release/2.5/_static/img/spmd_debug_1.png new file mode 100644 index 00000000000..21e6f0554ed Binary files /dev/null and b/release/2.5/_static/img/spmd_debug_1.png differ diff --git a/release/2.5/_static/img/spmd_debug_1_light.png b/release/2.5/_static/img/spmd_debug_1_light.png new file mode 100644 index 00000000000..9f2f060b2d0 Binary files /dev/null and b/release/2.5/_static/img/spmd_debug_1_light.png differ diff --git a/release/2.5/_static/img/spmd_debug_2.png b/release/2.5/_static/img/spmd_debug_2.png new file mode 100644 index 00000000000..66e544f3355 Binary files /dev/null and b/release/2.5/_static/img/spmd_debug_2.png differ diff --git a/release/2.5/_static/img/spmd_debug_2_light.png b/release/2.5/_static/img/spmd_debug_2_light.png new file mode 100644 index 00000000000..87deb04ce43 Binary files /dev/null and b/release/2.5/_static/img/spmd_debug_2_light.png differ diff --git a/release/2.5/_static/img/spmd_mode.png b/release/2.5/_static/img/spmd_mode.png new file mode 100644 index 00000000000..dd9b5cc69cc Binary files /dev/null and b/release/2.5/_static/img/spmd_mode.png differ diff --git a/release/2.5/_static/img/torchbench_pjrt_vs_xrt.svg b/release/2.5/_static/img/torchbench_pjrt_vs_xrt.svg new file mode 100644 index 00000000000..effe9b72be8 --- /dev/null +++ 
b/release/2.5/_static/img/torchbench_pjrt_vs_xrt.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/release/2.5/_static/img/torchbench_tfrt_vs_se.svg b/release/2.5/_static/img/torchbench_tfrt_vs_se.svg new file mode 100644 index 00000000000..161f0433b0a --- /dev/null +++ b/release/2.5/_static/img/torchbench_tfrt_vs_se.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/release/2.5/_static/jquery-3.6.0.js b/release/2.5/_static/jquery-3.6.0.js new file mode 100644 index 00000000000..fc6c299b73e --- /dev/null +++ b/release/2.5/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). 
But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. 
+ // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? 
this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new 
Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 
+ indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? 
+ + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? + String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? 
+ + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 
] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = 
arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // 
https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? 
[ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should 
be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. + input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. 
+ el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains 
+ ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? 
+ + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent 
for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 
] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but 
not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built 
with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[ i ]; + + // Abort if we hit a combinator + if ( Expr.relative[ ( type = token.type ) ] ) { + break; + } + if ( ( find = Expr.find[ type ] ) ) { + + // Search, expanding context for leading sibling combinators + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert( function( el ) { + + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert( function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { + addHandle( "type|href|height|width", function( 
elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + } ); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert( function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + } ); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + 
+} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? 
context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, _i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, _i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, _i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. 
+ getProto( elem.contentDocument ) ) { + + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. 
+ * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? + createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future 
add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? 
+ jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + 
resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } 
else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? [ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = 
undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? 
+ onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function 
values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. 
+ if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
+ jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && 
isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // Support: IE 
<=9 only + // IE <=9 replaces "; + support.option = !!div.lastChild; +} )(); + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. 
+ * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." 
).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( 
elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + 
var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... 
) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. 
+function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. 
+ return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + 
key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( 
elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // 
Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = 
function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + 
// Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. 
+ trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. 
+ ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? 
+ + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? "border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + 
extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. 
We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. 
+ scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + 
+ prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || 
optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + 
+jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. 
+ finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? 
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? 
+ bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. + // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + 
jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). 
+ var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? + jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? 
i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function 
addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? 
target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = 
s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? 
e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 
0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of 
requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack 
transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

Troubleshooting

+

Note that the information in this section is subject to removal in future releases of the PyTorch/XLA software,
+since much of it is peculiar to a given internal implementation, which might change.

+
+

Sanity Check

+

Before performing any in depth debugging, we want to do a sanity check on the installed PyTorch/XLA.

+
+

Check PyTorch/XLA Version

+

PyTorch and PyTorch/XLA versions should match. Check out our README for more details on the versions available.

+
vm:~$ python
+>>> import torch
+>>> import torch_xla
+>>> print(torch.__version__)
+2.1.0+cu121
+>>> print(torch_xla.__version__)
+2.1.0
+
+
+
+
+

Perform A Simple Calculation

+
vm:~$ export PJRT_DEVICE=TPU
+vm:~$ python3
+>>> import torch
+>>> import torch_xla.core.xla_model as xm
+>>> t1 = torch.tensor(100, device=xm.xla_device())
+>>> t2 = torch.tensor(200, device=xm.xla_device())
+>>> print(t1 + t2)
+tensor(300, device='xla:0')
+
+
+
+
+

Run Resnet With Fake Data

+

For nightly

+
vm:~$ git clone https://github.com/pytorch/xla.git
+vm:~$ python xla/test/test_train_mp_imagenet.py --fake_data
+
+
+

For release version x.y, you want to use the branch rx.y. For example if you installed 2.1 release, you should do

+
vm:~$ git clone --branch r2.1 https://github.com/pytorch/xla.git
+vm:~$ python xla/test/test_train_mp_imagenet.py --fake_data
+
+
+

If you can get the resnet to run we can conclude that torch_xla is installed correctly.

+
+
+
+

Performance Debugging

+

To diagnose performance issues, we can use the execution metrics and counters provided by PyTorch/XLA.
+The first thing to check when a model is slow is to generate a metrics report.

+

Metrics report is extremely helpful in diagnosing issues. Please try to include it in your bug +report sent to us if you have it.

+
+
+

PyTorch/XLA Debugging Tool

+

You can enable the PyTorch/XLA debugging tool by setting PT_XLA_DEBUG_LEVEL=2, which provides a couple of useful debugging features. You can also lower the debug level to 1 to skip the execution analysis.

+
+

Perform An Auto-Metrics Analysis

+

The debugging tool will analyze the metrics report and provide a summary. Some example output would be

+
pt-xla-profiler: CompileTime too frequent: 21 counts during 11 steps
+pt-xla-profiler: TransferFromDeviceTime too frequent: 11 counts during 11 steps
+pt-xla-profiler: Op(s) not lowered: aten::_ctc_loss, aten::_ctc_loss_backward,  Please open a GitHub issue with the above op lowering requests.
+pt-xla-profiler: CompileTime too frequent: 23 counts during 12 steps
+pt-xla-profiler: TransferFromDeviceTime too frequent: 12 counts during 12 steps
+
+
+
+
+

Compilation & Execution Analysis

+

The debugging tool will analyze every compilation and execution for your model. Some example output would be

+
Compilation Analysis: ================================================================================
+Compilation Analysis: Compilation Cause
+Compilation Analysis:   mark_step in parallel loader at step end
+Compilation Analysis: Graph Info:
+Compilation Analysis:   Graph Hash: c74c3b91b855b2b123f833b0d5f86943
+Compilation Analysis:   Number of Graph Inputs: 35
+Compilation Analysis:   Number of Graph Outputs: 107
+Compilation Analysis: Python Frame Triggered Execution:
+Compilation Analysis:   mark_step (/workspaces/dk3/pytorch/xla/torch_xla/core/xla_model.py:1055)
+Compilation Analysis:   next (/workspaces/dk3/pytorch/xla/torch_xla/distributed/parallel_loader.py:44)
+Compilation Analysis:   __next__ (/workspaces/dk3/pytorch/xla/torch_xla/distributed/parallel_loader.py:32)
+Compilation Analysis:   train_loop_fn (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:48)
+Compilation Analysis:   start_training (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:65)
+Compilation Analysis:   <module> (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:73)
+Compilation Analysis: --------------------------------------------------------------------------------
+Compilation Analysis: ================================================================================
+
+Post Compilation Analysis: ================================================================================
+Post Compilation Analysis: Graph input size: 1.548000 GB
+Post Compilation Analysis: Graph output size: 7.922460 GB
+Post Compilation Analysis: Aliased Input size: 1.547871 GB
+Post Compilation Analysis: Intermediate tensor size: 12.124478 GB
+Post Compilation Analysis: Compiled program size: 0.028210 GB
+Post Compilation Analysis: --------------------------------------------------------------------------------
+Post Compilation Analysis: ================================================================================
+
+Execution Analysis: ================================================================================
+Execution Analysis: Execution Cause
+Execution Analysis:   mark_step in parallel loader at step end
+Execution Analysis: Graph Info:
+Execution Analysis:   Graph Hash: c74c3b91b855b2b123f833b0d5f86943
+Execution Analysis:   Number of Graph Inputs: 35
+Execution Analysis:   Number of Graph Outputs: 107
+Execution Analysis: Python Frame Triggered Execution:
+Execution Analysis:   mark_step (/workspaces/dk3/pytorch/xla/torch_xla/core/xla_model.py:1055)
+Execution Analysis:   next (/workspaces/dk3/pytorch/xla/torch_xla/distributed/parallel_loader.py:44)
+Execution Analysis:   __next__ (/workspaces/dk3/pytorch/xla/torch_xla/distributed/parallel_loader.py:32)
+Execution Analysis:   train_loop_fn (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:48)
+Execution Analysis:   start_training (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:65)
+Execution Analysis:   <module> (/workspaces/dk3/pytorch/xla/examples/train_decoder_only_base.py:73)
+Execution Analysis: --------------------------------------------------------------------------------
+Execution Analysis: ================================================================================
+
+
+

Some common causes of Compilation/Execution are

+
    +
  1. User manually call mark_step.

  2. +
  3. Parallel loader call mark_step for every x (configurable) batch.

  4. +
  5. Exiting a profiler StepTrace region.

  6. +
  7. Dynamo decide to compile/execute the graph.

  8. +
  9. User trying to access(often due to logging) the value of a tensor before the mark_step.

  10. +
+

The executions caused by 1-4 are expected, and we want to avoid 5 by either reducing the frequency of accessing tensor values or manually adding a mark_step before accessing.

+

Users should expect to see this Compilation Cause + Execution Cause pair for the first couple of steps. After the model stabilizes, users should expect to only see the Execution Cause (you can disable execution analysis with PT_XLA_DEBUG_LEVEL=1). To use PyTorch/XLA efficiently, we expect the same model code to be run for every step and compilation to happen only once for every graph. If you keep seeing Compilation Cause, you should try to dump the IR/HLO following this section and compare the graphs for each step to understand the source of the differences.

+

The following section will explain how to get and understand a more detailed metrics report.

+
+
+
+

Get A Metrics Report

+

Put the following line in your program to generate a report:

+
import torch_xla.debug.metrics as met
+
+# For short report that only contains a few key metrics.
+print(met.short_metrics_report())
+# For full report that includes all metrics.
+print(met.metrics_report())
+
+
+
+
+

Understand The Metrics Report

+

The report includes things like:

+
    +
  • how many time we issue XLA compilations and time spent on issuing.

  • +
  • how many times we execute and time spent on execution

  • +
  • how many device data handles we create/destroy etc.

  • +
+

This information is reported in terms of percentiles of the samples. An example is:

+
Metric: CompileTime
+  TotalSamples: 202
+  Counter: 06m09s401ms746.001us
+  ValueRate: 778ms572.062us / second
+  Rate: 0.425201 / second
+  Percentiles: 1%=001ms32.778us; 5%=001ms61.283us; 10%=001ms79.236us; 20%=001ms110.973us; 50%=001ms228.773us; 80%=001ms339.183us; 90%=001ms434.305us; 95%=002ms921.063us; 99%=21s102ms853.173us
+
+
+

We also provide counters, which are named integer variables which track internal software status. For example:

+
Counter: CachedSyncTensors
+  Value: 395
+
+
+

In this report, any counter that starts with aten:: +indicates a context switch between the XLA device and CPU, which can be a +potential performance optimization area in the model code.

+

Counters are useful to understand which operations are routed back to the CPU engine of PyTorch. +They are fully qualified with their C++ namespace:

+
Counter: aten::nonzero
+  Value: 33
+
+
+

If you see aten:: ops other than nonzero and _local_scalar_dense, that usually means a missing +lowering in PyTorch/XLA. Feel free to open a feature request for it on GitHub issues.

+
+
+

Clear The Metrics Report

+

If you want to clear the metrics between steps/epochs, you can use

+
import torch_xla.debug.metrics as met
+
+met.clear_all()
+
+
+
+
+

PyTorch/XLA + Dynamo Debugging Tool

+

You can enable the PyTorch/XLA + Dynamo debugging tool by setting XLA_DYNAMO_DEBUG=1.

+
+
+

Performance Profiling

+

To profile your workload in depth to understand bottlenecks please check the following resources:

+ +
+
+

Simple Benchmarking

+

Take a look at examples/train_resnet_benchmark.py (https://github.com/pytorch/xla/blob/master/examples/train_resnet_benchmark.py) for how to benchmark a PyTorch/XLA model.

+
+
+

Known Performance Caveats

+

PyTorch/XLA behaves semantically like regular PyTorch and XLA tensors share the full tensor interface with CPU & GPU tensors. +However, constraints in XLA/hardware and the lazy evaluation model suggest certain patterns might result in bad performance.

+

If your model shows bad performance, keep in mind the following caveats:

+
    +
  1. XLA/TPU yield degraded performance with too many recompilations.

    +

    XLA compilation is expensive. PyTorch/XLA automatically recompiles the graph every time new shapes are encountered. +Usually models should stabilize within a few steps and you can see huge speedup for the rest of training.

    +

    In order to avoid recompilations, not only must shapes be constant, but computations across XLA devices in all hosts should also be constant.

    +

    Possible sources:

    +
      +
    • Direct or indirect uses of nonzero introduce dynamic shapes; for example, masked indexing base[index] where index is a mask tensor.

    • +
    • Loops with a different number of iterations between steps can result in different execution graphs, thus require recompilations.

    • +
    +

    Solution:

    +
      +
    • Tensor shapes should be the same between iterations, or a low number of shape variations should be used.

    • +
    • Pad tensors to fixed sizes when possible.

    • +
    +
  2. +
  3. Certain operations don’t have native translations to XLA.

    +

    For these operations PyTorch/XLA automatically transfers to the CPU memory, evaluates on CPU, and transfers the result back to the XLA device. +Doing too many such operations during the training step can lead to significant slowdowns.

    +

    Possible sources:

    +
      +
    • The item() operation explicitly asks to evaluate the result. Don’t use it unless it’s necessary.

    • +
    +

    Solution:

    +
      +
    • For most ops we can lower them to XLA to fix it. Checkout metrics report section to find out the missing ops and open a feature request on GitHub.

    • +
    • Even when a PyTorch tensor is known as a scalar, avoid using tensor.item(). Keep it as a tensor and use tensor operations on it.

    • +
    • Use torch.where to substitute control flow when applicable. +E.g. The control flow with item() used in clip_grad_norm_ is problematic and impacts performance, so we have patched clip_grad_norm_ by calling torch.where instead, which gives us a dramatic performance improvement. +.. code-block:: python

      +
      +

      … +else:

      +
      +

      device = parameters[0].device +total_norm = torch.zeros([], device=device if parameters else None) +for p in parameters:

      +
      +

      param_norm = p.grad.data.norm(norm_type) ** norm_type +total_norm.add_(param_norm)

      +
      +

      total_norm = (total_norm ** (1. / norm_type))

      +
      +

      clip_coef = torch.tensor(max_norm, device=device) / (total_norm + 1e-6) +for p in parameters:

      +
      +

      p.grad.data.mul_(torch.where(clip_coef < 1, clip_coef, torch.tensor(1., device=device)))

      +
      +
      +
    • +
    +
  4. +
  5. Iterators in torch_xla.distributed.data_parallel may drop the last few batches in the input iterator.

    +

    This is to make sure we do the same amount of work on all XLA devices.

    +

    Solution:

    +
      +
    • When dataset is small, and there are too few steps, this may result in a no-op epoch. Therefore, it is better to use +small batch sizes in those cases.

    • +
    +
  6. +
+
+
+

XLA Tensor Quirks

+
    +
  1. XLA tensor internals are opaque. XLA tensors always appear to be +contiguous and without storage. Networks should not try to check the strides +of XLA tensors.

  2. +
  3. XLA tensors should be moved to the CPU before saving them. Saving +XLA tensors directly causes them to be loaded back on the device(s) they were +saved from. If a device is unavailable at load time then the load will fail. +Moving XLA tensors to the CPU before saving them lets you decide which +device(s) to put the loaded tensors on. This is necessary if you want to +load the tensors on a machine without XLA devices. Care should be taken +moving the XLA tensors to the CPU before saving them, however, as moving +tensors across device types does not preserve view relationships. Instead, +views should be reconstructed as necessary after the tensors are loaded.

  4. +
  5. Copying an XLA Tensor with Python’s copy.copy returns a deep copy, not a +shallow copy. Use a view of an XLA tensor to get a shallow copy of it.

  6. +
  7. Handling shared weights. Modules can share weights by setting the +Parameters of one module to another. This “tying” of module weights should +be done AFTER the modules are moved to an XLA device. Otherwise two +independent copies of the shared tensor will be made on the XLA device.

  8. +
+
+
+

More Debugging Tools

+

We don’t expect users to use tools in this section to debug their models. But we might ask for +them when you submit a bug report since they provide additional information that metrics report +doesn’t have.

+
    +
  • print(torch_xla._XLAC._get_xla_tensors_text([res])) where res is the result tensor prints out the IR.

  • +
  • print(torch_xla._XLAC._get_xla_tensors_hlo([res])) where res is the result tensor prints out the generated XLA HLO.

  • +
+

Note these functions must be called prior to mark_step(), otherwise the tensor will already be materialized.

+
+

Environment Variables

+

There are also a number of environment variables which control the behavior of the PyTorch/XLA +software stack.

+

Setting such variables will cause different degrees of performance degradation, so they should +only be enabled for debugging.

+
    +
  • XLA_IR_DEBUG: Enables the Python stack trace to be captured where creating IR nodes, +hence allowing to understand which PyTorch operation was responsible for generating the IR.

  • +
  • XLA_HLO_DEBUG: Enables the Python stack frame captured when XLA_IR_DEBUG is active, +to be propagated to the XLA HLO metadata.

  • +
  • XLA_SAVE_TENSORS_FILE: The path to a file which will be used to dump the IR graphs during +execution. Note that the file can become really big if the option is left enabled and the +PyTorch program let run for long time. The graphs are appended to the file, so to have a clean +sheet from run to run, the file should be explicitly removed.

  • +
  • XLA_SAVE_TENSORS_FMT: The format of the graphs stored within the XLA_SAVE_TENSORS_FILE +file. Can be text (the default), dot (the Graphviz format) or hlo.

  • +
  • XLA_FLAGS=--xla_dump_to: If set to =/tmp/dir_name, the XLA compiler will dump the unoptimized and optimized HLO per compilation.

  • +
  • XLA_METRICS_FILE: If set, the path to a local file where the internal metrics will be +saved at every step. Metrics will be appended to the file, if already existing.

  • +
  • XLA_SAVE_HLO_FILE: If set, the path to a local file where, in case of compilation/execution +error, the offending HLO graph will be saved.

  • +
  • XLA_SYNC_WAIT: Forces the XLA tensor sync operation to wait for its completion, before +moving to the next step.

  • +
  • XLA_USE_EAGER_DEBUG_MODE: Forces the XLA tensor to execute eagerly, meaning compile and execute the torch operations one +by one. This is useful to bypass the long compilation time but overall step time will be a lot slower and memory usage will be higher +since all compiler optimizations will be skipped.

  • +
  • TF_CPP_LOG_THREAD_ID: If set to 1, the TF logs will show the thread ID +helping with debugging multithreaded processes.

  • +
  • TF_CPP_VMODULE: Environment variable used for TF VLOGs and takes the +form of TF_CPP_VMODULE=name=value,.... Note that for VLOGs you must set +TF_CPP_MIN_LOG_LEVEL=0.

  • +
  • TF_CPP_MIN_LOG_LEVEL: Level to print messages for. TF_CPP_MIN_LOG_LEVEL=0 will turn +on INFO logging, TF_CPP_MIN_LOG_LEVEL=1 WARNING and so on. Our PyTorch/XLA TF_VLOG uses +tensorflow::INFO level by default so to see VLOGs set TF_CPP_MIN_LOG_LEVEL=0.

  • +
  • XLA_DUMP_HLO_GRAPH: If set to =1 in case of a compilation or execution error the +offending HLO graph will be dumped as part of the runtime error raised by xla_util.cc.

  • +
+
+
+

Common Debugging Environment Variables Combinations

+
    +
  • Record the graph execution in the IR format

    +
    XLA_IR_DEBUG=1 XLA_HLO_DEBUG=1 XLA_SAVE_TENSORS_FMT="text" XLA_SAVE_TENSORS_FILE="/tmp/save1.ir"
    +
    +
    +
  • +
  • Record the graph execution in the HLO format

    +
    XLA_IR_DEBUG=1 XLA_HLO_DEBUG=1 XLA_SAVE_TENSORS_FMT="hlo" XLA_SAVE_TENSORS_FILE="/tmp/save1.hlo"
    +
    +
    +
  • +
  • Show debugging VLOG for runtime and graph compilation/execution

    +
    TF_CPP_MIN_LOG_LEVEL=0 TF_CPP_VMODULE="xla_graph_executor=5,pjrt_computation_client=3"
    +
    +
    +
  • +
+
+
+

Reproducing PyTorch/XLA CI/CD unit test failures.

+

You may see some test failures for a PR such as:

+
To execute this test, run the following from the base repo dir:
+    PYTORCH_TEST_WITH_SLOW=1 python ../test/test_torch.py -k test_put_xla_uint8
+
+
+

Running this directly in the command line does not work. You need to set the environment variable TORCH_TEST_DEVICES to your local pytorch/xla/test/pytorch_test_base.py. For example:

+

TORCH_TEST_DEVICES=/path/to/pytorch/xla/test/pytorch_test_base.py PYTORCH_TEST_WITH_SLOW=1 python ../test/test_torch.py -k test_put_xla_uint8 should work.

+
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/eager_mode.html b/release/2.5/eager_mode.html new file mode 100644 index 00000000000..ffc5cde9f49 --- /dev/null +++ b/release/2.5/eager_mode.html @@ -0,0 +1,847 @@ + + + + + + + + + + + + Eager Mode + Compile API — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • Eager Mode + Compile API
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

Eager Mode + Compile API

+

In this doc we will go over how to use PyTorch/XLA’s new experimental eager mode with the compile API. The goal is to make PyTorch/XLA experience more aligned with the native PyTorch and make development process easier.

+
+

Background

+

Currently PyTorch/XLA runs on the LazyTensor tracing mode by default. In the following code

+
import torch
+import torch_xla
+import torchvision
+
+device = torch_xla.device()
+model = torchvision.models.resnet18().to(device)
+input = torch.randn(64, 3, 224, 224).to(device)
+
+# model tracing
+res = model(input)
+
+# model execution, same as `xm.mark_step`
+torch_xla.sync()
+
+
+

The actual model compilation and device execution happen when torch_xla.sync is called. There are multiple drawbacks to this approach.

+
    +
  1. Users are often confused about when the framework is tracing and when the framework is executing.

  2. +
  3. Non-core model code(data preprocessing for example) often generates some small pending execution that gets leaked into the main graph(step function) and causes recompilation. The recompilation of the whole graph is usually very expensive.

  4. +
  5. It is hard to debug when/why recompilation happens.

  6. +
+

To mitigate above issues we want to introduce the new UX with eager and compile.

+
+
+

Basic Usage

+
import torch
+import torch_xla
+import torchvision
+
+# Run ops eagerly by default
+torch_xla.experimental.eager_mode(True)
+
+device = torch_xla.device()
+model = torchvision.models.resnet18().to(device)
+
+# Mark the function to be compiled
+compiled_model = torch_xla.compile(model)
+input = torch.randn(64, 3, 224, 224).to(device)
+
+# Compilation and execution happens right away.
+res = compiled_model(input)
+
+
+

Note that

+
    +
  1. Currently user has to manually enable the eager mode by torch_xla.experimental.eager_mode(True).

  2. +
  3. The region of the code that wants to be compiled should be wrapped by torch_xla.compile.

  4. +
+

The implementation of torch_xla.compile is actually pretty straightforward: it disables the eager mode when entering the target function and starts tracing. It will call torch_xla.sync() when the target function returns and re-enable the eager mode. You can expect the same performance by using the eager + compile API compared to the existing mark_step/sync approach.

+
+

Inference

+
torch_xla.experimental.eager_mode(True)
+
+compiled_model = torch.compile(model, backend="openxla")
+
+
+

It is recommended to use torch.compile instead of torch_xla.compile for inference to reduce the tracing overhead.

+
+
+

Training

+
torch_xla.experimental.eager_mode(True)
+
+def step_fn(model, data, target, loss_fn, optimizer):
+    optimizer.zero_grad()
+    logits = model(data)
+    loss = loss_fn(logits, target)
+    loss.backward()
+    optimizer.step()
+    return loss
+
+step_fn = torch_xla.compile(step_fn)
+
+
+

In training we ask users to refactor the step_fn out because it is usually better to compile the model's forward, backward and optimizer together. The long term goal is to also use torch.compile for training, but right now we recommend users to use torch_xla.compile (for performance reasons).

+
+
+
+

Benchmark

+

I ran a 2-layer decoder-only model training (it is pretty much just a llama2) with fake data on a single chip of v4-8 for 300 steps. Below are the numbers I observed.

+ + + + + + + + + + + + + + + + + + + + +
+ token/s + +
Tracing mode(base line) + 147 +
Eager mode + 65 +
Eager + torch_xla compile + 147 +

Eager mode can achieve ~45% of the performance of the fully compiled model for the decoder-only model. The trainer I used to test can be found here and here. Note that the performance of the eager mode is very model dependent. When I tried to run the resnet50, the eager mode performance was ~1% of the compiled mode. We don't expect users to use eager mode to execute the main training loop. Eager mode is meant to be used to handle the non-core part of the training/inference logic (data preprocessing, random number generation, etc.) or for debugging.

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/genindex.html b/release/2.5/genindex.html new file mode 100644 index 00000000000..8ffe46bdfd6 --- /dev/null +++ b/release/2.5/genindex.html @@ -0,0 +1,1026 @@ + + + + + + + + + + + + Index — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ + +

Index

+ +
+ A + | C + | D + | E + | G + | H + | I + | L + | M + | O + | R + | S + | T + | U + | W + | X + +
+

A

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + +
+ +

G

+ + + +
+ +

H

+ + +
+ +

I

+ + + +
+ +

L

+ + + +
+ +

M

+ + + +
+ +

O

+ + +
+ +

R

+ + +
+ +

S

+ + + +
+ +

T

+ + + +
    +
  • + torch_xla + +
  • +
  • + torch_xla.core.xla_model + +
  • +
  • + torch_xla.debug.metrics + +
  • +
  • + torch_xla.distributed.parallel_loader + +
  • +
    +
  • + torch_xla.distributed.spmd + +
  • +
  • + torch_xla.distributed.xla_multiprocessing + +
  • +
  • + torch_xla.experimental + +
  • +
  • + torch_xla.runtime + +
  • +
+ +

U

+ + +
+ +

W

+ + + +
+ +

X

+ + + +
+ + + +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/gpu.html b/release/2.5/gpu.html new file mode 100644 index 00000000000..fbe60131b0c --- /dev/null +++ b/release/2.5/gpu.html @@ -0,0 +1,911 @@ + + + + + + + + + + + + How to run with PyTorch/XLA:GPU — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • How to run with PyTorch/XLA:GPU
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

How to run with PyTorch/XLA:GPU

+

PyTorch/XLA enables PyTorch users to utilize the XLA compiler which supports accelerators including TPU, GPU, and CPU. This doc will go over the basic steps to run PyTorch/XLA on an NVIDIA GPU instance.

+
+

Create a GPU instance

+

You can either use a local machine with GPU attached or a GPU VM on the cloud. For example in Google Cloud you can follow this doc to create the GPU VM.

+
+
+

Environment Setup

+

Make sure you have cuda driver installed on the host.

+
+

Docker

+

PyTorch/XLA currently publishes prebuilt docker images and wheels with cuda11.8/12.1 and python 3.8. We recommend users create a docker container with the corresponding config. For a full list of docker images and wheels, please refer to this doc.

+
sudo docker pull us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.8_cuda_12.1
+
+# Installing the NVIDIA Container Toolkit per https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
+# For example
+curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
+  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
+    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
+    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+sudo apt-get update
+sudo apt-get install -y nvidia-container-toolkit
+
+# Configuring the NVIDIA Container Toolkit
+sudo nvidia-ctk runtime configure --runtime=docker
+sudo systemctl restart docker
+
+sudo docker run --shm-size=16g --net=host --gpus all -it -d us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.8_cuda_12.1 bin/bash
+sudo docker exec -it $(sudo docker ps | awk 'NR==2 { print $1 }') /bin/bash
+
+
+

Note that you need to restart the docker to make gpu devices visible in the docker container. After logging into the docker, you can use nvidia-smi to verify the device is setup correctly.

+
(pytorch) root@20ab2c7a2d06:/# nvidia-smi
+Thu Dec  8 06:24:29 2022
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 510.47.03    Driver Version: 510.47.03    CUDA Version: 11.6     |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|                               |                      |               MIG M. |
+|===============================+======================+======================|
+|   0  Tesla V100-SXM2...  Off  | 00000000:00:04.0 Off |                    0 |
+| N/A   36C    P0    38W / 300W |      0MiB / 16384MiB |      1%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                                  |
+|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
+|        ID   ID                                                   Usage      |
+|=============================================================================|
+|  No running processes found                                                 |
++-----------------------------------------------------------------------------+
+
+
+
+
+

Check environment variable

+

Make sure PATH and LD_LIBRARY_PATH environment variables account for cuda. Please do a echo $PATH and echo $LD_LIBRARY_PATH to verify. If not, please follow link to do so. Example:

+
echo "export PATH=\$PATH:/usr/local/cuda-12.1/bin" >> ~/.bashrc
+echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/cuda-12.1/lib64" >> ~/.bashrc
+source ~/.bashrc
+
+
+
+
+

Wheel

+
+

**NOTE:** The wheel file is compatible only with x86_64 Linux-based architecture. To check the architecture of your Linux system, execute the following command:

+
uname -a
+
+
+
+
pip3 install torch==2.4.0
+# GPU whl for python 3.10 + cuda 12.1
+pip3 install https://storage.googleapis.com/pytorch-xla-releases/wheels/cuda/12.1/torch_xla-2.4.0-cp310-cp310-manylinux_2_28_x86_64.whl
+
+
+

Wheels for other Python version and CUDA version can be found here.

+
+
+
+

Run some simple models

+

In order to run below examples, you need to clone the pytorch/xla repository.

+
+

MP_ImageNet Example

+

This example uses ImageNet. It is included in what we already cloned in our Docker container.

+
(pytorch) root@20ab2c7a2d06:/# export GPU_NUM_DEVICES=1 PJRT_DEVICE=CUDA
+(pytorch) root@20ab2c7a2d06:/# git clone --recursive https://github.com/pytorch/xla.git
+(pytorch) root@20ab2c7a2d06:/# python xla/test/test_train_mp_imagenet.py --fake_data
+==> Preparing data..
+Epoch 1 train begin 06:12:38
+| Training Device=xla:0/0 Epoch=1 Step=0 Loss=6.89059 Rate=2.82 GlobalRate=2.82 Time=06:13:23
+| Training Device=xla:0/0 Epoch=1 Step=20 Loss=6.79297 Rate=117.16 GlobalRate=45.84 Time=06:13:36
+| Training Device=xla:0/0 Epoch=1 Step=40 Loss=6.43628 Rate=281.16 GlobalRate=80.49 Time=06:13:43
+| Training Device=xla:0/0 Epoch=1 Step=60 Loss=5.83108 Rate=346.88 GlobalRate=108.82 Time=06:13:49
+| Training Device=xla:0/0 Epoch=1 Step=80 Loss=4.99023 Rate=373.62 GlobalRate=132.43 Time=06:13:56
+| Training Device=xla:0/0 Epoch=1 Step=100 Loss=3.92699 Rate=384.33 GlobalRate=152.40 Time=06:14:02
+| Training Device=xla:0/0 Epoch=1 Step=120 Loss=2.68816 Rate=388.35 GlobalRate=169.49 Time=06:14:09
+
+
+
+
+

ResNet Example

+

This example uses ResNet.

+
(pytorch) root@20ab2c7a2d06:/# python3 /xla/examples/train_resnet_base.py
+1:35PM UTC on Jun 08, 2024
+epoch: 1, step: 0, loss: 6.887794017791748, rate: 8.746502586051985
+epoch: 1, step: 10, loss: 6.877807140350342, rate: 238.4789458412044
+epoch: 1, step: 20, loss: 6.867819786071777, rate: 329.86095958663503
+epoch: 1, step: 30, loss: 6.857839584350586, rate: 367.3038003653586
+epoch: 1, step: 40, loss: 6.847847938537598, rate: 381.53141087190835
+epoch: 1, step: 50, loss: 6.837860584259033, rate: 387.80462249591113
+...
+epoch: 1, step: 260, loss: 6.628140926361084, rate: 391.135639565343
+epoch: 1, step: 270, loss: 6.618192195892334, rate: 391.6901797745233
+epoch: 1, step: 280, loss: 6.608224391937256, rate: 391.1602680460045
+epoch: 1, step: 290, loss: 6.598264217376709, rate: 391.6731498290759
+Epoch 1 train end  1:36PM UTC
+
+
+
+
+
+

AMP (AUTOMATIC MIXED PRECISION)

+

AMP is very useful for GPU training and PyTorch/XLA reuses CUDA's AMP rules. You can check out our mnist example and imagenet example. Note that we also used a modified version of optimizers to avoid the additional sync between device and host.

+
+
+

Develop PyTorch/XLA on a GPU instance (build PyTorch/XLA from source with GPU support)

+
    +
  1. Inside a GPU VM, create a docker container from a development docker image. For example:

  2. +
+
sudo docker pull us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/development:3.8_cuda_12.1
+
+# Installing the NVIDIA Container Toolkit per https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
+# For example
+curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
+  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
+    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
+    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+sudo apt-get update
+sudo apt-get install -y nvidia-container-toolkit
+
+# Configuring the NVIDIA Container Toolkit
+sudo nvidia-ctk runtime configure --runtime=docker
+sudo systemctl restart docker
+
+sudo docker run --shm-size=16g --net=host --gpus all -it -d us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/development:3.8_cuda_12.1
+sudo docker exec -it $(sudo docker ps | awk 'NR==2 { print $1 }') /bin/bash
+
+
+
    +
  1. Build PyTorch and PyTorch/XLA from source.

  2. +
+

Make sure PATH and LD_LIBRARY_PATH environment variables account for cuda. See the above for more info.

+
git clone https://github.com/pytorch/pytorch.git
+cd pytorch
+USE_CUDA=1 python setup.py install
+USE_CUDA=1 python setup.py bdist_wheel # Required for hermetic Python in PyTorch/XLA build setup.
+
+git clone https://github.com/pytorch/xla.git
+cd xla
+XLA_CUDA=1 python setup.py install
+
+
+
    +
  1. Verify if PyTorch and PyTorch/XLA have been installed successfully.

  2. +
+

If you can run the tests in the section +Run some simple models successfully, then PyTorch and +PyTorch/XLA should have been installed successfully.

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/index.html b/release/2.5/index.html new file mode 100644 index 00000000000..b4832e6cec1 --- /dev/null +++ b/release/2.5/index.html @@ -0,0 +1,2130 @@ + + + + + + + + + + + + PyTorch/XLA documentation — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ + +
+

PyTorch on XLA Devices

+

PyTorch runs on XLA devices, like TPUs, with the +torch_xla package. This document describes +how to run your models on these devices.

+
+

Creating an XLA Tensor

+

PyTorch/XLA adds a new xla device type to PyTorch. This device type works just +like other PyTorch device types. For example, here’s how to create and +print an XLA tensor:

+
import torch
+import torch_xla
+import torch_xla.core.xla_model as xm
+
+t = torch.randn(2, 2, device=xm.xla_device())
+print(t.device)
+print(t)
+
+
+

This code should look familiar. PyTorch/XLA uses the same interface as regular +PyTorch with a few additions. Importing torch_xla initializes PyTorch/XLA, and +xm.xla_device() returns the current XLA device. This may be a CPU or TPU +depending on your environment.

+
+
+

XLA Tensors are PyTorch Tensors

+

PyTorch operations can be performed on XLA tensors just like CPU or CUDA tensors.

+

For example, XLA tensors can be added together:

+
t0 = torch.randn(2, 2, device=xm.xla_device())
+t1 = torch.randn(2, 2, device=xm.xla_device())
+print(t0 + t1)
+
+
+

Or matrix multiplied:

+
print(t0.mm(t1))
+
+
+

Or used with neural network modules:

+
l_in = torch.randn(10, device=xm.xla_device())
+linear = torch.nn.Linear(10, 20).to(xm.xla_device())
+l_out = linear(l_in)
+print(l_out)
+
+
+

Like other device types, XLA tensors only work with other XLA tensors on the +same device. So code like

+
l_in = torch.randn(10, device=xm.xla_device())
+linear = torch.nn.Linear(10, 20)
+l_out = linear(l_in)
+print(l_out)
+# Input tensor is not an XLA tensor: torch.FloatTensor
+
+
+

will throw an error since the torch.nn.Linear module is on the CPU.

+
+
+

Running Models on XLA Devices

+

Building a new PyTorch network or converting an existing one to run on XLA +devices requires only a few lines of XLA-specific code. The following snippets +highlight these lines when running on a single device and multiple devices with XLA +multi-processing.

+
+

Running on a Single XLA Device

+

The following snippet shows a network training on a single XLA device:

+
import torch_xla.core.xla_model as xm
+
+device = xm.xla_device()
+model = MNIST().train().to(device)
+loss_fn = nn.NLLLoss()
+optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
+
+for data, target in train_loader:
+  optimizer.zero_grad()
+  data = data.to(device)
+  target = target.to(device)
+  output = model(data)
+  loss = loss_fn(output, target)
+  loss.backward()
+
+  optimizer.step()
+  xm.mark_step()
+
+
+

This snippet highlights how easy it is to switch your model to run on XLA. The +model definition, dataloader, optimizer and training loop can work on any device. +The only XLA-specific code is a couple lines that acquire the XLA device and +mark the step. Calling +xm.mark_step() at the end of each training +iteration causes XLA to execute its current graph and update the model’s +parameters. See XLA Tensor Deep Dive for more on +how XLA creates graphs and runs operations.

+
+
+

Running on Multiple XLA Devices with Multi-processing

+

PyTorch/XLA makes it easy to accelerate training by running on multiple XLA +devices. The following snippet shows how:

+
import torch_xla
+import torch_xla.core.xla_model as xm
+import torch_xla.distributed.parallel_loader as pl
+
+def _mp_fn(index):
+  device = xm.xla_device()
+  mp_device_loader = pl.MpDeviceLoader(train_loader, device)
+
+  model = MNIST().train().to(device)
+  loss_fn = nn.NLLLoss()
+  optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
+
+  for data, target in mp_device_loader:
+    optimizer.zero_grad()
+    output = model(data)
+    loss = loss_fn(output, target)
+    loss.backward()
+    xm.optimizer_step(optimizer)
+
+if __name__ == '__main__':
+  torch_xla.launch(_mp_fn, args=())
+
+
+

There are three differences between this multi-device snippet and the previous +single device snippet. Let’s go over them one by one.

+
    +
  • torch_xla.launch()

    +
      +
    • Creates the processes that each run an XLA device.

    • +
    • This function is a wrapper of multiprocessing spawn that also allows users to run the script with the torchrun command line. Each process will only be able to access the device assigned to the current process. For example, on a TPU v4-8, there will be 4 processes being spawned and each process will own a TPU device.

    • +
    • Note that if you print the xm.xla_device() on each process you will see xla:0 on all devices. This is because each process can only see one device. This does not mean multi-process is not functioning. The only exception is with the PJRT runtime on TPU v2 and TPU v3, since there will be #devices/2 processes and each process will have 2 threads (check this doc for more details).

    • +
    +
  • +
  • MpDeviceLoader

    +
      +
    • Loads the training data onto each device.

    • +
    • MpDeviceLoader can wrap a torch dataloader. It can preload the data to the device and overlap the data loading with device execution to improve the performance.

    • +
    • MpDeviceLoader also calls xm.mark_step for you every batches_per_execution (defaults to 1) batches yielded.

    • +
    +
  • +
  • xm.optimizer_step(optimizer)

    +
      +
    • Consolidates the gradients between devices and issues the XLA device step computation.

    • +
    • It is pretty much an all_reduce_gradients + optimizer.step() + mark_step and returns the loss being reduced.

    • +
    +
  • +
+

The model definition, optimizer definition and training loop remain the same.

+
+

NOTE: It is important to note that, when using multi-processing, the user can start +retrieving and accessing XLA devices only from within the target function of +torch_xla.launch() (or any function which has torch_xla.launch() as parent in the call +stack).

+
+

See the +full multiprocessing example +for more on training a network on multiple XLA devices with multi-processing.

+
+
+

Running on TPU Pods

+

Multi-host setup for different accelerators can be very different. This doc will talk about the device independent bits of multi-host training and will use the TPU + PJRT runtime(currently available on 1.13 and 2.x releases) as an example.

+

Before you begin, please take a look at our user guide at here which will explain some Google Cloud basics like how to use the gcloud command and how to set up your project. You can also check here for all Cloud TPU Howto. This doc will focus on the PyTorch/XLA perspective of the Setup.

+

Let’s assume you have the above mnist example from above section in a train_mnist_xla.py. If it is a single host multi device training, you would ssh to the TPUVM and run command like

+
PJRT_DEVICE=TPU python3 train_mnist_xla.py
+
+
+

Now in order to run the same models on a TPU v4-16 (which has 2 hosts, each with 4 TPU devices), you will need to

+
    +
  • Make sure each host can access the training script and training data. This is usually done by using the gcloud scp command or gcloud ssh command to copy the training scripts to all hosts.

  • +
  • Run the same training command on all hosts at the same time.

  • +
+
gcloud alpha compute tpus tpu-vm ssh $USER-pjrt --zone=$ZONE --project=$PROJECT --worker=all --command="PJRT_DEVICE=TPU python3 train_mnist_xla.py"
+
+
+

The above gcloud ssh command will ssh to all hosts in the TPU VM Pod and run the same command at the same time.

+
+

NOTE: You need to run the above gcloud command outside of the TPU VM.

+
+

The model code and training script is the same for the multi-process training and the multi-host training. PyTorch/XLA and the underlying infrastructure will make sure each device is aware of the global topology and each device’s local and global ordinal. Cross-device communication will happen across all devices instead of local devices.

+

For more details regarding PJRT runtime and how to run it on pod, please refer to this doc. For more information about PyTorch/XLA and TPU pod and a complete guide to run a resnet50 with fakedata on TPU pod, please refer to this guide.

+
+
+
+

XLA Tensor Deep Dive

+

Using XLA tensors and devices requires changing only a few lines of code. But +even though XLA tensors act a lot like CPU and CUDA tensors, their internals are +different. This section describes what makes XLA tensors unique.

+
+

XLA Tensors are Lazy

+

CPU and CUDA tensors launch operations immediately or eagerly. XLA tensors, +on the other hand, are lazy. They record operations in a graph until the +results are needed. Deferring execution like this lets XLA optimize it. A graph +of multiple separate operations might be fused into a single optimized +operation, for example.

+

Lazy execution is generally invisible to the caller. PyTorch/XLA automatically +constructs the graphs, sends them to XLA devices, and synchronizes when +copying data between an XLA device and the CPU. Inserting a barrier when +taking an optimizer step explicitly synchronizes the CPU and the XLA device. For +more information about our lazy tensor design, you can read this paper.

+
+
+

Memory Layout

+

The internal data representation of XLA tensors is opaque to the user. They +do not expose their storage and they always appear to be contiguous, unlike +CPU and CUDA tensors. This allows XLA to adjust a tensor’s memory layout for +better performance.

+
+
+

Moving XLA Tensors to and from the CPU

+

XLA tensors can be moved from the CPU to an XLA device and from an XLA device +to the CPU. If a view is moved then the data it is viewing is also copied to the +other device and the view relationship is not preserved. Put another way, +once data is copied to another device it has no relationship with its +previous device or any tensors on it. Again, depending on how your code operates, +appreciating and accommodating this transition can be important.

+
+
+

Saving and Loading XLA Tensors

+

XLA tensors should be moved to the CPU before saving, as in the following +snippet:

+
import torch
+import torch_xla
+import torch_xla.core.xla_model as xm
+
+device = xm.xla_device()
+
+t0 = torch.randn(2, 2, device=device)
+t1 = torch.randn(2, 2, device=device)
+
+tensors = (t0.cpu(), t1.cpu())
+
+torch.save(tensors, 'tensors.pt')
+
+tensors = torch.load('tensors.pt')
+
+t0 = tensors[0].to(device)
+t1 = tensors[1].to(device)
+
+
+

This lets you put the loaded tensors on any available device, not just the one on which they were initialized.

+

Per the above note on moving XLA tensors to the CPU, care must be taken when +working with views. Instead of saving views it is recommended that you recreate +them after the tensors have been loaded and moved to their destination device(s).

+

A utility API is provided to save data by taking care of previously moving it +to CPU:

+
import torch
+import torch_xla
+import torch_xla.core.xla_model as xm
+
+xm.save(model.state_dict(), path)
+
+
+

In case of multiple devices, the above API will only save the data for the master +device ordinal (0).

+

In case where memory is limited compared to the size of the model parameters, an +API is provided that reduces the memory footprint on the host:

+
import torch_xla.utils.serialization as xser
+
+xser.save(model.state_dict(), path)
+
+
+

This API streams XLA tensors to CPU one at a time, reducing the amount of host +memory used, but it requires a matching load API to restore:

+
import torch_xla.utils.serialization as xser
+
+state_dict = xser.load(path)
+model.load_state_dict(state_dict)
+
+
+

Directly saving XLA tensors is possible but not recommended. XLA +tensors are always loaded back to the device they were saved from, and if +that device is unavailable the load will fail. PyTorch/XLA, like all of PyTorch, +is under active development and this behavior may change in the future.

+
+
+
+

Compilation Caching

+

The XLA compiler converts the traced HLO into an executable which runs on +the devices. Compilation can be time consuming, and in cases where the HLO +doesn’t change across executions, the compilation result can be persisted to +disk for reuse, significantly reducing development iteration time.

+

Note that if the HLO changes between executions, a recompilation will still +occur.

+

This is currently an experimental opt-in API, which must be activated before +any computations are executed. Initialization is done through the +initialize_cache API:

+
import torch_xla.runtime as xr
+xr.initialize_cache('YOUR_CACHE_PATH', readonly=False)
+
+
+

This will initialize a persistent compilation cache at the specified path. The +readonly parameter can be used to control whether the worker will be able to +write to the cache, which can be useful when a shared cache mount is used for +an SPMD workload.

+

If you want to use the persistent compilation cache in multi-process training (with torch_xla.launch or xmp.spawn), you should use a different path for each process.

+
def _mp_fn(index):
+  # cache init needs to happen inside the mp_fn.
+  xr.initialize_cache(f'/tmp/xla_cache_{index}', readonly=False)
+  ....
+
+if __name__ == '__main__':
+  torch_xla.launch(_mp_fn, args=())
+
+
+

If you don’t have access to the index, you can use xr.global_ordinal(). Check out the runnable example in here.

+
+
+

Further Reading

+

Additional documentation is available at the +PyTorch/XLA repo. More examples of running +networks on TPUs are available +here.

+
+
+
+

PyTorch/XLA API

+
+

torch_xla

+
+
+torch_xla.device(index: Optional[int] = None) device[source]
+

Returns a given instance of an XLA device.

+

If SPMD is enabled, returns a virtual device that wraps all devices available +to this process.

+
+
Parameters
+

index – index of the XLA device to be returned. Corresponds to index in +torch_xla.devices().

+
+
Returns
+

An XLA torch.device.

+
+
+
+ +
+
+torch_xla.devices() List[device][source]
+

Returns all devices available in the current process.

+
+
Returns
+

A list of XLA torch.devices.

+
+
+
+ +
+
+torch_xla.device_count() int[source]
+

Returns number of addressable devices in the current process.

+
+ +
+
+torch_xla.sync(wait: bool = False)[source]
+

Launches all pending graph operations.

+
+
Parameters
+

wait (bool) – whether to block the current process until the execution finished.

+
+
+
+ +
+
+torch_xla.compile(f: Optional[Callable] = None, full_graph: Optional[bool] = False, name: Optional[str] = None, num_different_graphs_allowed: Optional[int] = None)[source]
+

Optimizes the given model/function using torch_xla’s LazyTensor tracing mode. +PyTorch/XLA will trace the given function with the given inputs and then generate +graphs to represent the pytorch operations that happen within this function. This +graph will be compiled by XLA and executed on the accelerator (decided by the +tensor’s device). Eager mode will be disabled for the compiled region of the function.

+
+
Parameters
+
    +
  • model (Callable) – Module/function to optimize, if not passed this function will +act as a context manager.

  • +
  • full_graph (Optional[bool]) – Whether this compile should generate a single graph. If set to True +and multiple graphs will be generated torch_xla will throw an error with debug info +and exit.

  • +
  • name (Optional[name]) – Name of the compiled program. The name of the function f will be used +if not specified. This name will be used in the PT_XLA_DEBUG messages as well as HLO/IR dump +file.

  • +
  • num_different_graphs_allowed (Optional[python:int]) – number of different traced graphs of the given +model/function that we are allowed to have. An error will be raised in case this limit +is exceeded.

  • +
+
+
+

Example:

+
# usage 1
+@torch_xla.compile()
+def foo(x):
+  return torch.sin(x) + torch.cos(x)
+
+def foo2(x):
+  return torch.sin(x) + torch.cos(x)
+# usage 2
+compiled_foo2 = torch_xla.compile(foo2)
+
+# usage 3
+with torch_xla.compile():
+  res = foo2(x)
+
+
+
+ +
+
+torch_xla.manual_seed(seed, device=None)[source]
+

Set the seed for generating random numbers for the current XLA device.

+
+
Parameters
+
    +
  • seed (python:integer) – The state to be set.

  • +
  • device (torch.device, optional) – The device where the RNG state needs to be set. +If missing the default device seed will be set.

  • +
+
+
+
+ +
+
+

runtime

+
+
+torch_xla.runtime.device_type() Optional[str][source]
+

Returns the current PjRt device type.

+

Selects a default device if none has been configured

+
+
Returns
+

A string representation of the device.

+
+
+
+ +
+
+torch_xla.runtime.local_process_count() int[source]
+

Returns the number of processes running on this host.

+
+ +
+
+torch_xla.runtime.local_device_count() int[source]
+

Returns the total number of devices on this host.

+

Assumes each process has the same number of addressable devices.

+
+ +
+
+torch_xla.runtime.addressable_device_count() int[source]
+

Returns the number of devices visible to this process.

+
+ +
+
+torch_xla.runtime.global_device_count() int[source]
+

Returns the total number of devices across all processes/hosts.

+
+ +
+
+torch_xla.runtime.global_runtime_device_count() int[source]
+

Returns the total number of runtime devices across all processes/hosts, especially useful for SPMD.

+
+ +
+
+torch_xla.runtime.world_size() int[source]
+

Returns the total number of processes participating in the job.

+
+ +
+
+torch_xla.runtime.global_ordinal() int[source]
+

Returns global ordinal of this thread within all processes.

+

Global ordinal is in range [0, global_device_count). Global ordinals are not +guaranteed to have any predictable relationship to the TPU worker ID nor are +they guaranteed to be contiguous on each host.

+
+ +
+
+torch_xla.runtime.local_ordinal() int[source]
+

Returns local ordinal of this thread within this host.

+

Local ordinal is in range [0, local_device_count).

+
+ +
+
+torch_xla.runtime.get_master_ip() str[source]
+

Retrieve the master worker IP for the runtime. This calls into +backend-specific discovery APIs.

+
+
Returns
+

master worker’s IP address as a string.

+
+
+
+ +
+
+torch_xla.runtime.use_spmd(auto: Optional[bool] = False)[source]
+

API to enable SPMD mode. This is a recommended way to enable SPMD.

+

This forces SPMD mode if some tensors are already initialized on non-SPMD +devices. This means that those tensors would be replicated across the devices.

+
+
Parameters
+

auto (bool) – Whether to enable the auto-sharding. Read +https://github.com/pytorch/xla/blob/master/docs/spmd_advanced.md#auto-sharding +for more detail

+
+
+
+ +
+
+torch_xla.runtime.is_spmd()[source]
+

Returns if SPMD is set for execution.

+
+ +
+
+torch_xla.runtime.initialize_cache(path: str, readonly: bool = False)[source]
+

Initializes the persistent compilation cache. This API must be called +before any computations have been performed.

+
+
Parameters
+
    +
  • path (str) – The path at which to store the persistent cache.

  • +
  • readonly (bool) – Whether or not this worker should have write access to the cache.

  • +
+
+
+
+ +
+
+

xla_model

+
+
+torch_xla.core.xla_model.xla_device(n: Optional[int] = None, devkind: Optional[str] = None) device[source]
+

Returns a given instance of an XLA device.

+
+
Parameters
+
    +
  • n (python:int, optional) – The specific instance (ordinal) to be returned. If +specified, the specific XLA device instance will be returned. Otherwise +the first device of devkind will be returned.

  • +
  • devkind (string..., optional) – If specified, device type such as TPU, +CUDA, CPU, or custom PJRT device. Deprecated.

  • +
+
+
Returns
+

A torch.device with the requested instance.

+
+
+
+ +
+
+torch_xla.core.xla_model.xla_device_hw(device: Union[str, device]) str[source]
+

Returns the hardware type of the given device.

+
+
Parameters
+

device (string or torch.device) – The xla device that will be mapped to the +real device.

+
+
Returns
+

A string representation of the hardware type of the given device.

+
+
+
+ +
+
+torch_xla.core.xla_model.is_master_ordinal(local: bool = True) bool[source]
+

Checks whether the current process is the master ordinal (0).

+
+
Parameters
+

local (bool) – Whether the local or global master ordinal should be checked. +In case of multi-host replication, there is only one global master ordinal +(host 0, device 0), while there are NUM_HOSTS local master ordinals. +Default: True

+
+
Returns
+

A boolean indicating whether the current process is the master ordinal.

+
+
+
+ +
+
+torch_xla.core.xla_model.all_reduce(reduce_type: str, inputs: Union[Tensor, List[Tensor]], scale: float = 1.0, groups: Optional[List[List[int]]] = None, pin_layout: bool = True) Union[Tensor, List[Tensor]][source]
+

Performs an inplace reduce operation on the input tensor(s).

+
+
Parameters
+
    +
  • reduce_type (string) – One of xm.REDUCE_SUM, xm.REDUCE_MUL, +xm.REDUCE_AND, xm.REDUCE_OR, xm.REDUCE_MIN and +xm.REDUCE_MAX.

  • +
  • inputs – Either a single torch.Tensor or a list of torch.Tensor to +perform the all reduce op to.

  • +
  • scale (python:float) – A default scaling value to be applied after the reduce. +Default: 1.0

  • +
  • groups (list, optional) –

    A list of list, representing the replica groups for +the all_reduce() operation. Example: [[0, 1, 2, 3], [4, 5, 6, 7]]

    +
    +

    defines two groups, one with the [0, 1, 2, 3] replicas and one with +the [4, 5, 6, 7] replicas. If None there will be only one group with +all the replicas in it.

    +
    +

  • +
  • pin_layout (bool, optional) – whether to pin the layout for this communication op. +Layout pinning can prevent potential data corruption when each process that +participates in the communication has a slightly different program, but it might +cause some xla compilation to fail. Unpin the layout when you see an error message +like “HloModule has a mix of layout constrained”.

  • +
+
+
Returns
+

If a single torch.Tensor is passed, the return value is a torch.Tensor +holding the reduced value (across the replicas). If a list/tuple is passed, +this function performs an inplace all-reduce op on the input tensors, and +returns the list/tuple itself.

+
+
+
+ +
+
+torch_xla.core.xla_model.all_gather(value: Tensor, dim: int = 0, groups: Optional[List[List[int]]] = None, output: Optional[Tensor] = None, pin_layout: bool = True) Tensor[source]
+

Performs an all-gather operation along a given dimension.

+
+
Parameters
+
    +
  • value (torch.Tensor) – The input tensor.

  • +
  • dim (python:int) – The gather dimension. +Default: 0

  • +
  • groups (list, optional) –

    A list of list, representing the replica groups for +the all_gather() operation. Example: [[0, 1, 2, 3], [4, 5, 6, 7]]

    +
    +

    defines two groups, one with the [0, 1, 2, 3] replicas and one with +the [4, 5, 6, 7] replicas. If None there will be only one group with +all the replicas in it.

    +
    +

  • +
  • output (torch.Tensor) – Optional output tensor.

  • +
  • pin_layout (bool, optional) – whether to pin the layout for this communication op. +Layout pinning can prevent potential data corruption when each process that +participates in the communication has a slightly different program, but it might +cause some xla compilation to fail. Unpin the layout when you see an error message +like “HloModule has a mix of layout constrained”.

  • +
+
+
Returns
+

A tensor which has, in the dim dimension, all the values from the +participating replicas.

+
+
+
+ +
+
+torch_xla.core.xla_model.all_to_all(value: Tensor, split_dimension: int, concat_dimension: int, split_count: int, groups: Optional[List[List[int]]] = None, pin_layout: bool = True) Tensor[source]
+

Performs an XLA AllToAll() operation on the input tensor.

+

See: https://www.tensorflow.org/xla/operation_semantics#alltoall

+
+
Parameters
+
    +
  • value (torch.Tensor) – The input tensor.

  • +
  • split_dimension (python:int) – The dimension upon which the split should happen.

  • +
  • concat_dimension (python:int) – The dimension upon which the concat should happen.

  • +
  • split_count (python:int) – The split count.

  • +
  • groups (list, optional) –

    A list of list, representing the replica groups for +the all_reduce() operation. Example: [[0, 1, 2, 3], [4, 5, 6, 7]]

    +
    +

    defines two groups, one with the [0, 1, 2, 3] replicas and one with +the [4, 5, 6, 7] replicas. If None there will be only one group with +all the replicas in it.

    +
    +

  • +
  • pin_layout (bool, optional) – whether to pin the layout for this communication op. +Layout pinning can prevent potential data corruption when each process that +participates in the communication has a slightly different program, but it might +cause some xla compilation to fail. Unpin the layout when you see an error message +like “HloModule has a mix of layout constrained”.

  • +
+
+
Returns
+

The result torch.Tensor of the all_to_all() operation.

+
+
+
+ +
+
+torch_xla.core.xla_model.add_step_closure(closure: Callable[[...], Any], args: Tuple[Any] = (), run_async: bool = False)[source]
+

Adds a closure to the list of the ones to be run at the end of the step.

+

Many times during model training there is the need to print/report (print to +console, post to tensorboard, etc…) information which requires the content of +intermediary tensors to be inspected. +Inspecting different tensors’ content in different points of the model code +requires many executions and typically causes performance issues. +Adding a step closure will ensure that it will be run after the barrier, when +all the live tensors will already be materialized to device data. +Live tensors will include the ones captured by the closure arguments. +So using add_step_closure() will ensure a single execution will be +performed, even when multiple closures are queued, requiring multiple tensors +to be inspected. +Step closures will be run sequentially in the order they have been queued. +Note that even though using this API the execution will be optimized, it is +advised to throttle the printing/reporting events once every N steps.

+
+
Parameters
+
    +
  • closure (callable) – The function to be called.

  • +
  • args (tuple) – The arguments to be passed to the closure.

  • +
  • run_async – If True, run the closure asynchronously.

  • +
+
+
+
+ +
+
+torch_xla.core.xla_model.wait_device_ops(devices: List[str] = [])[source]
+

Waits for all the async operations on the given devices to complete.

+
+
Parameters
+

devices (string..., optional) – The devices whose async ops need to be waited +for. If empty, all the local devices will be waited for.

+
+
+
+ +
+
+torch_xla.core.xla_model.optimizer_step(optimizer: Optimizer, barrier: bool = False, optimizer_args: Dict = {}, groups: Optional[List[List[int]]] = None, pin_layout: bool = True)[source]
+

Run the provided optimizer step and sync gradients across all devices.

+
+
Parameters
+
    +
  • optimizer (torch.Optimizer) – The torch.Optimizer instance whose +step() function needs to be called. The step() function will be called +with the optimizer_args named arguments.

  • +
  • barrier (bool, optional) – Whether the XLA tensor barrier should be issued in +this API. If using the PyTorch XLA ParallelLoader or DataParallel +support, this is not necessary as the barrier will be issued by the XLA +data loader iterator next() call. +Default: False

  • +
  • optimizer_args (dict, optional) – Named arguments dictionary for the +optimizer.step() call.

  • +
  • groups (list, optional) –

    A list of list, representing the replica groups for +the all_reduce() operation. Example: [[0, 1, 2, 3], [4, 5, 6, 7]]

    +
    +

    defines two groups, one with the [0, 1, 2, 3] replicas and one with +the [4, 5, 6, 7] replicas. If None there will be only one group with +all the replicas in it.

    +
    +

  • +
  • pin_layout (bool, optional) – whether to pin the layout when reducing gradients. +See xm.all_reduce for details.

  • +
+
+
Returns
+

The same value returned by the optimizer.step() call.

+
+
+

Example

+
>>> import torch_xla.core.xla_model as xm
+>>> xm.optimizer_step(self.optimizer)
+
+
+
+ +
+
+torch_xla.core.xla_model.save(data: Any, file_or_path: Union[str, TextIO], master_only: bool = True, global_master: bool = False)[source]
+

Saves the input data into a file.

+

The saved data is transferred to PyTorch CPU device before being saved, so a +following torch.load() will load CPU data. +Care must be taken when working with views. Instead of saving views it’s +recommended that you recreate them after the tensors have been loaded and +moved to their destination device(s).

+
+
Parameters
+
    +
  • data – The input data to be saved. Any nested combination of Python objects +(list, tuples, sets, dicts, …).

  • +
  • file_or_path – The destination for the data saving operation. Either a file +path or a Python file object. If master_only is False the path or +file objects must point to different destinations as otherwise all the +writes from the same host will override each other.

  • +
  • master_only (bool, optional) – Whether only the master device should save the +data. If False, the file_or_path argument should be a different file or +path for each of the ordinals taking part to the replication, otherwise +all the replicas on the same host will be writing to the same location. +Default: True

  • +
  • global_master (bool, optional) – When master_only is True this flag +controls whether every host’s master (if global_master is False) +saves the content, or only the global master (ordinal 0). +Default: False

  • +
+
+
+

Example

+
>>> import torch_xla.core.xla_model as xm
+>>> xm.wait_device_ops() # wait for all pending operations to finish.
+>>> xm.save(obj_to_save, path_to_save)
+>>> xm.rendezvous('torch_xla.core.xla_model.save') # multi process context only
+
+
+
+ +
+
+torch_xla.core.xla_model.rendezvous(tag: str, payload: bytes = b'', replicas: List[int] = []) List[bytes][source]
+

Waits for all the mesh clients to reach the named rendezvous.

+

Note: PJRT does not support the XRT mesh server, so this is effectively an +alias to xla_rendezvous.

+
+
Parameters
+
    +
  • tag (string) – The name of the rendezvous to join.

  • +
  • payload (bytes, optional) – The payload to be sent to the rendezvous.

  • +
  • replicas (list, python:int) – The replica ordinals taking part of the rendezvous. +Empty means all replicas in the mesh. +Default: []

  • +
+
+
Returns
+

The payloads exchanged by all the other cores, with the payload of core +ordinal i at position i in the returned tuple.

+
+
+

Example

+
>>> import torch_xla.core.xla_model as xm
+>>> xm.rendezvous('example')
+
+
+
+ +
+
+torch_xla.core.xla_model.mesh_reduce(tag: str, data, reduce_fn: Callable[[...], Any]) Union[Any, ToXlaTensorArena][source]
+

Performs an out-of-graph client mesh reduction.

+
+
Parameters
+
    +
  • tag (string) – The name of the rendezvous to join.

  • +
  • data – The data to be reduced. The reduce_fn callable will receive a list +with the copies of the same data coming from all the mesh client processes +(one per core).

  • +
  • reduce_fn (callable) – A function which receives a list of data-like +objects and returns the reduced result.

  • +
+
+
Returns
+

The reduced value.

+
+
+

Example

+
>>> import torch_xla.core.xla_model as xm
+>>> import numpy as np
+>>> accuracy = xm.mesh_reduce('test_accuracy', accuracy, np.mean)
+
+
+
+ +
+
+torch_xla.core.xla_model.set_rng_state(seed: int, device: Optional[str] = None)[source]
+

Sets the random number generator state.

+
+
Parameters
+
    +
  • seed (python:integer) – The state to be set.

  • +
  • device (string, optional) – The device where the RNG state needs to be set. +If missing the default device seed will be set.

  • +
+
+
+
+ +
+
+torch_xla.core.xla_model.get_rng_state(device: Optional[str] = None) int[source]
+

Gets the current running random number generator state.

+
+
Parameters
+

device (string, optional) – The device whose RNG state needs to be retrieved. +If missing the default device seed will be set.

+
+
Returns
+

The RNG state, as integer.

+
+
+
+ +
+
+torch_xla.core.xla_model.get_memory_info(device: Optional[device] = None) MemoryInfo[source]
+

Retrieves the device memory usage.

+
+
Parameters
+
    +
  • device – Optional[torch.device] The device whose memory information are requested.

  • +
  • device. (If not passed will use the default) –

  • +
+
+
Returns
+

MemoryInfo dict with memory usage for the given device.

+
+
+

Example

+
>>> xm.get_memory_info()
+{'bytes_used': 290816, 'bytes_limit': 34088157184}
+
+
+
+ +
+
+torch_xla.core.xla_model.get_stablehlo(tensors: Optional[List[Tensor]] = None) str[source]
+

Get StableHLO for the computation graph in string format.

+

If tensors is not empty, the graph with tensors as outputs will be dumped. +If tensors is empty, the whole computation graph will be dumped.

+

For inference graph, it is recommended to pass the model outputs to tensors. +For training graph, it is not straightforward to identify the “outputs”. Using empty tensors is recommended.

+

To enable source line info in StableHLO, please set env var XLA_HLO_DEBUG=1.

+
+
Parameters
+

tensors (list[torch.Tensor], optional) – Tensors that represent the output/root of the StableHLO graph.

+
+
Returns
+

StableHLO Module in string format.

+
+
+
+ +
+
+torch_xla.core.xla_model.get_stablehlo_bytecode(tensors: Optional[Tensor] = None) bytes[source]
+

Get StableHLO for the computation graph in bytecode format.

+

If tensors is not empty, the graph with tensors as outputs will be dumped. +If tensors is empty, the whole computation graph will be dumped.

+

For inference graph, it is recommended to pass the model outputs to tensors. +For training graph, it is not straightforward to identify the “outputs”. Using empty tensors is recommended.

+
+
Parameters
+

tensors (list[torch.Tensor], optional) – Tensors that represent the output/root of the StableHLO graph.

+
+
Returns
+

StableHLO Module in bytecode format.

+
+
+
+ +
+
+

distributed

+
+
+class torch_xla.distributed.parallel_loader.MpDeviceLoader(loader, device, **kwargs)[source]
+

Wraps an existing PyTorch DataLoader with background data upload.

+

This class should only be using with multi-processing data parallelism. It will wrap +the dataloader passed in with ParallelLoader and return the per_device_loader for the +current device.

+
+
Parameters
+
    +
  • loader (torch.utils.data.DataLoader) – The PyTorch DataLoader to be +wrapped.

  • +
  • device (torch.device…) – The device where the data has to be sent.

  • +
  • kwargs – Named arguments for the ParallelLoader constructor.

  • +
+
+
+

Example

+
>>> device = torch_xla.device()
+>>> train_device_loader = MpDeviceLoader(train_loader, device)
+
+
+
+ +
+
+torch_xla.distributed.xla_multiprocessing.spawn(fn, args=(), nprocs=None, join=True, daemon=False, start_method='spawn')[source]
+

Enables multi processing based replication.

+
+
Parameters
+
    +
  • fn (callable) – The function to be called for each device which takes part of +the replication. The function will be called with a first argument being +the global index of the process within the replication, followed by the +arguments passed in args.

  • +
  • args (tuple) – The arguments for fn. +Default: Empty tuple

  • +
  • nprocs (python:int) – The number of processes/devices for the replication. At the +moment, if specified, can be either 1 or the maximum number of devices.

  • +
  • join (bool) – Whether the call should block waiting for the completion of the +processes which have being spawned. +Default: True

  • +
  • daemon (bool) – Whether the processes being spawned should have the daemon +flag set (see Python multi-processing API). +Default: False

  • +
  • start_method (string) – The Python multiprocessing process creation method. +Default: spawn

  • +
+
+
Returns
+

The same object returned by the torch.multiprocessing.spawn API. If +nprocs is 1 the fn function will be called directly, and the API will +return None.

+
+
+
+ +
+
+

spmd

+
+
+torch_xla.distributed.spmd.mark_sharding(t: Union[Tensor, XLAShardedTensor], mesh: Mesh, partition_spec: Tuple[Optional[Union[Tuple, int, str]]]) XLAShardedTensor[source]
+

Annotates the tensor provided with XLA partition spec. Internally, +it annotates the corresponding XLATensor as sharded for the XLA SpmdPartitioner pass.

+
+
Parameters
+
    +
  • t (Union[torch.Tensor, XLAShardedTensor]) – input tensor to be annotated with partition_spec.

  • +
  • mesh (Mesh) – describes the logical XLA device topology and the underlying device IDs.

  • +
  • partition_spec (Tuple[Tuple, python:int, str, None]) – A tuple of device_mesh dimension index or +None. Each index is an int, str if the mesh axis is named, or tuple of int or str. +This specifies how each input rank is sharded (index to mesh_shape) or replicated (None). +When a tuple is specified, the corresponding input tensor axis will be sharded along all +logical axes in the tuple. Note that the order the mesh axes are specified in the tuple +will impact the resulting sharding.

  • +
  • dynamo_custom_op (bool) – if set to True, it calls the dynamo custom op variant of mark_sharding +to make itself recognizable and traceable by dynamo.

  • +
+
+
+

Example

+
>>> import torch_xla.runtime as xr
+>>> import torch_xla.distributed.spmd as xs
+>>> mesh_shape = (4, 2)
+>>> num_devices = xr.global_runtime_device_count()
+>>> device_ids = np.array(range(num_devices))
+>>> mesh = Mesh(device_ids, mesh_shape, ('x', 'y'))
+>>> input = torch.randn(8, 32).to(xm.xla_device())
+>>> xs.mark_sharding(input, mesh, (0, None)) # 4-way data parallel
+>>> linear = nn.Linear(32, 10).to(xm.xla_device())
+>>> xs.mark_sharding(linear.weight, mesh, (None, 1)) # 2-way model parallel
+
+
+
+ +
+
+torch_xla.distributed.spmd.clear_sharding(t: Union[Tensor, XLAShardedTensor]) Tensor[source]
+

Clear sharding annotation from the input tensor and return a cpu casted tensor. This +is an in-place operation but will also return the same torch.Tensor back.

+
+
Parameters
+

t (Union[torch.Tensor, XLAShardedTensor]) – Tensor that we want to clear the sharding

+
+
Returns
+

tensor that without sharding.

+
+
Return type
+

t (torch.Tensor)

+
+
+

Example

+
>>> import torch_xla.distributed.spmd as xs
+>>> torch_xla.runtime.use_spmd()
+>>> t1 = torch.randn(8,8).to(torch_xla.device())
+>>> mesh = xs.get_1d_mesh()
+>>> xs.mark_sharding(t1, mesh, (0, None))
+>>> xs.clear_sharding(t1)
+
+
+
+ +
+
+torch_xla.distributed.spmd.set_global_mesh(mesh: Mesh)[source]
+

Set the global mesh that can be used for the current process.

+
+
Parameters
+

mesh – (Mesh) Mesh object that will be the global mesh.

+
+
+

Example

+
>>> import torch_xla.distributed.spmd as xs
+>>> mesh = xs.get_1d_mesh("data")
+>>> xs.set_global_mesh(mesh)
+
+
+
+ +
+
+torch_xla.distributed.spmd.get_global_mesh() Optional[Mesh][source]
+

Get the global mesh for the current process.

+
+
Returns
+

(Optional[Mesh]) Mesh object if global mesh is set, otherwise return None.

+
+
Return type
+

mesh

+
+
+

Example

+
>>> import torch_xla.distributed.spmd as xs
+>>> xs.get_global_mesh()
+
+
+
+ +
+
+torch_xla.distributed.spmd.get_1d_mesh(axis_name: Optional[str] = None) Mesh[source]
+

Helper function to return the mesh with all devices in one dimension.

+
+
Parameters
+

axis_name – (Optional[str]) optional string to represent the axis name of the mesh

+
+
Returns
+

Mesh object

+
+
Return type
+

Mesh

+
+
+

Example

+
>>> # This example is assuming 1 TPU v4-8
+>>> import torch_xla.distributed.spmd as xs
+>>> mesh = xs.get_1d_mesh("data")
+>>> print(mesh.mesh_shape)
+(4,)
+>>> print(mesh.axis_names)
+('data',)
+
+
+
+ +
+
+class torch_xla.distributed.spmd.Mesh(device_ids: Union[ndarray, List], mesh_shape: Tuple[int, ...], axis_names: Optional[Tuple[str, ...]] = None)[source]
+

Describe the logical XLA device topology mesh and the underlying resources.

+
+
Parameters
+
    +
  • device_ids (Union[np.ndarray, List]) – A raveled list of devices (IDs) in a custom order. The list is reshaped +to an mesh_shape array, filling the elements using C-like index order.

  • +
  • mesh_shape (Tuple[python:int, ...]) – A int tuple describing the logical topology shape +of the device mesh, and each element describes the number of devices in +the corresponding axis.

  • +
  • axis_names (Tuple[str, ...]) – A sequence of resource axis names to be assigned to the dimensions +of the devices argument. Its length should match the rank of devices.

  • +
+
+
+

Example

+
>>> mesh_shape = (4, 2)
+>>> num_devices = len(xm.get_xla_supported_devices())
+>>> device_ids = np.array(range(num_devices))
+>>> mesh = Mesh(device_ids, mesh_shape, ('x', 'y'))
+>>> mesh.get_logical_mesh()
+>>> array([[0, 1],
+          [2, 3],
+          [4, 5],
+          [6, 7]])
+>>> mesh.shape()
+OrderedDict([('x', 4), ('y', 2)])
+
+
+
+ +
+
+class torch_xla.distributed.spmd.HybridMesh(*, ici_mesh_shape: Tuple[int, ...], dcn_mesh_shape: Optional[Tuple[int, ...]] = None, axis_names: Optional[Tuple[str, ...]] = None)[source]
+
+
Creates a hybrid device mesh of devices connected with ICI and DCN networks.

The shape of logical mesh should be ordered by increasing network-intensity +e.g. [replica, data, model] where model has the most network communication +requirements.

+
+
+
+
Parameters
+
    +
  • ici_mesh_shape – shape of the logical mesh for inner connected devices.

  • +
  • dcn_mesh_shape – shape of logical mesh for outer connected devices.

  • +
+
+
+

Example

+
>>> # This example is assuming 2 slices of v4-8.
+>>> ici_mesh_shape = (1, 4, 1) # (data, fsdp, tensor)
+>>> dcn_mesh_shape = (2, 1, 1)
+>>> mesh = HybridMesh(ici_mesh_shape, dcn_mesh_shape, ('data','fsdp','tensor'))
+>>> print(mesh.shape())
+>>> >> OrderedDict([('data', 2), ('fsdp', 4), ('tensor', 1)])
+
+
+
+ +
+
+

experimental

+
+
+torch_xla.experimental.eager_mode(enable: bool)[source]
+

Configure torch_xla’s default execution mode.

+

Under eager mode only functions that were `torch_xla.compile`d will be +traced and compiled. Other torch ops will be executed eagerly.

+
+ +
+
+

debug

+
+
+torch_xla.debug.metrics.metrics_report()[source]
+

Retrieves a string containing the full metrics and counters report.

+
+ +
+
+torch_xla.debug.metrics.short_metrics_report(counter_names: Optional[list] = None, metric_names: Optional[list] = None)[source]
+

Retrieves a string containing a report of the selected metrics and counters.

+
+
Parameters
+
    +
  • counter_names (list) – The list of counter names whose data needs to be printed.

  • +
  • metric_names (list) – The list of metric names whose data needs to be printed.

  • +
+
+
+
+ +
+
+torch_xla.debug.metrics.counter_names()[source]
+

Retrieves all the currently active counter names.

+
+ +
+
+torch_xla.debug.metrics.counter_value(name)[source]
+

Returns the value of an active counter.

+
+
Parameters
+

name (string) – The name of the counter whose value needs to be retrieved.

+
+
Returns
+

The counter value as integer.

+
+
+
+ +
+
+torch_xla.debug.metrics.metric_names()[source]
+

Retrieves all the currently active metric names.

+
+ +
+
+torch_xla.debug.metrics.metric_data(name)[source]
+

Returns the data of an active metric.

+
+
Parameters
+

name (string) – The name of the metric whose data needs to be retrieved.

+
+
Returns
+

The metric data, which is a tuple of (TOTAL_SAMPLES, ACCUMULATOR, SAMPLES). +The TOTAL_SAMPLES is the total number of samples which have been posted to +the metric. A metric retains only a given number of samples (in a circular +buffer). +The ACCUMULATOR is the sum of the samples over TOTAL_SAMPLES. +The SAMPLES is a list of (TIME, VALUE) tuples.

+
+
+
+ +
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/multi_process_distributed.html b/release/2.5/multi_process_distributed.html new file mode 100644 index 00000000000..af1c129fd9c --- /dev/null +++ b/release/2.5/multi_process_distributed.html @@ -0,0 +1,1134 @@ + + + + + + + + + + + + How to do DistributedDataParallel(DDP) — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • How to do DistributedDataParallel(DDP)
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

How to do DistributedDataParallel(DDP)

+

This document shows how to use torch.nn.parallel.DistributedDataParallel in xla, +and further describes its difference against the native xla data parallel +approach. You can find a minimum runnable example here.

+
+

Background / Motivation

+

Customers have long requested the ability to use PyTorch’s +DistributedDataParallel API with xla. And here we enable it as an experimental +feature.

+
+
+

How to use DistributedDataParallel

+

For those who switched from the PyTorch eager mode to XLA, here are all the +changes you need to do to convert your eager DDP model into XLA model. We assume +that you already know how to use XLA on a single +device.

+
    +
  1. Import xla specific distributed packages:

  2. +
+
import torch_xla
+import torch_xla.runtime as xr
+import torch_xla.distributed.xla_backend
+
+
+
    +
  1. Init xla process group similar to other process groups such as nccl and gloo.

  2. +
+
dist.init_process_group("xla", rank=rank, world_size=world_size)
+
+
+
    +
  1. Use xla specific APIs to get rank and world_size if you need to.

  2. +
+
new_rank = xr.global_ordinal()
+world_size = xr.world_size()
+
+
+
    +
  1. Pass gradient_as_bucket_view=True to the DDP wrapper.

  2. +
+
ddp_model = DDP(model, gradient_as_bucket_view=True)
+
+
+
    +
  1. Finally launch your model with xla specific launcher.

  2. +
+
torch_xla.launch(demo_fn)
+
+
+

Here we have put everything together (the example is actually taken from the +DDP tutorial). +The way you code it is pretty similar to the eager experience. Just with xla +specific touches on a single device plus the above five changes to your script.

+
import os
+import sys
+import tempfile
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import torch.optim as optim
+
+from torch.nn.parallel import DistributedDataParallel as DDP
+
+# additional imports for xla
+import torch_xla
+import torch_xla.core.xla_model as xm
+import torch_xla.runtime as xr
+import torch_xla.distributed.xla_backend
+
+def setup(rank, world_size):
+    os.environ['MASTER_ADDR'] = 'localhost'
+    os.environ['MASTER_PORT'] = '12355'
+
+    # initialize the xla process group
+    dist.init_process_group("xla", rank=rank, world_size=world_size)
+
+def cleanup():
+    dist.destroy_process_group()
+
+class ToyModel(nn.Module):
+    def __init__(self):
+        super(ToyModel, self).__init__()
+        self.net1 = nn.Linear(10, 1000000)
+        self.relu = nn.ReLU()
+        self.net2 = nn.Linear(1000000, 5)
+
+    def forward(self, x):
+        return self.net2(self.relu(self.net1(x)))
+
+def demo_basic(rank):
+    # xla specific APIs to get rank, world_size.
+    new_rank = xr.global_ordinal()
+    assert new_rank == rank
+    world_size = xr.world_size()
+
+    print(f"Running basic DDP example on rank {rank}.")
+    setup(rank, world_size)
+
+    # create model and move it to XLA device
+    device = xm.xla_device()
+    model = ToyModel().to(device)
+    # currently, gradient_as_bucket_view is needed to make DDP work for xla
+    ddp_model = DDP(model, gradient_as_bucket_view=True)
+
+    loss_fn = nn.MSELoss()
+    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
+
+    optimizer.zero_grad()
+    outputs = ddp_model(torch.randn(20, 10).to(device))
+    labels = torch.randn(20, 5).to(device)
+    loss_fn(outputs, labels).backward()
+    optimizer.step()
+    # xla specific API to execute the graph
+    xm.mark_step()
+
+    cleanup()
+
+
+def run_demo(demo_fn):
+    # xla specific launcher
+    torch_xla.launch(demo_fn)
+
+if __name__ == "__main__":
+    run_demo(demo_basic)
+
+
+
+
+

Benchmarking

+
+

Resnet50 with fake data

+

The following results are collected with the command: python +test/test_train_mp_imagenet.py --fake_data --model=resnet50 --num_epochs=1 on a +TPU VM V3-8 environment with ToT PyTorch and PyTorch/XLA. And the statistical +metrics are produced by using the script in this pull +request. The unit for the rate is +images per second.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Type + Mean + Median + 90th % + Std Dev + CV +
xm.optimizer_step + 418.54 + 419.22 + 430.40 + 9.76 + 0.02 +
DDP + 395.97 + 395.54 + 407.13 + 7.60 + 0.02 +

The performance difference between our native approach for distributed data +parallel and DistributedDataParallel wrapper is: 1 - 395.97 / 418.54 = 5.39%. +This result seems reasonable given the DDP wrapper introduces extra overheads on +tracing the DDP runtime.

+
+
+

MNIST with fake data

+

The following results are collected with the command: python +test/test_train_mp_mnist.py --fake_data on a TPU VM V3-8 environment with ToT +PyTorch and PyTorch/XLA. And the statistical metrics are produced by using the +script in this pull request. The +unit for the rate is images per second.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Type + Mean + Median + 90th % + Std Dev + CV +
xm.optimizer_step + 17864.19 + 20108.96 + 24351.74 + 5866.83 + 0.33 +
DDP + 10701.39 + 11770.00 + 14313.78 + 3102.92 + 0.29 +

The performance difference between our native approach for distributed data +parallel and DistributedDataParallel wrapper is: 1 - 14313.78 / 24351.74 = +41.22%. Here we compare 90th % instead since the dataset is small and first a +few rounds are heavily impacted by data loading. This slowdown is huge but makes +sense given the model is small. The additional DDP runtime tracing overhead is +hard to amortize.

+
+
+

MNIST with real data

+

The following results are collected with the command: python +test/test_train_mp_mnist.py --logdir mnist/ on a TPU VM V3-8 environment with +ToT PyTorch and PyTorch/XLA.

+learning_curves +

And we can observe that the DDP wrapper converges slower than the native XLA +approach even though it still achieves a high accuracy rate at 97.48% at the +end. (The native approach achieves 99%.)

+
+
+
+

Disclaimer

+

This feature is still experimental and under active development. Use it with +caution and feel free to file any bugs to the xla github +repo. For those who are interested in the +native xla data parallel approach, here is the +tutorial.

+

Here are some of the known issues that are under investigation:

+
    +
  • gradient_as_bucket_view=True needs to be enforced.

  • +
  • There are some issues while being used with torch.utils.data.DataLoader. ​​test_train_mp_mnist.py with real data crashes before exiting.

  • +
+
+
+

Fully Sharded Data Parallel (FSDP) in PyTorch XLA

+

Fully Sharded Data Parallel (FSDP) in PyTorch XLA is a utility for sharding Module parameters across data-parallel workers.

+

Example usage:

+
import torch
+import torch_xla.core.xla_model as xm
+import torch_xla.runtime as xr
+from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
+
+model = FSDP(my_module)
+optim = torch.optim.Adam(model.parameters(), lr=0.0001)
+output = model(x, y)
+loss = output.sum()
+loss.backward()
+optim.step()
+
+
+

It is also possible to shard individual layers separately and have an outer wrapper handle any leftover parameters.

+

Notes:

+
    +
  • The XlaFullyShardedDataParallel class supports both the ZeRO-2 optimizer (sharding gradients and optimizer states) and the ZeRO-3 optimizer (sharding parameters, gradients, and optimizer states) in https://arxiv.org/abs/1910.02054.

    +
      +
    • The ZeRO-3 optimizer should be implemented via nested FSDP with reshard_after_forward=True. See test/test_train_mp_mnist_fsdp_with_ckpt.py and test/test_train_mp_imagenet_fsdp.py for an example.

    • +
    • For large models that cannot fit into a single TPU memory or the host CPU memory, one should interleave submodule construction with inner FSDP wrapping. See `FSDPViTModel <https://github.com/ronghanghu/vit_10b_fsdp_example/blob/master/run_vit_training.py>`_ for an example.

    • +
    +
  • +
  • a simple wrapper checkpoint_module is provided (based on torch_xla.utils.checkpoint.checkpoint from https://github.com/pytorch/xla/pull/3524) to perform gradient checkpointing over a given nn.Module instance. See test/test_train_mp_mnist_fsdp_with_ckpt.py and test/test_train_mp_imagenet_fsdp.py for an example.

  • +
  • Auto-wrapping submodules: instead of manually nested FSDP wrapping, one can also specify an auto_wrap_policy argument to automatically wrap the submodules with inner FSDP. size_based_auto_wrap_policy in torch_xla.distributed.fsdp.wrap is an example of auto_wrap_policy callable, this policy wraps layers with the number of parameters larger than 100M. transformer_auto_wrap_policy in torch_xla.distributed.fsdp.wrap is an example of auto_wrap_policy callable for transformer-like model architectures.

  • +
+

For example, to automatically wrap all torch.nn.Conv2d submodules with inner FSDP, one can use:

+
from torch_xla.distributed.fsdp.wrap import transformer_auto_wrap_policy
+auto_wrap_policy = partial(transformer_auto_wrap_policy, transformer_layer_cls={torch.nn.Conv2d})
+
+
+

Additionally, one can also specify an auto_wrapper_callable argument to use a custom callable wrapper for the submodules (the default wrapper is just the XlaFullyShardedDataParallel class itself). For example, one can use the following to apply gradient checkpointing (i.e. activation checkpointing/rematerialization) to each auto-wrapped submodule.

+
from torch_xla.distributed.fsdp import checkpoint_module
+auto_wrapper_callable = lambda m, *args, **kwargs: XlaFullyShardedDataParallel(
+    checkpoint_module(m), *args, **kwargs)
+
+
+
    +
  • When stepping the optimizer, directly call optimizer.step and do not call xm.optimizer_step. The latter reduces the gradient across ranks, which is not needed for FSDP (where the parameters are already sharded).

  • +
  • When saving model and optimizer checkpoints during training, each training process needs to save its own checkpoint of the (sharded) model and optimizer state dicts (use master_only=False and set different paths for each rank in xm.save). When resuming, it needs to load the checkpoint for the corresponding rank.

  • +
  • Please also save model.get_shard_metadata() along with model.state_dict() as follows and use consolidate_sharded_model_checkpoints to stitch the sharded model checkpoints together into a full model state dict. See test/test_train_mp_mnist_fsdp_with_ckpt.py for an example. +.. code-block:: python3

    +
    +
    +
    ckpt = {

    ‘model’: model.state_dict(), +‘shard_metadata’: model.get_shard_metadata(), +‘optimizer’: optimizer.state_dict(),

    +
    +
    +

    } +ckpt_path = f’/tmp/rank-{xr.global_ordinal()}-of-{xr.world_size()}.pth’ +xm.save(ckpt, ckpt_path, master_only=False)

    +
    +
  • +
  • The checkpoint consolidation script can also be launched from the command line as follows. +.. code-block:: bash

    +
    +

    # consolidate the saved checkpoints via command line tool +python3 -m torch_xla.distributed.fsdp.consolidate_sharded_ckpts --ckpt_prefix /path/to/your_sharded_checkpoint_files --ckpt_suffix "_rank-*-of-*.pth"

    +
    +
  • +
+

The implementation of this class is largely inspired by and mostly follows the structure of fairscale.nn.FullyShardedDataParallel in https://fairscale.readthedocs.io/en/stable/api/nn/fsdp.html. One of the biggest differences from fairscale.nn.FullyShardedDataParallel is that in XLA we don’t have explicit parameter storage, so here we resort to a different approach to free full parameters for ZeRO-3.

+
+
+

Example training scripts on MNIST and ImageNet

+ +
+

Installation

+

FSDP is available on PyTorch/XLA 1.12 release and newer nightly. Please refer to https://github.com/pytorch/xla#-available-images-and-wheels for installation guide.

+
+
+

Clone PyTorch/XLA repo

+
git clone --recursive https://github.com/pytorch/pytorch
+cd pytorch/
+git clone --recursive https://github.com/pytorch/xla.git
+cd ~/
+
+
+
+
+

Train MNIST on v3-8 TPU

+

It gets around 98.9 accuracy for 2 epochs:

+
python3 ~/pytorch/xla/test/test_train_mp_mnist_fsdp_with_ckpt.py \
+  --batch_size 16 --drop_last --num_epochs 2 \
+  --use_nested_fsdp --use_gradient_checkpointing
+
+
+

This script automatically tests checkpoint consolidation at the end. You can also manually consolidate the sharded checkpoints via

+
# consolidate the saved checkpoints via command line tool
+python3 -m torch_xla.distributed.fsdp.consolidate_sharded_ckpts \
+  --ckpt_prefix /tmp/mnist-fsdp/final_ckpt \
+  --ckpt_suffix "_rank-*-of-*.pth"
+
+
+
+
+

Train ImageNet with ResNet-50 on v3-8 TPU

+

It gets around 75.9 accuracy for 100 epochs; download ImageNet-1k to /datasets/imagenet-1k:

+
python3 ~/pytorch/xla/test/test_train_mp_imagenet_fsdp.py \
+  --datadir /datasets/imagenet-1k --drop_last \
+  --model resnet50 --test_set_batch_size 64 --eval_interval 10 \
+  --lr 0.4 --batch_size 128 --num_warmup_epochs 5 --lr_scheduler_divide_every_n_epochs 30 --lr_scheduler_divisor 10 --num_epochs 100 \
+  --use_nested_fsdp
+
+
+

You can also add --use_gradient_checkpointing (which needs to be used along with --use_nested_fsdp or --auto_wrap_policy) to apply gradient checkpointing on the residual blocks.

+
+
+
+
+

Example training scripts on TPU pod (with 10 billion parameters)

+

To train large models that cannot fit into a single TPU, one should apply auto-wrap or manually wrap the submodules with inner FSDP when building the entire model to implement the ZeRO-3 algorithm.

+

Please see https://github.com/ronghanghu/vit_10b_fsdp_example for an example of sharded training of a Vision Transformer (ViT) model using this XLA FSDP PR.

+
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/notes/source_of_recompilation.html b/release/2.5/notes/source_of_recompilation.html new file mode 100644 index 00000000000..588e33f2ce8 --- /dev/null +++ b/release/2.5/notes/source_of_recompilation.html @@ -0,0 +1,868 @@ + + + + + + + + + + + + Source of recompilations in torch_xla — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • Source of recompilations in torch_xla
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

Source of recompilations in torch_xla

+
+

Let’s first start with some facts/constraints:

+
    +
  1. Graph compilations in XLA are pretty expensive.

  2. +
  3. XLA handles static shape only. In other words, even for the same IR graph, XLA recompiles when input shape changes.

  4. +
  5. Recompilations hurt torch_xla perf a lot when they happen, and it’s hard to understand and debug from a normal python user POV.

  6. +
+

Often when recompilation happens we say we just need dynamic shape support and then rest assured that when dynamic shape is supported in the future, all the recompilations will be magically gone. But this is not true, XLA now has pretty good bounded dynamic shapes coverage already, but we still see recompilations and they are expected.

+

**This doc aims to provide a detailed explanation of a few common sources of recompilations, and what we need to get rid of them. It will mainly focus on explaining the problem to beginners without any context. To make it easy to understand, the “solutions” proposed here may rely on impractical assumptions.**

+
+
+

#1. From input dataset.

+

Yes it’s pretty common that input dataset contains examples with different shapes, e.g. sentences with varying length or images with different sizes. Without normalization, it’ll cause recompilation for every new input shape.

+

Tensorflow graph mode users are more used to do padding/bucketization (tf.pad) to normalize input shapes to one or a few buckets. But this is kinda anti-pattern for PyTorch eager frontend users (which is the same user lazy tensor frontend is trying to target) since different input shapes just doesn’t matter for eager CPU/CUDA backend.

+

Proposed workaround: okay now let’s say we can work around this problem by teaching our users to do padding/bucketization (it’s hard in practice :P). What’s next?

+
+
+

#2. From operator output

+

There are certain operators that are semantically data-dependent and produce dynamic shape outputs: e.g. torch.nonzero returns indices of nonzero elements in its input tensor. So even if your input tensors to this operator always have the same shape, it might produce different shape outputs and cause recompilations.

+
+

2.1 Bounded dynamic shape can fix the case when you use the tensor with dynamic shape as a Tensor, without querying its real dimension.

+

Proposed workaround: let’s say now XLA supports bounded dynamic shape for all operators, is it good enough?

+
    +
  • by bounded dynamic shape it means we can pad the tensor to a theoretical max, trading more memory usage for less recompilation/faster speed.

  • +
+

Well, sort of. Let’s see the following example:

+
a = torch.tensor([1, 2, 0, 1, 3], device='xla')
+b = torch.nonzero(a)
+c = b * 2
+d = c + 1
+print(torch_xla._XLAC._get_xla_tensors_text([d]))
+
+
+

In the example above every node below b in the graph (namely c, d and everything depend on them) will have dynamic shape, it’s pretty obvious that b has dynamic shape in dimension 0 as shown below:

+
%9 = (s64[<=5,1]{1,0}, s64[]) aten::nonzero(%8), num_outputs=2 # b
+%10 = s64[5,1]{1,0} aten::mul(%9.0, %3) # c
+%11 = s64[5,1]{1,0} aten::add(%10, %2), ROOT=0 # d
+
+
+

Although it’s not shown directly in the graph, c & d indeed also have dynamic shape (in other words, [5, 1] is just padded shape and it’s masked).

+
print(torch_xla._XLAC._get_xla_tensor_dimension_size(d, 0)) # prints 4 instead of 5
+
+
+

You can see that in this case as long as the input tensor a has shape [5] we only compile the graph once. Bounded dynamic shape support helped!

+
+
+

2.2 what if real dimension is queried on a tensor with dynamic shape?

+

This is actually pretty commonly used since not all PyTorch computations are done in the form of Tensors.

+

For example, tensor.size() in PyTorch returns a tuple of ints instead of a Tensor of dtype=int. When tensor is a dynamic shape tensor, this op basically forces XLA to cut the graph and evaluate so that we can return the correct scalar (otherwise it’ll just return the padded shape which is wrong).

+

What makes it worse is that many PyTorch ops take scalar inputs as well. After you do s = tensor.size(0) and use s in other operators it also becomes a dynamic source. In this case we probably know how to pad it and its upper bound, but we cannot do it since it’s not even a Tensor!

+
a = torch.tensor([1, 2, 0, 1, 3], device='xla')
+b = torch.nonzero(a)
+s = a.size(0) # evaluation happens! nit: we use size() for simplicity, the actual API is _get_xla_tensor_dimension_size.
+c = torch.rand(s, device='xla') # c can be of any shape between [0, 5] which causes more recompilations!
+d = c + 1
+
+
+

So this one is actually hard to solve without PyTorch frontend’s help. What do we need?

+

In short, we need a Tensor world!

+

For example,

+
    +
  • tensor.size() should return a Tensor so that it can be a Tensor with dynamic shape and kept in the graph without early evaluation.

  • +
  • Tensor accessor, e.g. for 2D tensor, tensor[0][0] now returns a value but this need to return a tensor as well.

  • +
  • Implicitly this means all operators currently taking int/float/double as input need a Tensor overload as well. THIS IS A BIG ASK as it can easily explode our operator set.

    +
      +
    • It’s easier if we can make scalar to Tensor conversion really cheap so that we can only care about the Tensor overload.

    • +
    • In practice not all ops takes scalars from previous computation, so we’ve been adding Tensor variants by ad-hoc requests.

    • +
    • This is also a common ask from tracing base approaches I think.

    • +
    +
  • +
+

Okay now that we assume every op in PyTorch has the Tensor version we need, are we done?

+
+
+
+

#3. From control flow

+

No! We actually only solved the problem without data dependent control flow…

+

See the example below:

+
if x[0][0] == 3:
+  bla
+else:
+  blabla
+
+
+

Even if x[0][0] was a Tensor, we need to execute/materialize its value for the python interpreter to proceed. And different branch choices in multiple control flows combined means we have a lot of graphs to compile as well!

+

For now we just have no way to fix this. To fix it we need to lower the control flow from python to graph! Without too much thinking in implementation we can do this in two ways:

+
    +
  • ask users to explicitly use a control flow op instead of python if/else/while/for. This is currently supported as customized API in torch_xla but not widely adopted in users’ code. (python users are used to if/else/for and it’s hard to switch them to a uglier API unless there’s a huge perf win).

  • +
  • parse python source code to get the control flow statements automatically. This is like Torchscript: somehow merge the torchscripted graph into the lazily traced graph properly (including shape info etc). I haven’t thought through the steps of how to implement this indeed :P

  • +
+

But either solution above requires non-trivial amount of effort, either on user side or on the framework side. That’s why we currently just take the hit of early evaluation & multiple compilations as a short term solution given the bandwidth we have.

+

Okay so now we assume that we also have control flow lowered in the graph automagically, are we gold?

+

YES! Now you have your whole computation represented in a graph of Tensor operations, including control flow so that compilers can now consume and do their smart tricks! But tbh at this point your program is no longer very PyTorch-y.

+
+
+

Conclusion:

+

There’re actually multiple sources of recompilation and bounded dynamic shape support cannot solve all of them. The proposed workarounds in this doc are definitely sometimes impractical, and there might be better ways to fix each source properly that I’m totally unaware of. But I hope as we keep smashing our way to an ideal lazy tensor stack in this doc, it’s now easier for you to understand what the remaining blockers ahead of us are.

+
+
+

Appendix:

+
    +
  1. NNC uses symbolic shapes, does that help?

  2. +
+

Yes but partially. By having symbolic shapes, your compilation optimization no longer requires concrete shape values. In other words your generated kernels are more general than XLA’s static shape ones.

+

And exactly which problem does it help with?

+

It helps with cases like #1 and #2.1.

+
shape [3, 5] -> add -> transpose -> ... -> mul
+shape [6, 2] -> add -> transpose -> ... -> mul
+
+# with symbolic shape
+shape [x, y] -> add -> transpose -> ... -> mul
+
+
+

With symbolic shape your generated kernel doesn’t recompile as XLA does with static shapes.

+

XLA solves this problem in the other way, by using padding/bucketization (for #1) and bounded dynamic shape (for #2.1).

+

Brian Hirsh(@bdhirsh) asked some really good questions in the comment, moving here to make them more visible:

+
    +
  1. Is it worth sticking a TORCH_WARN in the XLA kernels of ops that produce data-dependent output shapes?

  2. +
+

Yea torch_warn is useful in telling users “hey your program won’t run blazing fast”. But for these data dependent ops, there isn’t an easy rewrite for them unless users change the logic in their model. (another example is torch.unique())

+
    +
  1. How do ops like nonzero impact our ability to devirtualize sizes()? If we want to devirtualize sizes(), we’ll need to be able to eagerly compute sizes for each op - won’t that mean we’re forced to evaluate the graph every time we hit an op like nonzero? Vs. right now, it sounds like we don’t actually force an evaluation when a user calls nonzero()?

  2. +
+

Yea great question! So in the current form it’s not a hard blocker since size() on XLA Tensors doesn’t carry source of truth size information. As shown in the example, the source of truth lives in IRValue and can be retrieved by _get_xla_tensor_dimension_size only. So if we decide to devirtualize size it’ll just enforce this discrepancy.

+

As a followup, if we have size() return a Tensor instead of values as mentioned in the proposed workarounds above, then size() won’t be able to be devirtualized since it becomes an operator (taking in a Tensor and producing a Tensor, with different implementations for different backends).

+
    +
  1. If I, e.g. call torch.add(input, 1) in a loop, where input varies in size from 1-1000, normally we would have to compile 1000 different graphs - but with dynamic shapes, it sounds like XLA will internally be able to generate a single graph where it says “use this graph if the input size is <=1000”. My question is: is “dynamic shape” a property of just the graph? Or of both the graph and the input. I.e. if my code were instead calling x = torch.add(input, 1); x.sizes() in a loop, does x have a dynamic shape at this point, meaning we’d need to run the graph to get the sizes? Or are we able to make it an eagerly computed property even in the presence of graphs with dynamic shapes.

  2. +
+

Yea in this case you’ll compile 1000 different graphs. Dynamic shape means its input has a dynamic dimension in it. So when you query x.sizes() (currently you need to use get_dimension_size to get the correct size) it’ll trigger execution (since the size didn’t change it doesn’t trigger recompilation). Without the line accessing size, it won’t trigger any recompilation/execution when the input has a dynamic dimension.

+
    +
  1. Would an alternative of making control flow available in the graph be just to come up with a way to ensure that XLA graphs don’t include control flow? i.e. if we have a model with a single conditional in the middle, then get XLA to produce 3 graphs: 1 for everything before the conditional, 1 for the if branch, and 1 for the else branch. That would mean you don’t get the exponential blowup of new graphs for every combination of paths taken, but (a) the graphs are smaller and provide fewer optimization opportunities, and (b) it would probably be pretty non-trivial to get XLA to recognize where a conditional path is taken.

  2. +
+

Great point! So if we could break them up into smaller graphs it’s indeed feasible. But in practice this pattern is annoying:

+
y = <some computation>
+x = y + 2
+if x[0] == 2 :
+  z = y +1
+else:
+  z = y - 1
+
+
+

Note you’ll evaluate x using a subgraph when you hit control flow, but there might be previous variables included in the branch computation as well (likely y is just one node smaller than x, but it wasn’t materialized when you evaluate x). So you’re actually evaluating 1 small graph and two big graphs for this example. And with more control flow involved, y could get updated in multiple branches which still produces different combos of large graphs.

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/objects.inv b/release/2.5/objects.inv new file mode 100644 index 00000000000..18c5102334f Binary files /dev/null and b/release/2.5/objects.inv differ diff --git a/release/2.5/py-modindex.html b/release/2.5/py-modindex.html new file mode 100644 index 00000000000..034ade316ef --- /dev/null +++ b/release/2.5/py-modindex.html @@ -0,0 +1,759 @@ + + + + + + + + + + + + Python Module Index — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+ + +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/quantized_ops.html b/release/2.5/quantized_ops.html new file mode 100644 index 00000000000..fef81ab2017 --- /dev/null +++ b/release/2.5/quantized_ops.html @@ -0,0 +1,887 @@ + + + + + + + + + + + + Quantized Operations for XLA device (Experimental feature) — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • Quantized Operations for XLA device (Experimental feature)
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

Quantized Operations for XLA device (Experimental feature)

+
+

This document outlines how to utilize quantized operations to enable quantization on XLA devices.

+

XLA Quantized ops offer a high-level abstraction for quantized operations (e.g., blockwise int4 quantized matrix multiplication). These ops are analogous to quantized CUDA kernels (example) in the CUDA ecosystem, providing similar functionality and performance benefits within the XLA framework.

+

NOTE: Currently this is classified as an experimental feature. Its API specifics +will change in the next (2.5) release.

+
+

How to use:

+

XLA quantized operations can be used as torch op, or a torch.nn.Module that wraps the torch.op. These 2 options give model developers the flexibility to choose the best way to integrate XLA quantized ops into their solution.

+

Both torch op and nn.Module are compatible with torch.compile( backend='openxla').

+
+

Call XLA quantized op in model code

+

Users can call XLA quantized ops in the same way as calling other regular PyTorch ops. This provides maximum flexibility in integrating XLA quantized ops into their applications. The quantized ops work in both eager mode and Dynamo, with regular PyTorch CPU tensor and XLA tensor.

+

Note Please check the docstring of the quantized ops for the layout of the quantized weights.

+
import torch
+import torch_xla.core.xla_model as xm
+import torch_xla.experimental.xla_quantized_matmul
+
+N_INPUT_FEATURES=10
+N_OUTPUT_FEATURES=20
+x = torch.randn((3, N_INPUT_FEATURES), dtype=torch.bfloat16)
+w_int = torch.randint(-128, 127, (N_OUTPUT_FEATURES, N_INPUT_FEATURES), dtype=torch.int8)
+scaler = torch.randn((N_OUTPUT_FEATURES,), dtype=torch.bfloat16)
+
+# Call with torch CPU tensor (For debugging purpose)
+matmul_output = torch.ops.xla.quantized_matmul(x, w_int, scaler)
+
+device = xm.xla_device()
+x_xla = x.to(device)
+w_int_xla = w_int.to(device)
+scaler_xla = scaler.to(device)
+
+# Call with XLA Tensor to run on XLA device
+matmul_output_xla = torch.ops.xla.quantized_matmul(x_xla, w_int_xla, scaler_xla)
+
+# Use with torch.compile(backend='openxla')
+def f(x, w, s):
+  return torch.ops.xla.quantized_matmul(x, w, s)
+
+f_dynamo = torch.compile(f, backend="openxla")
+dynamo_out_xla = f_dynamo(x_xla, w_int_xla, scaler_xla)
+
+
+

It’s common to wrap the quantized op into a custom nn.Module in model developers’ model code:

+
class MyQLinearForXLABackend(torch.nn.Module):
+  def __init__(self):
+    self.weight = ...
+    self.scaler = ...
+
+  def load_weight(self, w, scaler):
+    # Load quantized Linear weights
+    # Customized way to preprocess the weights
+    ...
+    self.weight = processed_w
+    self.scaler = processed_scaler
+
+
+  def forward(self, x):
+    # Do some random stuff with x
+    ...
+    matmul_output = torch.ops.xla.quantized_matmul(x, self.weight, self.scaler)
+    # Do some random stuff with matmul_output
+    ...
+
+
+
+
+

Module Swap

+

Alternatively, users can also use the nn.Module that wraps the XLA quantized ops and do module swap in the model code:

+
orig_model = MyModel()
+# Quantize the model and get quantized weights
+q_weights = quantize(orig_model)
+# Process the quantized weight to the format that XLA quantized op expects.
+q_weights_for_xla = process_for_xla(q_weights)
+
+# Do module swap
+q_linear = XlaQuantizedLinear(self.linear.in_features,
+                              self.linear.out_features)
+q_linear.load_quantized_weight(q_weights_for_xla)
+orig_model.linear = q_linear
+
+
+
+
+
+

Supported Quantized Operations:

+
+

Matrix Multiply

+ ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Weight Quantization Type

Activation Quantization Type

Dtype

Supported

per-channel (sym/asym)

N/A

W8A16

Yes

per-channel (sym/asym)

N/A

W4A16

Yes

per-channel

per-token

W8A8

No

per-channel

per-token

W4A8

No

blockwise (sym/asym)

N/A

W8A16

Yes

blockwise (sym/asym)

N/A

W4A16

Yes

blockwise

per-token

W8A8

No

blockwise

per-token

W4A8

No

+

Note W[X]A[Y] refers to Weight in X-bit, Activation in Y-bit. If X/Y is 4 or 8, it refers to int4/8. 16 for bfloat16 format.

+
+
+

Embedding

+

To be added

+
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/runtime.html b/release/2.5/runtime.html new file mode 100644 index 00000000000..c7be471a6fb --- /dev/null +++ b/release/2.5/runtime.html @@ -0,0 +1,1154 @@ + + + + + + + + + + + + PJRT Runtime — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

PJRT Runtime

+

PyTorch/XLA has migrated from the TensorFlow-based XRT runtime to the PJRT +runtime +used by JAX.

+

If you encounter a bug with PJRT, please file an issue on GitHub with the +runtime tag.

+

New features in PyTorch/XLA r2.1:

+
    +
  • PJRT is stable in PyTorch/XLA r2.1!

  • +
  • Public runtime APIs have moved from torch_xla.experimental.pjrt to +torch_xla.runtime.

    +
      +
    • The pjrt:// init method has been renamed to xla://, and it is registered +by torch_xla.distributed.xla_backend.

    • +
    • The previous torch_xla.experimental.* names are still available in this +release for compatibility.

    • +
    +
  • +
  • torchrun is now supported when using init_method='xla://'.

  • +
  • New plugins for XPU and Neuron via the PJRT C API.

  • +
+

New features in PyTorch/XLA r2.0:

+
    +
  • PJRT will be configured by default if you don’t pass in any other runtime +configuration. If you continue to set XRT configuration (XRT_TPU_CONFIG), +this change has no impact

  • +
  • New TPU runtime implementation in libtpu improves performance by up to 30%.

  • +
  • New xm.rendezvous implementation that scales to thousands of TPU cores

  • +
  • [experimental] torch.distributed support for TPU v2 and v3, including +pjrt:// init_method

  • +
+
+

TL;DR

+
    +
  • To use the PJRT preview runtime, set the PJRT_DEVICE environment variable to +CPU, TPU, or CUDA

  • +
  • In XRT, all distributed workloads are multiprocess, with one process per +device. On TPU v2 and v3 in PJRT, workloads are multiprocess and multithreaded +(4 processes with 2 threads each), so your workload should be thread-safe. See +Multithreading on TPU v2/v3 and the +Multiprocessing section of the API +guide +for more information. Key differences to keep in mind:

    +
      +
    • To initialize a model in a thread-safe way, either broadcast the parameters +across replicas after initialization +(torch_xla.experimental.pjrt.broadcast_master_param) or load each +replica’s parameters from a common checkpoint.

    • +
    • For other random number generation, use torch.Generator where possible. +The global torch RNG is not thread-safe, even if you set the same +torch.manual_seed across replicas.

    • +
    • To use torch.distributed, import torch_xla.experimental.pjrt_backend and +use the xla:// init_method.

    • +
    • These steps are optional for GPU and TPU v4.

    • +
    +
  • +
+

Sample diff from XRT to PJRT:

+
 import os
+
+ import torch
+ import torch.nn as nn
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ import torch.optim as optim
+ import torch.distributed as dist
+ import torch_xla
+ import torch_xla.core.xla_model as xm
+ import torch_xla.distributed.parallel_loader as pl
+ import torch_xla.distributed.xla_backend
++import torch_xla.runtime as xr
+
+
+ def _mp_fn(index):
+   device = xm.xla_device()
+-  dist.init_process_group('xla', rank=xr.global_ordinal(), world_size=xr.world_size())
++  dist.init_process_group('xla', init_method='xla://')
+
+   torch.manual_seed(42)
+   model = nn.Linear(128, 10).to(device)
+
++  # Optional for TPU v4 and GPU
++  xm.broadcast_master_param(model)
+   model = DDP(model, gradient_as_bucket_view=True)
+
+   loss_fn = nn.MSELoss()
+   optimizer = optim.SGD(model.parameters(), lr=.001)
+
+   for i in range(10):
+     data, target = torch.randn((128, 128), device=device), torch.randn((128, 10), device=device)
+
+     optimizer.zero_grad()
+     output = model(data)
+     loss = loss_fn(output, target)
+     loss.backward()
+
+     optimizer.step()
+     xm.mark_step()
+
+   # Print mean parameters so we can confirm they're the same across replicas
+   print([p.mean() for p in model.parameters()])
+
+ if __name__ == '__main__':
+-  os.environ['XRT_TPU_CONFIG'] = 'localservice;0;localhost:51011'
+-  os.environ['MASTER_ADDR'] = 'localhost'
+-  os.environ['MASTER_PORT'] = '12355'
+
++  # Recommended: set PJRT_DEVICE to your local device type
++  os.environ['PJRT_DEVICE'] = 'TPU'
+
+   torch_xla.launch(_mp_fn)
+
+
+
+
+

Benefits

+
    +
  • Simple runtime configuration: just set PJRT_DEVICE to TPU, CPU, or CUDA +and start using XLA! Or, let PJRT select a device automatically based on your +environment.

  • +
  • Improved performance: reduced overhead from gRPC means faster end-to-end +execution. On TorchBench 2.0, we observed a >35% improvement in training time +on TPU v4.

  • +
  • Easy pod execution: just copy your code to each TPU worker, and execute them +all at the same time with gcloud compute tpus tpuvm ssh --worker=all.

  • +
  • Better scaling: removes XRT’s limitation on parameter +sizes and supports up to 2048 TPU +chips.

  • +
+
+
+

Quickstart

+

To start using PJRT with PyTorch/XLA, all you need to do is set the +PJRT_DEVICE environment variable. If you’re working on a TPU v2 or v3, keep +reading to learn about the differences between TPU v2 and v3 and v4.

+
+

CPU

+

On any machine with PyTorch/XLA installed, you can run our MNIST example on CPU +like this:

+
PJRT_DEVICE=CPU python3 xla/test/test_train_mp_mnist.py --fake_data
+
+
+
+
+

TPU

+

To create a new TPU with PyTorch/XLA r2.0 installed:

+
gcloud alpha compute tpus tpu-vm create $USER-pjrt --accelerator-type=v4-8 --version=tpu-vm-v4-pt-2.0 --zone=us-central2-b --project=$PROJECT
+
+
+

On a v4-8, you can run our ResNet50 example like this:

+
git clone --depth=1 --branch r2.0 https://github.com/pytorch/xla.git
+PJRT_DEVICE=TPU python3 xla/test/test_train_mp_imagenet.py --fake_data --batch_size=256 --num_epochs=1
+
+
+

By default, PJRT will use all TPU chips. To use only one TPU chip, configure +TPU_PROCESS_BOUNDS and TPU_VISIBLE_CHIPS:

+
TPU_PROCESS_BOUNDS=1,1,1 TPU_VISIBLE_CHIPS=0 PJRT_DEVICE=TPU python3 xla/test/test_train_mp_imagenet.py --fake_data --batch_size=256 --num_epochs=1
+
+
+
+

Pods

+

On TPU Pods, use gcloud to run your command on each TPU in parallel:

+
gcloud alpha compute tpus tpu-vm ssh $USER-pjrt --zone=us-central2-b --project=$PROJECT --worker=all --command="git clone --depth=1 --branch r1.13 https://github.com/pytorch/xla.git"
+gcloud alpha compute tpus tpu-vm ssh $USER-pjrt --zone=us-central2-b --project=$PROJECT --worker=all --command="PJRT_DEVICE=TPU python3 xla/test/test_train_mp_imagenet.py --fake_data --batch_size=256 --num_epochs=1"
+
+
+
+
+

Docker

+

You can also use Docker to run your workload in a container with PyTorch/XLA +preinstalled:

+
export DOCKER_IMAGE=gcr.io/...
+
+# Optional: authenticate docker if your image is in a private GCP repository
+gcloud compute tpus tpu-vm ssh $USER-pjrt --zone=us-central2-b --project=$PROJECT --worker=all --command "sudo gcloud auth configure-docker"
+
+# Run your workload
+gcloud compute tpus tpu-vm ssh $USER-pjrt --zone=us-central2-b --project=$PROJECT --worker=all --command "sudo docker run --rm --privileged --net=host -e PJRT_DEVICE=TPU $DOCKER_IMAGE python pytorch/xla/test/test_train_mp_imagenet.py --fake_data"
+
+
+

Note that docker run requires privileged access to the host (--privileged) +to expose the TPU device to the container. Docker on TPU pods is only supported +with host networking --net=host at this time. See the Cloud TPU documentation +for more information.

+
+
+
+

GPU

+
+
+

Single-node GPU training

+

To use GPUs with PJRT, simply set PJRT_DEVICE=CUDA and configure +GPU_NUM_DEVICES to the number of devices on the host. For example:

+
PJRT_DEVICE=CUDA GPU_NUM_DEVICES=4 python3 xla/test/test_train_mp_imagenet.py --fake_data --batch_size=128 --num_epochs=1
+
+
+

You can also use torchrun to initiate the single-node multi-GPU training. For example,

+
PJRT_DEVICE=CUDA torchrun --nnodes 1 --nproc-per-node ${NUM_GPU_DEVICES} xla/test/test_train_mp_imagenet.py --fake_data --pjrt_distributed --batch_size=128 --num_epochs=1
+
+
+

In the above example, --nnodes means how many machines (physical machines or VMs) to be used (it is 1 since we do single-node training). --nproc-per-node means how many GPU devices to be used.

+
+
+

Multi-node GPU training

+

Note that this feature only works for cuda 12+. Similar to how PyTorch uses multi-node training, you can run the command as below:

+
PJRT_DEVICE=CUDA torchrun \
+--nnodes=${NUMBER_GPU_VM} \
+--node_rank=${CURRENT_NODE_RANK} \
+--nproc_per_node=${NUMBER_LOCAL_GPU_DEVICES} \
+--rdzv_endpoint=<internal_ip_address:port> multinode_training.py
+
+
+
    +
  • --nnodes: how many GPU machines to be used.

  • +
  • --node_rank: the index of the current GPU machines. The value can be 0, 1, …, ${NUMBER_GPU_VM}-1.

  • +
  • --nproc_per_node: the number of GPU devices to be used on the current machine.

  • +
  • --rdzv_endpoint: the endpoint of the GPU machine with node_rank==0, in the form host:port. The host will be the internal IP address. The port can be any available port on the machine. For single-node training/inference, this parameter can be omitted.

  • +
+

For example, if you want to train on 2 GPU machines: machine_0 and machine_1, on the first GPU machine machine_0, run

+
# PJRT_DEVICE=CUDA torchrun \
+--nnodes=2 \
+--node_rank=0 \
+--nproc_per_node=4 \
+--rdzv_endpoint="<MACHINE_0_INTERNAL_IP_ADDRESS>:12355" pytorch/xla/test/test_train_mp_imagenet.py  --fake_data --pjrt_distributed --batch_size=128 --num_epochs=1
+
+
+

On the second GPU machine, run

+
# PJRT_DEVICE=CUDA torchrun \
+--nnodes=2 \
+--node_rank=1 \
+--nproc_per_node=4 \
+--rdzv_endpoint="<MACHINE_0_INTERNAL_IP_ADDRESS>:12355" pytorch/xla/test/test_train_mp_imagenet.py  --fake_data --pjrt_distributed --batch_size=128 --num_epochs=1
+
+
+

the difference between the 2 commands above are --node_rank and potentially --nproc_per_node if you want to use different number of GPU devices on each machine. All the rest are identical. For more information about torchrun, please refer to this page.

+
+
+
+

Differences from XRT

+

Although in most cases we expect PJRT and XRT to work mostly interchangeably +from the end-user’s perspective (especially on TPU v4), there are some subtle +differences that are important to keep in mind. Importantly, XRT was designed +around the TPU Node architecture, so it will always spawn a client and a server +process, even on TPU VMs. Thus, every batch of inputs has additional latency +from serializing and deserializing data to send it over the network.

+

PJRT uses the local device directly with no intermediate server process. In the +default configuration, PJRT will create one process per TPU chip, or 4 processes +per TPU host. See the Cloud TPU +documentation for +more information about TPU architecture.

+
    +
  • Performance gains are possible for workloads constrained by overhead from serializing and deserializing data.

  • +
  • Under XRT, the server process is the only process that interacts with the TPU +devices, and client processes don’t have direct access to the TPU devices. +When profiling a single-host TPU (e.g. v3-8 or v4-8), you would normally see 8 +device traces (one for each TPU core). With PJRT, each process has one chip, +and a profile from that process will show only 2 TPU cores.

    +
      +
    • For the same reason, profiling does not work on TPU Pods with XRT, because +the server process runs independently from the user’s model code. PJRT does +not have that constraint, so it is possible to profile 2 TPU cores per +process in a TPU Pod.

    • +
    +
  • +
  • PJRT only supports the TPU VM architecture and we have no plans to support the +TPU Node architecture with PJRT.

  • +
  • Runtime configuration is significantly simpler with PJRT. xla_dist is not +required to run TPU Pod workloads. Instead, copy your code to each TPU host +([gcloud compute tpus tpu-vm +scp](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/tpus/tpu-vm/scp)) +and run the code on each host in parallel (e.g. [gcloud compute tpus tpu-vm +ssh --workers=all --command="PJRT_DEVICE=TPU python +run.py"](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/tpus/tpu-vm/ssh))

  • +
  • xm.rendezvous has been reimplemented using XLA-native collective +communication to enhance stability on large TPU pods. See below for more +details.

  • +
+
+

Multithreading on TPU v2/v3

+

On TPU v2 and v3, distributed workloads always run multithreaded, since each +TPU chip exposes two TPU cores as devices and only one process may open a TPU +chip at a time. In its default configuration, xmp.spawn automatically spawns +as many processes as possible (4 per TPU host) and creates two threads per +process (one per TPU core).

+

Note: on TPU v4, each TPU chip is represented as one PyTorch device, so +distributed workloads will run across 4 processes, each with only one thread. +This is identical to XRT’s behavior.

+

In most cases, this will not require substantial changes to your existing code. +The main change you will have to make in most cases is to model initialization. +Because torch‘s global RNG is shared between threads, results will vary +between threads and runs even if you set torch.manual_seed to the same value +in every replica. To get consistent parameters between replicas, either use +torch_xla.experimental.pjrt.broadcast_master_param to broadcast one replica’s +parameters to all other replicas, or load each replica’s parameters from a +common checkpoint.

+
+
+

Changes to xm.rendezvous

+

New in PyTorch/XLA r2.0

+

With XRT, worker 0 runs a mesh master service, and all processes on all workers +connect to that service over gRPC. In practice, we found that running a single +mesh master process was unreliable on TPU pods with thousands of chips due to +the number of inbound connections to worker 0. A single client process timing +out could cause a failure and force the entire workload to restart.

+

Thus, we have reimplemented xm.rendezvous with native XLA collective +communication, which is much more stable and well-tested on large TPU pods. This +imposes two new constraints compared to the XRT implementation:

+
    +
  • Because the payload has to become part of the XLA graph, xm.mark_step is +called both before and after the data is transferred. Calling xm.rendezvous +in the middle of model code may force an unwanted compilation.

  • +
  • Because XLA does not permit collective operations to run on a subset of +workers, all workers must participate in the rendezvous.

  • +
+

If you require the old behavior of xm.rendezvous (i.e. communicating data +without altering the XLA graph and/or synchronizing a subset of workers), +consider using torch.distributed.barrier +(https://pytorch.org/docs/stable/distributed.html#torch.distributed.barrier) +or torch.distributed.all_gather_object +(https://pytorch.org/docs/stable/distributed.html#torch.distributed.all_gather_object) +with a gloo process group. If you are also using the xla torch.distributed +backend, you can use torch.new_group to create a gloo subgroup. See this +example +from the PyTorch documentation. Keep in mind these constraints:

+
    +
  • torch.distributed is not fully supported on TPU v2/v3. Only a subset of +operations with the xla backend are implemented, and gloo will likely not +work as expected in a multithreaded context.

  • +
  • In our experiments, gloo does not scale well to thousands of TPU chips, so +expect this alternative to be less reliable than using xm.rendezvous with +PJRT at large scales.

  • +
+
+
+

PJRT and torch.distributed

+

New in PyTorch/XLA r2.0

+

When using PJRT with torch.distributed and +[torch.nn.parallel.DistributedDataParallel](https://github.com/pytorch/xla/blob/master/docs/ddp.md) +we strongly recommend using the new xla:// init_method, which automatically +finds the replica IDs, world size, and master IP by querying the runtime. For +example:

+
import torch
+import torch_xla
+import torch.distributed as dist
+import torch_xla.core.xla_model as xm
+from torch_xla.experimental import pjrt
+
+# Required for `xla://` init_method and `xla` backend
+import torch_xla.distributed.xla_backend
+
+def _all_gather(index: int):
+  # No need to pass in `rank` or `world_size`
+  dist.init_process_group('xla', init_method='xla://')
+
+  t = torch.tensor([index], dtype=torch.int32, device=xm.xla_device())
+  output = [torch.zeros_like(t) for _ in range(dist.get_world_size())]
+  dist.all_gather(output, t)
+
+  xm.mark_step()
+  print(output)
+
+if __name__ == '__main__':
+  torch_xla.launch(_all_gather)
+
+
+

Note: Although the xla:// init_method is not required on TPU v4, it is still +recommended. If you use env://, MASTER_ADDR must be set to the IP of the host that has +device 0, which is not always worker 0. The xla:// init_method finds this +IP automatically.

+

Note: For TPU v2/v3, you still need to import +torch_xla.experimental.pjrt_backend, as TPU v2/v3 support in +torch.distributed is still experimental.

+

For more information about using DistributedDataParallel on PyTorch/XLA on TPU v4, see +`ddp.md <./ddp.md>`_. For an example that uses DDP and PJRT together, +run the following example script on a TPU:

+
PJRT_DEVICE=TPU python xla/test/test_train_mp_mnist.py --ddp --pjrt_distributed --fake_data --num_epochs 1
+
+
+
+
+
+

Performance

+

TorchBench shows improvements in average training time across tasks with PJRT +compared to XRT, with an average improvement of over 35% on TPU v4-8. The +benefits vary significantly by task and model type, ranging from 0% to 175%. +The following chart shows the breakdown by task:

+PJRT vs XRT +
+

New TPU runtime

+

New in PyTorch/XLA r2.0

+

The PyTorch/XLA r2.0 release introduces support for the PJRT Plugin +API, +used to access the new TFRT-based TPU runtime in libtpu. This is now the +default runtime when PJRT_DEVICE=TPU is set. The legacy StreamExecutor-based +TPU runtime used in 1.13 will still be available with PJRT_DEVICE=TPU_LEGACY +in the 2.0 release, but it will be removed in a future version. If you encounter +an issue that only happens on TPU and not TPU_LEGACY, please file an issue +on GitHub.

+

In most cases, we expect performance to be similar between the two runtimes, but +in some cases, the new runtime may be up to 30% faster. The following chart +shows the breakdown by task:

+TFRT vs StreamExecutor +

Note: the improvements shown in this chart are also included in the PJRT vs XRT +comparison.

+
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/search.html b/release/2.5/search.html new file mode 100644 index 00000000000..fadd49514aa --- /dev/null +++ b/release/2.5/search.html @@ -0,0 +1,722 @@ + + + + + + + + + + + + Search — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ + + + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ + + + +
+ +
+ +
+ +
+ + +
+
+ +
+
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/searchindex.js b/release/2.5/searchindex.js new file mode 100644 index 00000000000..80a3bea95db --- /dev/null +++ b/release/2.5/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["debug", "eager_mode", "gpu", "index", "multi_process_distributed", "notes/source_of_recompilation", "quantized_ops", "runtime", "spmd", "torch_compile"], "filenames": ["debug.rst", "eager_mode.rst", "gpu.rst", "index.rst", "multi_process_distributed.rst", "notes/source_of_recompilation.md", "quantized_ops.rst", "runtime.rst", "spmd.rst", "torch_compile.rst"], "titles": ["Troubleshooting", "Eager Mode + Compile API", "How to run with PyTorch/XLA:GPU", "PyTorch/XLA documentation", "How to do DistributedDataParallel(DDP)", "Source of recompilations in torch_xla", "Quantized Operations for XLA device (Experimental feature)", "PJRT Runtime", "PyTorch/XLA SPMD User Guide", "TorchDynamo(torch.compile) integration in PyTorch XLA"], "terms": {"note": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "inform": [0, 3, 5, 7], "thi": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "section": [0, 2, 3, 7, 8], "i": [0, 1, 2, 3, 4, 6, 7, 9], "subject": 0, "remov": [0, 7], "futur": [0, 3, 5, 7, 8], "releas": [0, 2, 3, 4, 6, 7, 8, 9], "softwar": 0, "sinc": [0, 3, 4, 5, 7, 8, 9], "mani": [0, 3, 5, 7], "them": [0, 3, 5, 7], "ar": [0, 1, 4, 5, 6, 7, 8, 9], "peculiar": 0, "given": [0, 3, 4, 5, 8], "intern": [0, 3, 5, 7, 8], "implement": [0, 1, 4, 5, 7, 8, 9], "which": [0, 2, 3, 4, 5, 7, 8, 9], "might": [0, 3, 5], "chang": [0, 3, 4, 5, 6, 8], "befor": [0, 3, 4, 5, 7, 8, 9], "ani": [0, 3, 4, 5, 7, 8], "depth": [0, 7], "we": [0, 1, 2, 3, 4, 5, 7, 8, 9], "want": [0, 1, 3, 5, 7, 8, 9], "do": [0, 2, 3, 5, 6, 7, 8], "instal": [0, 2, 7], "should": [0, 1, 2, 3, 4, 5, 7, 8], "match": [0, 3], "out": [0, 1, 3, 7, 8, 9], "our": [0, 2, 3, 4, 5, 7, 8, 9], "readm": 0, "detial": 0, "avail": [0, 3, 4, 5, 7], "vm": [0, 2, 3, 4, 7], "python": [0, 2, 3, 4, 5, 7, 8, 9], "import": [0, 1, 
3, 4, 6, 7, 8, 9], "torch": [0, 1, 2, 3, 4, 5, 6], "torch_xla": [0, 1, 2, 4, 6, 7, 8, 9], "print": [0, 2, 3, 4, 5, 7, 8, 9], "__version__": 0, "2": [0, 1, 2, 3, 4, 6, 7, 9], "1": [0, 1, 2, 3, 4, 7, 8, 9], "0": [0, 2, 3, 4, 5, 7, 8, 9], "cu121": 0, "export": [0, 2, 7], "pjrt_devic": [0, 2, 3, 7], "tpu": [0, 2, 9], "python3": [0, 2, 3, 4, 7], "core": [0, 1, 3, 4, 6, 7, 8, 9], "xla_model": [0, 4, 6, 7, 8, 9], "xm": [0, 1, 3, 4, 6, 8, 9], "t1": [0, 3, 8], "100": [0, 2, 4], "devic": [0, 1, 2, 4, 5, 7, 8, 9], "xla_devic": [0, 3, 4, 6, 7, 8, 9], "t2": [0, 8], "200": 0, "300": [0, 1], "For": [0, 2, 3, 4, 5, 6, 7, 8, 9], "nightli": [0, 4, 8], "git": [0, 2, 4, 7], "clone": [0, 2, 7], "http": [0, 2, 3, 4, 7, 8], "github": [0, 2, 3, 4, 7, 8], "com": [0, 2, 3, 4, 7, 8], "test_train_mp_imagenet": [0, 2, 4, 7], "py": [0, 2, 3, 4, 7, 8], "fake_data": [0, 2, 4, 7], "x": [0, 3, 4, 5, 6, 8], "y": [0, 2, 3, 4, 5, 6, 8], "you": [0, 1, 2, 3, 4, 7, 8, 9], "us": [0, 1, 2, 3, 7, 9], "branch": [0, 5, 7], "rx": 0, "exampl": [0, 1, 3, 5, 6, 7, 9], "r2": [0, 7, 8], "If": [0, 2, 3, 5, 6, 7, 8], "can": [0, 1, 2, 3, 4, 6, 7, 8, 9], "conclud": 0, "correctli": [0, 2, 8], "To": [0, 1, 2, 3, 4, 5, 6, 7, 8], "diagnos": 0, "issu": [0, 1, 3, 4, 7, 8], "counter": [0, 3], "provid": [0, 3, 4, 5, 6, 8, 9], "first": [0, 3, 4, 7, 8], "thing": 0, "when": [0, 1, 3, 4, 7, 8, 9], "model": [0, 1, 4, 5, 7, 8, 9], "slow": 0, "gener": [0, 1, 3, 5, 7], "extrem": 0, "help": [0, 5], "pleas": [0, 2, 3, 4, 6, 7, 8], "try": [0, 5], "includ": [0, 2, 3, 5, 7, 8], "your": [0, 2, 3, 4, 5, 7, 8], "bug": [0, 4, 7], "sent": [0, 3], "u": [0, 2, 5, 7, 9], "have": [0, 2, 3, 4, 5, 7, 8, 9], "enabl": [0, 1, 2, 3, 4, 6, 8], "set": [0, 3, 4, 5, 7, 8, 9], "pt_xla_debug_level": 0, "coupl": [0, 3], "featur": [0, 3, 4, 7, 8], "also": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "lower": [0, 5], "level": [0, 6, 8, 9], "slip": 0, "analyz": 0, "summari": 0, "some": [0, 1, 3, 4, 6, 7, 8], "output": [0, 3, 4, 7, 9], "would": [0, 3, 5, 7], "pt": [0, 3, 7], 
"compiletim": 0, "too": [0, 5], "frequent": 0, "21": 0, "count": [0, 3], "dure": [0, 3, 4, 8, 9], "11": [0, 2, 5], "step": [0, 1, 2, 3, 4, 5, 7, 8, 9], "transferfromdevicetim": 0, "op": [0, 1, 3, 5, 8], "": [0, 1, 2, 3, 4, 6, 7, 8, 9], "aten": [0, 5], "_ctc_loss": 0, "_ctc_loss_backward": 0, "open": [0, 7], "abov": [0, 1, 2, 3, 4, 5, 7, 8, 9], "request": [0, 3, 4, 5, 8], "23": [0, 2], "12": [0, 2, 4, 7, 9], "everi": [0, 3, 5, 7, 8, 9], "caus": [0, 1, 3, 5, 7], "mark_step": [0, 1, 3, 4, 7], "parallel": [0, 3, 7], "loader": [0, 3, 9], "end": [0, 2, 3, 4, 7, 8], "graph": [0, 1, 3, 4, 5, 7, 8, 9], "info": [0, 2, 3, 5, 8], "hash": 0, "c74c3b91b855b2b123f833b0d5f86943": 0, "number": [0, 1, 3, 4, 7, 8], "input": [0, 1, 3, 7, 8], "35": [0, 2, 7], "107": 0, "frame": 0, "trigger": [0, 5], "workspac": 0, "dk3": 0, "1055": 0, "next": [0, 3, 5, 6, 8], "distribut": [0, 4], "parallel_load": [0, 3, 7], "44": 0, "__next__": 0, "32": [0, 3], "train_loop_fn": 0, "train_decoder_only_bas": 0, "48": [0, 4], "start_train": 0, "65": [0, 1], "modul": [0, 3, 4, 8], "73": 0, "post": [0, 3], "size": [0, 2, 3, 5, 7, 8], "548000": 0, "gb": 0, "7": [0, 3, 4, 9], "922460": 0, "alias": 0, "547871": 0, "intermedi": [0, 7], "124478": 0, "program": [0, 3, 5, 8, 9], "028210": 0, "user": [0, 1, 2, 3, 5, 6, 7, 9], "manual": [0, 1, 4], "call": [0, 1, 3, 4, 5, 7, 8, 9], "configur": [0, 2, 3, 7, 8], "batch": [0, 3, 7, 8], "exit": [0, 3, 4], "steptrac": 0, "region": [0, 1, 3, 8], "decid": [0, 3, 5], "access": [0, 3, 5, 7, 8], "often": [0, 1, 5], "due": [0, 7], "log": [0, 2], "valu": [0, 3, 5, 7, 8], "4": [0, 2, 3, 4, 5, 6, 7, 8, 9], "expect": [0, 1, 5, 6, 7, 9], "avoid": [0, 2], "5": [0, 2, 3, 4, 5, 6], "either": [0, 2, 3, 5, 7], "reduc": [0, 1, 3, 4, 7], "frequenc": 0, "add": [0, 3, 4, 5, 9], "see": [0, 2, 3, 4, 5, 7, 9], "pair": 0, "after": [0, 2, 3, 5, 7, 8], "stabil": [0, 7], "onli": [0, 1, 2, 3, 5, 7, 8, 9], "disabl": [0, 1, 3], "effici": [0, 9], "same": [0, 1, 3, 5, 6, 7, 8], "code": [0, 1, 3, 4, 5, 
7, 8, 9], "happen": [0, 1, 3, 5, 7], "onc": [0, 3, 5, 8, 9], "keep": [0, 5, 7], "dump": [0, 3], "ir": [0, 3, 5], "hlo": [0, 3], "follow": [0, 1, 2, 3, 4, 5, 7, 8], "compar": [0, 1, 3, 4, 7, 9], "each": [0, 3, 4, 5, 7, 8, 9], "sourc": [0, 3], "differ": [0, 3, 4, 5, 8], "explain": [0, 3, 5, 8], "how": [0, 1, 3, 5, 7], "detail": [0, 3, 5, 7], "put": [0, 3, 4], "line": [0, 1, 3, 4, 5], "met": 0, "short": [0, 5], "contain": [0, 2, 3, 5, 7], "few": [0, 3, 4, 5, 8], "kei": [0, 7, 8], "short_metrics_report": [0, 3], "full": [0, 2, 3, 4], "all": [0, 2, 3, 4, 5, 7, 8], "metrics_report": [0, 3], "like": [0, 3, 4, 5, 7, 8], "time": [0, 2, 3, 5, 7, 8, 9], "spent": 0, "handl": [0, 1, 4, 5, 8], "creat": [0, 4, 7, 8], "destroi": 0, "etc": [0, 1, 2, 3, 5, 8], "term": [0, 1, 5], "percentil": 0, "sampl": [0, 3, 7], "an": [0, 4, 5, 7, 8, 9], "totalsampl": 0, "202": 0, "06m09s401ms746": 0, "001u": 0, "valuer": 0, "778ms572": 0, "062u": 0, "second": [0, 4, 7, 8], "rate": [0, 2, 4], "425201": 0, "001ms32": 0, "778u": 0, "001ms61": 0, "283u": 0, "10": [0, 2, 3, 5, 6, 7, 8, 9], "001ms79": 0, "236u": 0, "20": [0, 2, 3, 4, 6], "001ms110": 0, "973u": 0, "50": [0, 2], "001ms228": 0, "773u": 0, "80": [0, 2], "001ms339": 0, "183u": 0, "90": 0, "001ms434": 0, "305u": 0, "95": 0, "002ms921": 0, "063u": 0, "99": [0, 4], "21s102ms853": 0, "173u": 0, "name": [0, 2, 3, 5, 7, 8], "integ": [0, 3], "track": [0, 8], "statu": 0, "cachedsynctensor": 0, "395": [0, 4], "In": [0, 1, 2, 3, 5, 7, 8, 9], "start": [0, 1, 3, 7], "indic": [0, 3, 5], "context": [0, 3, 5, 7], "switch": [0, 3, 4, 5], "between": [0, 2, 3, 4, 5, 7, 8], "cpu": [0, 2, 4, 5, 6, 8], "potenti": [0, 3, 7, 8], "optim": [0, 1, 2, 3, 4, 5, 7, 8, 9], "area": 0, "oper": [0, 3, 7, 8], "rout": 0, "back": [0, 3, 8], "engin": 0, "thei": [0, 3, 5, 7, 8], "fulli": [0, 1, 3, 7], "qualifi": 0, "c": [0, 3, 5, 7], "namespac": 0, "nonzero": [0, 5], "33": [0, 2, 4, 9], "other": [0, 2, 3, 4, 5, 6, 7, 8], "than": [0, 4, 5, 7], "_local_scalar_dens": 0, "usual": 
[0, 1, 3], "mean": [0, 3, 4, 5, 7, 8], "miss": [0, 3], "feel": [0, 4], "free": [0, 4], "epoch": [0, 2, 4], "clear_al": 0, "xla_dynamo_debug": 0, "workload": [0, 3, 7, 8], "bottleneck": 0, "resourc": [0, 3], "offici": 0, "tutori": [0, 4, 8], "colab": 0, "notebook": 0, "mnist": [0, 2, 3, 7], "train": [0, 2, 3, 8], "script": [0, 3, 7], "util": [0, 2, 3, 4, 6, 8], "captur": [0, 3], "take": [0, 3, 5, 8], "look": [0, 3, 8], "train_resnet_benchmark": 0, "blob": [0, 3, 4, 7, 8], "master": [0, 3, 4, 7, 8], "_": [0, 4, 7, 9], "behav": 0, "semant": [0, 5], "regular": [0, 3, 6], "share": [0, 2, 3, 7, 8], "interfac": [0, 3, 8], "gpu": [0, 3, 8], "howev": [0, 8], "constraint": [0, 7], "hardwar": [0, 3], "lazi": [0, 5, 8, 9], "evalu": [0, 5], "suggest": 0, "certain": [0, 5], "pattern": [0, 5, 9], "result": [0, 3, 4, 7, 8], "bad": 0, "show": [0, 3, 4, 7], "mind": [0, 7], "yield": [0, 3], "degrad": 0, "recompil": [0, 1, 3], "expens": [0, 1, 5], "automat": [0, 3, 4, 5, 7, 8], "new": [0, 1, 3, 5, 8, 9], "shape": [0, 3, 8], "encount": [0, 7], "within": [0, 3, 6, 8], "huge": [0, 4, 5], "speedup": [0, 9], "rest": [0, 5, 7], "order": [0, 2, 3, 8], "must": [0, 3, 7, 8], "constant": [0, 8], "comput": [0, 2, 3, 5, 7, 8], "across": [0, 3, 4, 7, 8], "host": [0, 2, 3, 4, 7, 8], "possibl": [0, 3, 4, 7, 8], "direct": [0, 7], "indirect": 0, "introduc": [0, 1, 4, 7, 8], "dynam": [0, 9], "mask": [0, 5], "index": [0, 3, 7], "base": [0, 1, 2, 3, 4, 5, 7, 8], "where": [0, 3, 4, 5, 7, 8], "loop": [0, 1, 3, 5, 8], "iter": [0, 3, 8, 9], "thu": [0, 2, 7], "requir": [0, 2, 3, 5, 7, 8], "solut": [0, 5, 6], "low": 0, "variat": 0, "pad": [0, 5], "fix": [0, 8, 9], "don": [0, 1, 3, 4, 5, 7], "t": [0, 1, 3, 4, 5, 7, 8], "nativ": [0, 1, 2, 4, 7, 8], "translat": 0, "transfer": [0, 3, 7, 8], "memori": [0, 2, 4, 5], "lead": 0, "signific": [0, 9], "slowdown": [0, 4], "item": 0, "explicitli": [0, 3, 5], "ask": [0, 1, 5], "unless": [0, 5], "necessari": [0, 3], "most": [0, 3, 7, 9], "checkout": [0, 2], "find": [0, 4, 7, 
8], "even": [0, 3, 4, 5, 7], "scalar": [0, 5], "substitut": 0, "control": [0, 3, 8], "flow": 0, "applic": [0, 6, 8], "e": [0, 3, 4, 5, 6, 7, 8], "g": [0, 2, 3, 5, 6, 7, 8], "clip_grad": 0, "norm": 0, "problemat": 0, "impact": [0, 3, 4, 5, 7], "so": [0, 2, 3, 4, 5, 7, 8], "patch": 0, "clip_grad_norm_": 0, "instead": [0, 1, 3, 4, 5, 7, 8, 9], "give": [0, 6, 8], "dramat": 0, "improv": [0, 3, 7, 8, 9], "block": [0, 3, 4, 8], "els": [0, 5], "paramet": [0, 3, 7, 8], "total_norm": 0, "zero": [0, 4, 8], "none": [0, 3, 8], "p": [0, 2, 5, 7], "param_norm": 0, "grad": 0, "norm_typ": 0, "add_": 0, "clip_coef": 0, "max_norm": 0, "1e": [0, 9], "6": [0, 2, 3, 5], "mul_": 0, "data_parallel": 0, "mai": [0, 3, 5, 7, 8], "drop": 0, "last": 0, "make": [0, 1, 2, 3, 4, 5, 7, 8, 9], "sure": [0, 2, 3], "amount": [0, 3, 5], "work": [0, 3, 4, 5, 6, 7, 8, 9], "dataset": [0, 4], "small": [0, 1, 4, 5, 9], "therefor": 0, "better": [0, 1, 3, 5, 7, 9], "those": [0, 3, 4], "case": [0, 3, 7, 8, 9], "opaqu": [0, 3], "alwai": [0, 3, 5, 7, 8], "appear": [0, 3], "contigu": [0, 3], "without": [0, 3, 7, 8], "storag": [0, 2, 3, 4, 8], "network": [0, 3, 7, 8], "stride": 0, "move": [0, 4, 5, 7, 8], "save": [0, 4, 8], "directli": [0, 3, 4, 5, 7, 8], "load": [0, 4, 6, 7, 8], "were": [0, 3, 5], "from": [0, 4, 8, 9], "unavail": [0, 3], "fail": [0, 3, 8], "let": [0, 3, 7, 8, 9], "machin": [0, 2, 7], "care": [0, 3, 5], "taken": [0, 3, 4, 5, 8], "type": [0, 2, 3, 4, 6, 7], "doe": [0, 3, 5, 7, 8], "preserv": [0, 3], "view": [0, 3], "relationship": [0, 3], "reconstruct": 0, "copi": [0, 3, 7], "return": [0, 1, 3, 4, 5, 6, 8, 9], "deep": 0, "shallow": 0, "weight": [0, 3, 6, 8], "one": [0, 3, 4, 5, 7, 8, 9], "anoth": [0, 3, 5], "ty": 0, "done": [0, 3, 5], "otherwis": [0, 3, 5, 8], "two": [0, 3, 5, 7, 8], "independ": [0, 3, 7], "made": [0, 5, 8], "But": [0, 3, 5], "submit": 0, "addit": [0, 2, 3, 4, 7], "doesn": [0, 3, 5, 8], "_xlac": [0, 5], "_get_xla_tensors_text": [0, 5], "re": [0, 1, 3, 5, 7, 8], 
"_get_xla_tensors_hlo": 0, "function": [0, 1, 3, 6, 8, 9], "prior": [0, 8], "alreadi": [0, 2, 3, 4, 5, 8], "materi": [0, 3, 5, 8], "There": [0, 1, 3, 4, 5, 8, 9], "behavior": [0, 3, 7], "stack": [0, 3, 5, 8], "degre": 0, "xla_ir_debug": 0, "trace": [0, 1, 3, 4, 5, 7, 8, 9], "node": [0, 5], "henc": [0, 9], "allow": [0, 3, 8], "wa": [0, 3, 5, 7, 8], "respons": [0, 8, 9], "xla_hlo_debug": [0, 3], "_xla_ir": 0, "activ": [0, 3, 4, 6], "propag": 0, "metadata": 0, "xla_save_tensors_fil": 0, "path": [0, 2, 3, 4, 5], "file": [0, 2, 3, 4, 7], "becom": [0, 5, 7], "realli": [0, 5, 9], "big": [0, 5], "option": [0, 3, 6, 7, 8], "left": 0, "long": [0, 1, 4, 5], "append": 0, "clean": [0, 9], "sheet": 0, "xla_save_tensors_fmt": 0, "format": [0, 3, 6, 9], "store": [0, 3], "_xla_save_tensor": 0, "text": 0, "default": [0, 1, 2, 3, 4, 7, 8], "dot": 0, "graphviz": 0, "xla_flag": 0, "xla_dump_to": 0, "tmp": [0, 3, 4], "dir_nam": 0, "unoptim": 0, "optimz": 0, "per": [0, 2, 3, 4, 6, 7, 9], "xla_metrics_fil": 0, "local": [0, 2, 3, 7, 8], "exist": [0, 1, 3, 7, 8, 9], "xla_save_hlo_fil": 0, "error": [0, 3], "offend": 0, "xla_sync_wait": 0, "forc": [0, 3, 5, 7], "sync": [0, 1, 2, 3], "wait": [0, 3], "its": [0, 3, 4, 7, 8, 9], "complet": [0, 3], "xla_use_eager_debug_mod": 0, "eagerli": [0, 1, 3, 5], "bypass": 0, "overal": 0, "lot": [0, 3, 5], "slower": [0, 4], "usag": [0, 2, 3, 4, 5, 8], "higher": [0, 8], "optimizaiton": 0, "skip": [0, 9], "tf_cpp_log_thread_id": 0, "tf": [0, 5], "thread": [0, 3, 7, 8], "id": [0, 2, 3, 7], "multithread": [0, 3], "process": [0, 1, 2, 4, 6, 7, 8], "tf_cpp_vmodul": 0, "vlog": 0, "form": [0, 5, 7], "tf_cpp_min_log_level": 0, "messag": [0, 3], "turn": 0, "warn": 0, "tf_vlog": 0, "tensorflow": [0, 3, 5, 7], "xla_dump_hlo_graph": 0, "part": [0, 1, 3, 7, 8], "runtim": [0, 2, 4, 8], "rais": [0, 3], "xla_util": 0, "cc": 0, "record": [0, 3], "save1": 0, "xla_graph_executor": 0, "pjrt_computation_cli": 0, "3": [0, 1, 2, 3, 4, 6, 8, 9], "pr": [0, 4], "repo": [0, 3], "dir": 
0, "pytorch_test_with_slow": 0, "test_torch": 0, "k": 0, "test_put_xla_uint8": 0, "command": [0, 2, 3, 4, 7], "need": [0, 2, 3, 4, 5, 7, 8], "torch_test_devic": 0, "pytorch_test_bas": 0, "doc": [1, 2, 5, 7, 8], "go": [1, 2, 3, 8], "over": [1, 2, 3, 4, 7, 8], "pytorch": [1, 5, 6, 7], "xla": [1, 5, 7], "experiment": [1, 4, 7, 8, 9], "The": [1, 2, 3, 4, 5, 6, 7, 8, 9], "goal": 1, "experi": [1, 4, 7, 8], "more": [1, 2, 3, 5, 7, 8], "align": 1, "develop": [1, 3, 4, 6, 8, 9], "easier": [1, 5], "current": [1, 2, 3, 4, 5, 6, 7, 8, 9], "run": [1, 4, 5, 6, 7, 9], "lazytensor": [1, 3], "torchvis": [1, 9], "resnet18": [1, 9], "randn": [1, 3, 4, 6, 7, 8, 9], "64": [1, 4, 9], "224": 1, "execut": [1, 2, 3, 4, 5, 7, 8, 9], "actual": [1, 4, 5, 8], "multipl": [1, 5, 6, 9], "drawback": 1, "approach": [1, 4, 5], "confus": 1, "about": [1, 3, 5, 7], "framework": [1, 3, 5, 6], "non": [1, 3, 5, 8], "data": [1, 2, 3, 5, 7, 9], "preprocess": [1, 6], "pend": [1, 3], "get": [1, 2, 3, 4, 5, 6, 7], "leak": 1, "main": [1, 7, 8], "whole": [1, 3, 5, 9], "veri": [1, 2, 3, 5], "It": [1, 2, 3, 4, 5, 6, 8, 9], "hard": [1, 4, 5, 9], "debug": [1, 5, 6], "why": [1, 5], "mitig": 1, "ux": 1, "eager_mod": [1, 3], "true": [1, 3, 4, 5, 7, 8], "mark": [1, 3], "compiled_model": 1, "right": [1, 5, 9], "awai": 1, "ha": [1, 3, 5, 7, 8], "wrap": [1, 3, 4, 6, 8], "pretti": [1, 3, 4, 5], "straight": 1, "forward": [1, 4, 6, 8, 9], "enter": 1, "target": [1, 3, 5, 7, 9], "reenabl": 1, "perfomr": 1, "backend": [1, 3, 5, 6, 7, 8, 9], "openxla": [1, 6, 9], "recommen": 1, "overhad": 1, "def": [1, 3, 4, 6, 7, 8, 9], "step_fn": 1, "loss_fn": [1, 3, 4, 7, 9], "zero_grad": [1, 3, 4, 7], "logit": [1, 8], "loss": [1, 2, 3, 4, 7, 8, 9], "backward": [1, 3, 4, 7, 8, 9], "refactor": 1, "becaus": [1, 3, 7, 8], "togeth": [1, 3, 4, 7, 8], "now": [1, 3, 5, 7, 8], "recommend": [1, 2, 3, 7, 8], "reason": [1, 4, 7], "layer": [1, 4, 8], "decod": 1, "much": [1, 3, 5, 7, 9], "just": [1, 3, 4, 5, 7, 8], "llama2": 1, "fake": [1, 8], "singl": [1, 
4, 5, 8, 9], "chip": [1, 7], "v4": [1, 3, 7, 8, 9], "8": [1, 2, 3, 5, 6, 7, 8, 9], "below": [1, 2, 5, 7, 8], "observ": [1, 4, 7], "token": [1, 6], "147": 1, "achiev": [1, 4], "45": [1, 2], "perform": [1, 3, 4, 6, 8, 9], "trainer": 1, "test": [1, 2, 4, 7], "found": [1, 2, 7], "here": [1, 2, 3, 4, 5, 8, 9], "perfomran": 1, "depend": [1, 3, 5], "tri": 1, "resnet50": [1, 3, 7, 9], "exepct": 1, "meant": 1, "logic": [1, 3, 5, 8], "random": [1, 3, 6, 7], "compil": [2, 5, 6, 7], "acceler": [2, 3, 7], "basic": [2, 4, 5], "nvidia": 2, "attach": [2, 8], "cloud": [2, 3, 7, 8, 9], "googl": [2, 3, 7], "cuda": [2, 3, 5, 6, 7], "driver": 2, "publish": 2, "prebuilt": 2, "imag": [2, 4, 5, 7], "cuda11": 2, "correspond": [2, 3, 4, 8], "config": 2, "list": [2, 3, 8], "refer": [2, 3, 4, 6, 7, 8], "sudo": [2, 7], "pull": [2, 4], "central1": 2, "pkg": 2, "dev": [2, 4], "nightly_3": 2, "8_cuda_12": 2, "toolkit": 2, "datacent": 2, "latest": 2, "guid": [2, 3, 4, 7], "html": [2, 4, 7], "curl": 2, "fssl": 2, "io": [2, 4, 7], "libnvidia": 2, "gpgkei": 2, "gpg": 2, "dearmor": 2, "o": [2, 4, 7], "usr": 2, "keyr": 2, "l": 2, "stabl": [2, 4, 7], "deb": 2, "sed": 2, "sign": 2, "tee": 2, "apt": 2, "d": [2, 3, 5], "updat": [2, 3, 5, 8], "ctk": 2, "systemctl": 2, "restart": [2, 7], "shm": 2, "16g": 2, "net": [2, 7], "bin": 2, "bash": [2, 4], "exec": 2, "awk": 2, "nr": 2, "visibl": [2, 3, 5], "smi": 2, "verifi": 2, "root": [2, 3, 5], "20ab2c7a2d06": 2, "dec": 2, "06": 2, "24": 2, "29": [2, 4, 9], "2022": 2, "510": 2, "47": 2, "03": 2, "version": [2, 7, 8], "persist": [2, 3, 8], "m": [2, 4, 5], "bu": 2, "disp": 2, "A": [2, 3, 5, 6, 7, 8], "volatil": 2, "uncorr": 2, "ecc": 2, "fan": 2, "temp": 2, "perf": [2, 5], "pwr": 2, "cap": 2, "mig": 2, "tesla": 2, "v100": 2, "sxm2": 2, "off": 2, "00000000": 2, "00": [2, 4], "04": [2, 9], "n": [2, 3, 6], "36c": 2, "p0": 2, "38w": 2, "300w": 2, "0mib": 2, "16384mib": 2, "gi": 2, "ci": 2, "pid": 2, "No": [2, 5, 6, 7], "ld_library_path": 2, "account": 2, "echo": 2, 
"link": 2, "bashrc": 2, "lib64": 2, "compat": [2, 6, 7, 8], "x86_64": 2, "linux": 2, "architecutr": 2, "architectur": [2, 4, 7], "system": [2, 8], "unam": 2, "pip3": 2, "whl": 2, "googleapi": 2, "cp310": 2, "manylinux_2_28_x86_64": 2, "repositori": [2, 7], "imagenet": 2, "what": [2, 3], "gpu_num_devic": [2, 7], "recurs": [2, 4, 8], "prepar": 2, "begin": [2, 8], "38": 2, "89059": 2, "82": 2, "globalr": 2, "13": [2, 3, 4, 7], "79297": 2, "117": 2, "16": [2, 3, 4, 6, 8], "84": 2, "36": 2, "40": [2, 4], "43628": 2, "281": 2, "49": [2, 9], "43": [2, 9], "60": [2, 4], "83108": 2, "346": 2, "88": [2, 8], "108": 2, "99023": 2, "373": 2, "62": [2, 9], "132": 2, "56": 2, "92699": 2, "384": 2, "152": 2, "14": 2, "02": [2, 4], "120": 2, "68816": 2, "388": 2, "169": 2, "09": 2, "train_resnet_bas": 2, "35pm": 2, "utc": 2, "jun": 2, "08": 2, "2024": 2, "887794017791748": 2, "746502586051985": 2, "877807140350342": 2, "238": 2, "4789458412044": 2, "867819786071777": 2, "329": 2, "86095958663503": 2, "30": [2, 4, 7], "857839584350586": 2, "367": 2, "3038003653586": 2, "847847938537598": 2, "381": 2, "53141087190835": 2, "837860584259033": 2, "387": 2, "80462249591113": 2, "260": 2, "628140926361084": 2, "391": 2, "135639565343": 2, "270": 2, "618192195892334": 2, "6901797745233": 2, "280": 2, "608224391937256": 2, "1602680460045": 2, "290": 2, "598264217376709": 2, "6731498290759": 2, "36pm": 2, "reus": [2, 3], "rule": 2, "modifi": [2, 8, 9], "insid": [2, 3, 8], "cd": [2, 4], "use_cuda": 2, "bdist_wheel": 2, "hermet": 2, "xla_cuda": 2, "been": [2, 3, 5, 7, 8], "successfulli": 2, "packag": [3, 4], "learn": [3, 7], "connect": [3, 7, 8], "troubleshoot": 3, "eager": [3, 4, 5, 6], "mode": [3, 4, 5, 6], "distributeddataparallel": [3, 7], "ddp": [3, 7], "quantiz": 3, "pjrt": [3, 8], "shard": 3, "fsdp": 3, "via": [3, 4, 7], "advanc": 3, "topic": 3, "checkpoint": [3, 4, 7], "torchdynamo": 3, "integr": [3, 6], "describ": [3, 4, 8], "familiar": [3, 8], "initi": [3, 4, 7, 8], "environ": [3, 4, 
7, 8], "ad": [3, 5, 6, 8, 9], "t0": 3, "Or": [3, 5, 7], "matrix": 3, "multipli": [3, 8], "mm": 3, "neural": 3, "l_in": 3, "linear": [3, 4, 6, 7], "nn": [3, 4, 6, 7, 8, 9], "l_out": 3, "floattensor": 3, "throw": 3, "build": [3, 4], "convert": [3, 4], "specif": [3, 4, 6], "snippet": [3, 8], "highlight": 3, "nllloss": 3, "sgd": [3, 4, 7, 9], "lr": [3, 4, 7, 8, 9], "momentum": 3, "train_load": [3, 8], "easi": [3, 5, 7], "definit": [3, 5], "dataload": [3, 4, 8], "acquir": 3, "pl": [3, 7, 8], "_mp_fn": [3, 7], "mp_device_load": 3, "mpdeviceload": [3, 8], "optimizer_step": [3, 4], "__name__": [3, 4, 7], "__main__": [3, 4, 7], "launch": [3, 4, 7, 9], "arg": [3, 4], "three": 3, "previou": [3, 5, 7], "wrapper": [3, 4, 8], "spawn": [3, 7], "torchrun": [3, 7], "abl": [3, 5, 8], "assign": 3, "being": [3, 4, 8], "up": [3, 5, 7, 8], "own": [3, 4], "v2": 3, "v3": 3, "check": [3, 6, 8], "onto": 3, "preload": 3, "overlap": [3, 8, 9], "batches_per_execut": 3, "consolid": [3, 4], "gradient": [3, 4], "all_reduce_gradi": 3, "remain": [3, 5], "retriev": [3, 5, 8, 9], "parent": 3, "multiprocess": [3, 7], "setup": [3, 4], "talk": 3, "bit": [3, 6], "basi": 3, "gcloud": [3, 7], "project": [3, 7], "howto": 3, "focu": [3, 5], "perspect": [3, 7], "assum": [3, 4, 5, 8], "train_mnist_xla": 3, "ssh": [3, 7], "tpuvm": [3, 7, 8], "scp": [3, 7], "alpha": [3, 7], "zone": [3, 7], "worker": [3, 4, 7, 8], "outsid": 3, "underli": 3, "infrastructur": 3, "awar": 3, "global": [3, 7, 8], "topologi": [3, 8], "ordin": 3, "cross": [3, 8], "commun": [3, 7, 8, 9], "regard": [3, 9], "fakedata": 3, "though": [3, 4], "act": 3, "uniqu": [3, 5], "immedi": [3, 8], "hand": 3, "until": [3, 8], "defer": 3, "separ": [3, 4, 8, 9], "fuse": 3, "invis": 3, "caller": 3, "construct": [3, 4, 8], "send": [3, 7, 8], "synchron": [3, 7, 8], "insert": 3, "barrier": [3, 7], "design": [3, 7, 8, 9], "paper": 3, "represent": [3, 8], "expos": [3, 7, 8], "unlik": 3, "adjust": 3, "wai": [3, 4, 5, 6, 7, 8, 9], "again": 3, "appreci": 3, 
"accommod": 3, "transit": 3, "recreat": 3, "destin": 3, "previous": 3, "state_dict": [3, 4, 8], "limit": [3, 7], "footprint": 3, "serial": [3, 7], "xser": 3, "stream": 3, "restor": [3, 8], "load_state_dict": [3, 8], "under": [3, 4, 7], "consum": [3, 5], "disk": 3, "significantli": [3, 7], "still": [3, 4, 5, 7, 8], "occur": 3, "opt": 3, "through": [3, 5, 8], "initialize_cach": 3, "xr": [3, 4, 7, 8], "your_cache_path": 3, "readonli": 3, "fals": [3, 4, 8], "specifi": [3, 4, 8], "whether": 3, "write": [3, 8], "mount": 3, "xmp": [3, 7], "init": [3, 4, 7, 9], "mp_fn": 3, "f": [3, 4, 6, 8], "xla_cache_": 3, "global_ordin": [3, 4, 7], "runnabl": [3, 4, 8], "int": [3, 5, 7, 8], "instanc": [3, 4, 8], "virtual": [3, 8], "device_count": [3, 8], "address": [3, 7, 8], "bool": 3, "finish": 3, "callabl": [3, 4], "full_graph": 3, "str": 3, "num_different_graphs_allow": 3, "repres": [3, 5, 7], "funciton": 3, "pass": [3, 4, 7, 8], "manag": [3, 8], "pt_xla_debug": 3, "well": [3, 5, 7, 8], "exceed": 3, "foo": 3, "sin": 3, "co": 3, "foo2": 3, "compiled_foo2": 3, "manual_se": [3, 7], "seed": 3, "state": [3, 4, 8], "rng": [3, 7], "device_typ": 3, "select": [3, 7, 8], "string": [3, 8], "local_process_count": 3, "local_device_count": 3, "total": [3, 5, 8], "addressable_device_count": 3, "global_device_count": 3, "global_runtime_device_count": [3, 8], "especi": [3, 7, 8, 9], "world_siz": [3, 4, 7, 8], "particip": [3, 7], "job": [3, 9], "rang": [3, 7, 8], "guarante": 3, "predict": 3, "nor": 3, "local_ordin": 3, "get_master_ip": 3, "ip": [3, 7, 8], "discoveri": 3, "use_spmd": [3, 8], "auto": [3, 4], "replic": [3, 8], "spmd_advanc": 3, "md": [3, 7], "is_spmd": 3, "devkind": 3, "custom": [3, 4, 5, 6, 8], "deprec": 3, "xla_device_hw": 3, "union": 3, "map": 3, "real": [3, 9], "is_master_ordin": 3, "while": [3, 4, 5], "num_host": 3, "boolean": 3, "all_reduc": 3, "reduce_typ": 3, "scale": [3, 7, 8, 9], "float": [3, 5], "group": [3, 4, 7, 8], "pin_layout": 3, "inplac": [3, 8], "One": [3, 4], 
"reduce_sum": 3, "reduce_mul": 3, "reduce_and": 3, "reduce_or": 3, "reduce_min": 3, "reduce_max": 3, "appli": [3, 4, 8], "replica": [3, 7], "defin": [3, 8], "pin": 3, "pine": 3, "prevent": [3, 8, 9], "corrupt": 3, "slightli": 3, "unpin": 3, "hlomodul": 3, "mix": [3, 8], "constrain": [3, 7], "hold": [3, 8], "tupl": [3, 5, 8], "itself": [3, 4], "all_gath": [3, 7], "dim": 3, "gather": [3, 8], "along": [3, 4], "dimens": [3, 8], "all_to_al": 3, "split_dimens": 3, "concat_dimens": 3, "split_count": 3, "alltoal": 3, "www": 3, "org": [3, 4, 7], "operation_semant": 3, "upon": 3, "split": 3, "concat": 3, "add_step_closur": 3, "closur": 3, "run_async": 3, "ones": [3, 5], "report": 3, "consol": 3, "tensorboard": 3, "content": 3, "intermediari": 3, "inspect": 3, "point": [3, 5], "typic": 3, "ensur": [3, 5, 8], "live": [3, 5], "argument": [3, 4, 9], "queu": 3, "sequenti": 3, "advis": 3, "throttl": 3, "event": 3, "asynchron": [3, 8], "wait_device_op": 3, "async": [3, 9], "whose": 3, "empti": 3, "optimizer_arg": 3, "dict": [3, 4], "gradid": 3, "parallelload": [3, 8], "dataparallel": 3, "support": [3, 4, 5, 7, 8, 9], "dictionari": 3, "self": [3, 4, 6, 8], "file_or_path": 3, "textio": 3, "master_onli": [3, 4], "global_mast": 3, "nest": [3, 4], "combin": [3, 5], "object": [3, 8], "overrid": 3, "locat": 3, "flag": 3, "obj_to_sav": 3, "path_to_sav": 3, "rendezv": 3, "tag": [3, 7], "payload": [3, 7], "byte": 3, "b": [3, 5, 7, 8, 9], "mesh": [3, 7], "client": [3, 7], "reach": 3, "xrt": 3, "server": [3, 7], "effect": 3, "alia": 3, "xla_rendezv": 3, "join": 3, "exchang": 3, "posit": 3, "mesh_reduc": 3, "reduce_fn": 3, "toxlatensorarena": 3, "reduct": 3, "receiv": 3, "come": [3, 5], "numpi": [3, 8], "np": [3, 8], "accuraci": [3, 4], "test_accuraci": 3, "set_rng_stat": 3, "get_rng_stat": 3, "get_memory_info": 3, "memoryinfo": 3, "bytes_us": 3, "290816": 3, "bytes_limit": 3, "34088157184": 3, "get_stablehlo": 3, "stablehlo": 3, "infer": [3, 7, 8], "straightforward": 3, "identifi": [3, 8], 
"env": [3, 7, 8], "var": [3, 8], "get_stablehlo_bytecod": 3, "bytecod": [3, 9], "class": [3, 4, 6, 8], "kwarg": [3, 4, 8], "background": [3, 8], "upload": [3, 8], "per_device_load": [3, 8], "constructor": 3, "train_device_load": 3, "xla_multiprocess": 3, "fn": 3, "nproc": [3, 7], "daemon": 3, "start_method": 3, "At": 3, "moment": 3, "maximum": [3, 6], "creation": 3, "method": [3, 7, 8], "mark_shard": [3, 8], "xlashardedtensor": 3, "partition_spec": [3, 8], "annot": [3, 8], "partit": 3, "spec": 3, "xlatensor": [3, 8], "spmdpartition": [3, 8], "device_mesh": [3, 8], "axi": [3, 8], "rank": [3, 4, 7, 8], "mesh_shap": [3, 8], "ax": [3, 8], "dynamo_custom_op": 3, "dynamo": [3, 6, 9], "variant": [3, 5], "recogniz": 3, "traceabl": 3, "num_devic": [3, 8], "device_id": [3, 8], "arrai": [3, 8], "clear_shard": 3, "clear": 3, "cast": 3, "place": [3, 8], "get_1d_mesh": 3, "set_global_mesh": 3, "get_global_mesh": 3, "axis_nam": [3, 8], "helper": 3, "ndarrai": 3, "ravel": 3, "reshap": 3, "fill": 3, "element": [3, 5, 8], "sequenc": 3, "Its": 3, "length": [3, 5], "len": 3, "get_xla_supported_devic": 3, "get_logical_mesh": 3, "ordereddict": [3, 8], "hybridmesh": [3, 8], "ici_mesh_shap": [3, 8], "dcn_mesh_shap": [3, 8], "hybrid": 3, "ici": 3, "dcn": [3, 8], "increas": 3, "intens": 3, "mdl": 3, "inner": [3, 4, 8], "outer": [3, 4, 8], "slice": [3, 8], "metric": [3, 4], "counter_nam": 3, "metric_nam": 3, "counter_valu": 3, "metric_data": 3, "total_sampl": 3, "accumul": 3, "retain": 3, "circular": 3, "buffer": 3, "sum": [3, 4, 8], "document": [4, 6, 7], "further": 4, "against": 4, "minimum": [4, 8], "abil": [4, 5], "api": [4, 5, 6, 7, 8, 9], "And": [4, 5, 8], "who": 4, "know": [4, 5], "xla_backend": [4, 7, 8], "similar": [4, 6, 7], "nccl": 4, "gloo": [4, 7, 8], "dist": [4, 7, 8], "init_process_group": [4, 7, 8], "new_rank": 4, "gradient_as_bucket_view": [4, 7], "ddp_model": 4, "final": [4, 8], "launcher": 4, "demo_fn": 4, "everyth": [4, 5], "touch": [4, 8], "plu": 4, "five": 4, "sy": 4, 
"tempfil": 4, "master_addr": [4, 7], "localhost": [4, 7], "master_port": [4, 7], "12355": [4, 7], "cleanup": 4, "destroy_process_group": 4, "toymodel": 4, "__init__": [4, 6], "super": [4, 9], "net1": 4, "1000000": 4, "relu": 4, "net2": 4, "demo_bas": 4, "assert": 4, "graident_as_bucket_view": 4, "mseloss": [4, 7], "001": [4, 7], "label": 4, "run_demo": 4, "collect": [4, 7, 8, 9], "num_epoch": [4, 7], "tot": 4, "statist": 4, "produc": [4, 5], "unit": 4, "median": 4, "90th": 4, "std": 4, "cv": 4, "418": 4, "54": 4, "419": 4, "22": 4, "430": 4, "9": [4, 5], "76": 4, "97": 4, "407": 4, "39": 4, "seem": 4, "extra": [4, 8], "overhead": [4, 7, 9], "test_train_mp_mnist": [4, 7], "17864": 4, "19": [4, 9], "20108": 4, "96": 4, "24351": 4, "74": 4, "5866": 4, "83": 4, "10701": 4, "11770": 4, "14313": 4, "78": 4, "3102": 4, "92": 4, "41": [4, 9], "round": 4, "heavili": [4, 9], "sens": 4, "amort": 4, "logdir": 4, "converg": 4, "high": [4, 6], "caution": 4, "interest": 4, "known": 4, "investig": 4, "enforc": [4, 5], "crash": 4, "xlafullyshardeddataparallel": 4, "my_modul": [4, 8], "adam": [4, 8], "0001": [4, 8], "individu": [4, 8], "leftov": [4, 8], "both": [4, 5, 6, 7, 8, 9], "arxiv": 4, "ab": 4, "1910": 4, "02054": 4, "reshard_after_forward": 4, "test_train_mp_mnist_fsdp_with_ckpt": 4, "test_train_mp_imagenet_fsdp": 4, "larg": [4, 5, 7, 8], "cannot": [4, 5], "fit": 4, "interleav": 4, "submodul": 4, "fsdpvitmodel": 4, "ronghanghu": 4, "vit_10b_fsdp_exampl": 4, "run_vit_train": 4, "simpl": [4, 7, 8], "checkpoint_modul": [4, 8], "3524": 4, "auto_wrap_polici": [4, 8], "size_based_auto_wrap_polici": 4, "polici": [4, 8], "larger": [4, 9], "100m": 4, "transformer_auto_wrap_polici": [4, 8], "transform": [4, 8], "conv2d": 4, "partial": [4, 5, 8], "transformer_layer_cl": [4, 8], "addition": 4, "auto_wrapper_cal": 4, "remateri": 4, "lambda": 4, "latter": 4, "resum": 4, "get_shard_metadata": 4, "consolidate_sharded_model_checkpoint": 4, "stitch": 4, "ckpt": 4, "shard_metadata": 4, 
"ckpt_path": 4, "pth": 4, "tool": 4, "consolidate_sharded_ckpt": 4, "ckpt_prefix": 4, "your_sharded_checkpoint_fil": 4, "ckpt_suffix": 4, "_rank": 4, "inspir": 4, "mostli": [4, 7], "structur": [4, 8], "fairscal": 4, "fullyshardeddataparallel": 4, "readthedoc": 4, "en": 4, "biggest": [4, 9], "explicit": 4, "resort": 4, "train_resnet_fsdp_auto_wrap": 4, "newer": 4, "wheel": 4, "around": [4, 5, 7], "98": 4, "batch_siz": [4, 7], "drop_last": 4, "use_nested_fsdp": 4, "use_gradient_checkpoint": 4, "final_ckpt": 4, "75": 4, "download": 4, "1k": 4, "datadir": 4, "test_set_batch_s": 4, "eval_interv": 4, "128": [4, 6, 7], "num_warmup_epoch": 4, "lr_scheduler_divide_every_n_epoch": 4, "lr_scheduler_divisor": 4, "residu": 4, "entir": [4, 7], "algorithm": [4, 8], "vision": 4, "vit": 4, "static": 5, "word": 5, "hurt": 5, "understand": 5, "normal": [5, 7, 8], "pov": 5, "sai": 5, "assur": 5, "magic": 5, "gone": 5, "good": [5, 8], "coverag": 5, "aim": [5, 8], "explan": 5, "common": [5, 6, 7, 8], "rid": 5, "mainli": 5, "problem": 5, "beginn": 5, "propos": 5, "reli": 5, "impract": 5, "assumpt": 5, "ye": [5, 6], "sentenc": 5, "vari": [5, 7, 8], "ll": 5, "bucket": [5, 8], "kinda": 5, "anti": 5, "frontend": 5, "matter": 5, "workaround": 5, "okai": 5, "teach": 5, "practic": [5, 7, 8], "enough": 5, "theoret": 5, "max": [5, 8], "trade": 5, "less": [5, 7, 9], "faster": [5, 7, 9], "speed": [5, 9], "sort": 5, "obviou": 5, "shown": [5, 7], "s64": 5, "num_output": 5, "mul": 5, "although": [5, 7], "inde": 5, "_get_xla_tensor_dimension_s": 5, "commonli": 5, "dtype": [5, 6, 7], "cut": 5, "correct": 5, "wrong": 5, "wors": 5, "probabl": 5, "upper": 5, "nit": 5, "simplic": 5, "rand": 5, "solv": 5, "world": [5, 7, 8, 9], "kept": 5, "earli": 5, "accessor": 5, "2d": [5, 8], "implicitli": 5, "doubl": 5, "overload": 5, "easili": [5, 9], "explod": 5, "convers": 5, "cheap": 5, "ve": 5, "hoc": 5, "think": 5, "verison": 5, "bla": 5, "blabla": 5, "interpret": 5, "proce": 5, "choic": 5, "wide": 5, "adopt": 5, 
"uglier": 5, "win": 5, "pars": 5, "statement": 5, "torchscript": 5, "somehow": 5, "merg": 5, "lazili": [5, 8], "properli": 5, "haven": 5, "thought": 5, "trivial": 5, "effort": [5, 8], "side": 5, "That": 5, "hit": 5, "bandwidth": 5, "automag": 5, "gold": 5, "smart": 5, "trick": 5, "tbh": 5, "longer": 5, "sometim": 5, "unawar": 5, "hope": 5, "smash": 5, "ideal": [5, 9], "blocker": 5, "ahead": 5, "nnc": 5, "symbol": 5, "By": [5, 7], "concret": 5, "kernel": [5, 6], "exactli": 5, "transpos": 5, "With": [5, 7, 9], "brian": 5, "hirsh": 5, "bdhirsh": 5, "question": 5, "comment": 5, "worth": 5, "stick": 5, "torch_warn": 5, "yea": 5, "tell": 5, "hei": 5, "won": 5, "blaze": 5, "fast": 5, "isn": [5, 8], "rewrit": [5, 8], "devirtu": 5, "v": [5, 7], "sound": 5, "great": 5, "carri": [5, 8], "truth": 5, "As": [5, 8], "irvalu": 5, "discrep": 5, "followup": 5, "mention": [5, 9], "1000": 5, "my": [5, 8], "properti": 5, "presenc": 5, "get_dimention_s": 5, "didn": 5, "altern": [5, 6, 7], "condit": 5, "middl": [5, 7], "exponenti": 5, "blowup": 5, "smaller": 5, "fewer": 5, "opportun": 5, "recogn": [5, 9], "could": [5, 7, 8], "break": 5, "feasibl": 5, "annoi": 5, "z": 5, "subgraph": 5, "variabl": [5, 7], "wasn": 5, "materiz": 5, "involv": [5, 8], "combo": 5, "outlin": 6, "offer": [6, 8], "abstract": [6, 8], "blockwis": 6, "int4": 6, "These": [6, 7, 8], "analog": 6, "ecosystem": 6, "benefit": [6, 8], "classifi": 6, "flexibl": 6, "choos": [6, 8], "best": [6, 9], "tensor": [6, 7, 8, 9], "docstr": 6, "layout": 6, "xla_quantized_matmul": 6, "n_input_featur": 6, "n_output_featur": 6, "bfloat16": 6, "w_int": 6, "randint": 6, "127": 6, "int8": 6, "scaler": 6, "purpos": 6, "matmul_output": 6, "quantized_matmul": 6, "x_xla": 6, "w_int_xla": 6, "scaler_xla": 6, "matmul_output_xla": 6, "w": 6, "f_dynamo": 6, "dynamo_out_xla": 6, "myqlinearforxlabackend": 6, "load_weight": 6, "processed_w": 6, "processed_scal": 6, "stuff": 6, "orig_model": 6, "mymodel": 6, "q_weight": 6, "q_weights_for_xla": 6, 
"process_for_xla": 6, "q_linear": 6, "xlaquantizedlinear": 6, "in_featur": 6, "out_featur": 6, "load_quantized_weight": 6, "channel": 6, "sym": 6, "asym": 6, "w8a16": 6, "w4a16": 6, "w8a8": 6, "w4a8": 6, "migrat": 7, "jax": 7, "public": 7, "renam": 7, "regist": [7, 8], "init_method": [7, 8], "plugin": 7, "xpu": 7, "neuron": 7, "continu": [7, 9], "xrt_tpu_config": 7, "libtpu": 7, "thousand": 7, "preview": 7, "On": [7, 8], "safe": 7, "broadcast": 7, "broadcast_master_param": 7, "pjrt_backend": 7, "diff": 7, "42": 7, "confirm": 7, "localservic": 7, "51011": 7, "grpc": 7, "torchbench": 7, "2048": 7, "read": 7, "central2": 7, "256": 7, "tpu_process_bound": 7, "tpu_visible_chip": 7, "r1": 7, "preinstal": 7, "docker_imag": 7, "gcr": 7, "authent": 7, "privat": 7, "gcp": 7, "auth": 7, "rm": 7, "privileg": 7, "simpli": 7, "nnode": 7, "num_gpu_devic": 7, "pjrt_distribut": 7, "physic": [7, 8], "number_gpu_vm": 7, "node_rank": 7, "current_node_rank": 7, "nproc_per_nod": 7, "number_local_gpu_devic": 7, "rdzv_endpoint": 7, "internal_ip_address": 7, "port": 7, "multinode_train": 7, "endpoint": 7, "omit": [7, 8], "machine_0": 7, "machine_1": 7, "machine_0_internal_ip_address": 7, "ident": 7, "page": 7, "interchang": 7, "subtl": 7, "importantli": 7, "latenc": 7, "deseri": 7, "gain": 7, "interact": 7, "profil": 7, "plan": 7, "simpler": 7, "xla_dist": 7, "sdk": 7, "reimplement": 7, "enhanc": 7, "substanti": 7, "consist": 7, "servic": 7, "unreli": 7, "inbound": 7, "failur": 7, "impos": 7, "unwant": 7, "permit": 7, "subset": 7, "old": 7, "alter": 7, "consid": 7, "all_gather_object": 7, "new_group": 7, "subgroup": 7, "reliabl": 7, "strongli": 7, "queri": 7, "_all_gath": 7, "int32": 7, "zeros_lik": 7, "get_world_s": 7, "averag": 7, "task": 7, "175": 7, "chart": 7, "breakdown": 7, "tfrt": 7, "legaci": 7, "streamexecutor": 7, "tpu_legaci": 7, "comparison": [7, 8], "discuss": 8, "gspmd": 8, "overview": 8, "illustr": 8, "ml": 8, "proper": 8, "hint": 8, "figur": 8, "strategi": 8, "th": 8, 
"concept": 8, "librari": 8, "cluster": 8, "interconnect": 8, "almost": 8, "encourag": 8, "fist": 8, "express": 8, "paral": 8, "fsdpv2": 8, "famou": 8, "enjoi": 8, "bring": 8, "tabl": 8, "review": 8, "proceed": 8, "spmd_fully_sharded_data_parallel": 8, "spmdfullyshardeddataparallel": 8, "autowrap": 8, "decoderlay": 8, "functool": 8, "decoder_only_model": 8, "shard_output": 8, "fall": 8, "categori": 8, "0th": 8, "children": 8, "infinit": 8, "fork": 8, "hf": 8, "demonstr": 8, "cover": 8, "proced": 8, "src": 8, "_input_sharding_": 8, "4d": 8, "input_shard": 8, "shardingspec": 8, "input_mesh": 8, "s1": 8, "s2": 8, "s3": 8, "s4": 8, "_after": 8, "_the": 8, "unnecessari": 8, "forth": 8, "techniqu": 8, "decis": 8, "nice": 8, "arrang": 8, "center": 8, "box": 8, "multislic": 8, "accept": 8, "denot": 8, "hardcod": 8, "rfc": 8, "delai": 8, "except": 8, "satisfi": 8, "subclass": 8, "__torch_dispatch__": 8, "invok": [8, 9], "global_tensor": 8, "special": 8, "strictli": 8, "local_shard": 8, "xlashard": 8, "4e8e5511555073ce8b6d1a436bf808c9333dcac6": 8, "xla_sharded_tensor": 8, "l12": 8, "ongo": 8, "distributedtensor": 8, "prototyp": 8, "proof": 8, "distribute_tensor": 8, "devicemesh": 8, "big_tensor": 8, "100000": 8, "my_dtensor": 8, "stai": 8, "tune": 8, "upcom": [8, 9], "dynamo_mark_shard": 8, "placement": 8, "visual": 8, "multi": 8, "visualize_tensor_shard": 8, "visualize_shard": 8, "rich": 8, "2x2": 8, "generated_t": 8, "use_color": 8, "style": 8, "tile": 8, "partial_repl": 8, "envvar": 8, "xla_auto_spmd": 8, "_tensor": 8, "distribute_modul": 8, "auto_polici": 8, "mymodul": 8, "sharded_model": 8, "behvaior": 8, "xla_auto_use_group_shard": 8, "reshard": 8, "xla_auto_spmd_mesh": 8, "unset": 8, "dedic": 8, "planner": 8, "spmdsaveplann": 8, "spmdloadplann": 8, "dist_cp": 8, "distributed_checkpoint": 8, "xc": 8, "storage_writ": 8, "filesystemwrit": 8, "checkpoint_dir": 8, "desir": 8, "storage_read": 8, "filesystemread": 8, "checkpointmanag": 8, "all_step": 8, "save_async": 8, 
"written": 8, "unblock": 8, "durat": 8, "dispatch": 8, "preemption": 8, "detect": 8, "termin": 8, "provis": 8, "queuedresourc": 8, "autocheckpoint": 8, "chkpt_on_preempt": 8, "fsspec": 8, "filesystem": 8, "gc": 8, "prime_optim": 8, "chkpt_mgr": 8, "tracked_step": 8, "highest": 8, "best_step": 8, "prime": 8, "enumer": 8, "present": 8, "attempt": 8, "unprim": 8, "destruct": 8, "discov": 8, "jit": 9, "unmodifi": 9, "hook": 9, "bridg": 9, "torchfx": 9, "technologi": 9, "fx": 9, "a_xla": 9, "b_xla": 9, "compiled_cod": 9, "eval_model": 9, "xla_resnet18": 9, "eval": 9, "dynamo_resnet18": 9, "no_grad": 9, "resent18": 9, "binari": 9, "analysi": 9, "bench": 9, "59": 9, "resnext50_32x4d": 9, "91": 9, "alexnet": 9, "28": 9, "mobilenet_v2": 9, "18": 9, "mnasnet1_0": 9, "68": 9, "vgg16": 9, "bert_pytorch": 9, "squeezenet1_1": 9, "timm_vision_transform": 9, "52": 9, "geomean": 9, "team": 9, "train_model": 9, "crossentropyloss": 9, "pred": 9, "train_model_main": 9, "dynamo_train_model": 9, "xla_optim": 9, "weight_decai": 9, "extract": 9, "07": 9, "81": 9, "87": 9, "fwd": 9, "bwd": 9, "e2": 9, "hide": 9, "cost": 9, "scenario": 9, "promis": 9, "complex": 9, "tradit": 9, "seen": 9, "expand": 9, "excit": 9, "invest": 9, "upstream": 9, "matur": 9, "stori": 9}, "objects": {"": [[3, 0, 0, "-", "torch_xla"]], "torch_xla": [[3, 1, 1, "", "compile"], [3, 1, 1, "", "device"], [3, 1, 1, "", "device_count"], [3, 1, 1, "", "devices"], [3, 0, 0, "-", "experimental"], [3, 1, 1, "", "manual_seed"], [3, 0, 0, "-", "runtime"], [3, 1, 1, "", "sync"]], "torch_xla.core": [[3, 0, 0, "-", "xla_model"]], "torch_xla.core.xla_model": [[3, 1, 1, "", "add_step_closure"], [3, 1, 1, "", "all_gather"], [3, 1, 1, "", "all_reduce"], [3, 1, 1, "", "all_to_all"], [3, 1, 1, "", "get_memory_info"], [3, 1, 1, "", "get_rng_state"], [3, 1, 1, "", "get_stablehlo"], [3, 1, 1, "", "get_stablehlo_bytecode"], [3, 1, 1, "", "is_master_ordinal"], [3, 1, 1, "", "mesh_reduce"], [3, 1, 1, "", "optimizer_step"], [3, 1, 1, "", 
"rendezvous"], [3, 1, 1, "", "save"], [3, 1, 1, "", "set_rng_state"], [3, 1, 1, "", "wait_device_ops"], [3, 1, 1, "", "xla_device"], [3, 1, 1, "", "xla_device_hw"]], "torch_xla.debug": [[3, 0, 0, "-", "metrics"]], "torch_xla.debug.metrics": [[3, 1, 1, "", "counter_names"], [3, 1, 1, "", "counter_value"], [3, 1, 1, "", "metric_data"], [3, 1, 1, "", "metric_names"], [3, 1, 1, "", "metrics_report"], [3, 1, 1, "", "short_metrics_report"]], "torch_xla.distributed": [[3, 0, 0, "-", "parallel_loader"], [3, 0, 0, "-", "spmd"], [3, 0, 0, "-", "xla_multiprocessing"]], "torch_xla.distributed.parallel_loader": [[3, 2, 1, "", "MpDeviceLoader"]], "torch_xla.distributed.spmd": [[3, 2, 1, "", "HybridMesh"], [3, 2, 1, "", "Mesh"], [3, 1, 1, "", "clear_sharding"], [3, 1, 1, "", "get_1d_mesh"], [3, 1, 1, "", "get_global_mesh"], [3, 1, 1, "", "mark_sharding"], [3, 1, 1, "", "set_global_mesh"]], "torch_xla.distributed.xla_multiprocessing": [[3, 1, 1, "", "spawn"]], "torch_xla.experimental": [[3, 1, 1, "", "eager_mode"]], "torch_xla.runtime": [[3, 1, 1, "", "addressable_device_count"], [3, 1, 1, "", "device_type"], [3, 1, 1, "", "get_master_ip"], [3, 1, 1, "", "global_device_count"], [3, 1, 1, "", "global_ordinal"], [3, 1, 1, "", "global_runtime_device_count"], [3, 1, 1, "", "initialize_cache"], [3, 1, 1, "", "is_spmd"], [3, 1, 1, "", "local_device_count"], [3, 1, 1, "", "local_ordinal"], [3, 1, 1, "", "local_process_count"], [3, 1, 1, "", "use_spmd"], [3, 1, 1, "", "world_size"]]}, "objtypes": {"0": "py:module", "1": "py:function", "2": "py:class"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "function", "Python function"], "2": ["py", "class", "Python class"]}, "titleterms": {"troubleshoot": 0, "saniti": 0, "check": [0, 2], "pytorch": [0, 2, 3, 4, 8, 9], "xla": [0, 2, 3, 4, 6, 8, 9], "version": 0, "perform": [0, 7], "A": 0, "simpl": [0, 2], "calcul": 0, "run": [0, 2, 3, 8], "resnet": [0, 2, 4], "With": 0, "fake": [0, 4], "data": [0, 4, 8], "debug": [0, 3, 8], 
"tool": [0, 8], "auto": [0, 8], "metric": 0, "analysi": 0, "compil": [0, 1, 3, 8, 9], "execut": 0, "get": 0, "report": 0, "understand": 0, "The": 0, "clear": 0, "dynamo": 0, "profil": 0, "benchmark": [0, 1, 4], "known": 0, "caveat": 0, "tensor": [0, 3, 5], "quirk": 0, "more": 0, "environ": [0, 2], "variabl": [0, 2], "common": 0, "combin": 0, "reproduc": 0, "ci": 0, "cd": 0, "unit": 0, "test": 0, "failur": 0, "eager": 1, "mode": [1, 8], "api": [1, 3], "background": [1, 4], "basic": 1, "usag": 1, "infer": [1, 9], "train": [1, 4, 7, 9], "how": [2, 4, 6, 8], "gpu": [2, 7], "creat": [2, 3], "instanc": 2, "setup": 2, "docker": [2, 7], "wheel": 2, "some": [2, 5], "model": [2, 3, 6], "mp_imagenet": 2, "exampl": [2, 4, 8], "amp": 2, "automat": 2, "mix": 2, "precis": 2, "develop": 2, "build": 2, "from": [2, 3, 5, 7], "sourc": [2, 5], "support": [2, 6], "document": 3, "doc": 3, "devic": [3, 6], "an": 3, "ar": 3, "singl": [3, 7], "multipl": 3, "multi": [3, 7], "process": 3, "tpu": [3, 4, 7, 8], "pod": [3, 4, 7, 8], "deep": 3, "dive": 3, "lazi": 3, "memori": 3, "layout": 3, "move": 3, "cpu": [3, 7], "save": 3, "load": 3, "cach": 3, "further": [3, 8], "read": [3, 8], "torch_xla": [3, 5], "runtim": [3, 7], "xla_model": 3, "distribut": [3, 7, 8], "spmd": [3, 8], "experiment": [3, 6], "do": 4, "distributeddataparallel": 4, "ddp": 4, "motiv": 4, "us": [4, 5, 6, 8], "resnet50": 4, "mnist": 4, "real": [4, 5], "disclaim": 4, "fulli": [4, 8], "shard": [4, 8], "parallel": [4, 8], "fsdp": [4, 8], "script": 4, "imagenet": 4, "instal": 4, "clone": 4, "repo": 4, "v3": [4, 7], "8": 4, "50": 4, "10": 4, "billion": 4, "paramet": 4, "recompil": 5, "let": 5, "": 5, "first": 5, "start": 5, "fact": 5, "constraint": 5, "1": 5, "input": 5, "dataset": 5, "2": [5, 8], "oper": [5, 6], "output": [5, 8], "bound": 5, "dynam": 5, "shape": 5, "can": 5, "fix": 5, "case": 5, "when": 5, "you": 5, "without": 5, "queri": 5, "its": 5, "dimens": 5, "what": [5, 8], "i": [5, 8], "3": 5, "control": 5, "flow": 5, 
"conclus": 5, "appendix": 5, "quantiz": 6, "featur": [6, 9], "call": 6, "op": 6, "code": 6, "modul": 6, "swap": 6, "matrix": 6, "multipli": 6, "embed": 6, "pjrt": 7, "tl": 7, "dr": 7, "benefit": 7, "quickstart": 7, "node": 7, "differ": 7, "xrt": 7, "multithread": 7, "v2": 7, "chang": 7, "xm": 7, "rendezv": 7, "torch": [7, 8, 9], "new": 7, "user": 8, "guid": 8, "mesh": 8, "partit": 8, "spec": 8, "via": 8, "gradient": 8, "checkpoint": 8, "huggingfac": 8, "llama": 8, "advanc": 8, "topic": 8, "hybrid": 8, "xlashardedtensor": 8, "dtensor": 8, "integr": [8, 9], "activ": 8, "torchdynamo": 9, "gap": 9, "take": 9, "awai": 9}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 57}, "alltitles": {"Troubleshooting": [[0, "troubleshooting"]], "Sanity Check": [[0, "sanity-check"]], "Check PyTorch/XLA Version": [[0, "check-pytorch-xla-version"]], "Perform A Simple Calculation": [[0, "perform-a-simple-calculation"]], "Run Resnet With Fake Data": [[0, "run-resnet-with-fake-data"]], "Performance Debugging": [[0, "performance-debugging"]], "PyTorch/XLA Debugging Tool": [[0, "pytorch-xla-debugging-tool"]], "Perform A Auto-Metrics Analysis": [[0, "perform-a-auto-metrics-analysis"]], "Compilation & Execution Analysis": [[0, "compilation-execution-analysis"]], "Get A Metrics Report": [[0, "get-a-metrics-report"]], "Understand The Metrics Report": [[0, "understand-the-metrics-report"]], "Clear The Metrics Report": [[0, "clear-the-metrics-report"]], "PyTorch/XLA + Dynamo Debugging Tool": [[0, "pytorch-xla-dynamo-debugging-tool"]], "Performance Profiling": [[0, "performance-profiling"]], "Simple Benchmarking": [[0, "simple-benchmarking"]], "Known Performance Caveats": [[0, 
"known-performance-caveats"]], "XLA Tensor Quirks": [[0, "xla-tensor-quirks"]], "More Debugging Tools": [[0, "more-debugging-tools"]], "Environment Variables": [[0, "environment-variables"]], "Common Debugging Environment Variables Combinations": [[0, "common-debugging-environment-variables-combinations"]], "Reproducing PyTorch/XLA CI/CD unit test failures.": [[0, "reproducing-pytorch-xla-ci-cd-unit-test-failures"]], "Eager Mode + Compile API": [[1, "eager-mode-compile-api"]], "Background": [[1, "background"]], "Basic Usage": [[1, "basic-usage"]], "Inference": [[1, "inference"], [9, "inference"]], "Training": [[1, "training"], [9, "training"]], "Benchmark": [[1, "benchmark"]], "How to run with PyTorch/XLA:GPU": [[2, "how-to-run-with-pytorch-xla-gpu"]], "Create a GPU instance": [[2, "create-a-gpu-instance"]], "Environment Setup": [[2, "environment-setup"]], "Docker": [[2, "docker"], [7, "docker"]], "Check environment variable": [[2, "check-environment-variable"]], "Wheel": [[2, "wheel"]], "Run some simple models": [[2, "run-some-simple-models"]], "MP_ImageNet Example": [[2, "mp-imagenet-example"]], "ResNet Example": [[2, "resnet-example"]], "AMP (AUTOMATIC MIXED PRECISION)": [[2, "amp-automatic-mixed-precision"]], "Develop PyTorch/XLA on a GPU instance (build PyTorch/XLA from source with GPU support)": [[2, "develop-pytorch-xla-on-a-gpu-instance-build-pytorch-xla-from-source-with-gpu-support"]], "PyTorch/XLA documentation": [[3, "pytorch-xla-documentation"]], "Docs": [[3, null]], "PyTorch on XLA Devices": [[3, "pytorch-on-xla-devices"]], "Creating an XLA Tensor": [[3, "creating-an-xla-tensor"]], "XLA Tensors are PyTorch Tensors": [[3, "xla-tensors-are-pytorch-tensors"]], "Running Models on XLA Devices": [[3, "running-models-on-xla-devices"]], "Running on a Single XLA Device": [[3, "running-on-a-single-xla-device"]], "Running on Multiple XLA Devices with Multi-processing": [[3, "running-on-multiple-xla-devices-with-multi-processing"]], "Running on TPU Pods": [[3, 
"running-on-tpu-pods"]], "XLA Tensor Deep Dive": [[3, "id3"]], "XLA Tensors are Lazy": [[3, "xla-tensors-are-lazy"]], "Memory Layout": [[3, "memory-layout"]], "Moving XLA Tensors to and from the CPU": [[3, "moving-xla-tensors-to-and-from-the-cpu"]], "Saving and Loading XLA Tensors": [[3, "saving-and-loading-xla-tensors"]], "Compilation Caching": [[3, "compilation-caching"]], "Further Reading": [[3, "further-reading"], [8, "further-reading"]], "PyTorch/XLA API": [[3, "pytorch-xla-api"]], "torch_xla": [[3, "module-torch_xla"]], "runtime": [[3, "module-torch_xla.runtime"]], "xla_model": [[3, "module-torch_xla.core.xla_model"]], "distributed": [[3, "module-torch_xla.distributed.parallel_loader"]], "spmd": [[3, "module-torch_xla.distributed.spmd"]], "experimental": [[3, "module-torch_xla.experimental"]], "debug": [[3, "module-torch_xla.debug.metrics"]], "How to do DistributedDataParallel(DDP)": [[4, "how-to-do-distributeddataparallel-ddp"]], "Background / Motivation": [[4, "background-motivation"]], "How to use DistributedDataParallel": [[4, "how-to-use-distributeddataparallel"]], "Benchmarking": [[4, "benchmarking"]], "Resnet50 with fake data": [[4, "resnet50-with-fake-data"]], "MNIST with fake data": [[4, "mnist-with-fake-data"]], "MNIST with real data": [[4, "mnist-with-real-data"]], "Disclaimer": [[4, "disclaimer"]], "Fully Sharded Data Parallel (FSDP) in PyTorch XLA": [[4, "fully-sharded-data-parallel-fsdp-in-pytorch-xla"]], "Example training scripts on MNIST and ImageNet": [[4, "example-training-scripts-on-mnist-and-imagenet"]], "Installation": [[4, "installation"]], "Clone PyTorch/XLA repo": [[4, "clone-pytorch-xla-repo"]], "Train MNIST on v3-8 TPU": [[4, "train-mnist-on-v3-8-tpu"]], "Train ImageNet with ResNet-50 on v3-8 TPU": [[4, "train-imagenet-with-resnet-50-on-v3-8-tpu"]], "Example training scripts on TPU pod (with 10 billion parameters)": [[4, "example-training-scripts-on-tpu-pod-with-10-billion-parameters"]], "Source of recompilations in torch_xla": [[5, 
"source-of-recompilations-in-torch-xla"]], "Let\u2019s first start with some facts/constraints:": [[5, "lets-first-start-with-some-facts-constraints"]], "#1. From input dataset.": [[5, "from-input-dataset"]], "#2. From operator output": [[5, "from-operator-output"]], "2.1 Bounded dynamic shape can fix the case when you use the tensor with dynamic shape as a Tensor, without querying its real dimension.": [[5, "bounded-dynamic-shape-can-fix-the-case-when-you-use-the-tensor-with-dynamic-shape-as-a-tensor-without-querying-its-real-dimension"]], "2.2 what if real dimension is queried on a tensor with dynamic shape?": [[5, "what-if-real-dimension-is-queried-on-a-tensor-with-dynamic-shape"]], "#3. From control flow": [[5, "from-control-flow"]], "Conclusion:": [[5, "conclusion"]], "Appendix:": [[5, "appendix"]], "Quantized Operations for XLA device (Experimental feature)": [[6, "quantized-operations-for-xla-device-experimental-feature"]], "How to use:": [[6, "how-to-use"]], "Call XLA quantized op in model code": [[6, "call-xla-quantized-op-in-model-code"]], "Module Swap": [[6, "module-swap"]], "Supported Quantized Operations:": [[6, "supported-quantized-operations"]], "Matrix Multiply": [[6, "matrix-multiply"]], "Embedding": [[6, "embedding"]], "PJRT Runtime": [[7, "pjrt-runtime"]], "TL;DR": [[7, "tl-dr"]], "Benefits": [[7, "benefits"]], "Quickstart": [[7, "quickstart"]], "CPU": [[7, "cpu"]], "TPU": [[7, "tpu"]], "Pods": [[7, "pods"]], "GPU": [[7, "gpu"]], "Single-node GPU training": [[7, "single-node-gpu-training"]], "Multi-node GPU training": [[7, "multi-node-gpu-training"]], "Differences from XRT": [[7, "differences-from-xrt"]], "Multithreading on TPU v2/v3": [[7, "id3"]], "Changes to xm.rendezvous": [[7, "changes-to-xm-rendezvous"]], "PJRT and torch.distributed": [[7, "pjrt-and-torch-distributed"]], "Performance": [[7, "performance"]], "New TPU runtime": [[7, "new-tpu-runtime"]], "PyTorch/XLA SPMD User Guide": [[8, "pytorch-xla-spmd-user-guide"]], "What is PyTorch/XLA 
SPMD?": [[8, "what-is-pytorch-xla-spmd"]], "How to use PyTorch/XLA SPMD?": [[8, "how-to-use-pytorch-xla-spmd"]], "SPMD Mode": [[8, "spmd-mode"]], "Mesh": [[8, "mesh"]], "Partition Spec": [[8, "partition-spec"]], "Fully Sharded Data Parallel(FSDP) via SPMD": [[8, "fully-sharded-data-parallel-fsdp-via-spmd"]], "Sharding output": [[8, "sharding-output"]], "Gradient checkpointing": [[8, "gradient-checkpointing"]], "HuggingFace Llama 2 Example": [[8, "huggingface-llama-2-example"]], "PyTorch/XLA SPMD advanced topics": [[8, "pytorch-xla-spmd-advanced-topics"]], "Hybrid Mesh": [[8, "hybrid-mesh"]], "Running SPMD on TPU Pod": [[8, "running-spmd-on-tpu-pod"]], "XLAShardedTensor": [[8, "xlashardedtensor"]], "DTensor Integration": [[8, "dtensor-integration"]], "Activation Sharding for torch.compile": [[8, "activation-sharding-for-torch-compile"]], "SPMD Debugging Tool": [[8, "spmd-debugging-tool"]], "Auto-Sharding": [[8, "auto-sharding"]], "Distributed Checkpointing": [[8, "distributed-checkpointing"]], "TorchDynamo(torch.compile) integration in PyTorch XLA": [[9, "torchdynamo-torch-compile-integration-in-pytorch-xla"]], "Integration": [[9, "integration"]], "Feature gaps": [[9, "feature-gaps"]], "Take away": [[9, "take-away"]]}, "indexentries": {"hybridmesh (class in torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.HybridMesh"]], "mesh (class in torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.Mesh"]], "mpdeviceloader (class in torch_xla.distributed.parallel_loader)": [[3, "torch_xla.distributed.parallel_loader.MpDeviceLoader"]], "add_step_closure() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.add_step_closure"]], "addressable_device_count() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.addressable_device_count"]], "all_gather() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.all_gather"]], "all_reduce() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.all_reduce"]], 
"all_to_all() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.all_to_all"]], "clear_sharding() (in module torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.clear_sharding"]], "compile() (in module torch_xla)": [[3, "torch_xla.compile"]], "counter_names() (in module torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.counter_names"]], "counter_value() (in module torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.counter_value"]], "device() (in module torch_xla)": [[3, "torch_xla.device"]], "device_count() (in module torch_xla)": [[3, "torch_xla.device_count"]], "device_type() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.device_type"]], "devices() (in module torch_xla)": [[3, "torch_xla.devices"]], "eager_mode() (in module torch_xla.experimental)": [[3, "torch_xla.experimental.eager_mode"]], "get_1d_mesh() (in module torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.get_1d_mesh"]], "get_global_mesh() (in module torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.get_global_mesh"]], "get_master_ip() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.get_master_ip"]], "get_memory_info() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.get_memory_info"]], "get_rng_state() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.get_rng_state"]], "get_stablehlo() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.get_stablehlo"]], "get_stablehlo_bytecode() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.get_stablehlo_bytecode"]], "global_device_count() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.global_device_count"]], "global_ordinal() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.global_ordinal"]], "global_runtime_device_count() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.global_runtime_device_count"]], "initialize_cache() (in module torch_xla.runtime)": [[3, 
"torch_xla.runtime.initialize_cache"]], "is_master_ordinal() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.is_master_ordinal"]], "is_spmd() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.is_spmd"]], "local_device_count() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.local_device_count"]], "local_ordinal() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.local_ordinal"]], "local_process_count() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.local_process_count"]], "manual_seed() (in module torch_xla)": [[3, "torch_xla.manual_seed"]], "mark_sharding() (in module torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.mark_sharding"]], "mesh_reduce() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.mesh_reduce"]], "metric_data() (in module torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.metric_data"]], "metric_names() (in module torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.metric_names"]], "metrics_report() (in module torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.metrics_report"]], "module": [[3, "module-torch_xla"], [3, "module-torch_xla.core.xla_model"], [3, "module-torch_xla.debug.metrics"], [3, "module-torch_xla.distributed.parallel_loader"], [3, "module-torch_xla.distributed.spmd"], [3, "module-torch_xla.distributed.xla_multiprocessing"], [3, "module-torch_xla.experimental"], [3, "module-torch_xla.runtime"]], "optimizer_step() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.optimizer_step"]], "rendezvous() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.rendezvous"]], "save() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.save"]], "set_global_mesh() (in module torch_xla.distributed.spmd)": [[3, "torch_xla.distributed.spmd.set_global_mesh"]], "set_rng_state() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.set_rng_state"]], "short_metrics_report() (in module 
torch_xla.debug.metrics)": [[3, "torch_xla.debug.metrics.short_metrics_report"]], "spawn() (in module torch_xla.distributed.xla_multiprocessing)": [[3, "torch_xla.distributed.xla_multiprocessing.spawn"]], "sync() (in module torch_xla)": [[3, "torch_xla.sync"]], "torch_xla": [[3, "module-torch_xla"]], "torch_xla.core.xla_model": [[3, "module-torch_xla.core.xla_model"]], "torch_xla.debug.metrics": [[3, "module-torch_xla.debug.metrics"]], "torch_xla.distributed.parallel_loader": [[3, "module-torch_xla.distributed.parallel_loader"]], "torch_xla.distributed.spmd": [[3, "module-torch_xla.distributed.spmd"]], "torch_xla.distributed.xla_multiprocessing": [[3, "module-torch_xla.distributed.xla_multiprocessing"]], "torch_xla.experimental": [[3, "module-torch_xla.experimental"]], "torch_xla.runtime": [[3, "module-torch_xla.runtime"]], "use_spmd() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.use_spmd"]], "wait_device_ops() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.wait_device_ops"]], "world_size() (in module torch_xla.runtime)": [[3, "torch_xla.runtime.world_size"]], "xla_device() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.xla_device"]], "xla_device_hw() (in module torch_xla.core.xla_model)": [[3, "torch_xla.core.xla_model.xla_device_hw"]]}}) \ No newline at end of file diff --git a/release/2.5/spmd.html b/release/2.5/spmd.html new file mode 100644 index 00000000000..878a86eadb7 --- /dev/null +++ b/release/2.5/spmd.html @@ -0,0 +1,1184 @@ + + + + + + + + + + + + PyTorch/XLA SPMD User Guide — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • PyTorch/XLA SPMD User Guide
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

PyTorch/XLA SPMD User Guide

+

In this user guide, we discuss how GSPMD is integrated in PyTorch/XLA, and provide a design overview to illustrate how the SPMD sharding annotation API and its constructs work.

+
+

What is PyTorch/XLA SPMD?

+

GSPMD is an automatic parallelization system for common ML workloads. The XLA compiler will transform the single device program into a partitioned one with proper collectives, based on the user provided sharding hints. This feature allows developers to write PyTorch programs as if they are on a single large device without any custom sharded computation ops and/or collective communications to scale.

+alt_text +

*Figure 1. Comparison of two different execution strategies, (a) for non-SPMD and (b) for SPMD.*

+
+
+

How to use PyTorch/XLA SPMD?

+

Here is a simple example of using SPMD

+
import numpy as np
+import torch
+import torch_xla.core.xla_model as xm
+import torch_xla.runtime as xr
+import torch_xla.distributed.spmd as xs
+from torch_xla.distributed.spmd import Mesh
+
+
+# Enable XLA SPMD execution mode.
+xr.use_spmd()
+
+
+# Device mesh, this and partition spec as well as the input tensor shape define the individual shard shape.
+num_devices = xr.global_runtime_device_count()
+mesh_shape = (num_devices, 1)
+device_ids = np.array(range(num_devices))
+mesh = Mesh(device_ids, mesh_shape, ('data', 'model'))
+
+
+t = torch.randn(8, 4).to(xm.xla_device())
+
+
+# Mesh partitioning, each device holds 1/8-th of the input
+partition_spec = ('data', 'model')
+xs.mark_sharding(t, mesh, partition_spec)
+
+
+

Let’s explain these concepts one by one

+
+

SPMD Mode

+

In order to use SPMD, you need to enable it via xr.use_spmd(). In SPMD mode there is only one logical device. Distributed computation and collectives are handled by mark_sharding. Note that users cannot mix SPMD with other distributed libraries.

+
+
+

Mesh

+

For a given cluster of devices, a physical mesh is a representation of the interconnect topology.

+
    +
  1. mesh_shape is a tuple whose elements multiply to the total number of physical devices.

  2. +
  3. device_ids is almost always np.array(range(num_devices)).

  4. +
  5. Users are also encouraged to give each mesh dimension a name. In the above example, the first mesh dimension is the data dimension and the second mesh dimension is the model dimension.

  6. +
+

You can also check more mesh info via

+
>>> mesh.shape()
+OrderedDict([('data', 4), ('model', 1)])
+
+
+
+
+

Partition Spec

+

partition_spec has the same rank as the input tensor. Each dimension describes how the corresponding input tensor dimension is sharded across the device mesh. In the above example tensor t’s first dimension is being sharded at data dimension and the second dimension is being sharded at model dimension.

+

User can also shard tensor that has different dimensions from the mesh shape.

+
t1 = torch.randn(8, 8, 16).to(device)
+t2 = torch.randn(8).to(device)
+
+# First dimension is being replicated.
+xs.mark_sharding(t1, mesh, (None, 'data', 'model'))
+
+# First dimension is being sharded at data dimension.
+# model dimension is used for replication when omitted.
+xs.mark_sharding(t2, mesh, ('data',))
+
+# First dimension is sharded across both mesh axes.
+xs.mark_sharding( t2, mesh, (('data', 'model'),))
+
+
+
+
+
+

Further Reading

+
    +
  1. Example to use SPMD to express data parallelism.

  2. +
  3. Example to use SPMD to express FSDP(Fully Sharded Data Parallel).

  4. +
  5. SPMD advanced topics

  6. +
  7. Spmd Distributed Checkpoint

  8. +
+
+
+
+

Fully Sharded Data Parallel(FSDP) via SPMD

+

Fully Sharded Data Parallel via SPMD or FSDPv2 is a utility that re-expresses the famous FSDP algorithm in SPMD. This is +an experimental feature that aims to offer a familiar interface for users to enjoy all the benefits that SPMD brings to +the table. The design doc is here.

+

Please review the SPMD user guide before proceeding. You can also find a minimum runnable example here.

+

Example usage:

+
import torch
+import torch_xla.core.xla_model as xm
+import torch_xla.distributed.spmd as xs
+from torch_xla.experimental.spmd_fully_sharded_data_parallel import SpmdFullyShardedDataParallel as FSDPv2
+
+# Define the mesh following common SPMD practice
+num_devices = xr.global_runtime_device_count()
+mesh_shape = (num_devices, 1)
+device_ids = np.array(range(num_devices))
+# To be noted, the mesh must have an axis named 'fsdp', which the weights and activations will be sharded on.
+mesh = xs.Mesh(device_ids, mesh_shape, ('fsdp', 'model'))
+
+# Shard the input, and assume x is a 2D tensor.
+x = xs.mark_sharding(x, mesh, ('fsdp', None))
+
+# As normal FSDP, but an extra mesh is needed.
+model = FSDPv2(my_module, mesh)
+optim = torch.optim.Adam(model.parameters(), lr=0.0001)
+output = model(x, y)
+loss = output.sum()
+loss.backward()
+optim.step()
+
+
+

It is also possible to shard individual layers separately and have an outer wrapper handle any leftover parameters. Here is an example to autowrap each DecoderLayer.

+
from torch_xla.distributed.fsdp.wrap import transformer_auto_wrap_policy
+
+# Apply FSDP sharding on each DecoderLayer layer.
+auto_wrap_policy = functools.partial(
+    transformer_auto_wrap_policy,
+    transformer_layer_cls={
+        decoder_only_model.DecoderLayer
+    },
+)
+model = FSDPv2(
+    model, mesh=mesh, auto_wrap_policy=auto_wrap_policy)
+
+
+
+

Sharding output

+

To ensure the XLA compiler correctly implements the FSDP algorithm, we need to shard both weights and activations. This means sharding the output of the forward method. Since the forward function output can vary, we offer shard_output to shard activations in cases where your module output doesn’t fall into one of these categories:

+
    +
  1. A single tensor

  2. +
  3. A tuple of tensors where the 0th element is the activation.

  4. +
+

Example usage:

+
def shard_output(output, mesh):
+    xs.mark_sharding(output.logits, mesh, ('fsdp', None, None))
+
+model = FSDPv2(my_module, mesh, shard_output)
+
+
+
+
+

Gradient checkpointing

+

Currently, gradient checkpointing needs to be applied to the module before the FSDP wrapper. Otherwise, recursively looping into children modules will end up in an infinite loop. We will fix this issue in future releases.

+

Example usage:

+
from torch_xla.distributed.fsdp import checkpoint_module
+
+model = FSDPv2(checkpoint_module(my_module), mesh)
+
+
+
+
+

HuggingFace Llama 2 Example

+

We have a fork of HF Llama 2 to demonstrate a potential integration here.

+
+
+
+

PyTorch/XLA SPMD advanced topics

+

In this doc we will cover some advanced topics on GSPMD. Please read the SPMD user guide before proceeding to this doc.

+

PyTorch/XLA SPMD takes a single-device program, shards and executes it in parallel. The SPMD execution requires using the native PyTorch DataLoader, which transfers data synchronously from the host to XLA devices. This blocks the training during the input data transfer every step. To improve the native data loading performance, we made PyTorch/XLA ParallelLoader support input sharding directly (src), when passed the optional kwarg _input_sharding_:

+
# MpDeviceLoader returns ParallelLoader.per_device_loader as iterator
+train_loader = pl.MpDeviceLoader(
+        train_loader,  # wraps PyTorch DataLoader
+        device,
+          # assume 4d input and we want to shard at the batch dimension.
+        input_sharding=xs.ShardingSpec(input_mesh, ('data', None, None, None)))
+
+
+

It is also possible to specify a different input_sharding for each element of the batch if they are different shapes:

+
# if batch = next(train_loader) looks like
+# {'x': <tensor of shape [s1, s2, s3, s4]>, 'y': <tensor for shape [s1, s2]>}
+
+# MpDeviceLoader returns ParallelLoader.per_device_loader as iterator
+train_loader = pl.MpDeviceLoader(
+        train_loader,  # wraps PyTorch DataLoader
+        device,
+          # specify different sharding for each input of the batch.
+        input_sharding={
+          'x': xs.ShardingSpec(input_mesh, ('data', None, None, None)),
+          'y': xs.ShardingSpec(input_mesh, ('data', None))
+        }
+)
+
+
+

PyTorch/XLA normally transfers tensor data asynchronously from host to device once the tensor is defined. This is to overlap the data transfer with the graph tracing time. However, because GSPMD allows the user to modify the tensor sharding after the tensor has been defined, we need an optimization to prevent unnecessary transfer of tensor data back and forth between host and device. We introduce Virtual Device Optimization, a technique to place the tensor data on a virtual device SPMD:0 first, before uploading to the physical devices when all the sharding decisions are finalized. Every tensor data in SPMD mode is placed on a virtual device, SPMD:0. The virtual device is exposed to the user as an XLA device XLA:0 with the actual shards on physical devices, like TPU:0, TPU:1, etc.

+
+

Hybrid Mesh

+

Mesh nicely abstracts how the physical device mesh is constructed. Users can arrange devices in any shape and order using the logical mesh. However, one can define a more performant mesh based on the physical topology, especially when it involves Data Center Network (DCN) cross slice connections. HybridMesh creates a mesh which gives good performance out of the box for such multislice environments. It accepts ici_mesh_shape and dcn_mesh_shape which denote logical mesh shapes of inner and outer network.

+
from torch_xla.distributed.spmd import HybridMesh
+
+# This example is assuming 2 slices of v4-8.
+# - ici_mesh_shape: shape of the logical mesh for inner connected devices.
+# - dcn_mesh_shape: shape of logical mesh for outer connected devices.
+ici_mesh_shape = (1, 4, 1) # (data, fsdp, tensor)
+dcn_mesh_shape = (2, 1, 1)
+
+mesh = HybridMesh(ici_mesh_shape, dcn_mesh_shape, ('data','fsdp','tensor'))
+print(mesh.shape())
+>> OrderedDict([('data', 2), ('fsdp', 4), ('tensor', 1)])
+
+
+
+

Running SPMD on TPU Pod

+

There is no code change required to go from single TPU host to TPU Pod if you construct your mesh and partition spec based on the number of devices instead of a hardcoded constant. To run the PyTorch/XLA workload on TPU Pod, please refer to the Pods section of our PJRT guide.

+
+
+

XLAShardedTensor

+

xs.mark_sharding is an in-place op that will attach the sharding annotation to the input tensor, but it also returns an XLAShardedTensor Python object.

+

The main use case for XLAShardedTensor [RFC] is to annotate a native torch.tensor (on a single device) with a sharding spec. The annotation takes place immediately, but the actual sharding of the tensor is delayed as the computation is carried out lazily, except for the input tensors which are sharded without delay. Once a tensor is annotated and wrapped inside a XLAShardedTensor, it can be passed to existing PyTorch ops and nn.Module layers as torch.Tensor. This is important to ensure that the same PyTorch layers and tensor ops can be stacked together with XLAShardedTensor. This means that the user does not need to rewrite the existing ops and model codes for sharded computation. Namely, XLAShardedTensor will satisfy the following requirements:

+
    +
  • XLAShardedTensor is a torch.Tensor subclass and works directly with native torch ops and module.layers. We use __torch_dispatch__ to send XLAShardedTensor to the XLA backend. PyTorch/XLA retrieves attached sharding annotations to trace the graph and invokes XLA SPMDPartitioner.

  • +
  • Internally, XLAShardedTensor (and its global_tensor input) is backed by XLATensor with a special data structure holding references to the sharded device data.

  • +
  • The sharded tensor after lazy execution may be gathered and materialized back to the host as global_tensor when requested on the host (e.g., printing the value of the global tensor).

  • +
  • The handles to the local shards are materialized strictly after the lazy execution. XLAShardedTensor exposes local_shards to return the local shards on addressable devices as List[[XLAShard](https://github.com/pytorch/xla/blob/4e8e5511555073ce8b6d1a436bf808c9333dcac6/torch_xla/distributed/spmd/xla_sharded_tensor.py#L12)].

  • +
+

There is also an ongoing effort to integrate XLAShardedTensor into DistributedTensor API to support XLA backend [RFC].

+
+
+

DTensor Integration

+

PyTorch has prototype-released DTensor in 2.1. +We are integrating PyTorch/XLA SPMD into DTensor API RFC. We have a proof-of-concept integration for distribute_tensor, which calls mark_sharding annotation API to shard a tensor and its computation using XLA:

+
import torch
+from torch.distributed import DeviceMesh, Shard, distribute_tensor
+
+# distribute_tensor now works with `xla` backend using PyTorch/XLA SPMD.
+mesh = DeviceMesh("xla", list(range(world_size)))
+big_tensor = torch.randn(100000, 88)
+my_dtensor = distribute_tensor(big_tensor, mesh, [Shard(0)])
+
+
+

This feature is experimental; stay tuned for more updates, examples and tutorials in the upcoming releases.

+
+
+

Activation Sharding for torch.compile

+

In the 2.3 release, PyTorch/XLA added the custom op dynamo_mark_sharding which can be used to perform the activation sharding in a torch.compile region. This is part of our ongoing effort to make torch.compile + GSPMD to be the recommended way of doing the model inference using PyTorch/XLA. Example of using this custom op:

+
# Activation output sharding
+device_ids = [i for i in range(self.num_devices)] # List[int]
+mesh_shape = [self.num_devices//2, 1, 2] # List[int]
+axis_names = "('data', 'model')" # string version of axis_names
+partition_spec = "('data', 'model')" # string version of partition spec
+torch.ops.xla.dynamo_mark_sharding(output, device_ids, mesh_shape, axis_names, partition_spec)
+
+
+
+
+

SPMD Debugging Tool

+

We provide a shard placement visualization debug tool for PyTorch/XLA SPMD users on TPU/GPU/CPU with single-host/multi-host: you could use visualize_tensor_sharding to visualize a sharded tensor, or you could use visualize_sharding to visualize a sharding string. Here are two code examples on TPU single-host(v4-8) with visualize_tensor_sharding or visualize_sharding:

+
    +
  • Code snippet used visualize_tensor_sharding and visualization result:

  • +
+
import rich
+
+# Here, mesh is a 2x2 mesh with axes 'x' and 'y'
+t = torch.randn(8, 4, device='xla')
+xs.mark_sharding(t, mesh, ('x', 'y'))
+
+# A tensor's sharding can be visualized using the `visualize_tensor_sharding` method
+from torch_xla.distributed.spmd.debugging import visualize_tensor_sharding
+generated_table = visualize_tensor_sharding(t, use_color=False)
+
+
+ + + visualize_tensor_sharding example on TPU v4-8(single-host) +
    +
  • Code snippet used visualize_sharding and visualization result:

  • +
+
from torch_xla.distributed.spmd.debugging import visualize_sharding
+sharding = '{devices=[2,2]0,1,2,3}'
+generated_table = visualize_sharding(sharding, use_color=False)
+
+
+ + + visualize_sharding example on TPU v4-8(single-host) +

You could use these examples on TPU/GPU/CPU single-host and modify it to run on multi-host. And you could modify it to sharding-style tiled, partial_replication and replicated.

+
+
+

Auto-Sharding

+

We are introducing a new PyTorch/XLA SPMD feature, called auto-sharding, RFC. This is an experimental feature in r2.3 and nightly, that supports XLA:TPU and a single TPUVM host.

+

PyTorch/XLA auto-sharding can be enabled by one of the following:

+
    +
  • Setting envvar XLA_AUTO_SPMD=1

  • +
  • Calling the SPMD API in the beginning of your code:

  • +
+
import torch_xla.runtime as xr
+xr.use_spmd(auto=True)
+
+
+
    +
  • Calling pytorch.distributed._tensor.distribute_module with auto-policy and xla:

  • +
+
import torch_xla.runtime as xr
+from torch.distributed._tensor import DeviceMesh, distribute_module
+from torch_xla.distributed.spmd import auto_policy
+
+device_count = xr.global_runtime_device_count()
+device_mesh = DeviceMesh("xla", list(range(device_count)))
+
+# Currently, model should be loaded to xla device via distribute_module.
+model = MyModule()  # nn.module
+sharded_model = distribute_module(model, device_mesh, auto_policy)
+
+
+

Optionally, one can set the following options/env-vars to control the behavior of +the XLA-based auto-sharding pass:

+
    +
  • XLA_AUTO_USE_GROUP_SHARDING: group resharding of the parameters. Set by default.

  • +
  • XLA_AUTO_SPMD_MESH: logical mesh shape to be used for auto-sharding. For example, +XLA_AUTO_SPMD_MESH=2,2 corresponds to a 2-by-2 mesh with 4 global devices. If unset, +a default device mesh shape of num_devices,1 will be used.

  • +
+
+
+
+
+

Distributed Checkpointing

+

PyTorch/XLA SPMD is compatible with the torch.distributed.checkpoint library through a dedicated Planner instance. Users are able to synchronously save and load checkpoints through this common interface.

+

The SPMDSavePlanner and SPMDLoadPlanner (src) classes enable the save and load functions to operate directly on the shards of an XLAShardedTensor, enabling all of the benefits of distributed checkpointing in SPMD training.

+

Here is a demonstration of the synchronous distributed checkpointing API:

+
import torch.distributed.checkpoint as dist_cp
+import torch_xla.experimental.distributed_checkpoint as xc
+
+# Saving a state_dict
+state_dict = {
+    "model": model.state_dict(),
+    "optim": optim.state_dict(),
+}
+
+dist_cp.save(
+    state_dict=state_dict,
+    storage_writer=dist_cp.FileSystemWriter(CHECKPOINT_DIR),
+    planner=xc.SPMDSavePlanner(),
+)
+...
+
+# Loading the model's state_dict from the checkpoint. The model should
+# already be on the XLA device and have the desired sharding applied.
+state_dict = {
+    "model": model.state_dict(),
+}
+
+dist_cp.load(
+    state_dict=state_dict,
+    storage_reader=dist_cp.FileSystemReader(CHECKPOINT_DIR),
+    planner=xc.SPMDLoadPlanner(),
+)
+model.load_state_dict(state_dict["model"])
+
+
+

The experimental CheckpointManager +interface provides a higher-level API over the torch.distributed.checkpoint +functions to enable a few key features:

+
    +
  • Managed checkpoints: Each checkpoint taken by the CheckpointManager is +identified by the step at which it was taken. All steps tracked are accessible +through the CheckpointManager.all_steps method, and any tracked steps can be +restored using CheckpointManager.restore.

  • +
  • Asynchronous checkpointing: Checkpoints taken through the +CheckpointManager.save_async API are written to persistent storage +asynchronously to unblock training for the duration of the checkpoint. The +input sharded state_dict is first moved to CPU before the checkpoint is +dispatched to a background thread.

  • +
  • Auto-checkpointing on preemption: On Cloud TPU, preemptions can be detected +and a checkpoint taken before the process is terminated. To use, ensure your +TPU is provisioned through a QueuedResource with +Autocheckpointing enabled, +and ensure the chkpt_on_preemption parameter is set when constructing the +CheckpointManager (this option is enabled by default).

  • +
  • FSSpec Support: CheckpointManager uses an fsspec storage backend to enable +checkpointing directly to any fsspec-compatible filesystem, including GCS.

  • +
+

Example usage of the CheckpointManager is below:

+
from torch_xla.experimental.distributed_checkpoint import CheckpointManager, prime_optimizer
+
+# Create a CheckpointManager to checkpoint every 10 steps into GCS.
+chkpt_mgr = CheckpointManager('gs://my-bucket/my-experiment', 10)
+
+# Select a checkpoint to restore from, and restore if applicable
+tracked_steps = chkpt_mgr.all_steps()
+if tracked_steps:
+    # Choose the highest step
+    best_step = max(tracked_steps)
+    # Before restoring the checkpoint, the optimizer state must be primed
+    # to allow state to be loaded into it.
+    prime_optimizer(optim)
+    state_dict = {'model': model.state_dict(), 'optim': optim.state_dict()}
+    chkpt_mgr.restore(best_step, state_dict)
+    model.load_state_dict(state_dict['model'])
+    optim.load_state_dict(state_dict['optim'])
+
+# Call `save` or `save_async` every step within the train loop. These methods
+# return True when a checkpoint is taken.
+for step, data in enumerate(dataloader):
+    ...
+    state_dict = {'model': model.state_dict(), 'optim': optim.state_dict()}
+    if chkpt_mgr.save_async(step, state_dict):
+        print(f'Checkpoint taken at step {step}')
+
+
+

In distributed checkpointing, the state_dicts are loaded in-place, and only the +required shards of the checkpoint are loaded. Since optimizer states are lazily +created, the state isn’t present until the first optimizer.step call, and +attempts to load an unprimed optimizer will fail.

+

The utility method prime_optimizer is provided for this: it runs a fake train +step by setting all gradients to zero and calling optimizer.step. This is a +destructive method and will touch both model parameters and optimizer state, +so it should only be called just prior to restoration.

+

To use torch.distributed APIs such as distributed checkpointing, a process +group is required. In SPMD mode, the xla backend is not supported since the +compiler is responsible for all collectives.

+

Instead, a CPU process group such as gloo must be used. On TPUs, the xla:// +init_method is still supported to discover the master IP, global world size, +and host rank. An example initialization is below:

+
import torch.distributed as dist
+# Import to register the `xla://` init_method
+import torch_xla.distributed.xla_backend
+import torch_xla.runtime as xr
+
+xr.use_spmd()
+
+# The `xla://` init_method will automatically discover master worker IP, rank,
+# and global world size without requiring environment configuration on TPUs.
+dist.init_process_group('gloo', init_method='xla://')
+
+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file diff --git a/release/2.5/torch_compile.html b/release/2.5/torch_compile.html new file mode 100644 index 00000000000..dabf6ed73c9 --- /dev/null +++ b/release/2.5/torch_compile.html @@ -0,0 +1,832 @@ + + + + + + + + + + + + TorchDynamo(torch.compile) integration in PyTorch XLA — PyTorch/XLA master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + +
+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + +
+ +
    + +
  • + + + Docs + + > +
  • + + +
  • TorchDynamo(torch.compile) integration in PyTorch XLA
  • + + +
  • + + + + + +
  • + +
+ + +
+
+ +
+ Shortcuts +
+
+ +
+
+ + + + + + +
+ +
+
+ +
+

TorchDynamo(torch.compile) integration in PyTorch XLA

+

TorchDynamo is a Python-level JIT compiler designed to make unmodified PyTorch programs faster. It provides a clean API for compiler backends to hook in and its biggest feature is to dynamically modify Python bytecode right before it is executed. In the pytorch/xla 2.0 release, PyTorch/XLA provided an experimental backend for the TorchDynamo for both inference and training.

+

The way that XLA bridge works is that Dynamo will provide a TorchFX graph when it recognizes a model pattern and PyTorch/XLA will use existing Lazy Tensor technology to compile the FX graph and return the compiled function.

+
+

Integration

+

Support for PyTorch/XLA and Dynamo currently exists by adding the backend='openxla' argument to torch.compile. For example:

+
import torch
+import torch_xla.core.xla_model as xm
+
+def add(a, b):
+  a_xla = a.to(xm.xla_device())
+  b_xla = b.to(xm.xla_device())
+  return a_xla + b_xla
+
+compiled_code = torch.compile(add, backend='openxla')
+print(compiled_code(torch.randn(10), torch.randn(10)))
+
+
+
+
+

Inference

+

Here is a small code example of running resnet18 with torch.compile

+
import torch
+import torchvision
+import torch_xla.core.xla_model as xm
+
+def eval_model(loader):
+  device = xm.xla_device()
+  xla_resnet18 = torchvision.models.resnet18().to(device)
+  xla_resnet18.eval()
+  dynamo_resnet18 = torch.compile(
+    xla_resnet18, backend='openxla')
+  for data, _ in loader:
+    with torch.no_grad():
+      output = dynamo_resnet18(data)
+
+
+

With the torch.compile you will see that PyTorch/XLA only traces the resnet18 model once during the init time and executes the compiled binary every time dynamo_resnet18 is invoked, instead of tracing the model every time. Here is an inference speed analysis to compare Dynamo and Lazy using torch bench on Cloud TPU v4-8

+

resnet18 | 2.59 +resnet50 | 2.64 +resnext50_32x4d | 1.91 +alexnet | 1.28 +mobilenet_v2 | 18.62 +mnasnet1_0 | 2.68 +vgg16 | 1.33 +BERT_pytorch | 7.49 +squeezenet1_1 | 2.29 +timm_vision_transformer | 3.52 +geomean | 3.04

+
+
+

Training

+

PyTorch/XLA also supports Dynamo for training, but it is experimental and we are working with the PyTorch Compiler team to iterate on the implementation. Here is an example of training a resnet18 with torch.compile

+
import torch
+import torchvision
+import torch_xla.core.xla_model as xm
+
+def train_model(model, data, target, optimizer):
+  loss_fn = torch.nn.CrossEntropyLoss()
+  pred = model(data)
+  loss = loss_fn(pred, target)
+  loss.backward()
+  optimizer.step()
+  return pred
+
+def train_model_main(loader):
+  device = xm.xla_device()
+  xla_resnet18 = torchvision.models.resnet18().to(device)
+  xla_resnet18.train()
+  dynamo_train_model = torch.compile(
+        train_model, backend='openxla')
+  for data, target in loader:
+    xla_optimizer = optim.SGD(data, lr=0.1, weight_decay=1e-2)
+    output = dynamo_train_model(xla_resnet18, data, target, xla_optimizer)
+
+
+

We expect to extract and execute 3 graphs per training step instead of 1 graph per training step if you use the Lazy tensor. Here is a training speed analysis to compare Dynamo and Lazy using a torch bench on Cloud TPU v4-8.

+

resnet50 | 1.33 +resnet18 | 1.33 +BERT_pytorch | 3.07 +resnext50_32x4d | 1.43 +alexnet | 1.12 +mobilenet_v2 | 1.4 +mnasnet1_0 | 1.19 +vgg16 | 0.81 +timm_vision_transformer | 1.87 +squeezenet1_1 | 1.41 +geomean | 1.41

+
+

NOTE: We run each model’s fwd and bwd for a single step and then collect the e2e time. In the real world we will run multiple steps at each training job which can easily hide the tracing cost from execution(since it is async). Lazy Tensor will have much better performance in that scenario.

+
+
+
+

Feature gaps

+

There is one gap we want to call out that is preventing us from using TorchDynamo on larger scale models.

+
    +
  1. TorchDynamo will trace forward and backward into separate graphs. For PyTorch/XLA it is important to let the XLA compiler see the whole step as one graph to best optimize the speed. There is also a fixed overhead to launch every device execution which makes executing multiple graphs per training step less ideal.

  2. +
+

This gap compared to Lazy Tensor makes it less efficient in real world training use cases, especially the tracing cost can be overlapped with the execution in training.

+
+
+

Take away

+

TorchDynamo provides a really promising way for the compiler backend to hide the complexity from the user and easily retrieve the modeling code in a graph format. Compared with PyTorch/XLA’s traditional Lazy Tensor way of extracting the graph, TorchDynamo can skip the graph tracing for every iteration, hence providing a much better inference response time.

+

Most models supported by PyTorch/XLA, have seen significant speedup when running inference with the new dynamo-xla bridge. Our community is working hard to expand the set of supported models. Regarding the training feature gaps mentioned above, the PyTorch/XLA community is super excited to improve the training gap in our upcoming development work. The team continues to heavily invest in TorchDynamo and work with the upstream to mature the training story.

+
+
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + + + + + + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + \ No newline at end of file