danieldk (HF Staff) committed
Commit 8267537 · verified · 1 Parent(s): 040cb6f

Build uploaded using `kernels`.
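
For reference, repositories with this build layout are typically consumed through the `kernels` loader rather than imported directly. A minimal sketch (the repo id is a placeholder, not taken from this commit):

from kernels import get_kernel

# Placeholder repo id; substitute the Hub repository this build belongs to.
cv_utils = get_kernel("<org>/cv-utils")

# get_kernel downloads and imports the build variant matching the local
# torch version, CUDA version, and CPU architecture (e.g. the aarch64
# directories added in this commit).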

Files changed (30)
  1. .gitattributes +7 -0
  2. build/torch210-cxx11-cu126-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} +1 -1
  3. build/torch210-cxx11-cu126-aarch64-linux/_ops.py +3 -3
  4. build/torch210-cxx11-cu126-aarch64-linux/cv_utils/__init__.py +2 -2
  5. build/torch210-cxx11-cu128-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} +1 -1
  6. build/torch210-cxx11-cu128-aarch64-linux/_ops.py +3 -3
  7. build/torch210-cxx11-cu128-aarch64-linux/cv_utils/__init__.py +2 -2
  8. build/torch210-cxx11-cu130-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} +1 -1
  9. build/torch210-cxx11-cu130-aarch64-linux/_ops.py +3 -3
  10. build/torch210-cxx11-cu130-aarch64-linux/cv_utils/__init__.py +2 -2
  11. build/torch211-cxx11-cu126-aarch64-linux/__init__.py +12 -0
  12. build/torch211-cxx11-cu126-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so +3 -0
  13. build/torch211-cxx11-cu126-aarch64-linux/_ops.py +9 -0
  14. build/torch211-cxx11-cu126-aarch64-linux/cv_utils/__init__.py +26 -0
  15. build/torch211-cxx11-cu126-aarch64-linux/metadata.json +17 -0
  16. build/torch211-cxx11-cu128-aarch64-linux/__init__.py +12 -0
  17. build/torch211-cxx11-cu128-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so +3 -0
  18. build/torch211-cxx11-cu128-aarch64-linux/_ops.py +9 -0
  19. build/torch211-cxx11-cu128-aarch64-linux/cv_utils/__init__.py +26 -0
  20. build/torch211-cxx11-cu128-aarch64-linux/metadata.json +20 -0
  21. build/torch211-cxx11-cu130-aarch64-linux/__init__.py +12 -0
  22. build/torch211-cxx11-cu130-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so +3 -0
  23. build/torch211-cxx11-cu130-aarch64-linux/_ops.py +9 -0
  24. build/torch211-cxx11-cu130-aarch64-linux/cv_utils/__init__.py +26 -0
  25. build/torch211-cxx11-cu130-aarch64-linux/metadata.json +18 -0
  26. build/torch29-cxx11-cu129-aarch64-linux/__init__.py +12 -0
  27. build/torch29-cxx11-cu129-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so +3 -0
  28. build/torch29-cxx11-cu129-aarch64-linux/_ops.py +9 -0
  29. build/torch29-cxx11-cu129-aarch64-linux/cv_utils/__init__.py +26 -0
  30. build/torch29-cxx11-cu129-aarch64-linux/metadata.json +20 -0
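
The build directories listed above follow a `torch{major}{minor}-cxx11-cu{cuda}-{arch}-{os}` naming convention for the Linux variants; a hedged sketch of deriving the matching variant name for the running environment (the convention is inferred from the paths in this commit, not from the `kernels` source):

import platform
import torch

# Assumes a CUDA build of PyTorch; torch.version.cuda is None for CPU-only builds.
torch_tag = "torch" + "".join(torch.__version__.split("+")[0].split(".")[:2])  # e.g. torch211
cuda_tag = "cu" + torch.version.cuda.replace(".", "")                          # e.g. cu128
variant = f"{torch_tag}-cxx11-{cuda_tag}-{platform.machine()}-{platform.system().lower()}"
print(variant)  # e.g. torch211-cxx11-cu128-aarch64-linux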
.gitattributes CHANGED
@@ -55,3 +55,10 @@ build/torch29-cxx11-cu126-x86_64-linux/_cv_utils_cuda_8cfc281.abi3.so filter=lfs
 build/torch29-cxx11-cu128-x86_64-linux/_cv_utils_cuda_8cfc281.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch29-cxx11-cu130-x86_64-linux/_cv_utils_cuda_8cfc281.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch210-cu128-x86_64-windows/_cv_utils_cuda_0561e14.pyd filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu126-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu128-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu130-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu126-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu128-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu130-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu129-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd70e0ed7915339cb40d62b2db8381331cf07586acdbeed4d7cd1bd06c54b6d5
+oid sha256:85951892e9264e890bc78648406d030328cf1082440fe9ee78cdd0c519573cba
 size 3168896
build/torch210-cxx11-cu126-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _cv_utils_cuda_8cfc281
-ops = torch.ops._cv_utils_cuda_8cfc281
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_cv_utils_cuda_8cfc281::{op_name}"
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
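
The hash suffix in the op namespace changes with every rebuild (here `8cfc281` → `d91fc8d`), which is why callers resolve operators through `_ops.ops` and `add_op_namespace_prefix` rather than hard-coding the name. A small self-contained sketch of what those helpers amount to, assuming the extension shared library has already been loaded:

import torch

ns = "_cv_utils_cuda_d91fc8d"  # the namespace baked into this build

def add_op_namespace_prefix(op_name: str) -> str:
    # Mirrors _ops.py above: fully qualified name for torch.library APIs.
    return f"{ns}::{op_name}"

print(add_op_namespace_prefix("generic_nms"))  # _cv_utils_cuda_d91fc8d::generic_nms

# Once the .so has been imported, the same op is reachable via torch.ops:
# nms = getattr(torch.ops, ns).generic_nms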
build/torch210-cxx11-cu126-aarch64-linux/cv_utils/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu128-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2203c765753e697bad106474dc3536fd991024509abd6f4cbb2db1c7e9684857
+oid sha256:2c80b066ed750b4a5b7f1f11a0d77427f0f0e99118ccbd9b8317c67f5a86adad
 size 3824424
build/torch210-cxx11-cu128-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _cv_utils_cuda_8cfc281
-ops = torch.ops._cv_utils_cuda_8cfc281
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_cv_utils_cuda_8cfc281::{op_name}"
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/cv_utils/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu130-aarch64-linux/{_cv_utils_cuda_8cfc281.abi3.so → _cv_utils_cuda_d91fc8d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f62f22326656ca04093c8230d4a418d57f3ecbcfb6b939e9d13226d11dbe587e
+oid sha256:7483b27cbdd39dbe19814440aa97a22fce61635c2cb22ce510a5430d75a99d69
 size 3633848
build/torch210-cxx11-cu130-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _cv_utils_cuda_8cfc281
-ops = torch.ops._cv_utils_cuda_8cfc281
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_cv_utils_cuda_8cfc281::{op_name}"
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch210-cxx11-cu130-aarch64-linux/cv_utils/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch211-cxx11-cu126-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,12 @@
+import torch
+from typing import List
+
+from ._ops import ops
+
+def cc_2d(inputs: torch.Tensor, get_counts: bool) -> List[torch.Tensor]:
+    return ops.cc_2d(inputs, get_counts)
+
+def generic_nms(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, use_iou_matrix: bool) -> torch.Tensor:
+    return ops.generic_nms(dets, scores, iou_threshold, use_iou_matrix)
+
+__all__ = ["cc_2d", "generic_nms"]
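
A hedged usage sketch for the two wrappers added above; the tensor layouts (a 2D binary label image for `cc_2d`, boxes as `[x1, y1, x2, y2]` rows for `generic_nms`) are assumptions, since the expected formats are not documented in this commit:

import torch
import cv_utils  # assumes the package is importable, e.g. via the kernels loader

# Connected components on a small binary image (layout assumed).
image = torch.zeros(64, 64, dtype=torch.uint8, device="cuda")
image[10:20, 10:20] = 1
labels, counts = cv_utils.cc_2d(image, True)  # get_counts=True: also return per-label counts (assumed)

# Greedy NMS over two overlapping boxes (box layout assumed).
dets = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                     [1.0, 1.0, 11.0, 11.0]], device="cuda")
scores = torch.tensor([0.9, 0.8], device="cuda")
keep = cv_utils.generic_nms(dets, scores, 0.5, False)  # iou_threshold=0.5, use_iou_matrix=False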
build/torch211-cxx11-cu126-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dbbfc5bf524a337e8b6900179e1df4a951ce95e2453691a4fb2c9a6ca2df3b9
+size 3165104
build/torch211-cxx11-cu126-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/cv_utils/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
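
The comments above explain why a path-derived module name is used; a small self-contained illustration of that naming scheme (the paths are examples from this commit and do not need to exist for the snippet to run):

import ctypes
from pathlib import Path

# Different build directories hash to different module names, so importing
# several variants never collides in sys.modules.
for p in ("build/torch211-cxx11-cu126-aarch64-linux/__init__.py",
          "build/torch211-cxx11-cu128-aarch64-linux/__init__.py"):
    name = "{:x}".format(ctypes.c_size_t(hash(Path(p).absolute())).value)
    print(p, "->", name)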
build/torch211-cxx11-cu126-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,17 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
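
The `archs` list enumerates the CUDA compute capabilities this binary was compiled for; a hedged sketch of checking the local GPU against it (the path is one of the variants added in this commit, and the `+PTX` handling is simplified):

import json
import torch

with open("build/torch211-cxx11-cu126-aarch64-linux/metadata.json") as f:
    meta = json.load(f)

# Entries ending in "+PTX" also embed PTX, which newer GPUs can JIT-compile.
archs = meta["backend"]["archs"]
major, minor = torch.cuda.get_device_capability()
cap = f"{major}.{minor}"
print(f"device capability: {cap}")
print(f"native SASS built: {cap in [a.removesuffix('+PTX') for a in archs]}")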
build/torch211-cxx11-cu128-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,12 @@
+import torch
+from typing import List
+
+from ._ops import ops
+
+def cc_2d(inputs: torch.Tensor, get_counts: bool) -> List[torch.Tensor]:
+    return ops.cc_2d(inputs, get_counts)
+
+def generic_nms(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, use_iou_matrix: bool) -> torch.Tensor:
+    return ops.generic_nms(dets, scores, iou_threshold, use_iou_matrix)
+
+__all__ = ["cc_2d", "generic_nms"]
build/torch211-cxx11-cu128-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79587ea9ec0c69d9dbcc51298483eb3794d06adb73d9e463ad7ac1ea52232e7b
+size 3820640
build/torch211-cxx11-cu128-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch211-cxx11-cu128-aarch64-linux/cv_utils/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu130-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,12 @@
+import torch
+from typing import List
+
+from ._ops import ops
+
+def cc_2d(inputs: torch.Tensor, get_counts: bool) -> List[torch.Tensor]:
+    return ops.cc_2d(inputs, get_counts)
+
+def generic_nms(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, use_iou_matrix: bool) -> torch.Tensor:
+    return ops.generic_nms(dets, scores, iou_threshold, use_iou_matrix)
+
+__all__ = ["cc_2d", "generic_nms"]
build/torch211-cxx11-cu130-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a262ed16de99e85ac531e4be6db2cba240b1ba0c2de31c227c7f312455f5fe3e
+size 3630056
build/torch211-cxx11-cu130-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch211-cxx11-cu130-aarch64-linux/cv_utils/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu130-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu129-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,12 @@
+import torch
+from typing import List
+
+from ._ops import ops
+
+def cc_2d(inputs: torch.Tensor, get_counts: bool) -> List[torch.Tensor]:
+    return ops.cc_2d(inputs, get_counts)
+
+def generic_nms(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, use_iou_matrix: bool) -> torch.Tensor:
+    return ops.generic_nms(dets, scores, iou_threshold, use_iou_matrix)
+
+__all__ = ["cc_2d", "generic_nms"]
build/torch29-cxx11-cu129-aarch64-linux/_cv_utils_cuda_d91fc8d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e0e09c11e4b5033d3b6b53cdb230726fb96bc9f394a53b2dc466d3b40f522cf
+size 3822096
build/torch29-cxx11-cu129-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _cv_utils_cuda_d91fc8d
+ops = torch.ops._cv_utils_cuda_d91fc8d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_cv_utils_cuda_d91fc8d::{op_name}"
build/torch29-cxx11-cu129-aarch64-linux/cv_utils/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu129-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}