Update README.md
@@ -56,6 +56,8 @@ import torch

from depth_anything_v2.dpt import DepthAnythingV2

DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

model_configs = {
    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
@@ -67,7 +69,7 @@ encoder = 'vitl' # or 'vits', 'vitb', 'vitg'

model = DepthAnythingV2(**model_configs[encoder])
model.load_state_dict(torch.load(f'checkpoints/depth_anything_v2_{encoder}.pth', map_location='cpu'))
model.eval()
model = model.to(DEVICE).eval()

raw_img = cv2.imread('your/image/path')
depth = model.infer_image(raw_img) # HxW raw depth map in numpy
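The `depth` array returned by `infer_image` is an unnormalized float map, so it usually needs rescaling before it can be saved or viewed. A minimal sketch of one common way to do that (not part of this commit; the output filenames and colormap choice are just illustrative):

```python
import cv2
import numpy as np

# 'depth' is the HxW float array returned by model.infer_image(raw_img) above.
# Min-max rescale to 0-255 so it can be written out as an 8-bit image.
depth_vis = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8) * 255.0
depth_vis = depth_vis.astype(np.uint8)

cv2.imwrite('depth_gray.png', depth_vis)  # grayscale depth image (example path)
cv2.imwrite('depth_color.png', cv2.applyColorMap(depth_vis, cv2.COLORMAP_INFERNO))  # pseudo-color version
```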
@@ -132,6 +134,7 @@ Please refer to [DA-2K benchmark](./DA-2K.md).
**We sincerely appreciate all the community support for our Depth Anything series. Thank you a lot!**

- TensorRT: https://github.com/spacewalk01/depth-anything-tensorrt
- ONNX: https://github.com/fabio-sim/Depth-Anything-ONNX
- ComfyUI: https://github.com/kijai/ComfyUI-DepthAnythingV2
- Transformers.js (real-time depth in web): https://huggingface.co/spaces/Xenova/webgpu-realtime-depth-estimation
- Android: