3v324v23 committed
Commit f4d7a37
1 Parent(s): eea47e6

feat: initial commit

Files changed (6)
  1. .gitignore +2 -0
  2. app.py +39 -0
  3. best.pt +3 -0
  4. bus.jpg +0 -0
  5. requirements.txt +33 -0
  6. zidane.jpg +0 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .DS_Store
+ yolov5s.pt
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ import torch
+ from PIL import Image
+
+ import gdown
+
+ '''
+ # a file
+ url = "https://drive.google.com/uc?id=1-ZIa4KsSjhup4Pep70uBvI4BjnSUbocX"
+ output = "best.pt"
+ gdown.download(url, output, quiet=False)
+ '''
+
+ # Images
+ torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+ torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', 'bus.jpg')
+
+ # Model
+ # model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # force_reload=True to update
+ model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt') # custom best.pt weights via PyTorch Hub (source="local" would require a local yolov5 clone, which this repo does not include)
+
+ def yolo(im, size=640):
+     g = (size / max(im.size)) # gain
+     im = im.resize((int(x * g) for x in im.size), Image.LANCZOS) # resize (Image.ANTIALIAS was removed in Pillow 10)
+
+     results = model(im) # inference
+     results.render() # updates results.imgs with boxes and labels
+     return Image.fromarray(results.imgs[0])
+
+
+ inputs = gr.inputs.Image(type='pil', label="Original Image")
+ outputs = gr.outputs.Image(type="pil", label="Output Image")
+
+ title = "YOLOv5"
+ description = "YOLOv5 Gradio demo for object detection. Upload an image or click an example image to use."
+ article = "<p style='text-align: center'>YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
+
+ examples = [['zidane.jpg'], ['bus.jpg']]
+ gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(cache_examples=True, enable_queue=True)
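
Note on app.py: the demo only renders boxes onto the input image. For a quick sanity check of best.pt outside Gradio, a minimal sketch (assuming the standard YOLOv5 PyTorch Hub Detections API; the weight path and example image come from this commit) could look like:

    import torch
    from PIL import Image

    # load the custom weights the same way app.py does
    model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')

    im = Image.open('zidane.jpg')
    results = model(im, size=640)   # inference at 640 px
    det = results.pandas().xyxy[0]  # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
    print(det)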
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afb897f715523f8a2a49b542124e667240f77f8c69a41fb789bd161060a63165
+ size 699148621
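
Note on best.pt: the file is tracked with Git LFS, so the diff above is only the pointer (the object's SHA-256 and its size, roughly 699 MB); the actual weights are fetched separately (e.g. via git lfs pull). A small sketch for verifying a downloaded copy against the recorded oid (hashing in 1 MiB chunks to avoid loading the whole file into memory):

    import hashlib

    EXPECTED_OID = "afb897f715523f8a2a49b542124e667240f77f8c69a41fb789bd161060a63165"  # from the LFS pointer above

    h = hashlib.sha256()
    with open("best.pt", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    assert h.hexdigest() == EXPECTED_OID, "best.pt does not match the LFS pointer"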
bus.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,33 @@
+ # pip install -r requirements.txt
+
+ # base ----------------------------------------
+ matplotlib>=3.2.2
+ numpy>=1.18.5
+ opencv-python-headless
+ Pillow
+ PyYAML>=5.3.1
+ scipy>=1.4.1
+ torch>=1.7.0
+ torchvision>=0.8.1
+ tqdm>=4.41.0
+
+ # logging -------------------------------------
+ tensorboard>=2.4.1
+ # wandb
+
+ # plotting ------------------------------------
+ seaborn>=0.11.0
+ pandas
+
+ # export --------------------------------------
+ # coremltools>=4.1
+ # onnx>=1.9.0
+ # scikit-learn==0.19.2 # for coreml quantization
+
+ # extras --------------------------------------
+ # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
+ # pycocotools>=2.0 # COCO mAP
+ # albumentations>=1.0.3
+ thop # FLOPs computation
+
+ gdown
zidane.jpg ADDED