diff --git a/Ethosight/Ethosight.egg-info/SOURCES.txt b/Ethosight/Ethosight.egg-info/SOURCES.txt index d62d8cc..21929c5 100644 --- a/Ethosight/Ethosight.egg-info/SOURCES.txt +++ b/Ethosight/Ethosight.egg-info/SOURCES.txt @@ -18,6 +18,7 @@ Ethosight/ReasonerInterface.py Ethosight/__init__.py Ethosight/langchain_reasoner.py Ethosight/llama_index_reasoner.py +Ethosight/shared_models.py Ethosight/utils.py Ethosight.egg-info/PKG-INFO Ethosight.egg-info/SOURCES.txt diff --git a/Ethosight/Ethosight/ChatGPTReasoner.py b/Ethosight/Ethosight/ChatGPTReasoner.py index bed68db..2471c7b 100644 --- a/Ethosight/Ethosight/ChatGPTReasoner.py +++ b/Ethosight/Ethosight/ChatGPTReasoner.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import os import openai import requests diff --git a/Ethosight/Ethosight/Ethosight.py b/Ethosight/Ethosight/Ethosight.py index 6e0b366..dd4dce1 100644 --- a/Ethosight/Ethosight/Ethosight.py +++ b/Ethosight/Ethosight/Ethosight.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Ethosight environment variables # EthosightBackend: "core" or "client" # EthosightBackendURL: URL for EthosightRESTClient backend e.g. http://localhost:8000 diff --git a/Ethosight/Ethosight/EthosightApp.py b/Ethosight/Ethosight/EthosightApp.py index f35b1de..d35d002 100644 --- a/Ethosight/Ethosight/EthosightApp.py +++ b/Ethosight/Ethosight/EthosightApp.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + from .EthosightMediaAnalyzer import EthosightMediaAnalyzer, EthosightOutput from .LabelSpaceOptimization import SemanticRelationsOptimization, SemanticSimilarityOptimization from ruamel.yaml import YAML @@ -33,7 +50,7 @@ def __init__(self, app_name, base_dir="ENV"): if not base_dir: raise EthosightAppException("ETHOSIGHT_APP_BASEDIR environment variable is not defined.") if not os.path.isdir(base_dir): - raise EthosightAppException(f"Base directory {base_dir} is not a valid directory.") + raise EthosightAppException(f"Base directory {base_dir} is not a valid directory. Please define a valid directory using ETHOSIGHT_APP_BASEDIR environment variable") # Ensure the app_dir is an absolute path self.app_dir = os.path.join(base_dir, app_name) @@ -50,8 +67,9 @@ def __init__(self, app_name, base_dir="ENV"): self.embeddings_path = self.config.get('embeddings_path') self.labels_path = self.config.get('labels_path') self.setActiveEmbeddingsFromFile(self.embeddings_path, self.labels_path) - self.groundTruthEmbeddings(makeActive=False) - self.reasoner = ChatGPTReasoner() + if self.config['benchmark']['enabled']: + self.groundTruthEmbeddings(makeActive=False) + self.reasoner = self.config['reasoner_type'] def load_config(self, config_file): # Assuming the load_config function reads the YAML config file @@ -1439,6 +1457,10 @@ def add_labels(self, new_labels): for label in current_labels: file.write(f"{label}\n") + if not added_labels: + print("No new labels added.") + return + # compute the new embeddings new_embeddings = self.ethosight.compute_label_embeddings(added_labels) diff --git a/Ethosight/Ethosight/EthosightAppCLI.py b/Ethosight/Ethosight/EthosightAppCLI.py index d4149a3..e02c75f 100755 --- a/Ethosight/Ethosight/EthosightAppCLI.py +++ b/Ethosight/Ethosight/EthosightAppCLI.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 import click from .EthosightApp import EthosightApp diff --git a/Ethosight/Ethosight/EthosightCLI.py b/Ethosight/Ethosight/EthosightCLI.py index 91b5d04..a78bc36 100755 --- a/Ethosight/Ethosight/EthosightCLI.py +++ b/Ethosight/Ethosight/EthosightCLI.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 import click import os diff --git a/Ethosight/Ethosight/EthosightCore.py b/Ethosight/Ethosight/EthosightCore.py index 046b6db..e61e546 100644 --- a/Ethosight/Ethosight/EthosightCore.py +++ b/Ethosight/Ethosight/EthosightCore.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import os import torch from models import imagebind_model @@ -14,7 +31,7 @@ import torch.nn.functional as F class EthosightCore: - def __init__(self, model=None, reasoner=None, gpu=0): + def __init__(self, model=None, reasoner='', gpu=0): #gpu=none means use cpu self.ethosight_dir = get_install_path('Ethosight') if model is None: @@ -258,6 +275,11 @@ def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn= top_labels = sorted_labels[:100] top_scores = sorted_scores[:100] + # Clear memory + del vision_embeddings, raw_scores, unique_label_embeddings, inputs + if torch.cuda.is_available(): + torch.cuda.empty_cache() # Clear unused memory + # Return the labels and scores as a dictionary return {'labels': top_labels, 'scores': top_scores} diff --git a/Ethosight/Ethosight/EthosightDatasetCLI.py b/Ethosight/Ethosight/EthosightDatasetCLI.py index e5e994a..04d2e9d 100755 --- a/Ethosight/Ethosight/EthosightDatasetCLI.py +++ b/Ethosight/Ethosight/EthosightDatasetCLI.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 import os import csv diff --git a/Ethosight/Ethosight/EthosightMediaAnalyzer.py b/Ethosight/Ethosight/EthosightMediaAnalyzer.py index 1152c04..98bf87e 100644 --- a/Ethosight/Ethosight/EthosightMediaAnalyzer.py +++ b/Ethosight/Ethosight/EthosightMediaAnalyzer.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + import os import copy import yaml @@ -65,6 +82,7 @@ class EthosightMediaAnalyzer: 'chatgpt': ChatGPTReasoner, 'langchain': LangchainReasoner, 'llamaindex': LlamaIndexReasoner, + '': '' # 'narsgpt': NARSGPTReasoner, # 'nars': NARSReasoner, } @@ -116,6 +134,8 @@ def load_reasoner(self, config): ReasonerClass = self.REASONER_CLASSES.get(reasoner_type) if ReasonerClass is None: raise ValueError(f"Invalid reasoner type: {reasoner_type}") + if ReasonerClass == '': + return '' return ReasonerClass() # assuming reasoner classes have no-arg constructor diff --git a/Ethosight/Ethosight/EthosightMediaAnalyzerCLI.py b/Ethosight/Ethosight/EthosightMediaAnalyzerCLI.py index 3ff8fb5..6da9ec4 100755 --- a/Ethosight/Ethosight/EthosightMediaAnalyzerCLI.py +++ b/Ethosight/Ethosight/EthosightMediaAnalyzerCLI.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 import click import yaml diff --git a/Ethosight/Ethosight/EthosightRESTClient.py b/Ethosight/Ethosight/EthosightRESTClient.py index b2790a7..f14d5b9 100644 --- a/Ethosight/Ethosight/EthosightRESTClient.py +++ b/Ethosight/Ethosight/EthosightRESTClient.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import requests import os from Ethosight.EthosightCore import EthosightCore @@ -62,333 +79,6 @@ def _base64_to_tensor(self, base64_str): return tensor - -# def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): -# payload = { -# "label_to_embeddings": label_to_embeddings, -# "normalize_fn": normalize_fn, -# "scale": scale, -# "verbose": verbose -# } -# -# files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} -# response = requests.post(f"{self.url}/compute_affinity_scores", data=payload, files=files) -# response.raise_for_status() -# return response.json() -# def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): -# payload = { -# "label_to_embeddings": label_to_embeddings, -# "normalize_fn": normalize_fn, -# "scale": scale, -# "verbose": verbose -# } -# -# files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} -# response = requests.post(f"{self.url}/compute_affinity_scores", data=payload, files=files) -# response.raise_for_status() -# -# # Parse the response to get the results in the desired format -# result = response.json() -# result_dict = { -# 'labels': result['labels'], -# 'scores': result['scores'] -# } -# -# # If verbose, print the results -# if verbose: -# print("\nTop labels for the 
image:") -# for label, score in zip(result_dict['labels'], result_dict['scores']): -# print(f"{label}: {score}") -# print("\n") -# -# return result_dict -# def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): -# # Serialize the label_to_embeddings dictionary -# serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} -# serialized_embeddings = json.dumps(serializable_embeddings) -# -# # Construct the payload dictionary -# payload = { -# "data": json.dumps({ -# "label_to_embeddings": serialized_embeddings, -# "normalize_fn": normalize_fn, -# "scale": scale, -# "verbose": verbose -# }) -# } -# -# files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} -# response = requests.post(f"{self.url}/compute_affinity_scores", data=payload, files=files) -# response.raise_for_status() -# -# # Parse the response to get the results in the desired format -# result = response.json() -# result_dict = { -# 'labels': result['labels'], -# 'scores': result['scores'] -# } -# -# # If verbose, print the results -# if verbose: -# print("\nTop labels for the image:") -# for label, score in zip(result_dict['labels'], result_dict['scores']): -# print(f"{label}: {score}") -# print("\n") -# -# return result_dict -# -# -# def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): -# # Serialize the label_to_embeddings dictionary -# serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} -# serialized_embeddings = json.dumps(serializable_embeddings) -# -# # Construct the payload without additional serialization -# data_content = { -# "label_to_embeddings": serializable_embeddings, -# "normalize_fn": normalize_fn, -# "scale": scale, -# "verbose": verbose -# } -# -# files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} -# 
-# response = requests.post(f"{self.url}/compute_affinity_scores", data=data_content, files=files) -# -# # Handle the response -# if response.status_code == 422: -# print("Validation error:", response.text) -# response.raise_for_status() -# -# # Parse the response to get the results in the desired format -# result = response.json() -# result_dict = { -# 'labels': result['labels'], -# 'scores': base64_to_tensor(result['scores']) -# } -# -# # If verbose, print the results -# if verbose: -# print("\nTop labels for the image:") -# for label, score in zip(result_dict['labels'], result_dict['scores']): -# print(f"{label}: {score}") -# print("\n") -# -# return result_dict -# def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True, batch_size=32): -# # Serialize the label_to_embeddings dictionary -# serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} -# -# # Construct the data_content dictionary -# data_content = { -# "label_to_embeddings": serializable_embeddings, -# "normalize_fn": normalize_fn, -# "scale": scale, -# "verbose": verbose, -# "batch_size": batch_size -# } -# -# # The data parameter expects a stringified JSON, so we'll serialize data_content -# wrapped_data_content = { -# "data": json.dumps(data_content) -# } -# -# # Attach the image as a file -# files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} -# -# # Send the POST request -# response = requests.post(f"{self.url}/compute_affinity_scores", data=wrapped_data_content, files=files) -# -# # Check for 422 response and print the server's error message -# if response.status_code == 422: -# print("Server validation error:", response.text) -# return -# -# response.raise_for_status() -# -# # Parse the response to get the results in the desired format -# result = response.json() -# result_dict = { -# 'labels': result['labels'], -# 'scores': result['scores'] -# } -# -# # If 
verbose, print the results -# if verbose: -# print("\nTop labels for the image:") -# for label, score in zip(result_dict['labels'], result_dict['scores']): -# print(f"{label}: {score}") -# print("\n") -# -# return result_dict -# - def compute_affinity_scores_orig(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - # Serialize the label_to_embeddings dictionary - serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} - - # Construct the data_content dictionary - data_content = { - "label_to_embeddings": serializable_embeddings, - "normalize_fn": normalize_fn, - "scale": scale, - "verbose": verbose, - "batch_size": batch_size - } - - # Attach the image as a file - files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} - - # Send the POST request with data_content directly as the data parameter - response = requests.post(f"{self.url}/compute_affinity_scores", data=data_content, files=files) - - # Check for 422 response and print the server's error message - if response.status_code == 422: - print("Server validation error:", response.text) - return None - - response.raise_for_status() - - # Parse the response to get the results in the desired format - result = response.json() - result_dict = { - 'labels': result['labels'], - 'scores': result['scores'] - } - - # If verbose, print the results - if verbose and result_dict: - print("\nTop labels for the image:") - for label, score in zip(result_dict['labels'], result_dict['scores']): - print(f"{label}: {score}") - print("\n") - - return result_dict - -# still with the 422 - def compute_affinity_scores_mock1(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - # Mock serialization of the label_to_embeddings dictionary - serializable_embeddings = {label: str(embedding) for label, embedding in label_to_embeddings.items()} - - # Construct the 
data_content dictionary - data_content = { - "label_to_embeddings": serializable_embeddings, - "normalize_fn": normalize_fn, - "scale": scale, - "verbose": verbose, - "batch_size": batch_size - } - - # Attach the image as a file - files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} - - # Send the POST request with data_content directly as the data parameter - response = requests.post(f"{self.url}/compute_affinity_scores", data=data_content, files=files) - - # Check for 422 response and print the server's error message - if response.status_code == 422: - print("Server validation error:", response.text) - return None - - response.raise_for_status() - - # Parse the response to get the results in the desired format - result = response.json() - result_dict = { - 'labels': result['labels'], - 'scores': result['scores'] - } - - # If verbose, print the results - if verbose and result_dict: - print("\nTop labels for the image:") - for label, score in zip(result_dict['labels'], result_dict['scores']): - print(f"{label}: {score}") - print("\n") - - return result_dict - -# this works - def compute_affinity_scores_mock2(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} - - # Mock data_content for testing - data_content = { - #"label_to_embeddings": {"mock_label_1": "mock_embedding_1", "mock_label_2": "mock_embedding_2"}, - "label_to_embeddings": serializable_embeddings, - "normalize_fn": 'linear', - "scale": 1, - "verbose": True, - "batch_size": 32 - } - - # Send the POST request with mock data_content - response = requests.post(f"{self.url}/compute_affinity_scores", json=data_content) - - # Check for 422 response and print the server's error message - if response.status_code == 422: - print("Server validation error:", response.text) - return None - - response.raise_for_status() - - # 
Parse the response to get the results in the desired format - result = response.json() - result_dict = { - 'labels': result['labels'], - 'scores': result['scores'] - } - - # If verbose, print the mock results - if verbose: - print("\nTop labels for the image:") - for label, score in zip(result_dict['labels'], result_dict['scores']): - print(f"{label}: {score}") - print("\n") - - return result_dict - - def compute_affinity_scores_mock3(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - # Serialize the label_to_embeddings dictionary - serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} - - # Construct the data_content dictionary - data_content = { - "label_to_embeddings": serializable_embeddings, - "normalize_fn": normalize_fn, - "scale": scale, - "verbose": verbose, - "batch_size": batch_size - } - - # Attach the image as a file - files = {"image_path": (os.path.basename(image_path), open(image_path, 'rb'))} - - # Send the POST request with data_content directly as the data parameter - response = requests.post(f"{self.url}/compute_affinity_scores", json=data_content, files=files) - - # Check for 422 response and print the server's error message - if response.status_code == 422: - print("Server validation error:", response.text) - return None - - response.raise_for_status() - - # Parse the response to get the results in the desired format - result = response.json() - result_dict = { - 'labels': result['labels'], - 'scores': result['scores'] - } - - # If verbose, print the results - if verbose and result_dict: - print("\nTop labels for the image:") - for label, score in zip(result_dict['labels'], result_dict['scores']): - print(f"{label}: {score}") - print("\n") - - return result_dict - def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): # Serialize the label_to_embeddings dictionary as before 
serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} @@ -433,49 +123,6 @@ def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn= return result_dict -# produces 422 - def compute_affinity_scores_batched_orig(self, label_to_embeddings, image_paths, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - payload = { - "label_to_embeddings": label_to_embeddings, - "normalize_fn": normalize_fn, - "scale": scale, - "verbose": verbose, - "batch_size": batch_size - } - - files = [("image_paths", (os.path.basename(path), open(path, 'rb'))) for path in image_paths] - response = requests.post(f"{self.url}/compute_affinity_scores_batched", data=payload, files=files) - response.raise_for_status() - return response.json() - - def compute_affinity_scores_batched_bug2(self, label_to_embeddings, image_paths, normalize_fn='linear', scale=1, verbose=True, batch_size=32): - # Serialize the label_to_embeddings dictionary as before - serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} - # Serialize the data into a JSON string - payload = json.dumps({ - "label_to_embeddings": serializable_embeddings, - "normalize_fn": normalize_fn, - "scale": scale, - "verbose": verbose, - "batch_size": batch_size - }) - - # Create a list of files to send (including the serialized data and the images) - files = [ - ("data", ("data.json", payload, "application/json")), - *[(f"image_paths", (os.path.basename(path), open(path, 'rb'))) for path in image_paths] - ] - - response = requests.post(f"{self.url}/compute_affinity_scores_batched", files=files) - response.raise_for_status() - - # Don't forget to close the files after the request - for _, (_, file, _) in files[1:]: - file.close() - - return response.json() - - def compute_affinity_scores_batched(self, label_to_embeddings, image_paths, normalize_fn='linear', scale=1, verbose=True, batch_size=32): 
# Serialize the label_to_embeddings dictionary as before serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} diff --git a/Ethosight/Ethosight/EthosightRESTServer.py b/Ethosight/Ethosight/EthosightRESTServer.py index 24601d6..58e49ce 100644 --- a/Ethosight/Ethosight/EthosightRESTServer.py +++ b/Ethosight/Ethosight/EthosightRESTServer.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 from fastapi import FastAPI, HTTPException, UploadFile, File, Depends, status, Form from fastapi.encoders import jsonable_encoder @@ -44,8 +61,8 @@ class ComputeAffinityScoresBatchedInput(BaseModel): batch_size: int = 32 class EthosightRESTServer: - def __init__(self, mode="blocking", host="0.0.0.0", port=8000, consul_url="localhost", consul_port=8500, gpu=0): - self.core = EthosightCore(gpu=gpu) + def __init__(self, mode="blocking", host="0.0.0.0", port=8000, consul_url="localhost", consul_port=8500, gpu=0, reasoner=""): + self.core = EthosightCore(reasoner=reasoner, gpu=gpu) self.app = FastAPI() self.lock = threading.Lock() self.mode = mode @@ -138,216 +155,6 @@ def test_tensor_to_base64(tensor): logging.debug("Computed embeddings") return {"embeddings": serialized_result} - -# def _initialize_compute_affinity_scores_route(self): -# @self.app.post("/compute_affinity_scores") -# @self.process_request -# def compute_affinity_scores_endpoint(data: ComputeAffinityScoresInput, image_path: UploadFile = File(...)): -# logging.debug(f"Received request with data: {data} and image: {image_path.filename}") -# image_temp_path = f"temp_{uuid.uuid4()}_{image_path.filename}" -# with open(image_temp_path, "wb") as buffer: -# buffer.write(image_path.file.read()) -# -# result = self.core.compute_affinity_scores(data.label_to_embeddings, image_temp_path, data.normalize_fn, data.scale, data.verbose) -# os.remove(image_temp_path) -# #logging.debug(f"Computed affinity scores: {result}") -# logging.debug("Computed affinity scores") -# return result -# def _initialize_compute_affinity_scores_route(self): -# @self.app.post("/compute_affinity_scores") -# @self.process_request -# def compute_affinity_scores_endpoint(data: ComputeAffinityScoresInput, image_path: UploadFile = File(...)): -# logging.debug(f"Incoming request body: {request.body()}") -# -# logging.debug(f"Received request with data: {data} and image: 
{image_path.filename}") -# -# # Temporary storage for the uploaded image -# image_temp_path = f"temp_{uuid.uuid4()}_{image_path.filename}" -# -# # Save the uploaded image to temporary storage -# with open(image_temp_path, "wb") as buffer: -# buffer.write(image_path.file.read()) -# -# # Compute the affinity scores -# try: -# affinity_scores_result = self.core.compute_affinity_scores(data.label_to_embeddings, image_temp_path, data.normalize_fn, data.scale, data.verbose) -# except Exception as e: -# os.remove(image_temp_path) -# logging.error(f"Error computing affinity scores: {e}") -# raise e # You might want to raise a custom error or message to the client -# -# os.remove(image_temp_path) -# -# # Serialize the scores for transmission -# affinity_scores_result['scores'] = tensor_to_base64(torch.tensor(affinity_scores_result['scores'])) -# -# logging.debug("Computed affinity scores") -# return affinity_scores_result - - def _initialize_compute_affinity_scores_route_orig(self): - @self.app.post("/compute_affinity_scores") - @self.process_request - def compute_affinity_scores_endpoint(data: ComputeAffinityScoresInput, image_path: UploadFile = File(...)): - logging.debug(f"Incoming data: {data}") - logging.debug(f"Incoming image_path: {image_path.filename}") - - # Temporary storage for the uploaded image - image_temp_path = f"temp_{uuid.uuid4()}_{image_path.filename}" - - # Save the uploaded image to temporary storage - with open(image_temp_path, "wb") as buffer: - buffer.write(image_path.file.read()) - - # Compute the affinity scores - try: - affinity_scores_result = self.core.compute_affinity_scores( - data.label_to_embeddings, - image_temp_path, - data.normalize_fn, - data.scale, - data.verbose, - data.batch_size # Include batch_size here - ) - except Exception as e: - os.remove(image_temp_path) - logging.error(f"Error computing affinity scores: {e}") - raise e # Raise a custom error or message to the client - - os.remove(image_temp_path) - - # Serialize the scores 
for transmission - affinity_scores_result['scores'] = tensor_to_base64(torch.tensor(affinity_scores_result['scores'])) - - logging.debug("Computed affinity scores") - return affinity_scores_result - -# this returns a 422 - def _initialize_compute_affinity_scores_route_mock1(self): - @self.app.post("/compute_affinity_scores") - @self.process_request - def compute_affinity_scores_endpoint(data: ComputeAffinityScoresInput, image_path: UploadFile = File(...)): - logging.debug(f"Incoming data: {data}") - logging.debug(f"Incoming image_path: {image_path.filename}") - - # Temporary storage for the uploaded image - image_temp_path = f"temp_{uuid.uuid4()}_{image_path.filename}" - - # Save the uploaded image to temporary storage - with open(image_temp_path, "wb") as buffer: - buffer.write(image_path.file.read()) - - # Mock the computation for now - affinity_scores_result = { - "labels": ["mock_label_1", "mock_label_2"], - "scores": ["0.9", "0.1"] - } - - os.remove(image_temp_path) - - logging.debug("Mocked affinity scores") - return affinity_scores_result - - def _initialize_compute_affinity_scores_route_mock2(self): - @self.app.post("/compute_affinity_scores") - @self.process_request - def compute_affinity_scores_endpoint(data: dict, image_path: UploadFile = File(...)): - logging.debug(f"Incoming data: {data}") - logging.debug(f"Incoming image_path: {image_path.filename}") - - # Extract label_to_embeddings from the data - label_to_embeddings = data.get("label_to_embeddings", {}) - logging.debug(f"label_to_embeddings: {label_to_embeddings}") - - # Temporary storage for the uploaded image - image_temp_path = f"temp_{uuid.uuid4()}_{image_path.filename}" - - # Save the uploaded image to temporary storage - with open(image_temp_path, "wb") as buffer: - buffer.write(image_path.file.read()) - - # Here you can process the image if needed. - # Since we're mocking the actual processing and just focusing on the flow, - # this step can be skipped for now. 
- - # Clean up the temporary image - os.remove(image_temp_path) - - # Mock response - affinity_scores_result = { - "labels": ["mock_label_1", "mock_label_2"], - "scores": ["0.9", "0.1"] - } - - logging.debug("Mocked affinity scores") - return affinity_scores_result - - def _initialize_compute_affinity_scores_route_mock3(self): - @self.app.post("/compute_affinity_scores") - @self.process_request - def compute_affinity_scores_endpoint(data: UploadFile = File(...), image: UploadFile = File(...)): - logging.debug(f"Incoming image filename: {image.filename}") - - # Extract and parse JSON data - data_content_str = data.file.read().decode('utf-8') - data_content = json.loads(data_content_str) - - logging.debug(f"Incoming data: {data_content}") - - # Read the image content (for this mock, we won't do anything with it) - image_content = image.file.read() - - # Mock response - affinity_scores_result = { - "labels": ["mock_label_1", "mock_label_2"], - "scores": ["0.9", "0.1"] - } - - logging.debug("Mocked affinity scores") - return affinity_scores_result - - def _initialize_compute_affinity_scores_route_almostworking(self): - @self.app.post("/compute_affinity_scores") - @self.process_request - def compute_affinity_scores_endpoint(data: UploadFile = File(...), image: UploadFile = File(...)): - # Extract and parse JSON data - data_content_str = data.file.read().decode('utf-8') - data_content = json.loads(data_content_str) - - logging.debug(f"Incoming data: {data_content}") - logging.debug(f"Incoming image filename: {image.filename}") - - # Deserialize the label_to_embeddings - label_to_embeddings = {label: self._base64_to_tensor(embedding_b64) for label, embedding_b64 in data_content['label_to_embeddings'].items()} - - # Temporary storage for the uploaded image - image_temp_path = f"temp_{uuid.uuid4()}_{image.filename}" - - # Save the uploaded image to temporary storage - with open(image_temp_path, "wb") as buffer: - buffer.write(image.file.read()) - - # Compute the affinity 
scores - try: - affinity_scores_result = self.core.compute_affinity_scores( - label_to_embeddings, - image_temp_path, - data_content['normalize_fn'], - data_content['scale'], - data_content['verbose'], - ) - except Exception as e: - os.remove(image_temp_path) - logging.error(f"Error computing affinity scores: {e}") - raise e # Raise a custom error or message to the client - - os.remove(image_temp_path) - - # Serialize the scores for transmission - affinity_scores_result['scores'] = tensor_to_base64(torch.tensor(affinity_scores_result['scores'])) - - logging.debug("Computed affinity scores") - return affinity_scores_result - def _initialize_compute_affinity_scores_route(self): @self.app.post("/compute_affinity_scores") @self.process_request @@ -361,7 +168,7 @@ def compute_affinity_scores_endpoint(data: UploadFile = File(...), image: Upload #logging.debug(f"Incoming data: {data_content}") # Read the image content - image_temp_path = f"temp_{uuid.uuid4()}_{image.filename}" + image_temp_path = f"/tmp/temp_{uuid.uuid4()}_{image.filename}" with open(image_temp_path, "wb") as buffer: buffer.write(image.file.read()) @@ -395,68 +202,6 @@ def compute_affinity_scores_endpoint(data: UploadFile = File(...), image: Upload logging.debug("Computed affinity scores") return affinity_scores_result -#generates our old friend the 422 unprocessable entity :) - def _initialize_compute_affinity_scores_batched_route_orig(self): - @self.app.post("/compute_affinity_scores_batched") - @self.process_request - def compute_affinity_scores_batched_endpoint(data: ComputeAffinityScoresBatchedInput, image_paths: List[UploadFile] = File(...)): - logging.debug(f"Received batched request with data: {data}") - saved_image_paths = [] - for image_file in image_paths: - image_temp_path = f"temp_{uuid.uuid4()}_{image_file.filename}" - with open(image_temp_path, "wb") as buffer: - buffer.write(image_file.file.read()) - saved_image_paths.append(image_temp_path) - - result = 
self.core.compute_affinity_scores_batched(data.label_to_embeddings, saved_image_paths, data.normalize_fn, data.scale, data.verbose, data.batch_size) - for path in saved_image_paths: - os.remove(path) - logging.debug(f"Computed batched affinity scores: {result}") - return result - -#not aligned with client data - def _initialize_compute_affinity_scores_batched_route_bug1(self): - - @self.app.post("/compute_affinity_scores_batched", response_model=List[Dict[str, List[str]]]) - @self.process_request - def compute_affinity_scores_batched_endpoint(data: UploadFile = File(...), image_paths: List[UploadFile] = File(...)): - logging.debug(f"Received batched request with data: {data.filename}") - - # Extract and parse JSON data - data_content_str = data.file.read().decode('utf-8') - data = json.loads(data_content_str) - - saved_image_paths = [] - for image_file in image_paths: - image_temp_path = f"temp_{uuid.uuid4()}_{image_file.filename}" - with open(image_temp_path, "wb") as buffer: - buffer.write(image_file.file.read()) - saved_image_paths.append(image_temp_path) - - # Decode the base64 encoded tensors - decoded_embeddings = {label: base64_to_tensor(embedding_base64) for label, embedding_base64 in data.label_to_embeddings.items()} - - result = self.core.compute_affinity_scores_batched( - label_to_embeddings=decoded_embeddings, - image_paths=image_files, - normalize_fn=data.normalize_fn, - scale=data.scale, - verbose=data.verbose, - batch_size=data.batch_size - ) - - for path in saved_image_paths: - os.remove(path) - - # Serialize the result - result_serialized = [{ - 'labels': entry['labels'], - 'scores': [str(score) for score in entry['scores']] - } for entry in result] - - logging.debug(f"Serialized batched affinity scores: {result_serialized}") - return jsonable_encoder(result_serialized) - def _initialize_compute_affinity_scores_batched_route(self): @self.app.post("/compute_affinity_scores_batched", response_model=List[Dict[str, List[str]]]) @self.process_request 
diff --git a/Ethosight/Ethosight/LabelSpaceOptimization.py b/Ethosight/Ethosight/LabelSpaceOptimization.py index 81ae224..bd7bc5c 100644 --- a/Ethosight/Ethosight/LabelSpaceOptimization.py +++ b/Ethosight/Ethosight/LabelSpaceOptimization.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from abc import ABC, abstractmethod from .ChatGPTReasoner import ChatGPTReasoner from tqdm import tqdm diff --git a/Ethosight/Ethosight/NARSGPTReasoner.py b/Ethosight/Ethosight/NARSGPTReasoner.py index 15842d7..d59b98d 100644 --- a/Ethosight/Ethosight/NARSGPTReasoner.py +++ b/Ethosight/Ethosight/NARSGPTReasoner.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + import os import sys import torch diff --git a/Ethosight/Ethosight/NARSReasoner.py b/Ethosight/Ethosight/NARSReasoner.py index 39058d6..3694a19 100644 --- a/Ethosight/Ethosight/NARSReasoner.py +++ b/Ethosight/Ethosight/NARSReasoner.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import os import sys with open("NARSReasoner_knowledge.nal","r") as f: diff --git a/Ethosight/Ethosight/ReasonerInterface.py b/Ethosight/Ethosight/ReasonerInterface.py index 73dc820..0e2b618 100644 --- a/Ethosight/Ethosight/ReasonerInterface.py +++ b/Ethosight/Ethosight/ReasonerInterface.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + from abc import ABC, abstractmethod class AbstractReasoner(ABC): diff --git a/Ethosight/Ethosight/__init__.py b/Ethosight/Ethosight/__init__.py index 39b3da7..deb6f60 100644 --- a/Ethosight/Ethosight/__init__.py +++ b/Ethosight/Ethosight/__init__.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from .Ethosight import Ethosight from .EthosightMediaAnalyzer import EthosightMediaAnalyzer from .ChatGPTReasoner import ChatGPTReasoner diff --git a/Ethosight/Ethosight/langchain_reasoner.py b/Ethosight/Ethosight/langchain_reasoner.py index b25f1ee..df1898f 100644 --- a/Ethosight/Ethosight/langchain_reasoner.py +++ b/Ethosight/Ethosight/langchain_reasoner.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + from .ReasonerInterface import AbstractReasoner from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate diff --git a/Ethosight/Ethosight/llama_index_reasoner.py b/Ethosight/Ethosight/llama_index_reasoner.py index 9211a4a..414831e 100644 --- a/Ethosight/Ethosight/llama_index_reasoner.py +++ b/Ethosight/Ethosight/llama_index_reasoner.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from .ReasonerInterface import AbstractReasoner from llama_index import ( Document, diff --git a/Ethosight/Ethosight/shared_models.py b/Ethosight/Ethosight/shared_models.py index 682375c..2349e0f 100644 --- a/Ethosight/Ethosight/shared_models.py +++ b/Ethosight/Ethosight/shared_models.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # shared_models.py from pydantic import BaseModel diff --git a/Ethosight/Ethosight/tests/RESTClientHelper.py b/Ethosight/Ethosight/tests/RESTClientHelper.py new file mode 100644 index 0000000..810a9f3 --- /dev/null +++ b/Ethosight/Ethosight/tests/RESTClientHelper.py @@ -0,0 +1,168 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import requests +import os +from Ethosight.EthosightCore import EthosightCore +from Ethosight.EthosightRESTServer import EthosightRESTServer +from fastapi.testclient import TestClient +import logging +import base64 +import numpy as np +import torch +import json # Ensure json is imported +from typing import Dict, Any + +def tensor_to_base64(tensor): + # Convert the tensor to a numpy array + numpy_array = tensor.numpy() + numpy_bytes = numpy_array.tobytes() + + # Encode the byte array to base64 string + return base64.b64encode(numpy_bytes).decode('utf-8') + + +class RESTClientHelper(EthosightCore): + + def __init__(self, model=None, reasoner=''): + model = "EthosightRESTClientHasNoModel" + super().__init__(model, reasoner) + self.url = '' + server = EthosightRESTServer(mode='blocking', host='127.0.0.1', port=8000, consul_url='localhost', consul_port=8500, + gpu=0, reasoner='') + self.client = TestClient(server.app) + logging.basicConfig(level=logging.DEBUG) + + def compute_label_embeddings(self, labels, batch_size=1200): + payload = { + "labels": labels, + "batch_size": batch_size + } + + logging.debug(f"Sending request to /compute_label_embeddings with payload: {payload}") + + response = self.client.post("/compute_label_embeddings", json=payload) + + # Log the response status code and content + logging.debug(f"Received response with status code: {response.status_code}") + + serialized_embeddings = json.loads(response.json()["embeddings"]) # Deserialize the JSON string into a dictionary + + deserialized_embeddings = self._deserialize_embeddings(serialized_embeddings) + + return response.status_code, deserialized_embeddings + + def _deserialize_embeddings(self, serialized_embeddings): + # Convert the serialized embeddings back to tensors + deserialized_embeddings = {} + for label, base64_str in serialized_embeddings.items(): + deserialized_embeddings[label] = self._base64_to_tensor(base64_str) + return 
deserialized_embeddings + + def _base64_to_tensor(self, base64_str): + # Decode the base64 string to a numpy byte array + np_bytes = base64.b64decode(base64_str) + # Convert the numpy byte array to a numpy array + np_array = np.frombuffer(np_bytes, dtype=np.float32) + # Convert the numpy array to a PyTorch tensor + tensor = torch.from_numpy(np_array) + return tensor + + + def compute_affinity_scores(self, label_to_embeddings, image_path, normalize_fn='linear', scale=1, verbose=True): + # Serialize the label_to_embeddings dictionary as before + serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} + + # Construct the data_content dictionary + serialized_data = json.dumps({ + "label_to_embeddings": serializable_embeddings, + "normalize_fn": normalize_fn, + "scale": scale, + "verbose": verbose, + }) + + # Prepare the files for multipart upload + files = { + "data": ("data.json", serialized_data, "application/json"), + "image": (os.path.basename(image_path), open(image_path, 'rb'), "image/jpeg") + } + + # Send the POST request + # response = requests.post(f"{self.url}/compute_affinity_scores", files=files) + response = self.client.post("/compute_affinity_scores", files=files) + + # Check for 422 response and print the server's error message + if response.status_code == 422: + print("Server validation error:", response.text) + return None + + response.raise_for_status() + + # Parse the response to get the results in the desired format + result = response.json() + result_dict = { + 'labels': result['labels'], + 'scores': result['scores'] + } + + # If verbose, print the mock results + if verbose: + print("\nTop labels for the image:") + for label, score in zip(result_dict['labels'], result_dict['scores']): + print(f"{label}: {score}") + print("\n") + + return response.status_code, result_dict + + + def compute_affinity_scores_batched(self, label_to_embeddings, image_paths, normalize_fn='linear', scale=1, 
verbose=True, batch_size=32): + # Serialize the label_to_embeddings dictionary as before + serializable_embeddings = {label: tensor_to_base64(embedding) for label, embedding in label_to_embeddings.items()} + # Serialize the data into a JSON string + payload = json.dumps({ + "label_to_embeddings": serializable_embeddings, + "normalize_fn": normalize_fn, + "scale": scale, + "verbose": verbose, + "batch_size": batch_size + }) + + # Create a list of files to send (including the serialized data and the images) + files = [ + ("data", ("data.json", payload, "application/json")), + *[(f"image_paths", (os.path.basename(path), open(path, 'rb'))) for path in image_paths] + ] + + #response = requests.post(f"{self.url}/compute_affinity_scores_batched", files=files) + response = self.client.post("/compute_affinity_scores_batched", files=files) + response.raise_for_status() + + # Don't forget to close the files after the request + for _, (_, file) in files[1:]: + file.close() + + response_data = response.json() + + processed_results = [] + for entry in response_data: + processed_entry = { + 'labels': np.array(entry['labels']), + 'scores': np.array(entry['scores'], dtype=float) + } + processed_results.append(processed_entry) + + return response.status_code, processed_results diff --git a/Ethosight/Ethosight/tests/__init__.py b/Ethosight/Ethosight/tests/__init__.py new file mode 100644 index 0000000..b53ba9c --- /dev/null +++ b/Ethosight/Ethosight/tests/__init__.py @@ -0,0 +1,18 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/Ethosight/Ethosight/tests/img/bus.jpg b/Ethosight/Ethosight/tests/img/bus.jpg new file mode 100644 index 0000000..24a7dff Binary files /dev/null and b/Ethosight/Ethosight/tests/img/bus.jpg differ diff --git a/Ethosight/Ethosight/tests/locustfile.py b/Ethosight/Ethosight/tests/locustfile.py new file mode 100644 index 0000000..6432f0c --- /dev/null +++ b/Ethosight/Ethosight/tests/locustfile.py @@ -0,0 +1,71 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +from locust import HttpUser, between, task +import json +import os + + +class EthosightUser(HttpUser): + wait_time = between(1, 2) # Simulated users will wait 1-2 seconds between tasks + + embeddings = None + + def on_start(self): + """On start, get the label embeddings to use in other requests.""" + self.get_label_embeddings() + + def get_label_embeddings(self): + data = { + "labels": ["Electronics", "Unauthorized Vehicle", "Unattended Item", "Clothing", "Person", "Alcohol", "Tools"], # Example labels + "batch_size": 32 + } + with self.client.post("/compute_label_embeddings", json=data, name="/compute_label_embeddings", + catch_response=True) as response: + if response.ok: + result = response.json() + self.embeddings = json.loads(result["embeddings"]) + + @task + def compute_label_embeddings(self): + data = { + "labels": ["label1", "label2"], + "batch_size": 32 + } + self.client.post("/compute_label_embeddings", json=data, name="/compute_label_embeddings") + + @task + def compute_affinity_scores(self): + + image_path = os.path.join(os.path.dirname(__file__), 'img', 'bus.jpg') + data = { + "label_to_embeddings": self.embeddings, + "image_path": image_path, + "normalize_fn": "linear", + "scale": 1, + "verbose": True, + "batch_size": 32 + } + files = { + 'data': ("data.json", json.dumps(data), 'application/json'), + 'image': (os.path.basename(image_path), open(image_path, 'rb'), 'image/jpeg') + } + self.client.post("/compute_affinity_scores", files=files, name="/compute_affinity_scores") + + @task + def health_check(self): + self.client.get("/health", name="/health") diff --git a/Ethosight/Ethosight/tests/test_EthosightRESTServer.py b/Ethosight/Ethosight/tests/test_EthosightRESTServer.py new file mode 100644 index 0000000..417521e --- /dev/null +++ b/Ethosight/Ethosight/tests/test_EthosightRESTServer.py @@ -0,0 +1,64 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +from Ethosight.tests.RESTClientHelper import RESTClientHelper +import pytest +import os + + +client = RESTClientHelper() + + +@pytest.fixture(scope="session") +def compute_label_embeddings(): + labels = ["Electronics", "Unauthorized Vehicle", "Unattended Item", "Clothing", "Person", "Alcohol", "Tools"] + batch_size = 2 + image_path = os.path.join(os.path.dirname(__file__), 'img', 'bus.jpg') + status_code, label_to_embeddings = client.compute_label_embeddings(labels, batch_size) + resource = { + 'embeddings': (status_code, label_to_embeddings), + 'image_path': image_path, + 'image_paths': [image_path] + } + + return resource + + +def test_compute_label_embeddings_endpoint(compute_label_embeddings): + status_code, data = compute_label_embeddings['embeddings'] + assert status_code == 200 + assert "Electronics" in data and "Unauthorized Vehicle" in data and "Unattended Item" in data + +def test_compute_affinity_scores(compute_label_embeddings): + label_to_embeddings = compute_label_embeddings["embeddings"][1] + image_path = compute_label_embeddings["image_path"] + status_code, data = client.compute_affinity_scores(label_to_embeddings=label_to_embeddings, image_path=image_path) + assert status_code == 200 + labels = data["labels"] + assert "Electronics" in labels and "Unauthorized Vehicle" in labels and "Unattended Item" in labels + + +def 
test_compute_affinity_scores_batched(compute_label_embeddings): + label_to_embeddings = compute_label_embeddings["embeddings"][1] + image_paths = compute_label_embeddings["image_paths"] + status_code, data = client.compute_affinity_scores_batched(label_to_embeddings=label_to_embeddings, + image_paths=image_paths) + assert status_code == 200 + labels = data[0]["labels"] + assert "Electronics" in labels and "Unauthorized Vehicle" in labels and "Unattended Item" in labels + + diff --git a/Ethosight/Ethosight/utils.py b/Ethosight/Ethosight/utils.py index 9483e7c..db32829 100644 --- a/Ethosight/Ethosight/utils.py +++ b/Ethosight/Ethosight/utils.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import pkg_resources import os diff --git a/Ethosight/README.md b/Ethosight/README.md index 9390659..7e2f25d 100644 --- a/Ethosight/README.md +++ b/Ethosight/README.md @@ -6,20 +6,27 @@ ### Setting Up the Environment -> **Note:** The conda environment is currently named `imagebind`. This will be changed to `ethosight` in a future update. -1. If you're setting up for the first time, create the environment using the provided `environment.yml` file: + +> **Note:** These are the steps for manual installation, which are **deprecated** right now, because currently we are using Ansible for installation and state management.
+> You can ignore the steps until *'Registering new users'* part. +> Please follow these instructions for the installation and running: [installations](install/readme.md) + +1. If you're setting up for the first time, create the environment using the provided `environment.yml` file in the `/install` subfolder: ```bash - conda env create -f environment.yml + conda env create -f install/environment.yml ``` 2. If you're updating an existing environment, use the following command instead: ```bash - conda env update --name imagebind --file environment.yml + conda env update --name ethosight --file install/environment.yml ``` - +3. Then activate the conda environment: + ``` + conda activate ethosight + ``` ### Installing Ethosight After setting up the environment, you can install Ethosight using pip: @@ -30,7 +37,7 @@ pip install -e . ## Running the System -Before running the system, ensure that the Consul server is running. You can start the Consul server with the following command: +Before running the system, ensure that you have installed [Consul](https://developer.hashicorp.com/consul/install) and [consul-template](https://github.com/hashicorp/consul-template?tab=readme-ov-file#installation), and ensure that the Consul server is running. You can start the Consul server with the following command: ```bash consul agent -dev ``` @@ -44,7 +51,7 @@ Next, ensure that the 'ethosight-nginx' Docker container is running. This contai Here's how to run the `run_nginx.sh` script: ```bash -./run_nginx.sh +./bin/run_nginx.sh ``` After the 'ethosight-nginx' Docker container is running, you can execute the `run_consultemplate.sh` script. This script uses consul-template to dynamically update the Nginx configuration file based on the `nginx.template`. After the configuration file is updated, the script triggers a reload of the Nginx configuration inside the 'ethosight-nginx' Docker container.
@@ -52,17 +59,22 @@ After the 'ethosight-nginx' Docker container is running, you can execute the `ru You can run this script with the following command: ```bash -./run_consultemplate.sh +./bin/run_consultemplate.sh ``` ## Running Ethosight GPU Servers +Before running you can modify and set up environment variables in `bin/env_variables_setup.sh` to be appropriate for your system. +Then source them to your session. + +`source ./bin/env_variables_setup.sh` + To run Ethosight GPU Servers, you can use the `runserver.sh` script with the `runserver` argument. This script starts a server that can process requests from the Ethosight system. Here's how to run the `runserver.sh` script: ```bash -./runserver.sh runserver +./bin/runserver.sh runserver --host --port --gpu ``` ## Utilizing Multiple GPU Servers @@ -80,14 +92,88 @@ Before starting the Django server, ensure that the PostgreSQL server is running. Here's how to run the `runpostgress.sh` script: ```bash -./runpostgress.sh +./website/runpostgress.sh `````` -Once the PostgreSQL server is running, you can start the Django server. Make sure you're in the directory where `manage.py` is located, then run the following command: +Once the PostgreSQL server is running, you can start the Django web app inside /website folder. Run the following command if you are running first time the web application: ```bash -python manage.py runserver 8080 +./website/runwebapp.sh ``` -This command starts the Django development server. By default, the server runs on `localhost` on port `8000`. You can view your application by navigating to `http://localhost:8080` in your web browser. +If no need to create a super user and migrations are done, you can simply run the application +```bash +python website/manage.py runserver 8080 +``` +You can view your application by navigating to `http://localhost:8080` in your web browser. +You can create your own ethosight [configuration](#ethosight-configuration) by accessing `http://localhost:8080/admin`. 
+You can find an example in the `./configs` folder. + +## Registering new users +For registering new users, you have to provide mail sending environment variables in `bin/env_variables_setup.sh` +and request an access code in the registration form. The admin users should approve pending users from the admin panel. +If there's no capability to handle mail sending, you can generate +access codes manually using `genaccesscodes.py`, and use that access code in the form without requesting. + +## Ethosight Configuration + +The `ethosight.yml` file is the setup configuration for the ethosight application. +You can find the example file inside the `./configs` folder with all possible configurations and their explanations. + + +## CLI +Besides the main application represented as a UI, Ethosight provides a CLI for all core classes and functionalities like +EthosightAppCLI, EthosightCLI, EthosightDatasetCLI, EthosightMediaAnalyzerCLI. + +The main one is `EthosightAppCLI` with a bunch of useful methods. Some of them are still under implementation. + +#### EthosightAppCLI +* create_app (app_dir, config_file) - creates a new application + * app_dir - the location where the application will be created and run along with config files, embeddings, labels + * config_file - *.yml config file path +* delete_app (app_dir) - deletes the application located in app_dir + * app_dir - the application directory +* benchmark (app_dir) - Computes accuracy on a directory of images + * app_dir - the application directory 
+* optimize(app_dir) - optimizes the EthosightApp + * app_dir - the application directory +* run(app_dir, image) - Runs the EthosightApp on a single image + * app_dir - the application directory + * image - image file path +* benchmark_video (app_dir, video_gt_csv_filename) - Runs video benchmarking on a video + * app_dir - the application directory + * video_gt_csv_filename - video file names with ground truths +* rank_affinities (app_dir, json_file_path) - ranks affinities from json results + * app_dir - the application directory + * json_file_path - json file path containing already computed affinity scores +* phase2videobenchmarks (app_dir, phase2_groundtruth_csv) - runs benchmarks on all of the affinity score json files contained in the csv file. these are produced by phase1 Ethosight processing of video datasets + * app_dir - the application directory + * phase2_groundtruth_csv - the csv file path +* add_labels (app_dir, labels) - Adds new labels to the EthosightApp + * app_dir - the application directory + * labels - new labels specified + +#### EthosightCLI +* embed (filename) - Compute the label embeddings from a file of labels + * filename - The name of the file with the labels. Each line in the file should contain one label. + The embeddings will be saved to a file with the same name, but with '.embeddings' as the extension. +* affinities (image_filename, embeddings_filename, output_filename) - Compute affinity scores for an image with respect to the embeddings stored in a file. Save these scores to another file. The default filename for saving is the base name of the image file with the extension ".affinities". 
+ * image_filename - the image file path to compute affinities for + * embeddings_filename - embeddings file path to use for computing affinities + * output_filename (optional) - the output file name +* reason (use_case, label_affinity_scores, prompt_type, outfile, debug=False) - takes in a set of labels with their respective affinity scores, and generates a new set of labels based on the existing ones + * use_case - The use case for the reasoner. Default is "{USE_CASE_DEFAULT}" + * label_affinity_scores - The affinity scores for labels. Can be a string of labels or a path to a file. Default is an empty string + * prompt_type - The type of prompt for the reasoner. Can be "blank_slate" or "iterative". Default is "{PROMPT_TYPE_DEFAULT}" + * outfile - Output file name to write new labels. Default: reasoner.labels. +* summarize (label_affinity_scores, outfile, debug=False) - takes in a set of labels with their respective affinity scores, and generates a new set of labels based on the existing ones + * label_affinity_scores - file path containing labels with affinity scores + * outfile - the output file for newly generated labels + + +### Performance stress testing + +The project has the capability to load test itself. For your environment setup, you can run the command: +`locust -f Ethosight/tests/locustfile.py` +Then open `localhost:8089`; it will open the Locust UI, where you can start load testing for your environment. 
diff --git a/Ethosight/benchmark/images/robbery/3-abnormal-1.png b/Ethosight/benchmark/images/robbery/3-abnormal-1.png new file mode 100644 index 0000000..1537c90 Binary files /dev/null and b/Ethosight/benchmark/images/robbery/3-abnormal-1.png differ diff --git a/Ethosight/benchmark/images/robbery/3-abnormal-2.png b/Ethosight/benchmark/images/robbery/3-abnormal-2.png new file mode 100644 index 0000000..c6bead8 Binary files /dev/null and b/Ethosight/benchmark/images/robbery/3-abnormal-2.png differ diff --git a/Ethosight/benchmark/images/robbery/3-normal.png b/Ethosight/benchmark/images/robbery/3-normal.png new file mode 100644 index 0000000..9796039 Binary files /dev/null and b/Ethosight/benchmark/images/robbery/3-normal.png differ diff --git a/Ethosight/benchmark/images/robbery/306-abnormal-2.png b/Ethosight/benchmark/images/robbery/306-abnormal-2.png new file mode 100644 index 0000000..030dc3c Binary files /dev/null and b/Ethosight/benchmark/images/robbery/306-abnormal-2.png differ diff --git a/Ethosight/benchmark/images/robbery/306-abnormal-3.png b/Ethosight/benchmark/images/robbery/306-abnormal-3.png new file mode 100644 index 0000000..0ebd6ef Binary files /dev/null and b/Ethosight/benchmark/images/robbery/306-abnormal-3.png differ diff --git a/Ethosight/benchmark/images/robbery/306-abnromal-1.png b/Ethosight/benchmark/images/robbery/306-abnromal-1.png new file mode 100644 index 0000000..fbc517f Binary files /dev/null and b/Ethosight/benchmark/images/robbery/306-abnromal-1.png differ diff --git a/Ethosight/benchmark/images/robbery/372-abnormal-1.png b/Ethosight/benchmark/images/robbery/372-abnormal-1.png new file mode 100644 index 0000000..cc86404 Binary files /dev/null and b/Ethosight/benchmark/images/robbery/372-abnormal-1.png differ diff --git a/Ethosight/benchmark/images/robbery/372-abnormal-2.png b/Ethosight/benchmark/images/robbery/372-abnormal-2.png new file mode 100644 index 0000000..fc99d5d Binary files /dev/null and 
b/Ethosight/benchmark/images/robbery/372-abnormal-2.png differ diff --git a/Ethosight/benchmark/images/robbery/image-labels.csv b/Ethosight/benchmark/images/robbery/image-labels.csv new file mode 100644 index 0000000..8edd67f --- /dev/null +++ b/Ethosight/benchmark/images/robbery/image-labels.csv @@ -0,0 +1,9 @@ +image_path,label +3-abnormal-1.png,robbery +3-abnormal-2.png,robbery +3-normal.png,normal event +306-abnormal-1.png,robbery +306-abnormal-2.png,robbery +306-abnormal-3.png,normal event +372-abnormal-1.png,robbery +372-abnormal-2.png,normal event \ No newline at end of file diff --git a/Ethosight/bin/Ethosight_openclip.py b/Ethosight/bin/Ethosight_openclip.py index 91cee64..73ed47e 100644 --- a/Ethosight/bin/Ethosight_openclip.py +++ b/Ethosight/bin/Ethosight_openclip.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import numpy as np import torch import open_clip diff --git a/Ethosight/bin/compare_embeddings.py b/Ethosight/bin/compare_embeddings.py index 1ae4461..ee29706 100644 --- a/Ethosight/bin/compare_embeddings.py +++ b/Ethosight/bin/compare_embeddings.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import torch def load_embeddings(filename): diff --git a/Ethosight/bin/condacreate.sh b/Ethosight/bin/condacreate.sh index b583a67..d2733ee 100755 --- a/Ethosight/bin/condacreate.sh +++ b/Ethosight/bin/condacreate.sh @@ -1 +1,18 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + conda env create -f environment.yml diff --git a/Ethosight/bin/copyx.sh b/Ethosight/bin/copyx.sh index 460b86e..d1f48e9 100755 --- a/Ethosight/bin/copyx.sh +++ b/Ethosight/bin/copyx.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash # Check if the right number of arguments are provided diff --git a/Ethosight/bin/createEthosightGroundtruth.sh b/Ethosight/bin/createEthosightGroundtruth.sh index 66307a2..1379295 100755 --- a/Ethosight/bin/createEthosightGroundtruth.sh +++ b/Ethosight/bin/createEthosightGroundtruth.sh @@ -1,2 +1,19 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + python image_to_csv_groundtruths.py imagelinks/ ethosight.csv diff --git a/Ethosight/bin/env_variables_setup.sh b/Ethosight/bin/env_variables_setup.sh new file mode 100644 index 0000000..ec230a4 --- /dev/null +++ b/Ethosight/bin/env_variables_setup.sh @@ -0,0 +1,37 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash + +# EthosightBackend type client +# Not modifiable +export EthosightBackend=client +# EthosightBackendURL is pointing to nginx running +export EthosightBackendURL=http://localhost:80 +# DjangoEthosightAppBaseDir points to the app created on ui side +export DjangoEthosightAppBaseDir=/home/vahagn/projects/EthosightNew/website/EthosightAppBasedir +# EthosightYAMLDirectory points the created config yaml file location +export EthosightYAMLDirectory=/home/vahagn/projects/EthosightNew/configs +# ETHOSIGHT_APP_BASEDIR points to ethosight app created +export ETHOSIGHT_APP_BASEDIR=/home/vahagn/projects/EthosightNew/website/EthosightAppBasedir +# Email sending variables to send access codes to client users. 
You can modify as per your environment +export EMAIL_HOST=email-smtp.us-east-1.amazonaws.com +export EMAIL_PORT=587 +export EMAIL_USE_TLS=True +export EMAIL_USE_SSL=False +export EMAIL_HOST_USER='' +export EMAIL_HOST_PASSWORD='' \ No newline at end of file diff --git a/Ethosight/bin/environment_vars.sh b/Ethosight/bin/environment_vars.sh new file mode 100755 index 0000000..475992d --- /dev/null +++ b/Ethosight/bin/environment_vars.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export EthosightBackend="client" +export EthosightBackendURL="http://localhost:80" +export DjangoEthosightAppBaseDir="/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir" +export EthosightYAMLDirectory="/home/ubuntu/lxdshared/DeepVision/Ethosight/configs" +export ETHOSIGHT_APP_BASEDIR="/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir" +export EMAIL_HOST="" +export EMAIL_PORT="0" +export EMAIL_USE_TLS="True" +export EMAIL_USE_SSL="False" +export EMAIL_HOST_USER="" +export EMAIL_HOST_PASSWORD="" + diff --git a/Ethosight/bin/genaccesscodes.py b/Ethosight/bin/genaccesscodes.py index 284b5d0..9a21cc0 100755 --- a/Ethosight/bin/genaccesscodes.py +++ b/Ethosight/bin/genaccesscodes.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python3 import click import redis diff --git a/Ethosight/bin/groundtruth_creator.py b/Ethosight/bin/groundtruth_creator.py index f4fadc0..0cb1090 100644 --- a/Ethosight/bin/groundtruth_creator.py +++ b/Ethosight/bin/groundtruth_creator.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + import os import pandas as pd import webbrowser diff --git a/Ethosight/bin/image_to_csv_groundtruths.py b/Ethosight/bin/image_to_csv_groundtruths.py index 19aec86..2fb2b26 100644 --- a/Ethosight/bin/image_to_csv_groundtruths.py +++ b/Ethosight/bin/image_to_csv_groundtruths.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + import csv import os import click diff --git a/Ethosight/bin/installNARSGPT.sh b/Ethosight/bin/installNARSGPT.sh index e3b0369..897e968 100755 --- a/Ethosight/bin/installNARSGPT.sh +++ b/Ethosight/bin/installNARSGPT.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + git clone https://github.com/opennars/NARS-GPT cd NARS-GPT sh build.sh diff --git a/Ethosight/bin/run-learningloop.sh b/Ethosight/bin/run-learningloop.sh index 89ee588..d5b0647 100644 --- a/Ethosight/bin/run-learningloop.sh +++ b/Ethosight/bin/run-learningloop.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash if (( $# < 2)); then diff --git a/Ethosight/bin/run_consultemplate.sh b/Ethosight/bin/run_consultemplate.sh index 6c6e681..5653448 100755 --- a/Ethosight/bin/run_consultemplate.sh +++ b/Ethosight/bin/run_consultemplate.sh @@ -1,2 +1,21 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #consul-template -template "nginx.template:nginx.conf" -consul-template -template "nginx.template:/tmp/nginx.conf:docker exec ethosight-nginx nginx -s reload" +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" +consul-template -template "$SCRIPT_DIR/../config/consul/nginx.template:/tmp/nginx/nginx.conf:docker exec ethosight-nginx nginx -s reload" + diff --git a/Ethosight/bin/run_nginx.sh b/Ethosight/bin/run_nginx.sh index 4c10bf9..6976e0a 100755 --- a/Ethosight/bin/run_nginx.sh +++ b/Ethosight/bin/run_nginx.sh @@ -1,6 +1,36 @@ -cp nginx.conf /tmp/nginx.conf + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + +docker run --name tmp-nginx -d nginx +docker cp tmp-nginx:/etc/nginx /tmp +docker stop tmp-nginx +docker rm tmp-nginx + +cp "$SCRIPT_DIR/../config/nginx/nginx.conf" /tmp/nginx/nginx.conf + while true ; do - docker rm -f ethosight-nginx - docker run --name ethosight-nginx --network host -v /tmp/nginx.conf:/etc/nginx/nginx.conf:ro -p 80:80 -t nginx - sleep 5 -done \ No newline at end of file + docker rm -f ethosight-nginx + + docker run --name ethosight-nginx --network host \ + -v /tmp/nginx:/etc/nginx:ro \ + -p 80:80 -t nginx + + sleep 5 +done + diff --git a/Ethosight/bin/run_tests.sh b/Ethosight/bin/run_tests.sh index d84af9b..68ef54a 100755 --- a/Ethosight/bin/run_tests.sh +++ b/Ethosight/bin/run_tests.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash # Create a new file for the test report diff --git a/Ethosight/bin/runserver.py b/Ethosight/bin/runserver.py index 11bf037..de9805b 100755 --- a/Ethosight/bin/runserver.py +++ b/Ethosight/bin/runserver.py @@ -1,4 +1,19 @@ #!/usr/bin/env python3 +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 import click import os import uvicorn @@ -17,13 +32,14 @@ def cli(): @click.option('--consul-url', default='localhost', help='URL for Consul server. Default is "localhost".') @click.option('--consul-port', default=8500, type=int, help='Port for Consul server. Default is 8500.') @click.option('--gpu', default=0, type=int, help='GPU to use. Default is 0.') -def runserver(host, port, log_level, mode, consul_url, consul_port, gpu): +@click.option('--reasoner', default='', help='Reasoner type like ChatGPTReasoner. Default is no reasoner') +def runserver(host, port, log_level, mode, consul_url, consul_port, gpu, reasoner): """ Run the Ethosight REST Server with the specified options. """ # Assuming the EthosightRESTServer class accepts the consul_url and consul_port as arguments. # If not, you'll need to adjust the instantiation accordingly. 
- server = EthosightRESTServer(mode=mode, host=host, port=port, consul_url=consul_url, consul_port=consul_port, gpu=gpu) + server = EthosightRESTServer(mode=mode, host=host, port=port, consul_url=consul_url, consul_port=consul_port, gpu=gpu, reasoner=reasoner) uvicorn.run(server.app, host=host, port=port, log_level=log_level) @cli.command() diff --git a/Ethosight/bin/runserver.sh b/Ethosight/bin/runserver.sh new file mode 100644 index 0000000..9895b09 --- /dev/null +++ b/Ethosight/bin/runserver.sh @@ -0,0 +1,21 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash + +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" +python3 "$SCRIPT_DIR/runserver.py" "$@" \ No newline at end of file diff --git a/Ethosight/bin/test2_core1.sh b/Ethosight/bin/test2_core1.sh index 7db354f..397794b 100755 --- a/Ethosight/bin/test2_core1.sh +++ b/Ethosight/bin/test2_core1.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + while true; do EthosightCLI affinities shoplifting.png general.embeddings done diff --git a/Ethosight/bin/test_client1.sh b/Ethosight/bin/test_client1.sh index 20cf2f9..85bac37 100755 --- a/Ethosight/bin/test_client1.sh +++ b/Ethosight/bin/test_client1.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash while true; do diff --git a/Ethosight/bin/test_client2.sh b/Ethosight/bin/test_client2.sh index 0d1aaff..55dc9e8 100755 --- a/Ethosight/bin/test_client2.sh +++ b/Ethosight/bin/test_client2.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash while true; do diff --git a/Ethosight/bin/test_client3.sh b/Ethosight/bin/test_client3.sh index 0d1aaff..55dc9e8 100755 --- a/Ethosight/bin/test_client3.sh +++ b/Ethosight/bin/test_client3.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + #!/bin/bash while true; do diff --git a/Ethosight/bin/test_summarize_and_qa.sh b/Ethosight/bin/test_summarize_and_qa.sh index aeb6aaf..68e28a5 100644 --- a/Ethosight/bin/test_summarize_and_qa.sh +++ b/Ethosight/bin/test_summarize_and_qa.sh @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + ./EthosightCLI.py summarize --label-affinity-scores shoplifting.affinities -o shoplifting_summary.txt ./EthosightCLI.py ask images/shoplifting.png --background-knowledge "the man is bill gates" --summary-file shoplifting_summary.txt --questions "what is his name? what did he do? diff --git a/Ethosight/bin/ucf_extract_true_negatives.sh b/Ethosight/bin/ucf_extract_true_negatives.sh new file mode 100755 index 0000000..a5f9011 --- /dev/null +++ b/Ethosight/bin/ucf_extract_true_negatives.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Check if correct number of arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Assign command line arguments to variables +video_base_dir="$1" +output_base_dir="$2" +annotations_file="ucf_temporal_annotations.txt" # Explicitly set the annotations file name + +# Ensure output directory exists +mkdir -p "$output_base_dir" + +# Read the annotation file +while IFS= read -r line; do + read -ra fields <<< "$line" + filename="${fields[0]}" + label="${fields[1]}" + + # Proceed only if the label is "Normal" + if [ "$label" == "Normal" ]; then + # Find the file in the directory hierarchy + found_files=$(find "$video_base_dir" -type f -name "$filename") + + if [ -z "$found_files" ]; then + echo "Warning: File not found - $filename" + continue + fi + + video_path=$(echo "$found_files" | head -n 1) + + # Extract every 10th frame as a true negative sample + ffmpeg -i "$video_path" -vf "select='not(mod(n,10))'" -vsync vfr "$output_base_dir/tn_${filename%.*}_%03d.png" + fi + +done < 
"$annotations_file" + diff --git a/Ethosight/bin/ucf_extract_true_negatives_hard.sh b/Ethosight/bin/ucf_extract_true_negatives_hard.sh new file mode 100755 index 0000000..75705c0 --- /dev/null +++ b/Ethosight/bin/ucf_extract_true_negatives_hard.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Check if correct number of arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Assign command line arguments to variables +video_base_dir="$1" +output_base_dir="$2" +annotations_file="ucf_temporal_annotations.txt" # Explicitly set the annotations file name + +# Ensure output directory exists +mkdir -p "$output_base_dir" + +# Read the annotation file +while IFS= read -r line; do + read -ra fields <<< "$line" + filename="${fields[0]}" + label="${fields[1]}" + start_frame="${fields[2]}" + end_frame="${fields[3]}" + + # Skip 'Normal' labeled videos + if [ "$label" == "Normal" ]; then + continue + fi + + # Find the file in the directory hierarchy + found_files=$(find "$video_base_dir" -type f -name "$filename") + + if [ -z "$found_files" ]; then + echo "Warning: File not found - $filename" + continue + fi + + video_path=$(echo "$found_files" | head -n 1) + + # Extract frames outside the known anomaly ranges (before start_frame and after end_frame) + ffmpeg -i "$video_path" -vf "select='not(mod(n,10))*(lt(n,$start_frame) + gt(n,$end_frame))'" -vsync vfr "$output_base_dir/hard_tn_${filename%.*}_%03d.png" + +done < "$annotations_file" + diff --git a/Ethosight/bin/ucf_extract_true_positives.sh b/Ethosight/bin/ucf_extract_true_positives.sh new file mode 100755 index 0000000..43f9e16 --- /dev/null +++ b/Ethosight/bin/ucf_extract_true_positives.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Check if correct number of arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Assign command line arguments to variables +video_base_dir="$1" +output_base_dir="$2" + +# Ensure output directory exists +mkdir -p "$output_base_dir" + +# Assuming annotations are 
in the current directory named 'ucf_temporal_annotations.txt' +while IFS= read -r line; do + # Read fields separated by space + read -ra fields <<< "$line" + filename="${fields[0]}" + start_frame="${fields[2]}" + end_frame="${fields[3]}" + + # Find the file in the directory hierarchy + found_files=$(find "$video_base_dir" -type f -name "$filename") + + # Check if file was found + if [ -z "$found_files" ]; then + echo "Warning: File not found - $filename" + continue + fi + + # Assuming only one file will match, or handling the first match + video_path=$(echo "$found_files" | head -n 1) + + # Extract frames if the start and end frames are not -1 + if [ "$start_frame" -ne -1 ] && [ "$end_frame" -ne -1 ]; then + ffmpeg -i "$video_path" -vf "select='not(mod(n,10))*between(n,$start_frame,$end_frame)'" -vsync vfr "$output_base_dir/${filename%.*}_${start_frame}_to_${end_frame}_%03d.png" + fi + + # Check if there is a second event; fields are indexed starting from 4 and 5 + event2_start="${fields[4]}" + event2_end="${fields[5]}" + if [ "$event2_start" -ne -1 ] && [ "$event2_end" -ne -1 ]; then + ffmpeg -i "$video_path" -vf "select='not(mod(n,10))*between(n,$event2_start,$event2_end)'" -vsync vfr "$output_base_dir/${filename%.*}_${event2_start}_to_${event2_end}_%03d.png" + fi + +done < "ucf_temporal_annotations.txt" + diff --git a/Ethosight/bin/ucf_generate_dataset.sh b/Ethosight/bin/ucf_generate_dataset.sh new file mode 100755 index 0000000..a7f8589 --- /dev/null +++ b/Ethosight/bin/ucf_generate_dataset.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Check the number of arguments +if [ "$#" -ne 6 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Assign arguments to variables +true_positive_dir="$1" +frame_set_count="$2" +true_positive_count="$3" +true_negative_dir="$4" +true_negative_count="$5" +outputdir="$6" + +# Create Output Directories +mkdir -p "$outputdir/true_positives" +mkdir -p "$outputdir/true_negatives" + +# Function to Process Directories and Ensure Video Diversity +function 
process_directory() { + local src_dir=$1 + local set_count=$2 + local total_sets=$3 + local dest_dir=$4 + + declare -A video_counts # Tracks the number of frames each video has contributed + + # Preprocess to find and shuffle files + find "$src_dir" -type f -print0 | shuf --zero-terminated | while IFS= read -r -d $'\0' file; do + local base_name=$(basename "$file") + local video_name="${base_name%_*}" # Extract the base video name without frame index + + # Ensure video count is initialized + if [ -z "${video_counts[$video_name]}" ]; then + video_counts[$video_name]=0 + fi + + # Check if the video has already contributed the desired number of sets + if [ "${video_counts[$video_name]}" -ge 1 ]; then + continue + fi + + # Select and copy the correct number of consecutive frames + local frames=($(find "$src_dir" -type f -name "${video_name}_*.png" | sort)) + local num_frames=${#frames[@]} + if [ "$num_frames" -ge "$set_count" ]; then + for (( i=0; i<=$num_frames-$set_count; i++ )); do + local frame_set=("${frames[@]:i:set_count}") + for frame in "${frame_set[@]}"; do + cp "$frame" "$dest_dir" + done + ((video_counts[$video_name]++)) + break # Break after processing the first valid set to ensure only one set per video + done + fi + + # Check if required sets have been reached + local total_copied=0 + for count in "${video_counts[@]}"; do + ((total_copied+=count)) + done + + if [ "$total_copied" -ge "$total_sets" ]; then + break + fi + done +} + +# Process True Positives and True Negatives +process_directory "$true_positive_dir" "$frame_set_count" "$true_positive_count" "$outputdir/true_positives" +process_directory "$true_negative_dir" "$frame_set_count" "$true_negative_count" "$outputdir/true_negatives" + +echo "Dataset created in '$outputdir'" diff --git a/Ethosight/bin/ucf_readme.txt b/Ethosight/bin/ucf_readme.txt new file mode 100644 index 0000000..d1db7b6 --- /dev/null +++ b/Ethosight/bin/ucf_readme.txt @@ -0,0 +1,23 @@ 
+https://www.crcv.ucf.edu/projects/real-world/
+https://drive.google.com/drive/folders/1cNVD01er2WBC8OgVpf4O-DggkKjwsniO
+
+ucf_unzipper.sh
+ will unzip all files in directory
+
+ucf_extract_true_positives.sh
+ will extract true positives based on temporal annotation file
+
+ucf_extract_true_negatives.sh
+ will extract true negatives based on "normal" videos
+
+ucf_extract_true_negatives_hard.sh
+ will extract true negatives based only on the temporal annotation file and does not include normal files
+
+ucf_generate_dataset.sh
+ will create a dataset based on several parameters including frame_set_count, true_positive_count, and true_negative_count
+
+ frame_set_count: the number of consecutive frames per sample (consecutive in the dataset e.g. _10, _20, _30)
+
+ true_positive_count, true_negative_count: the number of frame sets per video
+
+
diff --git a/Ethosight/bin/ucf_temporal_annotations.txt b/Ethosight/bin/ucf_temporal_annotations.txt
new file mode 100644
index 0000000..e15a2c9
--- /dev/null
+++ b/Ethosight/bin/ucf_temporal_annotations.txt
@@ -0,0 +1,290 @@
+Abuse028_x264.mp4 Abuse 165 240 -1 -1
+Abuse030_x264.mp4 Abuse 1275 1360 -1 -1
+Arrest001_x264.mp4 Arrest 1185 1485 -1 -1
+Arrest007_x264.mp4 Arrest 1530 2160 -1 -1
+Arrest024_x264.mp4 Arrest 1005 3105 -1 -1
+Arrest030_x264.mp4 Arrest 5535 7200 -1 -1
+Arrest039_x264.mp4 Arrest 7215 10335 -1 -1
+Arson007_x264.mp4 Arson 2250 5700 -1 -1
+Arson009_x264.mp4 Arson 220 315 -1 -1
+Arson010_x264.mp4 Arson 885 1230 -1 -1
+Arson011_x264.mp4 Arson 150 420 680 1267
+Arson016_x264.mp4 Arson 1000 1796 -1 -1
+Arson018_x264.mp4 Arson 270 600 -1 -1
+Arson022_x264.mp4 Arson 3500 4000 -1 -1
+Arson035_x264.mp4 Arson 600 900 -1 -1
+Arson041_x264.mp4 Arson 2130 3615 -1 -1
+Assault006_x264.mp4 Assault 1185 8096 -1 -1
+Assault010_x264.mp4 Assault 11330 11680 12260 12930
+Assault011_x264.mp4 Assault 375 960 -1 -1
+Burglary005_x264.mp4 Burglary 4710 5040 -1 -1
+Burglary017_x264.mp4 Burglary 150 600 -1 -1
+Burglary018_x264.mp4 Burglary 720 1050 -1 -1
+Burglary021_x264.mp4 Burglary 60 200 840 1340 +Burglary024_x264.mp4 Burglary 60 1230 -1 -1 +Burglary032_x264.mp4 Burglary 1290 3690 -1 -1 +Burglary033_x264.mp4 Burglary 60 330 -1 -1 +Burglary035_x264.mp4 Burglary 1 1740 -1 -1 +Burglary037_x264.mp4 Burglary 240 390 540 1800 +Burglary061_x264.mp4 Burglary 4200 5700 -1 -1 +Burglary076_x264.mp4 Burglary 1590 4300 -1 -1 +Burglary079_x264.mp4 Burglary 7750 10710 -1 -1 +Burglary092_x264.mp4 Burglary 240 420 -1 -1 +Explosion002_x264.mp4 Explosion 1500 2100 -1 -1 +Explosion004_x264.mp4 Explosion 75 225 -1 -1 +Explosion007_x264.mp4 Explosion 1590 2280 -1 -1 +Explosion008_x264.mp4 Explosion 1005 1245 -1 -1 +Explosion010_x264.mp4 Explosion 285 1080 -1 -1 +Explosion011_x264.mp4 Explosion 795 945 -1 -1 +Explosion013_x264.mp4 Explosion 2520 2970 -1 -1 +Explosion016_x264.mp4 Explosion 180 450 -1 -1 +Explosion017_x264.mp4 Explosion 990 1440 -1 -1 +Explosion020_x264.mp4 Explosion 60 270 -1 -1 +Explosion021_x264.mp4 Explosion 135 270 -1 -1 +Explosion022_x264.mp4 Explosion 2230 2420 -1 -1 +Explosion025_x264.mp4 Explosion 260 420 -1 -1 +Explosion027_x264.mp4 Explosion 105 180 -1 -1 +Explosion028_x264.mp4 Explosion 280 700 -1 -1 +Explosion029_x264.mp4 Explosion 1830 2020 -1 -1 +Explosion033_x264.mp4 Explosion 970 1350 1550 3156 +Explosion035_x264.mp4 Explosion 250 350 -1 -1 +Explosion036_x264.mp4 Explosion 1950 2070 -1 -1 +Explosion039_x264.mp4 Explosion 60 150 -1 -1 +Explosion043_x264.mp4 Explosion 4460 4600 -1 -1 +Fighting003_x264.mp4 Fighting 1820 3103 -1 -1 +Fighting018_x264.mp4 Fighting 80 420 -1 -1 +Fighting033_x264.mp4 Fighting 570 840 -1 -1 +Fighting042_x264.mp4 Fighting 290 1200 -1 -1 +Fighting047_x264.mp4 Fighting 200 1830 -1 -1 +Normal_Videos_003_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_006_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_010_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_014_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_015_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_018_x264.mp4 Normal -1 -1 -1 -1 
+Normal_Videos_019_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_024_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_025_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_027_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_033_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_034_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_041_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_042_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_048_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_050_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_051_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_056_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_059_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_063_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_067_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_070_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_100_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_129_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_150_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_168_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_175_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_182_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_189_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_196_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_203_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_210_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_217_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_224_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_246_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_247_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_248_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_251_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_289_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_310_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_312_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_317_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_345_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_352_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_360_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_365_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_401_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_417_x264.mp4 Normal -1 -1 -1 -1 
+Normal_Videos_439_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_452_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_453_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_478_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_576_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_597_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_603_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_606_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_621_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_634_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_641_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_656_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_686_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_696_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_702_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_704_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_710_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_717_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_722_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_725_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_745_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_758_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_778_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_780_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_781_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_782_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_783_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_798_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_801_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_828_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_831_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_866_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_867_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_868_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_869_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_870_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_871_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_872_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_873_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_874_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_875_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_876_x264.mp4 Normal -1 -1 -1 -1 
+Normal_Videos_877_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_878_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_879_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_880_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_881_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_882_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_883_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_884_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_885_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_886_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_887_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_888_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_889_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_890_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_891_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_892_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_893_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_894_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_895_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_896_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_897_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_898_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_899_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_900_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_901_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_902_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_903_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_904_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_905_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_906_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_907_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_908_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_909_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_910_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_911_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_912_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_913_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_914_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_915_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_923_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_924_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_925_x264.mp4 Normal -1 -1 -1 -1 
+Normal_Videos_926_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_927_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_928_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_929_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_930_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_931_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_932_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_933_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_934_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_935_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_936_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_937_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_938_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_939_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_940_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_941_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_943_x264.mp4 Normal -1 -1 -1 -1 +Normal_Videos_944_x264.mp4 Normal -1 -1 -1 -1 +RoadAccidents001_x264.mp4 RoadAccidents 210 300 -1 -1 +RoadAccidents002_x264.mp4 RoadAccidents 240 300 -1 -1 +RoadAccidents004_x264.mp4 RoadAccidents 140 189 -1 -1 +RoadAccidents009_x264.mp4 RoadAccidents 210 240 -1 -1 +RoadAccidents010_x264.mp4 RoadAccidents 230 270 -1 -1 +RoadAccidents011_x264.mp4 RoadAccidents 260 300 -1 -1 +RoadAccidents012_x264.mp4 RoadAccidents 250 390 -1 -1 +RoadAccidents016_x264.mp4 RoadAccidents 530 720 -1 -1 +RoadAccidents017_x264.mp4 RoadAccidents 60 130 -1 -1 +RoadAccidents019_x264.mp4 RoadAccidents 750 900 -1 -1 +RoadAccidents020_x264.mp4 RoadAccidents 610 730 -1 -1 +RoadAccidents021_x264.mp4 RoadAccidents 30 90 -1 -1 +RoadAccidents022_x264.mp4 RoadAccidents 120 220 -1 -1 +RoadAccidents121_x264.mp4 RoadAccidents 330 390 -1 -1 +RoadAccidents122_x264.mp4 RoadAccidents 300 360 -1 -1 +RoadAccidents123_x264.mp4 RoadAccidents 130 210 -1 -1 +RoadAccidents124_x264.mp4 RoadAccidents 250 420 -1 -1 +RoadAccidents125_x264.mp4 RoadAccidents 490 600 -1 -1 +RoadAccidents127_x264.mp4 RoadAccidents 2160 2300 -1 -1 +RoadAccidents128_x264.mp4 RoadAccidents 90 200 -1 -1 +RoadAccidents131_x264.mp4 RoadAccidents 180 240 -1 -1 
+RoadAccidents132_x264.mp4 RoadAccidents 220 320 -1 -1 +RoadAccidents133_x264.mp4 RoadAccidents 270 450 -1 -1 +Robbery048_x264.mp4 Robbery 450 930 -1 -1 +Robbery050_x264.mp4 Robbery 495 1410 -1 -1 +Robbery102_x264.mp4 Robbery 1080 1560 -1 -1 +Robbery106_x264.mp4 Robbery 480 600 -1 -1 +Robbery137_x264.mp4 Robbery 135 1950 -1 -1 +Shooting002_x264.mp4 Shooting 1020 1100 -1 -1 +Shooting004_x264.mp4 Shooting 500 660 -1 -1 +Shooting007_x264.mp4 Shooting 45 165 -1 -1 +Shooting008_x264.mp4 Shooting 75 315 -1 -1 +Shooting010_x264.mp4 Shooting 1095 1260 -1 -1 +Shooting011_x264.mp4 Shooting 1480 1750 -1 -1 +Shooting013_x264.mp4 Shooting 860 945 -1 -1 +Shooting015_x264.mp4 Shooting 855 1715 -1 -1 +Shooting018_x264.mp4 Shooting 315 480 -1 -1 +Shooting019_x264.mp4 Shooting 1020 1455 -1 -1 +Shooting021_x264.mp4 Shooting 480 630 -1 -1 +Shooting022_x264.mp4 Shooting 2850 3300 -1 -1 +Shooting024_x264.mp4 Shooting 720 1305 -1 -1 +Shooting026_x264.mp4 Shooting 195 600 -1 -1 +Shooting028_x264.mp4 Shooting 285 555 -1 -1 +Shooting032_x264.mp4 Shooting 7995 8205 -1 -1 +Shooting033_x264.mp4 Shooting 1680 2000 -1 -1 +Shooting034_x264.mp4 Shooting 960 1050 -1 -1 +Shooting037_x264.mp4 Shooting 140 260 -1 -1 +Shooting043_x264.mp4 Shooting 945 1230 -1 -1 +Shooting046_x264.mp4 Shooting 4005 4230 4760 5088 +Shooting047_x264.mp4 Shooting 2160 3900 4860 6600 +Shooting048_x264.mp4 Shooting 1410 1730 -1 -1 +Shoplifting001_x264.mp4 Shoplifting 1550 2000 -1 -1 +Shoplifting004_x264.mp4 Shoplifting 2200 4900 -1 -1 +Shoplifting005_x264.mp4 Shoplifting 720 930 -1 -1 +Shoplifting007_x264.mp4 Shoplifting 550 760 4630 4920 +Shoplifting010_x264.mp4 Shoplifting 750 920 1550 1970 +Shoplifting015_x264.mp4 Shoplifting 2010 2160 -1 -1 +Shoplifting016_x264.mp4 Shoplifting 630 720 -1 -1 +Shoplifting017_x264.mp4 Shoplifting 360 420 -1 -1 +Shoplifting020_x264.mp4 Shoplifting 2340 2460 -1 -1 +Shoplifting021_x264.mp4 Shoplifting 2070 2220 -1 -1 +Shoplifting022_x264.mp4 Shoplifting 270 420 1440 1560 
+Shoplifting027_x264.mp4 Shoplifting 1080 1160 1470 1710 +Shoplifting028_x264.mp4 Shoplifting 570 840 -1 -1 +Shoplifting029_x264.mp4 Shoplifting 1020 1470 -1 -1 +Shoplifting031_x264.mp4 Shoplifting 120 330 -1 -1 +Shoplifting033_x264.mp4 Shoplifting 630 750 -1 -1 +Shoplifting034_x264.mp4 Shoplifting 7350 7470 -1 -1 +Shoplifting037_x264.mp4 Shoplifting 1140 1200 -1 -1 +Shoplifting039_x264.mp4 Shoplifting 2190 2340 -1 -1 +Shoplifting044_x264.mp4 Shoplifting 11070 11250 -1 -1 +Shoplifting049_x264.mp4 Shoplifting 1020 1350 -1 -1 +Stealing019_x264.mp4 Stealing 2730 2790 4170 4350 +Stealing036_x264.mp4 Stealing 1260 1590 -1 -1 +Stealing058_x264.mp4 Stealing 570 3660 -1 -1 +Stealing062_x264.mp4 Stealing 360 1050 -1 -1 +Stealing079_x264.mp4 Stealing 2550 3210 3510 4500 +Vandalism007_x264.mp4 Vandalism 240 750 -1 -1 +Vandalism015_x264.mp4 Vandalism 2010 2700 -1 -1 +Vandalism017_x264.mp4 Vandalism 270 330 780 840 +Vandalism028_x264.mp4 Vandalism 1830 1980 2400 2670 +Vandalism036_x264.mp4 Vandalism 540 780 990 1080 diff --git a/Ethosight/bin/ucf_unzipper.sh b/Ethosight/bin/ucf_unzipper.sh new file mode 100755 index 0000000..016b554 --- /dev/null +++ b/Ethosight/bin/ucf_unzipper.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# This script will find and unzip all .zip files in the current directory + +# Loop through each file in the current directory +for file in *.zip +do + # Check if the file is a zip file + if [[ "$file" == *.zip ]] + then + # Create a directory with the name of the file minus the '.zip' extension + dir="${file%.zip}" + mkdir -p "$dir" + + # Unzip the file into the directory + unzip -q "$file" -d "$dir" + echo "Unzipped $file into $dir/" + fi +done + +echo "Unzipping complete." 
+ diff --git a/Ethosight/bpe/bpe_simple_vocab_16e6.txt.gz b/Ethosight/bpe/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Ethosight/bpe/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Ethosight/config/concur/nginx.template b/Ethosight/config/consul/nginx.template similarity index 80% rename from Ethosight/config/concur/nginx.template rename to Ethosight/config/consul/nginx.template index 5a8503a..062d07b 100644 --- a/Ethosight/config/concur/nginx.template +++ b/Ethosight/config/consul/nginx.template @@ -5,6 +5,13 @@ events { } http { + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '-> $upstream_addr'; + + access_log /var/log/nginx/access.log main; client_max_body_size 3000M; upstream ethosight_servers { {{ range service "EthosightRESTServer" }} diff --git a/Ethosight/config/nginx/nginx.conf b/Ethosight/config/nginx/nginx.conf index 26c0072..37ee45a 100644 --- a/Ethosight/config/nginx/nginx.conf +++ b/Ethosight/config/nginx/nginx.conf @@ -5,6 +5,12 @@ events { } http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '-> $upstream_addr'; + + access_log /var/log/nginx/access.log main; client_max_body_size 3000M; upstream ethosight_servers { diff --git a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml b/Ethosight/configs/ethosightapp.yaml similarity index 90% rename from Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml rename to Ethosight/configs/ethosightapp.yaml index f3011d8..d0d0ae1 100644 --- a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml +++ b/Ethosight/configs/ethosightapp.yaml @@ -1,41 +1,43 @@ ################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: 
general.embeddings -labels_path: general.labels -output_type: affinityScores -reasoner_type: chatgpt +analyzeImageMethod: analyzeImage_precompiled # Default method from EthosightMediaAnalyzer +embeddings_path: baseline.embeddings # Embeddings file located in embeddings folder +labels_path: baseline.labels # Labels file located in embeddings folder +output_type: affinityScores # The output result format +reasoner_type: '' # ChatGPT reasoner or others ... If no reasoning just empty string ################## configure the label space optimization label_space_optimization: enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, + rerun: true # if true, will rerun the optimization even if a results file exists, # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" +# method: "semantic_relations" "semantic_similarity" + method: "semantic_similarity" # Default semantic_similarity parameters: threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) + max_labels: 10 # this is the max per ground truth label (not total) general_templates: - enabled: false + enabled: true template_path: "../templates/general_templates.txt" ################## configure the benchmark +# if benchmark is enabled it does benchmarking during optimization benchmark: enabled: true batch_mode: true batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" + #Images directory where to perform benchmarks + image_dir: "/home/vahagn/projects/EthosightNew/benchmark/images/robbery" # normallabel used to compute benchmark anomaly metrics normallabel: "normal event" # ground_truth_path can be a .txt file or a .csv # .txt : labels only # .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored # ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame + ground_truth_path: "/home/vahagn/projects/EthosightNew/benchmark/images/robbery/image-labels.csv" #labeled correctly per frame #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate + #however, if ground_truth_labels_path is specified, it will be used to generate # the ground truth labels and embeddings files #if not set and the system needs to generate, the csv will be used #as long as your .csv has all the labels, you don't need to set this @@ -53,23 +55,23 @@ benchmark: mapper: enabled: true # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to + # if the affinity score is below the threshold, the label is set to # normallabel affinity_minthreshold: enabled: false threshold: 26 normallabel: "normal event" threshold: 26 #is this needed for video? - # modes: + # modes: # - passthrough - pass input affinity scores through to output # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning + # - reasoning01 - create composite labels from input affinity scores, with reasoning # mode: 'passthrough' #mode: 'labelToLabelAffinity01' #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label # mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label + #submode: normallabelonly #only remap the normal label mode: 'hardmap01' hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' # prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." @@ -194,7 +196,7 @@ video: skip_frames: 0 normal_label_name: "normal event" label_mapping: "periods_count" - # modes: + # modes: # - majority - rank the labels by the number of frames # - longest_period - rank the labels by the number of consecutive frames # - periods_count - rank the labels by the number of periods @@ -206,4 +208,4 @@ visualization: ################# phase2 phase2: - maxiterations: 200 + maxiterations: 200 \ No newline at end of file diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/baseline.embeddings b/Ethosight/embeddings/baseline.embeddings similarity index 100% rename from Ethosight/website/EthosightAppBasedir/baseline01/baseline.embeddings rename to Ethosight/embeddings/baseline.embeddings diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/baseline.labels b/Ethosight/embeddings/baseline.labels similarity index 100% rename from Ethosight/website/EthosightAppBasedir/baseline01/baseline.labels rename to Ethosight/embeddings/baseline.labels diff --git a/Ethosight/website/EthosightAppBasedir/general01/general.embeddings b/Ethosight/embeddings/general.embeddings similarity index 100% rename from Ethosight/website/EthosightAppBasedir/general01/general.embeddings rename to Ethosight/embeddings/general.embeddings 
diff --git a/Ethosight/website/EthosightAppBasedir/general01/general.labels b/Ethosight/embeddings/general.labels similarity index 100% rename from Ethosight/website/EthosightAppBasedir/general01/general.labels rename to Ethosight/embeddings/general.labels diff --git a/Ethosight/embeddings/general_00796.embeddings b/Ethosight/embeddings/general_00796.embeddings new file mode 100644 index 0000000..075c2ab Binary files /dev/null and b/Ethosight/embeddings/general_00796.embeddings differ diff --git a/Ethosight/embeddings/general_00796.labels b/Ethosight/embeddings/general_00796.labels new file mode 100644 index 0000000..b4bfec4 --- /dev/null +++ b/Ethosight/embeddings/general_00796.labels @@ -0,0 +1,796 @@ +presenting +clouds +bike path +man +yellow lines +boat +bag +tree +rock formation +beach mat +weapon presence +bathing suit +knife +armed robbery +shower +scanning +metal pipe +jogger +loading car +using shopping cart +SUV +shirt +returning items +ramp +floaties +heart attack +rooster +clothing +barbecuing +cruise ship +unloading car +street light +vomiting +palm tree +bus +seizure +beach ball +asthma attack +octopus +guardrail +cityscape +jeans +toothbrush +bleeding +lotus +squirrel +sink +flamethrower +break room +bicycle +pigeon +looking for space +pedestrian +smoke +cooler box +seaweed +cougar_face +flippers +magazine +toys +starfish +column +water cooler +baseball glove +vandalism +swimming +machine gun +entrance ramp +eating +honking +blender +Leopards +security camera +asphalt +barcode scanner +hang glider +toll booth +queueing +refrigerator +elder abuse +suspicious behavior +reversing +dress +speed camera +spoon +bathtub +domestic violence +disturbing the peace +roundabout +roadway +spear +pepper spray +road +mandolin +video conferencing +windshield +faxing +improvised explosive device +money laundering +beach games +driftwood +car jacking +dresser +rash +coughing +beaver +joshua_tree +woman +BACKGROUND_Google +fainting +traffic sign +one-piece swimsuit 
+elephant +schooner +receipt +closet +pants +road blockage +cup +rifle +metal detector +product +accessory +sunscreen +kitchen +playing games +dive mask +choking +recycling +sale +night +drug overdose +lifeguard +slingshot +shoplifting +reading +ceiling_fan +conference room +getting lost +labor +crayfish +sandcastle +monitor +pillow +barrel +restaurant +rocket launcher +white lines +sun hat +sea_horse +emu +stegosaurus +umbrella +newsstand +pain +customer +paddleboard +building +lamp +ambulance +sale sign +illegal dumping +highway sign +skyscraper +water bottle +island +sea +river +using computer +rescue tube +breaking +nautilus +speed limit sign +ibis +beach tennis +parking +soccer_ball +street +eating out +scissors +manhole cover +sand dune +snorkel +beach cricket +pyramid +poisoning +club +window +paying at meter +bomb +cash +first aid kit +discount hunting +canoe +illegal street racing +furniture +gardening +employee +jewelry +browsing +brainstorming +park +headphones +slide +shorts +frisbee +lane +coffee mug +brontosaurus +parking lot +gerenuk +taking medication +dining table +tick +dollar_bill +carpet +bandaging +bonsai +store +sneezing +writing +taking photos +notebook +baseball bat +grenade +using public transit +saxophone +bookshelf +pagoda +dolphin +traffic +van +threatening +bike accident +sandals +goggles +mental health crisis +cash register +beach +door +cellphone +stalking +scuba gear +walking +child abuse +doing laundry +injury +shoes +sailboat +pedestrian overpass +beach umbrella +molotov cocktail +difficulty breathing +gas leak +traffic light +crocodile +surf +tagging +locking car +rhino +wine glass +headlight +cannon +hawksbill +ketch +mile marker +stroke +expressway +board shorts +printer +bird +accelerating +water scooter +car_side +stop_sign +lobster +yielding +rest area +fishing rod +swim trunks +trash can +using shopping basket +embezzlement +whiteboard +paying +skyline +panda +elevator +lighthouse +dock +garage +pond +meeting +lane markings 
+toaster +minaret +harbor +wheelchair +floatation device +heels +flower +paper +traffic violation +public disturbance +telephone +bush +kite +ewer +violent gestures +bench +shoe +assault rifle +hedgehog +overpass +hot air balloon +price tag +comparing +penthouse +surfboard +fire truck +beach soccer +picnic blanket +mailbox +ant +shotgun +rescue +explosive device +suicidal behavior +strawberry +hypothermia +face +keyboard +watch +panhandling +oven +taxi +sand toys +buying +exercising +roadside assistance +book +power lines +shoreline +rain +desk +fighting +brain +file cabinet +landscape +buildings +hut +falling +using medical device +running away +crosswalk +living room +sunbathing +exit sign +sarong +platypus +kangaroo +wild_cat +using GPS +bladed weapon +drowning +jellyfish +animal abuse +llama +flamingo_head +jogging +Faces_easy +snoopy +metronome +traffic congestion +antenna +basket +gramophone +seashell +heat stroke +sofa +changing lanes +Motorbikes +cougar_body +drunk and disorderly conduct +towel +evacuation +printing +credit card +standing +driver +emergency lane +fog +typing +blowgun +footprints +animal bite +marker +exit ramp +mace +road shoulder +pedestrian crossing +median +road divider +picnicking +water_lilly +cybercrime +roof +highway +bathroom +shouting +okapi +throwing objects +pizza +day +fitting room +beach chair +wind surf +chair +bass +queue +picnic basket +escalator +nuclear weapon +allergic reaction +trespassing +mannequin +scorpion +sedan +exit +billboard +pencil +fire hydrant +road marking +driving +change +flooding +fireworks +littering +chemical weapon +fax machine +picnic table +fever +corporate crime +lifeguard tower +euphonium +pier +seaplane +blanket +watching TV +street sign +aisle +hitting +falling objects +drug dealing +mouth +jaywalking +sunrise +entrance +noise complaints +dalmatian +leg +nunchaku +cosmetics +construction +shelf +carpool lane +coast +power outage +life vest +crossbow +butterfly +parasail +waiting +sunflower +crab 
+truck +binocular +bagging items +sun +Faces +gun +t-shirt +chemical spill +posing +turning +landmine +repairing +stealing +yacht +suv +dusk +bridge +playing sports +pickup truck +inflatable raft +head +swing set +car mirror +toilet +trying on clothes +indecent exposure +bedroom +sunglasses +doing yoga +hard shoulder +manager +checkout +drinking coffee +dagger +parachute +wheel +mouse +motorway +whistle +inline_skate +cloud +burglary +fire +chains +binder +semi-truck +freeway +bed +car accident +wave +center divider +ferry +vehicle vandalism +tire +bow and arrow +tunnel +menorah +flamingo +graffiti +airplanes +wrench +drunk driving +mammal +industrial espionage +tennis racket +chandelier +dryer +apparel +diabetic emergency +beach volleyball +camera +employee theft +grand_piano +dragonfly +plate +water +food +burns +paperclip +pickpocketing +road work +bikini +crowbar +crocodile_head +seagull +counterfeiting +flip flop +bathing +service station +public intoxication +beach bag +ocean +sitting +sky +cooler +dog +eating lunch +sightseeing +jet ski +underpass +trilobite +airplane +talking on phone +speeding +decorating +human +kayak +grass +swim cap +tank top +electric_guitar +fork +cliff +books +playing on playground +people +statue +stapler +snow +beach tent +anchor +trolley +handgun +safety barrier +sand +sniper rifle +traffic jam +office supplies +home invasion +tail light +interstate +cooking +pushing +arm +passenger +curtains +sales associate +shopping cart +office building +sidewalk +plant +directional sign +flip flops +working +discount +laptop +manhole +bird watching +electronics +garfield +dog walking +microwave +police car +sandbox +boy +sword +headphone +machete +car +picture frame +protesting without permit +buddha +computer +vehicle +yin_yang +coffee table +feeding pets +sunset +cleaning +hit and run +mayfly +windsor_chair +lady +sleeping +kaftan +relaxing +harassment +fountain +beach hat +binoculars +car theft +loitering +mall +biological weapon +girl 
+hammer +cycling +structural damage +safety buoy +apartment building +arson +accordion +helicopter +pen +promotion +music player +reading product info +brass knuckles +washing machine +swimwear +insider trading +earthquake +motorcycle +duck +cover up +soap +sexual harassment +taser +playground +revolver +vagrancy +beverage +working from home +fraud +trees +firearm +nose +meditating +mirror +stun gun +shopping +slip and fall +running +soccer ball +window display +beach towel +emergency vehicle +waves +cubicle +dawn +road sign +television +commuting +house +spanish architecture +custom home +cul de sac +driveway +tile roof +garden zone 2 +garden zone 9 +airport +runway +air_traffic_control +ground_crew +hangar +aircraft_maintenance +baggage_handling +security_check +fire_truck +emergency_services +traffic_congestion +police_presence +safety_hazard +public_safety +security_alert +emergency_response +bomb_squad +evacuation_procedure +gas_leak_detection +weapon +fire_extinguisher +explosion +fire_safety +emergency_services +firefighters +police_presence +crowd +riot +protest +civil_unrest +violence +public_safety +fire_alarm +smoke_detector +siren +distress_signal +burn_injury +emergency_landing +fuel_leak +incendiary_device +flammable_material +fire_hose +fire_helicopter +emergency_exit +fire_escape +safety_hazard +fire_drill +fire_department +fire_investigation +fire_inspector +fire_suppression_system +fire_prevention +fire_retardant +burnt_structure +fire_damage +smoke_inhalation +carbon_monoxide_poisoning +emergency_medical_services +burn_ward +fire_insurance +arson_investigation +fire_sprinkler_system +airport accident +peeping tom +voyeur +ogler +meddler +snooper +snoop +sneaking +prowling +running away from crime scene +running to help victims +child in danger +poisoning risk +poisoning +chemicals +chemical hazard +911 +baby diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/ground_truth.embeddings b/Ethosight/embeddings/ground_truth.embeddings 
similarity index 100% rename from Ethosight/website/EthosightAppBasedir/baseline01/ground_truth.embeddings rename to Ethosight/embeddings/ground_truth.embeddings diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/ground_truth.labels b/Ethosight/embeddings/ground_truth.labels similarity index 100% rename from Ethosight/website/EthosightAppBasedir/baseline01/ground_truth.labels rename to Ethosight/embeddings/ground_truth.labels diff --git a/Ethosight/install/create_conda_env.yaml b/Ethosight/install/create_conda_env.yaml new file mode 100644 index 0000000..9deae2b --- /dev/null +++ b/Ethosight/install/create_conda_env.yaml @@ -0,0 +1,23 @@ +--- +- name: Setup Conda environment and install core package + hosts: all + become: no + + vars: + playbook_dir: "{{ playbook_dir | default(lookup('env', 'PWD')) }}" + conda_env_path: "{{ playbook_dir }}/environment.yml" + ethosight_path: "{{ playbook_dir }}/.." + + tasks: + - name: Create Conda environment from file + shell: conda env create -f "{{ conda_env_path }}" + args: + executable: /bin/bash + + - name: Activate environment and install core package + shell: | + source "$(conda info --base)/etc/profile.d/conda.sh" + conda activate $(head -1 "{{ conda_env_path }}" | cut -d' ' -f2) + pip install -e "{{ ethosight_path }}" + args: + executable: /bin/bash diff --git a/Ethosight/install/environment.yml b/Ethosight/install/environment.yml index e926d17..ed26779 100644 --- a/Ethosight/install/environment.yml +++ b/Ethosight/install/environment.yml @@ -1,4 +1,4 @@ -name: imagebind +name: ethosight channels: - conda-forge - defaults @@ -66,16 +66,20 @@ dependencies: - aiohttp==3.8.5 - aiosignal==1.3.1 - annotated-types==0.5.0 + - ansible==6.7.0 + - ansible-core==2.13.13 - anyio==3.7.1 - apptools==5.2.0 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - arrow==1.2.3 + - asgiref==3.8.1 - asttokens==2.2.1 - async-timeout==4.0.2 - attrs==23.1.0 - av==10.0.0 - backcall==0.2.0 + - backports-zoneinfo==0.2.1 - 
beautifulsoup4==4.12.2 - bleach==6.0.0 - blinker==1.6.2 @@ -85,7 +89,9 @@ dependencies: - charset-normalizer==3.1.0 - click==8.1.5 - comm==0.1.3 + - configargparse==1.7 - configobj==5.0.8 + - cryptography==42.0.5 - cuda-python==12.2.0 - cython==3.0.2 - dataclasses-json==0.5.13 @@ -93,6 +99,8 @@ dependencies: - decorator==5.1.1 - decord==0.6.0 - defusedxml==0.7.1 + - deprecated==1.2.14 + - django==4.2.9 - dnspython==2.4.2 - einops==0.6.1 - email-validator==2.0.0.post2 @@ -103,13 +111,15 @@ dependencies: - fastjsonschema==2.17.0 - filelock==3.12.2 - flask==2.3.2 + - flask-cors==4.0.0 + - flask-login==0.6.3 - fqdn==1.5.1 - frozenlist==1.4.0 - fsspec==2023.6.0 - ftfy==6.1.1 - fvcore==0.1.5.post20221221 - gevent==23.9.0.post1 - - geventhttpclient==2.0.2 + - geventhttpclient==2.0.11 - google-api-core==2.11.1 - google-api-python-client==2.93.0 - google-auth==2.22.0 @@ -125,7 +135,6 @@ dependencies: - httpx==0.24.1 - huggingface-hub==0.16.4 - idna==3.4 - # - imagenetv2-pytorch==0.1 - importlib-metadata==6.6.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 @@ -139,6 +148,7 @@ dependencies: - jedi==0.18.2 - jinja2==3.1.2 - joblib==1.3.1 + - jsonpatch==1.33 - jsonpointer==2.3 - jsonschema==4.17.3 - jupyter==1.0.0 @@ -154,15 +164,17 @@ dependencies: - jupyter-server-terminals==0.4.4 - jupyterlab-pygments==0.2.2 - jupyterlab-widgets==3.0.7 - # - langchain==0.0.245 - # - langsmith==0.0.15 + - langchain==0.0.347 + - langchain-core==0.0.11 + - langsmith==0.0.92 - llama-index==0.7.13 + - locust==2.24.0 - lxml==4.9.3 - markupsafe==2.1.2 - marshmallow==3.20.1 - matplotlib-inline==0.1.6 - - mayavi==4.8.1 - mistune==2.0.5 + - msgpack==1.0.8 - multidict==6.0.4 - mypy-extensions==1.0.0 - nbclassic==1.0.0 @@ -202,6 +214,7 @@ dependencies: - prompt-toolkit==3.0.38 - protobuf==3.20.3 - psutil==5.9.5 + - psycopg2-binary==2.9.9 - ptyprocess==0.7.0 - pure-eval==0.2.2 - pyasn1==0.5.0 @@ -209,8 +222,6 @@ dependencies: - pycparser==2.21 - pydantic==1.10.12 - pydantic-core==0.25.0 - # - 
pydantic-extra-types==2.1.0 - #- pydantic-settings==1.99 - pyface==8.0.0 - pygments==2.15.1 - pyrsistent==0.19.3 @@ -226,12 +237,17 @@ dependencies: - pyzmq==25.0.2 - qtconsole==5.4.3 - qtpy==2.3.1 + - redis==4.3.4 - regex==2023.5.5 - requests==2.30.0 - requests-oauthlib==1.3.1 + - resolvelib==0.8.1 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 + - roundrobin==0.0.4 - rsa==4.9 + - ruamel-yaml==0.18.5 + - ruamel-yaml-clib==0.2.8 - safetensors==0.3.1 - scikit-learn==1.3.0 - scipy==1.10.1 @@ -240,6 +256,7 @@ dependencies: - sniffio==1.3.0 - soupsieve==2.4.1 - sqlalchemy==2.0.19 + - sqlparse==0.5.0 - stack-data==0.6.2 - starlette==0.27.0 - tabulate==0.9.0 @@ -281,9 +298,10 @@ dependencies: - websockets==11.0.3 - werkzeug==2.3.6 - widgetsnbextension==4.0.7 + - wrapt==1.16.0 - yacs==0.1.8 - yarl==1.9.2 - zipp==3.15.0 - zope-event==5.0 - zope-interface==6.0 -prefix: /home/hmlatapie/miniconda3/envs/imagebind +prefix: /home/ubuntu/miniconda3/envs/ethosight diff --git a/Ethosight/install/files/ethosight_environment b/Ethosight/install/files/ethosight_environment new file mode 100644 index 0000000..a256f1f --- /dev/null +++ b/Ethosight/install/files/ethosight_environment @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + + +# EthosightBackend type client +# Not modifiable +export EthosightBackend=client +#export EthosightBackend=core +# EthosightBackendURL is pointing to nginx running +export EthosightBackendURL=http://localhost:80 +# DjangoEthosightAppBaseDir points to the app created on ui side +export DjangoEthosightAppBaseDir=/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir +# EthosightYAMLDirectory points the created config yaml file location +export EthosightYAMLDirectory=/home/ubuntu/lxdshared/DeepVision/Ethosight/configs +# ETHOSIGHT_APP_BASEDIR points to ethosight app created +export ETHOSIGHT_APP_BASEDIR=/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir +# Email sending variables to send access codes to client users. You can modify as per your environment +export EMAIL_HOST=email-smtp.us-east-1.amazonaws.com +export EMAIL_PORT=587 +export EMAIL_USE_TLS=True +export EMAIL_USE_SSL=False +export EMAIL_HOST_USER='' +export EMAIL_HOST_PASSWORD='' + diff --git a/Ethosight/install/files/nginx.conf b/Ethosight/install/files/nginx.conf new file mode 100644 index 0000000..37ee45a --- /dev/null +++ b/Ethosight/install/files/nginx.conf @@ -0,0 +1,50 @@ +events { + worker_connections 1024; + use epoll; # Use epoll for handling events on Linux + multi_accept on; # Accept all new connections at once +} + +http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '-> $upstream_addr'; + + access_log /var/log/nginx/access.log main; + client_max_body_size 3000M; + upstream ethosight_servers { + + server 127.0.0.1:8000; + + } + + # Basic settings for optimal performance + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + # Include mime types for proper content-type headers + include /etc/nginx/mime.types; + default_type 
application/octet-stream; + + server { + listen 80; + + # Setting max body size (you can adjust based on your needs) + client_max_body_size 3000M; + + location / { + proxy_pass http://ethosight_servers; + + # Basic Proxy settings + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + } + } +} + diff --git a/Ethosight/install/files/nginx.template b/Ethosight/install/files/nginx.template new file mode 100644 index 0000000..062d07b --- /dev/null +++ b/Ethosight/install/files/nginx.template @@ -0,0 +1,52 @@ +events { + worker_connections 1024; + use epoll; # Use epoll for handling events on Linux + multi_accept on; # Accept all new connections at once +} + +http { + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" ' + '-> $upstream_addr'; + + access_log /var/log/nginx/access.log main; + client_max_body_size 3000M; + upstream ethosight_servers { + {{ range service "EthosightRESTServer" }} + server {{ .Address }}:{{ .Port }}; + {{ end }} + } + + # Basic settings for optimal performance + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + # Include mime types for proper content-type headers + include /etc/nginx/mime.types; + default_type application/octet-stream; + + server { + listen 80; + + # Setting max body size (you can adjust based on your needs) + client_max_body_size 3000M; + + location / { + proxy_pass http://ethosight_servers; + + # Basic Proxy settings + proxy_read_timeout 300s; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + } + } +} + diff --git 
a/Ethosight/install/files/run_consultemplate.sh b/Ethosight/install/files/run_consultemplate.sh new file mode 100755 index 0000000..873c86b --- /dev/null +++ b/Ethosight/install/files/run_consultemplate.sh @@ -0,0 +1,21 @@ +#!/usr/bin/bash +LOG_FILE="/var/log/consul-template.log" +TEMPLATE_PATH="/etc/consul-template.d/nginx.template" +CONFIG_PATH="/tmp/nginx/nginx.conf" + +# Check and load Ethosight specific environment variables +if [ -f /etc/ethosight_environment ]; then + source /etc/ethosight_environment +else + echo "$(date): Ethosight environment file not found, skipping load." >> $LOG_FILE +fi + +# Logging the start +echo "$(date): Starting consul-template..." >> $LOG_FILE + +# Run consul-template +consul-template -template "${TEMPLATE_PATH}:${CONFIG_PATH}:docker exec ethosight-nginx nginx -s reload" \ +|| echo "$(date): Error during consul-template execution." >> $LOG_FILE + +# Logging completion +echo "$(date): consul-template processing complete." >> $LOG_FILE diff --git a/Ethosight/install/files/run_nginx.sh b/Ethosight/install/files/run_nginx.sh new file mode 100755 index 0000000..52aa845 --- /dev/null +++ b/Ethosight/install/files/run_nginx.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CONFIG_DIR="/tmp/nginx" +NGINX_CONF="$CONFIG_DIR/nginx.conf" +CONTAINER_NAME="ethosight-nginx" + +# Ensure the configuration directory exists and has the necessary permissions +mkdir -p $CONFIG_DIR +chmod 755 $CONFIG_DIR + +# Function to restart Nginx Docker container +restart_nginx() { + echo "Restarting Nginx container due to configuration change..." 
+ # Stop and remove the current Nginx container if it exists + docker rm -f $CONTAINER_NAME + # Start a new Nginx container with the custom nginx.conf mounted + docker run --name $CONTAINER_NAME --network host -v "$NGINX_CONF":/etc/nginx/nginx.conf:ro -p 80:80 -d nginx +} + +# Initial start of the Nginx container +restart_nginx + +# Monitor nginx.conf for changes +while inotifywait -e modify,move_self,create,delete $NGINX_CONF; do + restart_nginx +done + diff --git a/Ethosight/install/install_and_run_gpuserver.yaml b/Ethosight/install/install_and_run_gpuserver.yaml new file mode 100644 index 0000000..2b940a9 --- /dev/null +++ b/Ethosight/install/install_and_run_gpuserver.yaml @@ -0,0 +1,37 @@ +--- +- name: Install and run run_gpuserver.sh as a systemd service + hosts: all + become: yes + + vars: + ethosight_gpus: + - 0 + - 1 + + tasks: + - name: Create scripts directory + file: + path: /opt/scripts + state: directory + + - name: Generate startup script + template: + src: gpuserver_startup.sh.j2 + dest: /opt/scripts/gpuserver_startup.sh + mode: '0755' + + - name: Create systemd service unit + template: + src: gpuserver.service.j2 + dest: /etc/systemd/system/gpuserver.service + + - name: Reload systemd daemon + systemd: + daemon_reload: yes + + - name: Start and enable gpuserver service + systemd: + name: gpuserver + state: started + enabled: yes + diff --git a/Ethosight/install/install_and_run_webserver.yaml b/Ethosight/install/install_and_run_webserver.yaml new file mode 100644 index 0000000..2ef5935 --- /dev/null +++ b/Ethosight/install/install_and_run_webserver.yaml @@ -0,0 +1,85 @@ +--- +- name: Setup Django Systemd Service + hosts: all + become: yes + vars_files: + - uservars.yml + vars: + # Add these to your vars file + postgres_password: mysecretpassword + postgres_db: mydatabase + postgres_user: postgres # Default user + postgres_port: 5432 + + django_user: ubuntu + django_group: ubuntu + django_project_dir: 
/home/ubuntu/lxdshared/DeepVision/Ethosight/website + django_port: 8080 + conda_path: /home/ubuntu/miniconda3 + conda_environment: ethosight + environment_vars_file_path: /home/ubuntu/lxdshared/DeepVision/Ethosight/bin/environment_vars.sh + + tasks: + - name: Deploy environment variables file + template: + src: environment_vars.sh.j2 + dest: "{{ environment_vars_file_path }}" + mode: 0755 + + - name: Deploy PostgreSQL run script + template: + src: templates/postgres_run.sh.j2 + dest: /usr/local/bin/postgres_run.sh + mode: 0755 + + - name: Deploy PostgreSQL systemd service + template: + src: postgres.service.j2 + dest: /etc/systemd/system/postgres.service + notify: + - reload systemd + - restart postgres service + + - name: Enable and start PostgreSQL service + systemd: + name: postgres.service + enabled: yes + state: started + + - name: Deploy Django runserver script + template: + src: templates/django_runserver.sh.j2 + dest: /usr/local/bin/django_runserver.sh + mode: 0755 + + - name: Setup Django systemd service + template: + src: django.service.j2 + dest: /etc/systemd/system/django.service + notify: + - reload systemd + - restart django service + + - name: Enable and start Django service + systemd: + name: django.service + enabled: yes + state: started + notify: + - reload systemd + - restart django service + + handlers: + - name: reload systemd + systemd: + daemon_reload: yes + + - name: restart django service + systemd: + name: django.service + state: restarted + + - name: restart postgres service + systemd: + name: postgres.service + state: restarted \ No newline at end of file diff --git a/Ethosight/install/install_conda.yaml b/Ethosight/install/install_conda.yaml new file mode 100644 index 0000000..d8d0f6b --- /dev/null +++ b/Ethosight/install/install_conda.yaml @@ -0,0 +1,44 @@ +--- +- name: Install and Initialize Miniconda + hosts: all + become: yes + + tasks: + - name: Download Miniconda installer + get_url: + url: 
"https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh" + dest: "/tmp/Miniconda3-latest-Linux-x86_64.sh" + mode: '0755' + + - name: Install Miniconda + shell: bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/miniconda3 + args: + creates: /opt/miniconda3/bin/conda + + - name: Initialize Conda for all users + shell: | + source /opt/miniconda3/etc/profile.d/conda.sh + conda init bash + args: + executable: /bin/bash + + - name: Ensure Conda is in the PATH for all users + lineinfile: + path: /etc/profile + line: 'export PATH="/opt/miniconda3/bin:$PATH"' + create: yes + + - name: Append Conda init script to bashrc + lineinfile: + path: "/home/ubuntu/.bashrc" + line: 'source /opt/miniconda3/etc/profile.d/conda.sh' + create: yes + state: present + + - name: Ensure Conda base environment is activated on shell login + lineinfile: + path: "/home/ubuntu/.bashrc" + line: 'conda activate base' + create: yes + state: present + diff --git a/Ethosight/install/install_consul.yaml b/Ethosight/install/install_consul.yaml new file mode 100644 index 0000000..8cf47a8 --- /dev/null +++ b/Ethosight/install/install_consul.yaml @@ -0,0 +1,91 @@ +--- +- name: Install Consul + hosts: all + become: yes + vars: + consul_version: "1.12.2" + consul_download_url: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_amd64.zip" + consul_install_dir: "/usr/local/bin" + consul_config_dir: "/etc/consul.d" + consul_data_dir: "/opt/consul" + consul_config_file: "consul.json" + consul_datacenter: "dc1" + consul_log_level: "INFO" + consul_server: true + consul_bind_address: "127.0.0.1" + consul_client_address: "127.0.0.1" + consul_bootstrap_expect: 1 + consul_ui_enable: true + + tasks: + - name: Create consul user and group + user: + name: consul + system: yes + home: /etc/consul.d + shell: /sbin/nologin + state: present + + - name: Install unzip + apt: + name: unzip + state: present + + - name: Download Consul + get_url: + url: "{{ 
consul_download_url }}" + dest: "/tmp/consul.zip" + + - name: Unzip Consul + unarchive: + src: "/tmp/consul.zip" + dest: "{{ consul_install_dir }}" + remote_src: yes + + - name: Create Consul configuration directory + file: + path: "{{ consul_config_dir }}" + state: directory + + - name: Create Consul data directory + file: + path: "{{ consul_data_dir }}" + state: directory + + - name: Ensure Consul data directory exists with correct permissions + file: + path: "{{ consul_data_dir }}" + state: directory + owner: consul + group: consul + mode: '0750' + + - name: Create Consul configuration file + template: + src: templates/consul.json.j2 + dest: "{{ consul_config_dir }}/{{ consul_config_file }}" + notify: + - Restart consul service + + - name: Create Consul systemd service + template: + src: templates/consul.service.j2 + dest: /etc/systemd/system/consul.service + notify: + - Reload systemd + + - name: Start and enable Consul service + systemd: + name: consul + state: started + enabled: yes + + handlers: + - name: Reload systemd + systemd: + daemon_reload: yes + + - name: Restart consul service + systemd: + name: consul + state: restarted \ No newline at end of file diff --git a/Ethosight/install/install_consul_template.yml b/Ethosight/install/install_consul_template.yml new file mode 100644 index 0000000..e22f147 --- /dev/null +++ b/Ethosight/install/install_consul_template.yml @@ -0,0 +1,80 @@ +--- +- name: Install and Configure Consul Template + hosts: all + become: yes + + vars: + consul_template_version: "0.29.0" + consul_template_download_url: "https://releases.hashicorp.com/consul-template/{{ consul_template_version }}/consul-template_{{ consul_template_version }}_linux_amd64.zip" + consul_template_install_dir: "/usr/local/bin" + consul_template_config_dir: "/etc/consul-template.d" + + tasks: + - name: Deploy Ethosight specific environment file + copy: + src: files/ethosight_environment + dest: /etc/ethosight_environment + mode: '0644' + + - name: Install 
unzip + apt: + name: unzip + state: present + + - name: Download Consul Template + get_url: + url: "{{ consul_template_download_url }}" + dest: "/tmp/consul-template.zip" + mode: '0644' + + - name: Unzip Consul Template + unarchive: + src: "/tmp/consul-template.zip" + dest: "{{ consul_template_install_dir }}" + remote_src: yes + notify: + - Clean up downloaded zip + + - name: Create Consul Template configuration directory + file: + path: "{{ consul_template_config_dir }}" + state: directory + + - name: Deploy nginx template file + copy: + src: files/nginx.template # This assumes the file is in the 'files' directory of your Ansible project + dest: "/etc/consul-template.d/nginx.template" # Destination on the server + mode: '0644' + + - name: Deploy consul-template run script + copy: + src: run_consultemplate.sh + dest: /usr/local/bin/run_consultemplate.sh + mode: '0755' + + - name: Setup Consul Template SystemD service + template: + src: consul-template.service.j2 + dest: /etc/systemd/system/consul-template.service + + - name: Enable and Start Consul Template service + systemd: + name: consul-template + state: started + enabled: yes + + handlers: + - name: Clean up downloaded zip + file: + path: "/tmp/consul-template.zip" + state: absent + + - name: Reload SystemD + systemd: + daemon_reload: yes + + - name: Restart consul-template service + systemd: + name: consul-template + state: restarted + enabled: yes \ No newline at end of file diff --git a/Ethosight/install/install_docker.yaml b/Ethosight/install/install_docker.yaml new file mode 100644 index 0000000..a1bc332 --- /dev/null +++ b/Ethosight/install/install_docker.yaml @@ -0,0 +1,53 @@ +--- +- name: Install Docker + hosts: all + become: yes # Use sudo to run the commands + tasks: + - name: Remove potentially conflicting Docker packages + apt: + name: + - docker + - docker-engine + - docker.io + - docker-ce + state: absent + force_apt_get: yes + purge: yes + + - name: Install required packages + apt: + name: + - 
apt-transport-https + - ca-certificates + - curl + - software-properties-common + state: present + update_cache: yes + + - name: Add Docker’s official GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + + - name: Set up the stable repository + ansible.builtin.apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename }} stable + state: present + + - name: Install Docker Engine + apt: + name: docker-ce + state: present + update_cache: yes + + - name: Add user to the Docker group + user: + name: "{{ ansible_user_id }}" # this user is your root user as we are using become: yes, + groups: docker # which might be different from your session user, and you will have to add your user to docker group manually + append: yes + + - name: Ensure Docker is started and enabled + systemd: + name: docker + enabled: yes + state: started diff --git a/Ethosight/install/install_ethosight.yaml b/Ethosight/install/install_ethosight.yaml new file mode 100644 index 0000000..a2e5e12 --- /dev/null +++ b/Ethosight/install/install_ethosight.yaml @@ -0,0 +1,16 @@ +--- +- name: Install inotify-tools on Ubuntu Servers + hosts: all + become: yes # Necessary to obtain root privileges for installing packages + + tasks: + - name: Update apt cache + apt: + update_cache: yes + cache_valid_time: 3600 # Cache valid for 1 hour to avoid unnecessary repository updates + + - name: Install inotify-tools + apt: + name: inotify-tools + state: present + diff --git a/Ethosight/install/install_nginx.yaml b/Ethosight/install/install_nginx.yaml new file mode 100644 index 0000000..009da44 --- /dev/null +++ b/Ethosight/install/install_nginx.yaml @@ -0,0 +1,43 @@ +--- +- name: Install and Manage Nginx Script and Configuration as a Service + hosts: all + become: yes # Required for permissions to write to /usr/local/bin and /tmp + + tasks: + - name: Ensure nginx script is present in /usr/local/bin + copy: + src: "{{ 
playbook_dir }}/files/run_nginx.sh" + dest: /usr/local/bin/run_nginx.sh + mode: '0755' + + - name: Ensure the /tmp/nginx directory exists + file: + path: /tmp/nginx + state: directory + mode: '0755' + + - name: Copy nginx configuration file to /tmp/nginx + copy: + src: "{{ playbook_dir }}/files/nginx.conf" + dest: /tmp/nginx/nginx.conf + + - name: Deploy nginx systemd service file + template: + src: nginx.service.j2 + dest: /etc/systemd/system/nginx.service + notify: + - reload systemd + - restart nginx + + handlers: + - name: reload systemd + systemd: + daemon_reload: yes + + - name: restart nginx + systemd: + name: nginx + state: restarted + enabled: yes + + diff --git a/Ethosight/install/inventory.ini b/Ethosight/install/inventory.ini new file mode 100644 index 0000000..68bbc9c --- /dev/null +++ b/Ethosight/install/inventory.ini @@ -0,0 +1,3 @@ +[myhosts] +ansible-client1 +ansible-client2 diff --git a/Ethosight/install/inventory_local.ini b/Ethosight/install/inventory_local.ini new file mode 100644 index 0000000..036aee6 --- /dev/null +++ b/Ethosight/install/inventory_local.ini @@ -0,0 +1,4 @@ +[local] +localhost ansible_connection=local + + diff --git a/Ethosight/install/readme.md b/Ethosight/install/readme.md new file mode 100644 index 0000000..2ce98ef --- /dev/null +++ b/Ethosight/install/readme.md @@ -0,0 +1,38 @@ +# Ansible install +```commandline
python3 -m pip install ansible +``` + +# Run needed script + +```bash +ansible-playbook -i inventory_local.ini <playbook-name.yaml> +``` +> **Note:** +> Depending on your current user privileges, some playbooks must be run with root privileges (sudo). +> If a playbook's *.yaml file contains 'become: yes', it must be executed as the root user. 
+ +## Playbooks that you have to run depending on your state + +```commandline +ansible-playbook -i inventory_local.ini updateandupgrade.yaml +ansible-playbook -i inventory_local.ini install_ethosight.yaml +ansible-playbook -i inventory_local.ini install_docker.yaml +ansible-playbook -i inventory_local.ini install_conda.yaml +ansible-playbook -i inventory_local.ini create_conda_env.yaml +ansible-playbook -i inventory_local.ini install_consul.yaml +ansible-playbook -i inventory_local.ini install_nginx.yaml +ansible-playbook -i inventory_local.ini install_consul_template.yml +ansible-playbook -i inventory_local.ini install_and_run_gpuserver.yaml +ansible-playbook -i inventory_local.ini install_and_run_webserver.yaml +``` + +# Todo + +- [ ] Move all variables into uservars.yml +- [ ] Write master script to run all needed sub playbooks +- [ ] Move all playbooks into subdirectory install/playbooks (master script stays in /install) +- [ ] Create a master script for installing gpu server nodes + + + diff --git a/Ethosight/install/reboot_clients.yaml b/Ethosight/install/reboot_clients.yaml new file mode 100644 index 0000000..af78cdb --- /dev/null +++ b/Ethosight/install/reboot_clients.yaml @@ -0,0 +1,9 @@ +--- +- name: Reboot Client Machines + hosts: all + become: yes + + tasks: + - name: Rebooting the machine + reboot: + diff --git a/Ethosight/install/reload_services.yaml b/Ethosight/install/reload_services.yaml new file mode 100644 index 0000000..436973d --- /dev/null +++ b/Ethosight/install/reload_services.yaml @@ -0,0 +1,16 @@ +--- +- name: Reload and Restart Nginx Service + hosts: all + become: yes # Required for permissions to manage system services + + tasks: + - name: Reload systemd daemon to recognize changes + systemd: + daemon_reload: yes + + - name: Restart nginx service to apply changes + systemd: + name: nginx + state: restarted + enabled: yes + diff --git a/Ethosight/install/scripts/run_gpuserver.sh b/Ethosight/install/scripts/run_gpuserver.sh new file mode 
100644 index 0000000..e69de29 diff --git a/Ethosight/install/templates/consul-template.service.j2 b/Ethosight/install/templates/consul-template.service.j2 new file mode 100644 index 0000000..1f17a42 --- /dev/null +++ b/Ethosight/install/templates/consul-template.service.j2 @@ -0,0 +1,15 @@ +[Unit] +Description=Consul Template for Nginx Configuration +After=network.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/run_consultemplate.sh +Restart=on-failure +RestartSec=10s +User=ubuntu +Group=ubuntu + +[Install] +WantedBy=multi-user.target + diff --git a/Ethosight/install/templates/consul.json.j2 b/Ethosight/install/templates/consul.json.j2 new file mode 100644 index 0000000..0511ad3 --- /dev/null +++ b/Ethosight/install/templates/consul.json.j2 @@ -0,0 +1,11 @@ +{ + "datacenter": "{{ consul_datacenter }}", + "data_dir": "{{ consul_data_dir }}", + "log_level": "{{ consul_log_level }}", + "node_name": "{{ ansible_hostname }}", + "server": {{ consul_server | lower }}, + "bind_addr": "{{ consul_bind_address }}", + "client_addr": "{{ consul_client_address }}", + "bootstrap_expect": {{ consul_bootstrap_expect }}, + "ui": {{ consul_ui_enable | lower }} +} diff --git a/Ethosight/install/templates/consul.service.j2 b/Ethosight/install/templates/consul.service.j2 new file mode 100644 index 0000000..aca664b --- /dev/null +++ b/Ethosight/install/templates/consul.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=Consul Service Discovery Agent +After=network-online.target +Wants=network-online.target + +[Service] +ExecStart=/usr/local/bin/consul agent -config-dir={{ consul_config_dir }} +User=consul +Group=consul +ExecReload=/bin/kill -HUP $MAINPID +KillSignal=SIGTERM +Restart=on-failure +RestartSec=10s + +[Install] +WantedBy=multi-user.target + diff --git a/Ethosight/install/templates/django.service.j2 b/Ethosight/install/templates/django.service.j2 new file mode 100644 index 0000000..70a8c7c --- /dev/null +++ b/Ethosight/install/templates/django.service.j2 @@ -0,0 +1,17 
@@ +[Unit] +Description=Django Development Server Managed by Systemd +#After=network.target +After=network.target postgres.service +Requires=postgres.service + +[Service] +Type=simple +User={{ django_user }} +Group={{ django_group }} +WorkingDirectory={{ django_project_dir }} +ExecStart=/usr/local/bin/django_runserver.sh +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/Ethosight/install/templates/django_runserver.sh.j2 b/Ethosight/install/templates/django_runserver.sh.j2 new file mode 100644 index 0000000..c677ad9 --- /dev/null +++ b/Ethosight/install/templates/django_runserver.sh.j2 @@ -0,0 +1,13 @@ +#!/bin/bash + +# Source Conda environment script to enable 'conda' command +source {{ conda_path }}/etc/profile.d/conda.sh + +# Activate the specific Conda environment +conda activate {{ conda_environment }} + +source {{ environment_vars_file_path }} + +# Starting Django Development Server +echo "Starting Django Development Server on port {{ django_port }}..." 
+python {{ django_project_dir }}/manage.py runserver 0.0.0.0:{{ django_port }} diff --git a/Ethosight/install/templates/environment_vars.sh.j2 b/Ethosight/install/templates/environment_vars.sh.j2 new file mode 100644 index 0000000..cd66b79 --- /dev/null +++ b/Ethosight/install/templates/environment_vars.sh.j2 @@ -0,0 +1,13 @@ +#!/bin/bash +export EthosightBackend="client" +export EthosightBackendURL="{{ EthosightBackendURL }}" +export DjangoEthosightAppBaseDir="{{ DjangoEthosightAppBaseDir }}" +export EthosightYAMLDirectory="{{ EthosightYAMLDirectory }}" +export ETHOSIGHT_APP_BASEDIR="{{ ETHOSIGHT_APP_BASEDIR }}" +export EMAIL_HOST="{{ EMAIL_HOST }}" +export EMAIL_PORT="{{ EMAIL_PORT }}" +export EMAIL_USE_TLS="{{ EMAIL_USE_TLS }}" +export EMAIL_USE_SSL="{{ EMAIL_USE_SSL }}" +export EMAIL_HOST_USER="{{ EMAIL_HOST_USER }}" +export EMAIL_HOST_PASSWORD="{{ EMAIL_HOST_PASSWORD }}" + diff --git a/Ethosight/install/templates/gpuserver.service.j2 b/Ethosight/install/templates/gpuserver.service.j2 new file mode 100644 index 0000000..bd3b011 --- /dev/null +++ b/Ethosight/install/templates/gpuserver.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Ethosight GPU Server Service +After=network.target + +[Service] +User=ubuntu +Group=ubuntu +ExecStart=/opt/scripts/gpuserver_startup.sh +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/Ethosight/install/templates/gpuserver_startup.sh.j2 b/Ethosight/install/templates/gpuserver_startup.sh.j2 new file mode 100644 index 0000000..2c0d1e6 --- /dev/null +++ b/Ethosight/install/templates/gpuserver_startup.sh.j2 @@ -0,0 +1,23 @@ +#!/bin/bash + +source /home/ubuntu/miniconda3/etc/profile.d/conda.sh +conda activate ethosight + +# Function to start the runserver processes +start_runserver() { + {% for gpu in ethosight_gpus %} + runserver.py runserver --gpu {{ gpu }} --port {{ 8000 + gpu }} & + {% endfor %} + wait +} + +# Function to stop the runserver processes +stop_runserver() { + pkill -f "runserver.py 
runserver" +} + +# Trap SIGTERM signal and stop the runserver processes +trap stop_runserver SIGTERM + +# Start the runserver processes +start_runserver diff --git a/Ethosight/install/templates/nginx.service.j2 b/Ethosight/install/templates/nginx.service.j2 new file mode 100644 index 0000000..58338f2 --- /dev/null +++ b/Ethosight/install/templates/nginx.service.j2 @@ -0,0 +1,23 @@ +[Unit] +Description=Nginx Service +# Ensures that the network and Docker are available before starting this service +After=network.target docker.service +Requires=docker.service +Wants=network.target + +[Service] +# Specifies the type of startup and service integration +Type=simple +# Defines under which user the service will run +User=ubuntu +# Command to start your service +ExecStart=/usr/local/bin/run_nginx.sh +# Service restart policy +Restart=always +# Delay before service restart +RestartSec=5s + +[Install] +# Specifies when this service should be automatically started +WantedBy=multi-user.target + diff --git a/Ethosight/install/templates/postgres.service.j2 b/Ethosight/install/templates/postgres.service.j2 new file mode 100644 index 0000000..119618f --- /dev/null +++ b/Ethosight/install/templates/postgres.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=PostgreSQL Docker Container Service +After=docker.service +Requires=docker.service + +[Service] +Type=simple +ExecStart=/usr/local/bin/postgres_run.sh +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target diff --git a/Ethosight/install/templates/postgres_run.sh.j2 b/Ethosight/install/templates/postgres_run.sh.j2 new file mode 100644 index 0000000..dfbd0fb --- /dev/null +++ b/Ethosight/install/templates/postgres_run.sh.j2 @@ -0,0 +1,22 @@ +#!/bin/bash + +# Set the desired database name +DB_NAME="{{ postgres_db }}" + +# Check if the PostgreSQL container exists and start it if it does +if [ $(docker ps -a -f name=djangopostgres | grep -w djangopostgres | wc -l) -eq 1 ]; then + docker start djangopostgres +else + # 
Run the PostgreSQL container if it does not exist + docker run --name djangopostgres \ + -e POSTGRES_PASSWORD={{ postgres_password }} \ + -e POSTGRES_USER={{ postgres_user }} \ + -e POSTGRES_DB=$DB_NAME \ + -p {{ postgres_port }}:5432 \ + -d postgres +fi + +# Wait for the container to keep running +while docker ps -f name=djangopostgres | grep -wq djangopostgres; do + sleep 10 +done \ No newline at end of file diff --git a/Ethosight/install/updateandupgrade.yaml b/Ethosight/install/updateandupgrade.yaml new file mode 100644 index 0000000..13711f8 --- /dev/null +++ b/Ethosight/install/updateandupgrade.yaml @@ -0,0 +1,14 @@ +--- +- name: Update and Upgrade All Packages + hosts: myhosts + become: yes # Run tasks with sudo + tasks: + - name: Update apt cache + apt: + update_cache: yes + cache_valid_time: 3600 # Optional: Avoid updating the cache if it has been updated in the last hour + + - name: Upgrade all packages + apt: + upgrade: dist + diff --git a/Ethosight/install/uservars.yml b/Ethosight/install/uservars.yml new file mode 100644 index 0000000..3574fde --- /dev/null +++ b/Ethosight/install/uservars.yml @@ -0,0 +1,16 @@ +--- +# General application settings +EthosightBackend: client +EthosightBackendURL: "http://localhost:80" +DjangoEthosightAppBaseDir: "/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir" +EthosightYAMLDirectory: "/home/ubuntu/lxdshared/DeepVision/Ethosight/configs" +ETHOSIGHT_APP_BASEDIR: "/home/ubuntu/lxdshared/DeepVision/Ethosight/website/EthosightAppBasedir" + +# Email settings for sending access codes +# Use your own settings +EMAIL_HOST: "" +EMAIL_PORT: 0 +EMAIL_USE_TLS: true +EMAIL_USE_SSL: false +EMAIL_HOST_USER: "" +EMAIL_HOST_PASSWORD: "" diff --git a/Ethosight/models/__init__.py b/Ethosight/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Ethosight/models/helpers.py b/Ethosight/models/helpers.py new file mode 100644 index 0000000..71abe9b --- /dev/null +++ b/Ethosight/models/helpers.py @@ 
-0,0 +1,140 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import einops +import numpy as np +import torch +import torch.nn as nn + + +class Normalize(nn.Module): + def __init__(self, dim: int) -> None: + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.nn.functional.normalize(x, dim=self.dim, p=2) + + +class LearnableLogitScaling(nn.Module): + def __init__( + self, + logit_scale_init: float = 1 / 0.07, + learnable: bool = True, + max_logit_scale: float = 100, + ) -> None: + super().__init__() + self.max_logit_scale = max_logit_scale + self.logit_scale_init = logit_scale_init + self.learnable = learnable + log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) + if learnable: + self.log_logit_scale = nn.Parameter(log_logit_scale) + else: + self.register_buffer("log_logit_scale", log_logit_scale) + + def forward(self, x): + return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x + + def extra_repr(self): + st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ + f" max_logit_scale={self.max_logit_scale}" + return st + + +class EinOpsRearrange(nn.Module): + def __init__(self, rearrange_expr: str, **kwargs) -> None: + super().__init__() + self.rearrange_expr = rearrange_expr + self.kwargs = kwargs + + def forward(self, x): + assert isinstance(x, torch.Tensor) + return einops.rearrange(x, self.rearrange_expr, **self.kwargs) + + +class VerboseNNModule(nn.Module): + """ + Wrapper around nn.Module that prints registered buffers and parameter names. 
+ """ + + @staticmethod + def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: + st = ( + "(" + + name + + "): " + + "tensor(" + + str(tuple(tensor[1].shape)) + + ", requires_grad=" + + str(tensor[1].requires_grad) + + ")\n" + ) + return st + + def extra_repr(self) -> str: + named_modules = set() + for p in self.named_modules(): + named_modules.update([p[0]]) + named_modules = list(named_modules) + + string_repr = "" + for p in self.named_parameters(): + name = p[0].split(".")[0] + if name not in named_modules: + string_repr += self.get_readable_tensor_repr(name, p) + + for p in self.named_buffers(): + name = p[0].split(".")[0] + string_repr += self.get_readable_tensor_repr(name, p) + + return string_repr + + +def cast_if_src_dtype( + tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype +): + updated = False + if tensor.dtype == src_dtype: + tensor = tensor.to(dtype=tgt_dtype) + updated = True + return tensor, updated + + +class QuickGELU(nn.Module): + # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class SelectElement(nn.Module): + def __init__(self, index) -> None: + super().__init__() + self.index = index + + def forward(self, x): + assert x.ndim >= 3 + return x[:, self.index, ...] 
+ + +class SelectEOSAndProject(nn.Module): + """ + Text Pooling used in OpenCLIP + """ + + def __init__(self, proj: nn.Module) -> None: + super().__init__() + self.proj = proj + + def forward(self, x, seq_len): + assert x.ndim == 3 + # x is of shape B x L x D + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), seq_len] + x = self.proj(x) + return x diff --git a/Ethosight/models/imagebind_model.py b/Ethosight/models/imagebind_model.py new file mode 100644 index 0000000..d12bc57 --- /dev/null +++ b/Ethosight/models/imagebind_model.py @@ -0,0 +1,509 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import os +from functools import partial +from types import SimpleNamespace + +import torch +import torch.nn as nn + +from models.helpers import (EinOpsRearrange, LearnableLogitScaling, Normalize, + SelectElement, SelectEOSAndProject) +from models.multimodal_preprocessors import (AudioPreprocessor, + IMUPreprocessor, PadIm2Video, + PatchEmbedGeneric, + RGBDTPreprocessor, + SpatioTemporalPosEmbeddingHelper, + TextPreprocessor, + ThermalPreprocessor) +from models.transformer import MultiheadAttention, SimpleTransformer + +ModalityType = SimpleNamespace( + VISION="vision", + TEXT="text", + AUDIO="audio", + THERMAL="thermal", + DEPTH="depth", + IMU="imu", +) + + +class ImageBindModel(nn.Module): + def __init__( + self, + video_frames=2, + kernel_size=(2, 14, 14), + audio_kernel_size=16, + audio_stride=10, + out_embed_dim=768, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_num_mel_bins=128, + audio_target_len=204, + audio_drop_path=0.1, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + 
depth_embed_dim=384, + depth_kernel_size=16, + depth_num_blocks=12, + depth_num_heads=8, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_kernel_size=16, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_kernel_size=8, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + super().__init__() + + self.modality_preprocessors = self._create_modality_preprocessors( + video_frames, + vision_embed_dim, + kernel_size, + text_embed_dim, + audio_embed_dim, + audio_kernel_size, + audio_stride, + audio_num_mel_bins, + audio_target_len, + depth_embed_dim, + depth_kernel_size, + thermal_embed_dim, + thermal_kernel_size, + imu_embed_dim, + ) + + self.modality_trunks = self._create_modality_trunks( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + text_embed_dim, + text_num_blocks, + text_num_heads, + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + audio_drop_path, + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + depth_drop_path, + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + thermal_drop_path, + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + imu_drop_path, + ) + + self.modality_heads = self._create_modality_heads( + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ) + + self.modality_postprocessors = self._create_modality_postprocessors( + out_embed_dim + ) + + def _create_modality_preprocessors( + self, + video_frames=2, + vision_embed_dim=1024, + kernel_size=(2, 14, 14), + text_embed_dim=768, + audio_embed_dim=768, + audio_kernel_size=16, + audio_stride=10, + audio_num_mel_bins=128, + audio_target_len=204, + depth_embed_dim=768, + depth_kernel_size=16, + thermal_embed_dim=768, + thermal_kernel_size=16, + imu_embed_dim=512, + ): + rgbt_stem = PatchEmbedGeneric( + proj_stem=[ + PadIm2Video(pad_type="repeat", ntimes=2), + nn.Conv3d( + in_channels=3, + 
kernel_size=kernel_size, + out_channels=vision_embed_dim, + stride=kernel_size, + bias=False, + ), + ] + ) + rgbt_preprocessor = RGBDTPreprocessor( + img_size=[3, video_frames, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=rgbt_stem, + depth_stem=None, + ) + + text_preprocessor = TextPreprocessor( + context_length=77, + vocab_size=49408, + embed_dim=text_embed_dim, + causal_masking=True, + ) + + audio_stem = PatchEmbedGeneric( + proj_stem=[ + nn.Conv2d( + in_channels=1, + kernel_size=audio_kernel_size, + stride=audio_stride, + out_channels=audio_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), + ) + audio_preprocessor = AudioPreprocessor( + img_size=[1, audio_num_mel_bins, audio_target_len], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + audio_stem=audio_stem, + ) + + depth_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=depth_kernel_size, + in_channels=1, + out_channels=depth_embed_dim, + stride=depth_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), + ) + + depth_preprocessor = RGBDTPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=None, + depth_stem=depth_stem, + ) + + thermal_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=thermal_kernel_size, + in_channels=1, + out_channels=thermal_embed_dim, + stride=thermal_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), + ) + thermal_preprocessor = ThermalPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + thermal_stem=thermal_stem, + ) + + imu_stem = PatchEmbedGeneric( + [ + nn.Linear( + in_features=48, + out_features=imu_embed_dim, + bias=False, + ), + ], + 
norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), + ) + + imu_preprocessor = IMUPreprocessor( + img_size=[6, 2000], + num_cls_tokens=1, + kernel_size=8, + embed_dim=imu_embed_dim, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + imu_stem=imu_stem, + ) + + modality_preprocessors = { + ModalityType.VISION: rgbt_preprocessor, + ModalityType.TEXT: text_preprocessor, + ModalityType.AUDIO: audio_preprocessor, + ModalityType.DEPTH: depth_preprocessor, + ModalityType.THERMAL: thermal_preprocessor, + ModalityType.IMU: imu_preprocessor, + } + + return nn.ModuleDict(modality_preprocessors) + + def _create_modality_trunks( + self, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_drop_path=0.0, + depth_embed_dim=768, + depth_num_blocks=12, + depth_num_heads=12, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + def instantiate_trunk( + embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path + ): + return SimpleTransformer( + embed_dim=embed_dim, + num_blocks=num_blocks, + ffn_dropout_rate=0.0, + drop_path_rate=drop_path, + attn_target=partial( + MultiheadAttention, + embed_dim=embed_dim, + num_heads=num_heads, + bias=True, + add_bias_kv=add_bias_kv, + ), + pre_transformer_layer=nn.Sequential( + nn.LayerNorm(embed_dim, eps=1e-6) + if pre_transformer_ln + else nn.Identity(), + EinOpsRearrange("b l d -> l b d"), + ), + post_transformer_layer=EinOpsRearrange("l b d -> b l d"), + ) + + modality_trunks = {} + modality_trunks[ModalityType.VISION] = instantiate_trunk( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + pre_transformer_ln=True, + add_bias_kv=False, + drop_path=0.0, + ) + 
modality_trunks[ModalityType.TEXT] = instantiate_trunk( + text_embed_dim, + text_num_blocks, + text_num_heads, + pre_transformer_ln=False, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.AUDIO] = instantiate_trunk( + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=audio_drop_path, + ) + modality_trunks[ModalityType.DEPTH] = instantiate_trunk( + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=depth_drop_path, + ) + modality_trunks[ModalityType.THERMAL] = instantiate_trunk( + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=thermal_drop_path, + ) + modality_trunks[ModalityType.IMU] = instantiate_trunk( + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=imu_drop_path, + ) + + return nn.ModuleDict(modality_trunks) + + def _create_modality_heads( + self, + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ): + modality_heads = {} + + modality_heads[ModalityType.VISION] = nn.Sequential( + nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(vision_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.TEXT] = SelectEOSAndProject( + proj=nn.Sequential( + nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), + nn.Linear(text_embed_dim, out_embed_dim, bias=False), + ) + ) + + modality_heads[ModalityType.AUDIO] = nn.Sequential( + nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(audio_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.DEPTH] = nn.Sequential( + nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(depth_embed_dim, 
out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.THERMAL] = nn.Sequential( + nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.IMU] = nn.Sequential( + nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Dropout(p=0.5), + nn.Linear(imu_embed_dim, out_embed_dim, bias=False), + ) + + return nn.ModuleDict(modality_heads) + + def _create_modality_postprocessors(self, out_embed_dim): + modality_postprocessors = {} + + modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) + modality_postprocessors[ModalityType.TEXT] = nn.Sequential( + Normalize(dim=-1), LearnableLogitScaling(learnable=True) + ) + modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=20.0, learnable=False), + ) + modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=10.0, learnable=False), + ) + modality_postprocessors[ModalityType.IMU] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + + return nn.ModuleDict(modality_postprocessors) + + def forward(self, inputs): + outputs = {} + for modality_key, modality_value in inputs.items(): + reduce_list = ( + modality_value.ndim >= 5 + ) # Audio and Video inputs consist of multiple clips + if reduce_list: + B, S = modality_value.shape[:2] + modality_value = modality_value.reshape( + B * S, *modality_value.shape[2:] + ) + + if modality_value is not None: + modality_value = self.modality_preprocessors[modality_key]( + **{modality_key: modality_value} + ) + trunk_inputs = modality_value["trunk"] + head_inputs = modality_value["head"] + 
modality_value = self.modality_trunks[modality_key](**trunk_inputs) + modality_value = self.modality_heads[modality_key]( + modality_value, **head_inputs + ) + modality_value = self.modality_postprocessors[modality_key]( + modality_value + ) + + if reduce_list: + modality_value = modality_value.reshape(B, S, -1) + modality_value = modality_value.mean(dim=1) + + outputs[modality_key] = modality_value + + return outputs + + +def imagebind_huge(pretrained=False, imagebind_dir="./"): + model = ImageBindModel( + vision_embed_dim=1280, + vision_num_blocks=32, + vision_num_heads=16, + text_embed_dim=1024, + text_num_blocks=24, + text_num_heads=16, + out_embed_dim=1024, + audio_drop_path=0.1, + imu_drop_path=0.7, + ) + + model_dir = os.path.join(imagebind_dir, ".checkpoints") + model_filename = os.path.join(model_dir, "imagebind_huge.pth") + if pretrained: + if not os.path.exists(model_filename): + print( + "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..." + ) + + os.makedirs(model_dir, exist_ok=True) + torch.hub.download_url_to_file( + "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", + model_filename, + progress=True, + ) + + model.load_state_dict(torch.load(model_filename)) + + return model diff --git a/Ethosight/models/multimodal_preprocessors.py b/Ethosight/models/multimodal_preprocessors.py new file mode 100644 index 0000000..031dc1e --- /dev/null +++ b/Ethosight/models/multimodal_preprocessors.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import gzip +import html +import io +import math +from functools import lru_cache +from typing import Callable, List, Optional, Tuple + +import ftfy +import numpy as np +import regex as re +import torch +import torch.nn as nn +from iopath.common.file_io import g_pathmgr +from timm.models.layers import trunc_normal_ + +from models.helpers import VerboseNNModule, cast_if_src_dtype + + +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): + N = pos_embed.shape[1] + if N == target_spatial_size: + return pos_embed + dim = pos_embed.shape[-1] + # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 + pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) + pos_embed = nn.functional.interpolate( + pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( + 0, 3, 1, 2 + ), + scale_factor=math.sqrt(target_spatial_size / N), + mode="bicubic", + ) + if updated: + pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) + pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return pos_embed + + +def interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=None, + first_patch_idx=1, +): + assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" + N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists + if npatch_per_img == N: 
+ return pos_embed + + assert ( + patches_layout[-1] == patches_layout[-2] + ), "Interpolation of pos embed not supported for non-square layouts" + + class_emb = pos_embed[:, :first_patch_idx] + pos_embed = pos_embed[:, first_patch_idx:] + + if input_shape is None or patches_layout[0] == 1: + # simple 2D pos embedding, no temporal component + pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) + elif patches_layout[0] > 1: + # pos embed has a temporal component + assert len(input_shape) == 4, "temporal interpolation not supported" + # we only support 2D interpolation in this case + num_frames = patches_layout[0] + num_spatial_tokens = patches_layout[1] * patches_layout[2] + pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) + # interpolate embedding for zeroth frame + pos_embed = interpolate_pos_encoding_2d( + npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) + ) + else: + raise ValueError("This type of interpolation isn't implemented") + + return torch.cat((class_emb, pos_embed), dim=1) + + +def _get_pos_embedding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape, + first_patch_idx=1, +): + pos_embed = interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=input_shape, + first_patch_idx=first_patch_idx, + ) + return pos_embed + + +class PatchEmbedGeneric(nn.Module): + """ + PatchEmbed from Hydra + """ + + def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): + super().__init__() + + if len(proj_stem) > 1: + self.proj = nn.Sequential(*proj_stem) + else: + # Special case to be able to load pre-trained models that were + # trained with a standard stem + self.proj = proj_stem[0] + self.norm_layer = norm_layer + + def get_patch_layout(self, img_size): + with torch.no_grad(): + dummy_img = torch.zeros( + [ + 1, + ] + + img_size + ) + dummy_out = self.proj(dummy_img) + embed_dim = dummy_out.shape[1] + patches_layout = tuple(dummy_out.shape[2:]) + num_patches = 
np.prod(patches_layout) + return patches_layout, num_patches, embed_dim + + def forward(self, x): + x = self.proj(x) + # B C (T) H W -> B (T)HW C + x = x.flatten(2).transpose(1, 2) + if self.norm_layer is not None: + x = self.norm_layer(x) + return x + + +class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): + def __init__( + self, + patches_layout: List, + num_patches: int, + num_cls_tokens: int, + embed_dim: int, + learnable: bool, + ) -> None: + super().__init__() + self.num_cls_tokens = num_cls_tokens + self.patches_layout = patches_layout + self.num_patches = num_patches + self.num_tokens = num_cls_tokens + num_patches + self.learnable = learnable + if self.learnable: + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) + trunc_normal_(self.pos_embed, std=0.02) + else: + self.register_buffer( + "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) + ) + + def get_pos_embedding(self, vision_input, all_vision_tokens): + input_shape = vision_input.shape + pos_embed = _get_pos_embedding( + all_vision_tokens.size(1) - self.num_cls_tokens, + pos_embed=self.pos_embed, + patches_layout=self.patches_layout, + input_shape=input_shape, + first_patch_idx=self.num_cls_tokens, + ) + return pos_embed + + +class RGBDTPreprocessor(VerboseNNModule): + def __init__( + self, + rgbt_stem: PatchEmbedGeneric, + depth_stem: Optional[PatchEmbedGeneric], + img_size: Tuple = (3, 224, 224), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + use_type_embed: bool = False, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + stem = rgbt_stem if rgbt_stem is not None else depth_stem + ( + self.patches_layout, + self.num_patches, + self.embed_dim, + ) = stem.get_patch_layout(img_size) + self.rgbt_stem = rgbt_stem + self.depth_stem = depth_stem + self.use_pos_embed = pos_embed_fn is not None + self.use_type_embed = use_type_embed + self.num_cls_tokens = num_cls_tokens + + if self.use_pos_embed: + 
self.pos_embedding_helper = pos_embed_fn( + patches_layout=self.patches_layout, + num_cls_tokens=num_cls_tokens, + num_patches=self.num_patches, + embed_dim=self.embed_dim, + ) + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + if self.use_type_embed: + self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.use_pos_embed: + nn.init.normal_(self.pos_embedding_helper.pos_embed) + self.pos_embedding_helper.pos_embed *= scale + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + if self.use_type_embed: + nn.init.normal_(self.type_embed) + + def tokenize_input_and_cls_pos(self, input, stem, mask): + # tokens is of shape B x L x D + tokens = stem(input) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) + tokens = tokens + pos_embed + if self.use_type_embed: + tokens = tokens + self.type_embed.expand(B, -1, -1) + return tokens + + def forward(self, vision=None, depth=None, patch_mask=None): + if patch_mask is not None: + raise NotImplementedError() + + if vision is not None: + vision_tokens = self.tokenize_input_and_cls_pos( + vision, self.rgbt_stem, patch_mask + ) + + if depth is not None: + depth_tokens = self.tokenize_input_and_cls_pos( + depth, self.depth_stem, patch_mask + ) + + # 
aggregate tokens + if vision is not None and depth is not None: + final_tokens = vision_tokens + depth_tokens + else: + final_tokens = vision_tokens if vision is not None else depth_tokens + return_dict = { + "trunk": { + "tokens": final_tokens, + }, + "head": {}, + } + return return_dict + + +class AudioPreprocessor(RGBDTPreprocessor): + def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) + + def forward(self, audio=None): + return super().forward(vision=audio) + + +class ThermalPreprocessor(RGBDTPreprocessor): + def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) + + def forward(self, thermal=None): + return super().forward(vision=thermal) + + +def build_causal_attention_mask(context_length): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(context_length, context_length, requires_grad=False) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + +class TextPreprocessor(VerboseNNModule): + def __init__( + self, + vocab_size: int, + context_length: int, + embed_dim: int, + causal_masking: bool, + supply_seq_len_to_head: bool = True, + num_cls_tokens: int = 0, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.vocab_size = vocab_size + self.context_length = context_length + self.token_embedding = nn.Embedding(vocab_size, embed_dim) + self.pos_embed = nn.Parameter( + torch.empty(1, self.context_length + num_cls_tokens, embed_dim) + ) + self.causal_masking = causal_masking + if self.causal_masking: + mask = build_causal_attention_mask(self.context_length) + # register the mask as a buffer so it can be moved to the right device + self.register_buffer("mask", mask) + + self.supply_seq_len_to_head = supply_seq_len_to_head 
+ self.num_cls_tokens = num_cls_tokens + self.embed_dim = embed_dim + if num_cls_tokens > 0: + assert self.causal_masking is False, "Masking + CLS token isn't implemented" + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style="openclip"): + # OpenCLIP style initialization + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def forward(self, text): + # text tokens are of shape B x L x D + text_tokens = self.token_embedding(text) + # concat CLS tokens if any + if self.num_cls_tokens > 0: + B = text_tokens.shape[0] + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + text_tokens = torch.cat((class_tokens, text_tokens), dim=1) + text_tokens = text_tokens + self.pos_embed + return_dict = { + "trunk": { + "tokens": text_tokens, + }, + "head": {}, + } + # Compute sequence length after adding CLS tokens + if self.supply_seq_len_to_head: + text_lengths = text.argmax(dim=-1) + return_dict["head"] = { + "seq_len": text_lengths, + } + if self.causal_masking: + return_dict["trunk"].update({"attn_mask": self.mask}) + return return_dict + + +class Im2Video(nn.Module): + """Convert an image into a trivial video.""" + + def __init__(self, time_dim=2): + super().__init__() + self.time_dim = time_dim + + def forward(self, x): + if x.ndim == 4: + # B, C, H, W -> B, C, T, H, W + return x.unsqueeze(self.time_dim) + elif x.ndim == 5: + return x + else: + raise ValueError(f"Dimension incorrect {x.shape}") + + +class 
PadIm2Video(Im2Video): + def __init__(self, ntimes, pad_type, time_dim=2): + super().__init__(time_dim=time_dim) + assert ntimes > 0 + assert pad_type in ["zero", "repeat"] + self.ntimes = ntimes + self.pad_type = pad_type + + def forward(self, x): + x = super().forward(x) + if x.shape[self.time_dim] == 1: + if self.pad_type == "repeat": + new_shape = [1] * len(x.shape) + new_shape[self.time_dim] = self.ntimes + x = x.repeat(new_shape) + elif self.pad_type == "zero": + padarg = [0, 0] * len(x.shape) + padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] + x = nn.functional.pad(x, padarg) + return x + + +# Modified from github.com/openai/CLIP +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str, context_length=77): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + + with g_pathmgr.open(bpe_path, "rb") as fh: + bpe_bytes = io.BytesIO(fh.read()) + merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") + merges = merges[1 : 49152 - 256 - 2 + 1] + merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v + "" for v in vocab] + for merge in merges: + vocab.append("".join(merge)) + vocab.extend(["<|startoftext|>", "<|endoftext|>"]) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = { + "<|startoftext|>": "<|startoftext|>", + "<|endoftext|>": "<|endoftext|>", + } + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE, + ) + self.context_length = context_length + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + "",) + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + 
break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) + bpe_tokens.extend( + self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") + ) + return bpe_tokens + + def decode(self, tokens): + text = "".join([self.decoder[token] for token in tokens]) + text = ( + bytearray([self.byte_decoder[c] for c in text]) + .decode("utf-8", errors="replace") + .replace("", " ") + ) + return text + + def __call__(self, texts, context_length=None): + if not context_length: + context_length = self.context_length + + if isinstance(texts, str): + texts = [texts] + + sot_token = self.encoder["<|startoftext|>"] + eot_token = self.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + tokens = tokens[:context_length] + result[i, : len(tokens)] = torch.tensor(tokens) + + if len(result) == 1: + return result[0] + return result + + +class IMUPreprocessor(VerboseNNModule): + def __init__( + self, + kernel_size: int, + imu_stem: PatchEmbedGeneric, + embed_dim: int, + img_size: Tuple = (6, 2000), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.imu_stem = imu_stem + self.embed_dim = embed_dim + self.use_pos_embed = pos_embed_fn is not None + self.num_cls_tokens = num_cls_tokens + self.kernel_size = kernel_size + self.pos_embed 
= nn.Parameter( + torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) + ) + + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def tokenize_input_and_cls_pos(self, input, stem): + # tokens is of shape B x L x D + tokens = stem.norm_layer(stem.proj(input)) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + tokens = tokens + self.pos_embed + return tokens + + def forward(self, imu): + # Patchify + imu = imu.unfold( + -1, + self.kernel_size, + self.kernel_size, + ).permute(0, 2, 1, 3) + imu = imu.reshape(imu.size(0), imu.size(1), -1) + + imu_tokens = self.tokenize_input_and_cls_pos( + imu, + self.imu_stem, + ) + + return_dict = { + "trunk": { + "tokens": imu_tokens, + }, + "head": {}, + } + return return_dict diff --git a/Ethosight/models/transformer.py b/Ethosight/models/transformer.py new file mode 100644 index 0000000..6224faf --- /dev/null +++ b/Ethosight/models/transformer.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Code modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; +# https://github.com/facebookresearch/deit/blob/main/models.py +# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py + + +from functools import partial +from typing import Callable, List, Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, trunc_normal_ + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, + # can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = 
self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class MultiheadAttention(nn.MultiheadAttention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] + + +class ViTAttention(Attention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + assert attn_mask is None + return super().forward(x) + + +class BlockWithMasking(nn.Module): + def __init__( + self, + dim: int, + attn_target: Callable, + mlp_ratio: int = 4, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ffn_dropout_rate: float = 0.0, + drop_path: float = 0.0, + layer_scale_type: Optional[str] = None, + layer_scale_init_value: float = 1e-4, + ): + super().__init__() + + assert not isinstance( + attn_target, nn.Module + ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" + self.attn = attn_target() + if drop_path > 0.0: + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm_1 = norm_layer(dim) + mlp_hidden_dim = int(mlp_ratio * dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=ffn_dropout_rate, + ) + self.norm_2 = norm_layer(dim) + self.layer_scale_type = layer_scale_type + if self.layer_scale_type is not None: + assert self.layer_scale_type in [ + "per_channel", + "scalar", + ], f"Found Layer scale type {self.layer_scale_type}" + if self.layer_scale_type == "per_channel": + # one gamma value per channel + gamma_shape = [1, 1, dim] + elif self.layer_scale_type == "scalar": + # single gamma value for all channels + gamma_shape = [1, 1, 1] + # two gammas: for each part of the fwd in the encoder + self.layer_scale_gamma1 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + self.layer_scale_gamma2 = nn.Parameter( + torch.ones(size=gamma_shape) * 
layer_scale_init_value, + requires_grad=True, + ) + + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + if self.layer_scale_type is None: + x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + x = x + self.drop_path(self.mlp(self.norm_2(x))) + else: + x = ( + x + + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + * self.layer_scale_gamma1 + ) + x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2 + return x + + +_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) + + +class SimpleTransformer(nn.Module): + def __init__( + self, + attn_target: Callable, + embed_dim: int, + num_blocks: int, + block: Callable = BlockWithMasking, + pre_transformer_layer: Optional[Callable] = None, + post_transformer_layer: Optional[Callable] = None, + drop_path_rate: float = 0.0, + drop_path_type: str = "progressive", + norm_layer: Callable = _LAYER_NORM, + mlp_ratio: int = 4, + ffn_dropout_rate: float = 0.0, + layer_scale_type: Optional[str] = None, # from cait; possible values are None, "per_channel", "scalar" + layer_scale_init_value: float = 1e-4, # from cait; float + weight_init_style: str = "jax", # possible values jax or pytorch + ): + """ + Simple Transformer with the following features + 1. Supports masked attention + 2. Supports DropPath + 3. Supports LayerScale + 4. Supports Dropout in Attention and FFN + 5. 
Makes few assumptions about the input except that it is a Tensor + """ + super().__init__() + self.pre_transformer_layer = pre_transformer_layer + if drop_path_type == "progressive": + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] + elif drop_path_type == "uniform": + dpr = [drop_path_rate for i in range(num_blocks)] + else: + raise ValueError(f"Unknown drop_path_type: {drop_path_type}") + + self.blocks = nn.Sequential( + *[ + block( + dim=embed_dim, + attn_target=attn_target, + mlp_ratio=mlp_ratio, + ffn_dropout_rate=ffn_dropout_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + layer_scale_type=layer_scale_type, + layer_scale_init_value=layer_scale_init_value, + ) + for i in range(num_blocks) + ] + ) + self.post_transformer_layer = post_transformer_layer + self.weight_init_style = weight_init_style + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + if self.weight_init_style == "jax": + # Based on MAE and official Jax ViT implementation + torch.nn.init.xavier_uniform_(m.weight) + elif self.weight_init_style == "pytorch": + # PyTorch ViT uses trunc_normal_ + trunc_normal_(m.weight, std=0.02) + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward( + self, + tokens: torch.Tensor, + attn_mask: torch.Tensor = None, + use_checkpoint: bool = False, + checkpoint_every_n: int = 1, + checkpoint_blk_ids: Optional[List[int]] = None, + ): + """ + Inputs + - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) + - attn: mask of shape L x L + + Output + - x: data of shape N x L x D (or L x N x D depending on the attention implementation) + """ + if self.pre_transformer_layer: + tokens = self.pre_transformer_layer(tokens) + if use_checkpoint and checkpoint_blk_ids is None: + checkpoint_blk_ids = [ + blk_id + for blk_id in range(len(self.blocks)) + if 
blk_id % checkpoint_every_n == 0 + ] + if checkpoint_blk_ids: + checkpoint_blk_ids = set(checkpoint_blk_ids) + for blk_id, blk in enumerate(self.blocks): + if use_checkpoint and blk_id in checkpoint_blk_ids: + tokens = checkpoint.checkpoint( + blk, tokens, attn_mask, use_reentrant=False + ) + else: + tokens = blk(tokens, attn_mask=attn_mask) + if self.post_transformer_layer: + tokens = self.post_transformer_layer(tokens) + return tokens diff --git a/Ethosight/setup.py b/Ethosight/setup.py index cdf4219..a5ff785 100644 --- a/Ethosight/setup.py +++ b/Ethosight/setup.py @@ -1,3 +1,19 @@ +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + from setuptools import setup, find_packages setup( @@ -18,6 +34,7 @@ 'EthosightAppCLI = Ethosight.EthosightAppCLI:cli', ], }, + scripts=['bin/runserver.py'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',], diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml b/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml deleted file mode 100644 index 573f5ab..0000000 --- a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml.bak b/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml.bak deleted file mode 100644 index 573f5ab..0000000 --- a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp.yaml.bak +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp_template.yaml b/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp_template.yaml deleted file mode 100644 index 573f5ab..0000000 --- a/Ethosight/website/EthosightAppBasedir/baseline01/ethosightapp_template.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/baseline.embeddings b/Ethosight/website/EthosightAppBasedir/booboo1/baseline.embeddings deleted file mode 100644 index 6dcc4df..0000000 Binary files a/Ethosight/website/EthosightAppBasedir/booboo1/baseline.embeddings and /dev/null differ diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/baseline.labels b/Ethosight/website/EthosightAppBasedir/booboo1/baseline.labels deleted file mode 100644 index babffc4..0000000 --- a/Ethosight/website/EthosightAppBasedir/booboo1/baseline.labels +++ /dev/null @@ -1,53 +0,0 @@ -Indoor -Outdoor -Urban -Rural -Residential -Commercial -Industrial -Natural -Fire -Flood -Accident -Dangerous Material -Unsafe Conditions -Hazardous Environment -Poor Lighting -Obstructed Exit -Overcrowding -Eating -Drinking -Sleeping -Working -Driving -Running -Walking -Shopping -Dancing -Reading -Cooking -Stealing -Vandalism -Assault -Burglary -Trespassing -Fraud -masked-man -break-in -burgler -thief -criminal -crowbar -suspicious-person -breaking-and-entering -home-invasion -burglary -criminal-intent -dangerous-person -window-damage -night-criminal -intent-to-conceal-face -friendly-person -suspicious-person-vandal-thief-burgler-criminal -burglary-break-in -burglary-break-in-night-criminal diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml b/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml deleted file mode 100644 index ff0f65c..0000000 --- 
a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: false - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: false - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: false - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml.bak b/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml.bak deleted file mode 100644 index 573f5ab..0000000 --- a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp.yaml.bak +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp_template.yaml b/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp_template.yaml deleted file mode 100644 index 573f5ab..0000000 --- a/Ethosight/website/EthosightAppBasedir/booboo1/ethosightapp_template.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.embeddings b/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.embeddings deleted file mode 100644 index f8e8fc1..0000000 Binary files a/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.embeddings and /dev/null differ diff --git a/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.labels b/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.labels deleted file mode 100644 index 88e9582..0000000 --- a/Ethosight/website/EthosightAppBasedir/booboo1/ground_truth.labels +++ /dev/null @@ -1,2 +0,0 @@ -normal event -robbery diff --git a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml.bak b/Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml.bak deleted file mode 100644 index f3011d8..0000000 --- a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp.yaml.bak +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: general.embeddings -labels_path: general.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - 
max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? 
- # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. 
- -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. 
- -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. 
- -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. 
Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. - verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp_template.yaml b/Ethosight/website/EthosightAppBasedir/general01/ethosightapp_template.yaml deleted file mode 100644 index f3011d8..0000000 --- a/Ethosight/website/EthosightAppBasedir/general01/ethosightapp_template.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: general.embeddings -labels_path: general.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: true - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: true - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - 
normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: true - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/general01/ground_truth.embeddings b/Ethosight/website/EthosightAppBasedir/general01/ground_truth.embeddings deleted file mode 100644 index f8e8fc1..0000000 Binary files a/Ethosight/website/EthosightAppBasedir/general01/ground_truth.embeddings and /dev/null differ diff --git a/Ethosight/website/EthosightAppBasedir/general01/ground_truth.labels b/Ethosight/website/EthosightAppBasedir/general01/ground_truth.labels deleted file mode 100644 index d64bf23..0000000 --- a/Ethosight/website/EthosightAppBasedir/general01/ground_truth.labels +++ /dev/null @@ -1,2 +0,0 @@ -robbery -normal event diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.embeddings b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.embeddings deleted file mode 100644 index f67f712..0000000 Binary files a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.embeddings and /dev/null differ diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.labels b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.labels deleted file mode 100644 index 7c80be3..0000000 --- a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/baseline.labels +++ /dev/null @@ -1,156 +0,0 @@ -Indoor -Outdoor -Urban -Rural -Residential -Commercial -Industrial -Natural -Fire -Flood -Accident -Dangerous Material -Unsafe Conditions -Hazardous Environment -Poor Lighting 
-Obstructed Exit -Overcrowding -Eating -Drinking -Sleeping -Working -Driving -Running -Walking -Shopping -Dancing -Reading -Cooking -Stealing -Vandalism -Assault -Burglary -Trespassing -Fraud -baby -danger -electrocution-risk -call-for-help -childabuse -baby-needs-help -emergency -baby-emergency -safe-situation -normal-situation -emergency-situation -safe-scene -normal-scene -emergency-scene -safe -normal -normal+scene -normal_scene -normalScene -dangerous -dangerous+scene -dangerous_scene -dangerousScene -electrocutionRisk -electrocution_risk -electrocution+risk -baby-electrocution-risk-danger -indoor -outdoor -home -retail -store -hospital -school -mall -office -baby-electrocution-risk-danger-home-indoor -baby-electrocution-risk-danger-home-indoor-emergency -punch-in-the-face angry-people assault -punch-in-the-face -angry-people -assault -man-on-left-winning -man-on-right-winning -man-on-right-injured -man-on-left-injured -punch-in-the-face-angry-people-assault -punch-in-the-face+angry-people+assault -punch-in-the-face-angry-people-assault-fight-outdoor -punch-in-the-face-angry-people-assault-fight-outdoor-dangerous -punch-in-the-face-angry-people-assault-fight-outdoor-men -punch-in-the-face-angry-people-assault-fight-outdoor-men-whitemen -punch-in-the-face-angry-people-assault-fight-outdoor-men-daytime -punch-in-the-face-angry-people-assault-fight-outdoor-men-sunny -punch-in-the-face-angry-people-assault-fight-outdoor-men-day -punch-in-the-face-angry-people-assault-fight-outdoor-men-day-public -man-kicking-man -man-kicking-man-assault-daytime-outdoor-headlock -baby-poison-risk-danger-home-indoor -man-threatening-man-assault-daytime-outdoor-headlock -violent-man-assault-daytime-outdoor-parkinglot -man-assaulting-man-daytime-outdoor-parkinglot -evening-man-home-breakin-burglary-criminal -night-man-home-criminal-peepingtom -night-man-home -criminal-peepingtom -night-man-home-teenager-prankster -vagrant -homeless -homeless-harmless -homeless-dangerous 
-homeless-harmless-daytime-outdoor-lawful -homeless-harmless-daytime-outdoor-unlawful -homeless-dangerous-daytime-outdoor-unlawful-traffic-pandering -homeless-dangerous-daytime-outdoor-unlawful-traffic-pandering-roadaccident -homeless-dangerous-daytime-outdoor-unlawful-traffic-pandering-roadaccidentrisk -homeless-outdoor-daytime -homeless-man -homeless-man-begging -homeless-man-with-dog-begging -dangerous-homeless-man-with-dog-begging -harmless-homeless-man-with-dog-begging -vioent-homeless-man-with-dog-begging -man-daytime-outdoor-parkinglot -suspicious-man-outdoor-parkinglot -suspicious-man-outdoor-parkinglot-daytime -suspicious-man-outdoor-parkinglot-prowling -suspicious-men-outdoor-parkinglot-prowling -suspicious-men-outdoor-parkinglot-prowling-crimerisk -suspicious-men-outdoor-parkinglot-prowling-daytime - -dangerous-daytime-outdoor-unlawful-traffic-roadaccident -daytime-outdoor-roadaccident-policepresence -daytime-outdoor-roadaccident -daytime-outdoor-roadaccident-damage -daytime-outdoor-roadaccident-policepresence-damage-injuries -daytime-outdoor-serious-roadaccident -daytime-outdoor-serious-injuly-roadaccident -emergency-scene-damagedcar -emergency-scene-damage -emergency-scene-damaged-home -damage -damaged -box -damaged-box -damaged-in-shipping -shipping-box-goodcondition -shipping-box-badcondition -shipping-box -damaged-shipping-box -crushed-shipping-box -broken-shipping-box -shipping-box-damaged -shipping-box-crushed -shipping-box-OK -shipping-box-normal -normal-shipping-box -notdamaged-shipping-box -shipping-box-notdamaged -seriously-damaged-shipping-box -undamaged-shipping-box -shipping-box-with-minor-damage diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml deleted file mode 100644 index 4e58b3e..0000000 --- a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml +++ /dev/null @@ -1,209 +0,0 @@ 
-################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: false - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: false - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: false - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml.bak b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml.bak deleted file mode 100644 index 4e58b3e..0000000 --- a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp.yaml.bak +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: false - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: false - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: false - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp_template.yaml b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp_template.yaml deleted file mode 100644 index 4e58b3e..0000000 --- a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ethosightapp_template.yaml +++ /dev/null @@ -1,209 +0,0 @@ -################# basic app configuration -analyzeImageMethod: analyzeImage_precompiled -embeddings_path: baseline.embeddings -labels_path: baseline.labels -output_type: affinityScores -reasoner_type: chatgpt - -################## configure the label space optimization -label_space_optimization: - enabled: false - rerun: false # if true, will rerun the optimization even if a results file exists, - # otherwise will just load the most recent results file -# method: "semantic_relations" - method: "semantic_similarity" - parameters: - threshold: .8 - max_labels: 10 # this is the max per ground truth label (not total) - -general_templates: - enabled: false - template_path: "../templates/general_templates.txt" - -################## configure the benchmark -benchmark: - enabled: false - batch_mode: true - batchsize: 256 - image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery" - # normallabel used to compute benchmark anomaly metrics - normallabel: "normal event" -# ground_truth_path can be a .txt file or a .csv -# .txt : labels only -# .csv : filename, label pairs ... 
first row in file is assumed to be a header and is ignored -# ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video - ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame - - #normally ground_truth_path .csv contains all the labels so that is all you need - #however, if ground_truth_labels_path is specified, it will be used to generate - # the ground truth labels and embeddings files - #if not set and the system needs to generate, the csv will be used - #as long as your .csv has all the labels, you don't need to set this - ground_truth_labels_path: "../datasets/ucf-crime/ucf-crime.txt" - top_n: 5 - bootstrapMode: true # if true, EthosightApp.optimize will start with GT labels - save_affinities: true # if true, will save the affinity scores to a file - - extracted_video_dir: "../extracted_videos" - verbose: false - skip_pre_optimization: true - - -################## configure the mapper -mapper: - enabled: false - # affinity_minthreshold is the minimum affinity score to consider a label - # if the affinity score is below the threshold, the label is set to - # normallabel - affinity_minthreshold: - enabled: false - threshold: 26 - normallabel: "normal event" - threshold: 26 #is this needed for video? - # modes: - # - passthrough - pass input affinity scores through to output - # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels - # - compositeLabels01 - create composite labels from input affinity scores, no reasoning - # - reasoning01 - create composite labels from input affinity scores, with reasoning - # mode: 'passthrough' - #mode: 'labelToLabelAffinity01' - #mode: 'gpt4mode' #uses gpt4 to generate labels... 
input is the single top label -# mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores - #submode: normallabelonly #only remap the normal label - mode: 'hardmap01' - hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt' -# prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind." - prompt_1: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels. - -# SOA anomaly and 20% top1 - prompt: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# approaching 20% top 1, recall etc... good - prompt_20230814_000118: > - Please analyze the following aggregated and ranked affinity scores from security - camera video footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# SOA anomaly and 20% top1 - prompt_20230813_233003: > - Please analyze the following aggregated and ranked affinity scores from video - footage and return 'normal event' or one of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the most important - event in the scene. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. 
if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# best result so far... SOTA anomaly metrics - prompt_20230813_231755: > - Please analyze the following affinity scores and return 'normal event' or one - of the - following labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism' that best characterizes the scene. please return only - 'normal event' or one of these listed labels as the label with no extra text - or delimiters of any kind. if unsure, return the nearest applicable label erring - on - the side of potential health, safety, and security labels from our list ('abuse', - 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism', 'normal event'). here are the affinity - scores <<{affinityScores}>>. Just do your best. No extraneous characters. - -# this is a gpt4mode2 prompt with affinity scores - prompt_20230813_225106: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these listed - labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label erring on - the side of potential health, safety, and security labels from our list. 
here - are some - affinity scores you may take into consideration when selecting the - best label from our list: <<{affinityScores}>>. Just do your best. No extraneous - characters. - one of the labels from our list or 'normal event' only. thank you. - -#671 Anomaly Detection Metrics: -#672 Accuracy: 0.7186 -#673 Precision: 0.9500 -#674 Recall: 0.7355 -#675 F1 Score: 0.8291 -#676 Top-1 Accuracy: 0.1617 -#677 Top-5 Accuracy: 0.3234 -#678 Total predictions: 167 - prompt_20230813_2152: > - Please analyze this input label <<{label}>> and return 'normal event' or one of - the - following potential labels: 'abuse', 'arrest', 'arson', 'assault', - 'burglary', 'explosion', 'fighting', 'road accidents', 'robbery', 'shooting', 'shoplifting', - 'stealing', 'vandalism'. please return only 'normal event' or one of these crime - event labels as the label with no extra text or delimiters of any kind. if unsure, - return the nearest applicable label. - prompt2: > - Return a label we will use to help analyze an image. The context is to identify - crime behavior versus normal behavior. The camera angles may be overhead or normal - more horizontal security camera angles. Please analyze the following input label - and return 'normal event' or 'crime event' <<{label}>> please return only 'normal - event' or 'crime event' as the label with no extra text or delimiters of any - kind. 
- verbose: true - - -################# configure video -video: - skip_frames: 0 - normal_label_name: "normal event" - label_mapping: "periods_count" - # modes: - # - majority - rank the labels by the number of frames - # - longest_period - rank the labels by the number of consecutive frames - # - periods_count - rank the labels by the number of periods - -################ visualization -visualization: - top_n_affinity: 100 - show_distinct: true - -################# phase2 -phase2: - maxiterations: 200 diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.embeddings b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.embeddings deleted file mode 100644 index f8e8fc1..0000000 Binary files a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.embeddings and /dev/null differ diff --git a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.labels b/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.labels deleted file mode 100644 index 88e9582..0000000 --- a/Ethosight/website/EthosightAppBasedir/singleimage_labelplay/ground_truth.labels +++ /dev/null @@ -1,2 +0,0 @@ -normal event -robbery diff --git a/Ethosight/website/accounts/__init__.py b/Ethosight/website/accounts/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/accounts/__init__.py +++ b/Ethosight/website/accounts/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/accounts/admin.py b/Ethosight/website/accounts/admin.py index 2cef761..a30cd3b 100644 --- a/Ethosight/website/accounts/admin.py +++ b/Ethosight/website/accounts/admin.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.contrib import admin from .models import PendingUser from .utils import approve_user diff --git a/Ethosight/website/accounts/apps.py b/Ethosight/website/accounts/apps.py index 3e3c765..82eddf8 100644 --- a/Ethosight/website/accounts/apps.py +++ b/Ethosight/website/accounts/apps.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + from django.apps import AppConfig diff --git a/Ethosight/website/accounts/management/commands/sendtestemail.py b/Ethosight/website/accounts/management/commands/sendtestemail.py index fba300e..27b6964 100644 --- a/Ethosight/website/accounts/management/commands/sendtestemail.py +++ b/Ethosight/website/accounts/management/commands/sendtestemail.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.core.management.base import BaseCommand from django.core.mail import send_mail diff --git a/Ethosight/website/accounts/migrations/0001_initial.py b/Ethosight/website/accounts/migrations/0001_initial.py index f432a62..58fae18 100644 --- a/Ethosight/website/accounts/migrations/0001_initial.py +++ b/Ethosight/website/accounts/migrations/0001_initial.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Generated by Django 4.2.5 on 2023-09-12 13:45 import django.contrib.auth.models diff --git a/Ethosight/website/accounts/migrations/0002_pendinguser.py b/Ethosight/website/accounts/migrations/0002_pendinguser.py index 9ee2f8d..09fac87 100644 --- a/Ethosight/website/accounts/migrations/0002_pendinguser.py +++ b/Ethosight/website/accounts/migrations/0002_pendinguser.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Generated by Django 4.2.5 on 2023-09-13 22:59 from django.db import migrations, models diff --git a/Ethosight/website/accounts/migrations/__init__.py b/Ethosight/website/accounts/migrations/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/accounts/migrations/__init__.py +++ b/Ethosight/website/accounts/migrations/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/accounts/models.py b/Ethosight/website/accounts/models.py index 0273309..cc03dc2 100644 --- a/Ethosight/website/accounts/models.py +++ b/Ethosight/website/accounts/models.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.contrib.auth.models import AbstractUser from django.db import models from django.utils.translation import gettext as _ diff --git a/Ethosight/website/accounts/tests.py b/Ethosight/website/accounts/tests.py index 7ce503c..bd4b4fd 100644 --- a/Ethosight/website/accounts/tests.py +++ b/Ethosight/website/accounts/tests.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.test import TestCase # Create your tests here. diff --git a/Ethosight/website/accounts/utils.py b/Ethosight/website/accounts/utils.py index 1081291..3a630f9 100644 --- a/Ethosight/website/accounts/utils.py +++ b/Ethosight/website/accounts/utils.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.core.mail import send_mail import redis import uuid diff --git a/Ethosight/website/accounts/views.py b/Ethosight/website/accounts/views.py index 7561770..12a368d 100644 --- a/Ethosight/website/accounts/views.py +++ b/Ethosight/website/accounts/views.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.shortcuts import render, redirect from django.contrib.auth import authenticate, login from .models import CustomUser, AccessCode diff --git a/Ethosight/website/configmanager/__init__.py b/Ethosight/website/configmanager/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/configmanager/__init__.py +++ b/Ethosight/website/configmanager/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/configmanager/admin.py b/Ethosight/website/configmanager/admin.py index 700b407..0bfdf85 100644 --- a/Ethosight/website/configmanager/admin.py +++ b/Ethosight/website/configmanager/admin.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.contrib import admin from .models import Config diff --git a/Ethosight/website/configmanager/apps.py b/Ethosight/website/configmanager/apps.py index 1e9a68e..b2864df 100644 --- a/Ethosight/website/configmanager/apps.py +++ b/Ethosight/website/configmanager/apps.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.apps import AppConfig diff --git a/Ethosight/website/configmanager/migrations/0001_initial.py b/Ethosight/website/configmanager/migrations/0001_initial.py index a77b313..42010b4 100644 --- a/Ethosight/website/configmanager/migrations/0001_initial.py +++ b/Ethosight/website/configmanager/migrations/0001_initial.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Generated by Django 4.2.5 on 2023-09-18 00:29 from django.db import migrations, models diff --git a/Ethosight/website/configmanager/migrations/__init__.py b/Ethosight/website/configmanager/migrations/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/configmanager/migrations/__init__.py +++ b/Ethosight/website/configmanager/migrations/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/configmanager/models.py b/Ethosight/website/configmanager/models.py index c353814..f4ec51e 100644 --- a/Ethosight/website/configmanager/models.py +++ b/Ethosight/website/configmanager/models.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.db import models from django.conf import settings from ruamel.yaml import YAML diff --git a/Ethosight/website/configmanager/tests.py b/Ethosight/website/configmanager/tests.py index 7ce503c..bd4b4fd 100644 --- a/Ethosight/website/configmanager/tests.py +++ b/Ethosight/website/configmanager/tests.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.test import TestCase # Create your tests here. diff --git a/Ethosight/website/configmanager/urls.py b/Ethosight/website/configmanager/urls.py index 5967a5b..7a00d43 100644 --- a/Ethosight/website/configmanager/urls.py +++ b/Ethosight/website/configmanager/urls.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # configmanager/urls.py from django.urls import path, re_path diff --git a/Ethosight/website/configmanager/views.py b/Ethosight/website/configmanager/views.py index bb601a9..95d05fb 100644 --- a/Ethosight/website/configmanager/views.py +++ b/Ethosight/website/configmanager/views.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.conf import settings from django.contrib.auth.decorators import user_passes_test from django.http import HttpResponse diff --git a/Ethosight/website/dashboard/__init__.py b/Ethosight/website/dashboard/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/dashboard/__init__.py +++ b/Ethosight/website/dashboard/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/dashboard/admin.py b/Ethosight/website/dashboard/admin.py index 8c38f3f..b421e10 100644 --- a/Ethosight/website/dashboard/admin.py +++ b/Ethosight/website/dashboard/admin.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.contrib import admin # Register your models here. diff --git a/Ethosight/website/dashboard/apps.py b/Ethosight/website/dashboard/apps.py index 7b1cc05..e13e464 100644 --- a/Ethosight/website/dashboard/apps.py +++ b/Ethosight/website/dashboard/apps.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.apps import AppConfig diff --git a/Ethosight/website/dashboard/migrations/0001_initial.py b/Ethosight/website/dashboard/migrations/0001_initial.py index bf5377b..b30f24a 100644 --- a/Ethosight/website/dashboard/migrations/0001_initial.py +++ b/Ethosight/website/dashboard/migrations/0001_initial.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Generated by Django 4.2.5 on 2023-09-14 20:44 from django.conf import settings diff --git a/Ethosight/website/dashboard/migrations/0002_uploadedimage.py b/Ethosight/website/dashboard/migrations/0002_uploadedimage.py index ec98f51..e840de7 100644 --- a/Ethosight/website/dashboard/migrations/0002_uploadedimage.py +++ b/Ethosight/website/dashboard/migrations/0002_uploadedimage.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # Generated by Django 4.2.5 on 2023-09-19 22:13 import dashboard.models diff --git a/Ethosight/website/dashboard/migrations/__init__.py b/Ethosight/website/dashboard/migrations/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/dashboard/migrations/__init__.py +++ b/Ethosight/website/dashboard/migrations/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/dashboard/models.py b/Ethosight/website/dashboard/models.py index e8cf590..f0b87b6 100644 --- a/Ethosight/website/dashboard/models.py +++ b/Ethosight/website/dashboard/models.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.db import models from django.contrib.auth import get_user_model from django.conf import settings diff --git a/Ethosight/website/dashboard/templates/model_interaction.html b/Ethosight/website/dashboard/templates/model_interaction.html index 9e93fb1..db8de70 100644 --- a/Ethosight/website/dashboard/templates/model_interaction.html +++ b/Ethosight/website/dashboard/templates/model_interaction.html @@ -8,8 +8,8 @@

Interact with Models



- -

+ +

diff --git a/Ethosight/website/dashboard/tests.py b/Ethosight/website/dashboard/tests.py index 7ce503c..bd4b4fd 100644 --- a/Ethosight/website/dashboard/tests.py +++ b/Ethosight/website/dashboard/tests.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.test import TestCase # Create your tests here. diff --git a/Ethosight/website/dashboard/urls.py b/Ethosight/website/dashboard/urls.py index efea115..79519d5 100644 --- a/Ethosight/website/dashboard/urls.py +++ b/Ethosight/website/dashboard/urls.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + # website/urls.py or website/dashboard/urls.py from django.urls import path from .views import dashboard, create_app, model_interaction diff --git a/Ethosight/website/dashboard/views.py b/Ethosight/website/dashboard/views.py index b3d26f1..1eb4c54 100644 --- a/Ethosight/website/dashboard/views.py +++ b/Ethosight/website/dashboard/views.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + # website/dashboard/views.py from django.shortcuts import render, redirect from .models import EthosightApplication, UploadedImage diff --git a/Ethosight/website/manage.py b/Ethosight/website/manage.py index ac2f90c..9e64741 100755 --- a/Ethosight/website/manage.py +++ b/Ethosight/website/manage.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + #!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os diff --git a/Ethosight/website/psql.sh b/Ethosight/website/psql.sh index dd527c4..5bce254 100755 --- a/Ethosight/website/psql.sh +++ b/Ethosight/website/psql.sh @@ -1 +1,18 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + docker exec -it djangopostgres psql -U postgres diff --git a/Ethosight/website/runpostgress.sh b/Ethosight/website/runpostgress.sh index ca174d5..2ee4e06 100755 --- a/Ethosight/website/runpostgress.sh +++ b/Ethosight/website/runpostgress.sh @@ -1,5 +1,31 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash + +# Set the desired database name +DB_NAME="mydatabase" + if [ $(docker ps -a -f name=djangopostgres | grep -w djangopostgres | wc -l) -eq 1 ]; then docker start djangopostgres else - docker run --name djangopostgres -e POSTGRES_PASSWORD=mysecretpassword -p 5432:5432 -d postgres + docker run --name djangopostgres \ + -e POSTGRES_PASSWORD=mysecretpassword \ + -e POSTGRES_DB=$DB_NAME \ + -p 5432:5432 \ + -d postgres fi diff --git a/Ethosight/website/runwebapp.sh b/Ethosight/website/runwebapp.sh new file mode 100755 index 0000000..54f0e53 --- /dev/null +++ b/Ethosight/website/runwebapp.sh @@ -0,0 +1,24 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + +python3 "$SCRIPT_DIR/manage.py" makemigrations +python3 "$SCRIPT_DIR/manage.py" migrate +python3 "$SCRIPT_DIR/manage.py" createsuperuser +python3 "$SCRIPT_DIR/manage.py" runserver 8080 diff --git a/Ethosight/website/website/__init__.py b/Ethosight/website/website/__init__.py index e69de29..ba74d54 100644 --- a/Ethosight/website/website/__init__.py +++ b/Ethosight/website/website/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + diff --git a/Ethosight/website/website/asgi.py b/Ethosight/website/website/asgi.py index a156533..2210d6c 100644 --- a/Ethosight/website/website/asgi.py +++ b/Ethosight/website/website/asgi.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + """ ASGI config for website project. diff --git a/Ethosight/website/website/settings.py b/Ethosight/website/website/settings.py index f2bcab5..2564bd9 100644 --- a/Ethosight/website/website/settings.py +++ b/Ethosight/website/website/settings.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + """ Django settings for website project. @@ -15,9 +32,8 @@ # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent -ETHOSIGHT_APP_BASE_DIR = os.environ.get('DjangoEthosightAppBaseDir', '/home/hmlatapie/devactive/home/Ethosight/website/EthosightAppBasedir') -CONFIG_YAML_DIRECTORY = os.environ.get('EthosightYAMLDirectory', - '/home/hmlatapie/devactive/home/Ethosight/configs') +ETHOSIGHT_APP_BASE_DIR = os.environ.get('DjangoEthosightAppBaseDir', '') +CONFIG_YAML_DIRECTORY = os.environ.get('EthosightYAMLDirectory', '') MEDIA_ROOT = os.path.join(BASE_DIR, 'media/') MEDIA_URL = '/media/' @@ -152,10 +168,10 @@ DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' -EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' -EMAIL_PORT = 587 # Typically 587 for TLS or 465 for SSL -EMAIL_USE_TLS = True # Or EMAIL_USE_SSL = True if using SSL -EMAIL_USE_SSL = False -EMAIL_HOST_USER = 'AKIA2XIYHNOLG45YDV5G' -EMAIL_HOST_PASSWORD = 'BHpYbWBYlRKF02ynIoh60ovZ9OJLsn7Ybx8j4MHYX+gw' +EMAIL_HOST = os.environ.get('EMAIL_HOST', 'email-smtp.us-east-1.amazonaws.com') +EMAIL_PORT = int(os.environ.get('EMAIL_PORT', 587)) # Convert to int, as os.environ.get returns a string +EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS', 'True') == 'True' # Convert string to boolean +EMAIL_USE_SSL = os.environ.get('EMAIL_USE_SSL', 'False') == 'True' # Convert string to boolean +EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '') 
+EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '') diff --git a/Ethosight/website/website/urls.py b/Ethosight/website/website/urls.py index a790fbd..1dbeb31 100644 --- a/Ethosight/website/website/urls.py +++ b/Ethosight/website/website/urls.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + from django.urls import path, include, re_path from accounts.views import register, user_login, home from django.contrib import admin diff --git a/Ethosight/website/website/wsgi.py b/Ethosight/website/website/wsgi.py index 568e8ab..9d27301 100644 --- a/Ethosight/website/website/wsgi.py +++ b/Ethosight/website/website/wsgi.py @@ -1,3 +1,20 @@ + +# Copyright 2022 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + """ WSGI config for website project.