Sample Code Structure for Anomaly Detection
import os
import glob

import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision.transforms import Compose, Resize, ToTensor
from torchvision.models.video import r3d_18            # placeholder backbone (see Step 2)
from sklearn.mixture import GaussianMixture
from pgmpy.models import DynamicBayesianNetwork        # pgmpy is assumed as the DBN library
from pgmpy.inference import DBNInference
# ========================================================
# Step 1: Frame Extraction
# ========================================================
def extract_frames(video_path, output_dir):
    """Decode a video and save its frames as JPEG images."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    cap = cv2.VideoCapture(video_path)
    frame_count = 0
    success = True
    while success:
        success, frame = cap.read()
        if not success:
            break
        frame_filename = os.path.join(output_dir, f"frame_{frame_count:05d}.jpg")
        cv2.imwrite(frame_filename, frame)
        frame_count += 1
    cap.release()
# video_name = os.path.basename(video_path).split('.')[0]
# extract_frames(video_path, output_dir=f"{dataset}_frames/{video_name}")
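# Illustrative driver loop (not part of the original structure): the directory
# layout "<dataset>/videos/*.mp4" is an assumption; adapt it to the actual data.
# for dataset in ["UCF-Crime", "Avenue"]:
#     for video_path in glob.glob(os.path.join(dataset, "videos", "*.mp4")):
#         video_name = os.path.basename(video_path).split('.')[0]
#         extract_frames(video_path, output_dir=f"{dataset}_frames/{video_name}")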
# ========================================================
# Step 2: Feature Extraction with a Pretrained Backbone
# ========================================================
# The backbone is unspecified in the original; r3d_18 (Kinetics-pretrained) is a placeholder.
model = r3d_18(weights="DEFAULT")
model.fc = nn.Identity()  # drop the classifier, keep 512-d clip features
model.eval()
model = model.cuda()
class VideoDataset(Dataset):
    def __init__(self, frame_paths, transform, clip_length=16):
        self.frame_paths = frame_paths
        self.transform = transform
        self.clip_length = clip_length

    def __len__(self):
        return len(self.frame_paths) // self.clip_length

    def __getitem__(self, idx):
        start = idx * self.clip_length
        clip = [
            self.transform(cv2.imread(self.frame_paths[i]))
            for i in range(start, start + self.clip_length)
        ]
        return torch.stack(clip)  # (T, C, H, W)
# Preprocessing (ToTensor is applied first so Resize can operate on the
# tensors built from cv2.imread's numpy arrays)
transform = Compose([
    ToTensor(),
    Resize((224, 224)),
])
# Extract Features
def extract_features(dataset_dir):
    # The "*/*.jpg" pattern assumes one sub-directory of frames per video
    frame_paths = sorted(glob.glob(os.path.join(dataset_dir, "*", "*.jpg")))
    dataset = VideoDataset(frame_paths, transform)
    features = []
    with torch.no_grad():
        for idx in range(len(dataset)):
            clips = dataset[idx].permute(1, 0, 2, 3).cuda()  # (T, C, H, W) -> (C, T, H, W)
            feature = model(clips.unsqueeze(0))              # add the batch dimension
            features.append(feature.cpu().numpy())
    return np.vstack(features)
# train_features = extract_features("UCF-Crime/train_frames")
# test_features = extract_features("UCF-Crime/test_frames")
# ========================================================
# Step 3: Train the Anomaly Models (Autoencoder / GMM / DBN)
# ========================================================
class Autoencoder(nn.Module):
    def __init__(self, input_dim):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 128),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            nn.Linear(128, 512),
            nn.ReLU(),
            nn.Linear(512, input_dim)
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
def train_autoencoder(features, epochs=20, lr=1e-3):  # epochs/lr are illustrative defaults
    input_dim = features.shape[1]
    autoencoder = Autoencoder(input_dim).cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=lr)
    features = torch.tensor(features).float().cuda()
    for epoch in range(epochs):
        for feature in features:
            optimizer.zero_grad()
            reconstructed = autoencoder(feature)
            loss = criterion(reconstructed, feature)
            loss.backward()
            optimizer.step()
    return autoencoder
def train_gmm(features):
    gmm = GaussianMixture(n_components=5, covariance_type="full")  # n_components is illustrative
    gmm.fit(features)
    return gmm
def train_dbn(features):
    # The temporal structure (edge list) is dataset-specific and left
    # unspecified here; see the pgmpy edge-syntax sketch below.
    dbn = DynamicBayesianNetwork([
    ])
    dbn.fit(features)
    return dbn
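# For illustration only (not in the original structure): pgmpy expects DBN edges
# as ((variable, time_slice), (variable, time_slice)) pairs over slices 0 and 1.
# The variable names below are hypothetical.
# example_dbn = DynamicBayesianNetwork([
#     (("motion", 0), ("scene", 0)),    # intra-slice dependency
#     (("motion", 0), ("motion", 1)),   # inter-slice (temporal) dependency
#     (("scene", 0), ("scene", 1)),
# ])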
# ========================================================
# Step 4: Anomaly Scoring and Visualisation
# ========================================================
def compute_anomaly_scores(model, test_features, method="Autoencoder"):
    if method == "Autoencoder":
        criterion = nn.MSELoss()
        reconstruction_errors = []
        for feature in test_features:
            feature = torch.tensor(feature).float().cuda()
            with torch.no_grad():
                reconstructed = model(feature)
            error = criterion(reconstructed, feature)
            reconstruction_errors.append(error.item())
        scores = np.array(reconstruction_errors)
    elif method == "GMM":
        scores = -model.score_samples(test_features)  # low likelihood -> high anomaly score
    else:  # "DBN"
        dbn_inference = DBNInference(model)
        log_likelihoods = []
        # Schematic: how a per-observation likelihood is obtained from DBNInference
        # depends on how the features are discretised into DBN variables.
        for observation in test_features:
            log_likelihoods.append(likelihood.log_probability(observation))
        scores = -np.array(log_likelihoods)
    return scores
# Define threshold and plot the scores
def plot_scores(scores, title="Anomaly Scores"):
    threshold = np.percentile(scores, 95)  # illustrative 95th-percentile threshold
    plt.figure(figsize=(10, 6))
    plt.plot(scores, label="Anomaly score")
    plt.axhline(threshold, color="r", linestyle="--", label="Threshold")
    plt.legend()
    plt.title(title)
    plt.xlabel("Frames or Clips")
    plt.ylabel("Anomaly Score")
    plt.show()
# ========================================================
# Step 5: Dataset-specific Usage
# ========================================================
# UCF-Crime
# train_features = extract_features("UCF-Crime/train_frames")
# test_features = extract_features("UCF-Crime/test_frames")
# Avenue
# train_features = extract_features("Avenue/train_frames")
# test_features = extract_features("Avenue/test_frames")
# gmm = train_gmm(train_features)
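# Remaining pipeline, sketched in the same commented style; the method choice
# and titles are illustrative.
# autoencoder = train_autoencoder(train_features)
# scores = compute_anomaly_scores(autoencoder, test_features, method="Autoencoder")
# # or: scores = compute_anomaly_scores(gmm, test_features, method="GMM")
# plot_scores(scores, title="UCF-Crime anomaly scores")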