Team Updates

#Dataset images in rows:
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os

#read picture names:
file = open('names.txt', 'r')
names = file.readlines()
file.close()
for i in range(np.size(names)):
    names[i] = names[i].strip()   # drop the trailing newline so Image.open() gets a clean filename
print('Dataset has ', np.size(names), 'images')
#One output row per image: each 200x200 image is unrolled into a 1x40000 strip
new_im = Image.new('RGB', (40000, np.size(names)))
#For each image
for j in range(np.size(names)):
    img = Image.open(names[j])
    img = img.resize((200, 200))
    width, height = img.size
    #for each row in image
    for i in range(200):
        area = (0, i, width, (i + 1))
        cropped = img.crop(area)
        new_im.paste(cropped, (i * 200, j))
#resultant image saved in the same location
new_im.save("cropped_picture.jpg")
width, height = new_im.size
output_array = np.array(new_im.getdata(), np.uint8).reshape(new_im.size[1], new_im.size[0], 3)
#resultant file.csv saved in the same location
#np.savetxt("fire_data_img_in_row.csv", output_array, delimiter=",")
print('Our new dataset is of size: ', height, ' rows and ', width, ' columns, RGB')
#print('Dataset: ')
#print(output_array)
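For reference, the same image-per-row matrix can be built directly with NumPy reshapes instead of the crop-and-paste loop; this is a minimal sketch under the same assumptions as above (names already stripped, images resized to 200x200 RGB):

#Sketch: build the (num_images, 40000, 3) row layout directly with reshape
rows = np.stack([
    np.asarray(Image.open(n).resize((200, 200)).convert('RGB'), dtype=np.uint8).reshape(200 * 200, 3)
    for n in names
])   # shape: (num_images, 40000, 3), one flattened image per row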
##########################################################
#Dataset images in 4D array:
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os

width_re = 200
height_re = 200
#read pictures names:
file = open('names.txt', 'r')
names = file.readlines()
file.close()
for i in range(np.size(names)):
    names[i] = names[i].strip()
print('Dataset has ', np.size(names), 'images')
#Stack each image in the dataset (4D) array:
output_array = np.zeros((np.size(names), height_re, width_re, 3), dtype=np.uint8)
for j in range(np.size(names)):
    img = Image.open(names[j])
    img = img.resize((width_re, height_re))
    output_array[j] = np.array(img.getdata(), np.uint8).reshape(height_re, width_re, 3)
#resultant file.csv saved in the same location
#np.savetxt("fire_data_pixels.csv", output_array, delimiter=",")
print('each image in dataset is of size: ', height_re, ' rows and ', width_re, ' columns, RGB')
#print('Dataset: ')
#print(output_array)
#############################################################
#Dataset images in RGB array for pytorch:
#Reorder from (N, H, W, C) to channel-first (N, C, H, W), the layout PyTorch expects
output = np.zeros((np.size(names), 3, height_re, width_re), dtype=np.uint8)
for m in range(np.size(names)):
    for i in range(height_re):
        for j in range(width_re):
            for k in range(3):
                output[m][k][i][j] = output_array[m][i][j][k]
#np.savetxt("fire_data_RGB.csv", output, delimiter=",")
###########################################################
#Dataset images labeling array:
file = open('labels.txt', 'r')
labels = file.readlines()
file.close()
for i in range(np.size(labels)):
    labels[i] = labels[i][0]   # keep only the leading '0'/'1' character
print(labels)
label = list(map(int, labels))   # map() is lazy in Python 3, so materialize the list
#print(label)
#np.savetxt("fire_data_labels.csv", labels, delimiter=",")
[labels.txt (Images_Labels): 421 lines, one binary wildfire label (0 or 1) per image]
[names.txt (Images_Names): 421 lines, one image filename per line (197.jpg, 161.jpg, 402.jpg, ...)]
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import io
import requests
from PIL import Image
from torch.autograd import Variable

plt.ion()   # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
data_dir = 'new140'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
print(class_names)
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)   # pause a bit so that plots are updated

# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
def train_model(model, criterion, optimizer, scheduler, num_epochs=1):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()   # Set model to training mode
            else:
                model.eval()    # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()   # step the LR scheduler once per training epoch, after optimizer.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of the final layer are being optimized,
# as opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
model_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10)
visualize_model(model_conv)
plt.ioff()
plt.show()
def Test(model, IMG_path):
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
    preprocess = transforms.Compose([
        transforms.Resize(256),   # transforms.Scale is deprecated in favor of Resize
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])
    test_image = Image.open(IMG_path).convert('RGB')
    img_tensor = preprocess(test_image)
    img_tensor.unsqueeze_(0)   # add a batch dimension
    img_variable = Variable(img_tensor)
    model.eval()   # batch-norm layers must run in eval mode for a single image
    # softmax turns the raw logits into probabilities, so the 0.5 threshold below is meaningful
    fc_out = torch.nn.functional.softmax(model(img_variable), dim=1)
    return fc_out, test_image

img = r'C:\Users\user\Nasa\safe.jpg'
scores, image = Test(model_conv, img)
scores.data   # notebook-style display of the class probabilities
if scores.data[0, 0] > 0.5:
    print('There is no wildfire')
elif scores.data[0, 1] > 0.5:
    print('Careful! There is a wildfire')
else:
    print('The system is confused')
image   # notebook-style display of the test image
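To reuse the trained classifier in the proposed app backend, the weights can be persisted and restored with standard torch.save/torch.load; a minimal sketch (the file name fire_resnet18.pth is our choice here, not from the original code):

# Persist the fine-tuned weights (file name is an arbitrary example)
torch.save(model_conv.state_dict(), 'fire_resnet18.pth')
# Later, rebuild the same architecture and load the weights before inference
model_restored = torchvision.models.resnet18()
model_restored.fc = nn.Linear(model_restored.fc.in_features, 2)
model_restored.load_state_dict(torch.load('fire_resnet18.pth'))
model_restored.eval()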

Highlights of the problems

Over the past year, we have witnessed long and arduous battles against record-breaking wildfires across the world. Some of these fires burned thousands of acres of land and destroyed hundreds of homes and buildings. Additionally, smoke from the fires creates poor air quality, raising health concerns for people not just in the vicinity of a fire but hundreds of miles away. The challenge is to reduce the time it takes to detect fires using satellite information from 3-4 hours to ten minutes or less. Improving the accuracy of wildfire detection is equally challenging: the lack of suitable training datasets and of adequate verification models compounds the problem.

Observation

A forest fire is usually only noticed once it has already spread over a large area, making its control and suppression arduous and at times impossible. The result is devastating loss and irreparable damage to the environment and atmosphere (30% of the carbon dioxide (CO2) in the atmosphere comes from forest fires), which also release huge amounts of smoke. Other terrible consequences of forest fires are long-term disastrous effects such as impacts on local weather patterns, global warming, and the extinction of rare species of flora and fauna.

Idea

The objective is to detect a fire as quickly as possible and to verify the existence of fire at that very spot. Early notification of the fire units is vital, and it can be sped up by the following procedure.

  • A citizen notices a fire near or inside a forest area where trees, dry wood, leaves, and so forth act as a fuel source.
  • He/she uploads a photo or a video showing the fire to a mobile app or website designed for this specific purpose.
  • The app records the citizen's location and runs a machine learning model to verify whether the input is wildfire related (see the sketch after this list).
  • If the model verifies a wildfire, a notification is sent to the fire units and to the satellites covering the area to track the spread of the fire.
  • The fire units send notifications to all citizens in the region near the fire.
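A minimal sketch of the verification step, reusing the trained model_conv and the Test() helper from the code earlier on this page; notify_fire_units() is a hypothetical placeholder for the alerting service, not part of the original code:

def handle_report(image_path, location):
    scores, _ = Test(model_conv, image_path)
    if scores.data[0, 1] > 0.5:            # class index 1 = wildfire, as in the code above
        notify_fire_units(location)        # hypothetical: alert fire units / satellites
        return 'wildfire confirmed'
    return 'no wildfire detected'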

Data

Currently, there is a massive amount of data provided by NASA's "Active Fire" satellites, including maps, shapefiles, and longitude/latitude coordinates of wildfire locations. However, the data describes wildfires that have already been recorded, which makes it unsuitable for early fire detection. Other available datasets are incomplete and inconvenient for fire verification, and the lack of variety in wildfire and non-wildfire samples hampers learning for this problem.
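As an illustration of how such recorded data could still support verification, a sketch only: active_fire.csv is a hypothetical export with latitude and longitude columns, like the coordinates in the Active Fire products.

import numpy as np
fires = np.genfromtxt('active_fire.csv', delimiter=',', names=True)
report_lat, report_lon = 30.0, 31.2   # example citizen-report coordinates
# crude proximity check (in degrees, not km) against recorded fire points
near = np.sqrt((fires['latitude'] - report_lat) ** 2 +
               (fires['longitude'] - report_lon) ** 2) < 0.1
print('recorded fire points near the report:', int(near.sum()))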

Contribution and Results

  • We created a dataset of images (downloaded from Google Images) classified as wildfire and non-wildfire. The dataset includes 1126 labelled images, split 60% into a training set and 40% into a validation set (see the split sketch after this list). The test set was collected manually to verify typical wildfires and distinguish them from oddly similar samples that do not indicate a wildfire.
  • We trained a Convolutional Neural Network (CNN) model to predict whether an input picture indicates a wildfire.
  • The computational power and the complexity of the model are minimized by the use of transfer learning. Nonetheless, outstanding performance was achieved with minimal model optimization (we were constrained by a lack of resources after facing a technical issue with AWS).
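A minimal sketch of the 60/40 split, assuming names.txt and labels.txt as above; the new140/{train,val}/{fire,no_fire} directory layout is our assumption to match the ImageFolder code, and the class folder names are hypothetical:

import os, random, shutil
with open('names.txt') as f:
    names = [n.strip() for n in f]
with open('labels.txt') as f:
    labels = [int(l.strip()) for l in f]
pairs = list(zip(names, labels))
random.seed(0)          # fixed seed so the split is reproducible
random.shuffle(pairs)
cut = int(0.6 * len(pairs))   # 60% train / 40% val
for split, subset in [('train', pairs[:cut]), ('val', pairs[cut:])]:
    for name, lab in subset:
        cls = 'fire' if lab == 1 else 'no_fire'   # hypothetical class folder names
        dst = os.path.join('new140', split, cls)
        os.makedirs(dst, exist_ok=True)
        shutil.copy(name, dst)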

Future work

  • We plan to increase both the size and the diversity of the wildfire dataset to cover more situations, since we have noticed that the greater the variety of the collected samples, the better the accuracy of the model.
  • Train the model to classify the size of a fire and predict its spread time and direction.
  • Create a mobile app and website for fire reports and tracking, built around our model.
  • Make use of NASA Active Fire data to build a real-time aid for fire prevention.
Team: Hebatullah Mostafa Sakr Mohamed, Ahmed Abdel-Aziz Shaheen
Project: Spot That Fire

SpaceApps is a NASA incubator innovation program.