@@ -54,7 +54,7 @@ class Stats(object):
     def load(self):
         with self._lock:
             if os.path.exists(self.path) and os.path.getsize(self.path) > 0:
-                logging.info("[ai] loading %s" % self.path)
+                logging.info("[AI] loading %s" % self.path)
                 with open(self.path, 'rt') as fp:
                     obj = json.load(fp)
 
@@ -64,7 +64,7 @@ class Stats(object):
 
     def save(self):
         with self._lock:
-            logging.info("[ai] saving %s" % self.path)
+            logging.info("[AI] saving %s" % self.path)
 
             data = json.dumps({
                 'born_at': self.born_at,
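Both Stats hunks guard persistence with self._lock, so the trainer thread and the main loop cannot interleave a read with a half-finished write. A minimal runnable sketch of the same pattern, assuming 'born_at' is the only persisted field (the real class stores more state than shown here):

import json
import logging
import os
import threading


class Stats(object):
    # sketch of the lock-guarded persistence shown above; only 'born_at'
    # is taken from the diff, the rest of the real state is omitted
    def __init__(self, path):
        self.path = path
        self.born_at = 0
        self._lock = threading.Lock()

    def load(self):
        with self._lock:
            # the size check skips empty files left by an interrupted save
            if os.path.exists(self.path) and os.path.getsize(self.path) > 0:
                logging.info("[AI] loading %s" % self.path)
                with open(self.path, 'rt') as fp:
                    obj = json.load(fp)
                self.born_at = obj.get('born_at', self.born_at)

    def save(self):
        with self._lock:
            logging.info("[AI] saving %s" % self.path)
            data = json.dumps({'born_at': self.born_at})
            with open(self.path, 'wt') as fp:
                fp.write(data)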
@@ -114,7 +114,7 @@ class AsyncTrainer(object):
         _thread.start_new_thread(self._ai_worker, ())
 
     def _save_ai(self):
-        logging.info("[ai] saving model to %s ..." % self._nn_path)
+        logging.info("[AI] saving model to %s ..." % self._nn_path)
         temp = "%s.tmp" % self._nn_path
         self._model.save(temp)
         os.replace(temp, self._nn_path)
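_save_ai uses the write-to-temp-then-os.replace idiom: os.replace is atomic when source and target sit on the same filesystem, so a crash mid-save leaves the previous brain file intact instead of a truncated one. The pattern in isolation, with save_fn as a hypothetical stand-in for self._model.save:

import os


def atomic_save(path, save_fn):
    # write the new payload beside the target, then swap it in atomically;
    # readers only ever see the old file or the complete new one
    temp = "%s.tmp" % path
    save_fn(temp)
    os.replace(temp, path)

Called as atomic_save(self._nn_path, self._model.save), this reproduces the three lines in the hunk above.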
@@ -133,15 +133,15 @@ class AsyncTrainer(object):
 
     def on_ai_policy(self, new_params):
         plugins.on('ai_policy', self, new_params)
-        logging.info("[ai] setting new policy:")
+        logging.info("[AI] setting new policy:")
         for name, value in new_params.items():
             if name in self._config['personality']:
                 curr_value = self._config['personality'][name]
                 if curr_value != value:
-                    logging.info("[ai] ! %s: %s -> %s" % (name, curr_value, value))
+                    logging.info("[AI] ! %s: %s -> %s" % (name, curr_value, value))
                     self._config['personality'][name] = value
             else:
-                logging.error("[ai] param %s not in personality configuration!" % name)
+                logging.error("[AI] param %s not in personality configuration!" % name)
 
         self.run('set wifi.ap.ttl %d' % self._config['personality']['ap_ttl'])
         self.run('set wifi.sta.ttl %d' % self._config['personality']['sta_ttl'])
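on_ai_policy only accepts parameters that already exist in the personality config, logging each effective change; unknown keys are rejected rather than silently added. The guard as a standalone function (a sketch of the hunk's logic, not code lifted from the project):

import logging


def apply_policy(personality, new_params):
    # accept only keys already present in the personality dict;
    # log each effective change, reject everything else
    for name, value in new_params.items():
        if name not in personality:
            logging.error("[AI] param %s not in personality configuration!" % name)
            continue
        if personality[name] != value:
            logging.info("[AI] ! %s: %s -> %s" % (name, personality[name], value))
            personality[name] = value

After the loop, the hunk pushes the resulting ap_ttl and sta_ttl values down to bettercap via 'set wifi.ap.ttl' and 'set wifi.sta.ttl'.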
@@ -152,12 +152,12 @@ class AsyncTrainer(object):
         plugins.on('ai_ready', self)
 
     def on_ai_best_reward(self, r):
-        logging.info("[ai] best reward so far: %s" % r)
+        logging.info("[AI] best reward so far: %s" % r)
         self._view.on_motivated(r)
         plugins.on('ai_best_reward', self, r)
 
     def on_ai_worst_reward(self, r):
-        logging.info("[ai] worst reward so far: %s" % r)
+        logging.info("[AI] worst reward so far: %s" % r)
         self._view.on_demotivated(r)
         plugins.on('ai_worst_reward', self, r)
 
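Every hunk in this commit makes the same edit: the [ai] tag is rewritten to [AI] inside each individual format string. A hypothetical module-level constant would reduce any future rename to a one-line change (a sketch of an alternative, not how the codebase is actually organized):

import logging

# hypothetical: the real code inlines the tag into every format string
LOG_TAG = "[AI]"


def ai_log(msg):
    logging.info("%s %s" % (LOG_TAG, msg))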
@@ -174,7 +174,7 @@ class AsyncTrainer(object):
             self._model.env.render()
             # enter in training mode?
             if random.random() > self._config['ai']['laziness']:
-                logging.info("[ai] learning for %d epochs ..." % epochs_per_episode)
+                logging.info("[AI] learning for %d epochs ..." % epochs_per_episode)
                 try:
                     self.set_training(True, epochs_per_episode)
                     # back up brain file before starting new training set
@@ -184,7 +184,7 @@ class AsyncTrainer(object):
                     self._view.set("mode", " AI")
                     self._model.learn(total_timesteps=epochs_per_episode, callback=self.on_ai_training_step)
                 except Exception as e:
-                    logging.exception("[ai] error while training (%s)", e)
+                    logging.exception("[AI] error while training (%s)", e)
                 finally:
                     self.set_training(False)
                     obs = self._model.env.reset()
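The laziness gate reads inverted at first glance: random.random() > laziness enters training with probability 1 - laziness, so a higher laziness value skips more episodes. The gate plus the try/except/finally guard, sketched with set_training and learn as hypothetical stand-ins for the methods used in the hunks:

import logging
import random


def maybe_train(laziness, epochs, set_training, learn):
    # train with probability 1 - laziness
    if random.random() > laziness:
        logging.info("[AI] learning for %d epochs ..." % epochs)
        try:
            set_training(True)
            learn(epochs)
        except Exception as e:
            logging.exception("[AI] error while training (%s)", e)
        finally:
            # mirrors the hunk: training mode is cleared even if learn() raised
            set_training(False)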
@@ -348,6 +348,8 @@ class FixServices(plugins.Plugin):
                 logging.info("[Fix_Services] wlan0mon didn't make it. trying again")
                 if not display:
                     print(" wlan0mon didn't make it. trying again")
+                else:
+                    pwnagotchi.reboot()
 
         # exited the loop, so hopefully it loaded
         if tries < 3:
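The two added lines give the failure path an escalation: when retrying still hasn't brought wlan0mon up, the plugin can now fall back to pwnagotchi.reboot() instead of only reporting the failure. A generic retry-with-escalation loop, sketched with hypothetical bring_up and is_up helpers (the plugin's real checks and branching differ):

import time


def ensure_interface(bring_up, is_up, max_tries=3, reboot=None):
    # bounded retry: attempt recovery a few times before escalating
    for _ in range(max_tries):
        bring_up()
        time.sleep(3)
        if is_up():
            return True  # exited the loop, so hopefully it loaded
    if reboot is not None:
        reboot()  # last resort, as with pwnagotchi.reboot() in the hunk
    return False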