def publish(self):
    # Finish if inactive
    if not self._active:
        return

    # Process network events.
    self.client.loop()

    # Wait if backoff is required.
    if Publisher.should_backoff:
        # If backoff time is too large, give up.
        if Publisher.minimum_backoff_time > MAXIMUM_BACKOFF_TIME:
            print('Exceeded maximum backoff time. Giving up.')
            return
        # Otherwise, wait and connect again.
        delay = (Publisher.minimum_backoff_time +
                 random.randint(0, 1000) / 1000.0)
        print('Waiting for {} before reconnecting.'.format(delay))
        time.sleep(delay)
        Publisher.minimum_backoff_time *= 2
        self.client.connect(self.mqtt_bridge_hostname, self.mqtt_bridge_port)

    # Refresh token if JWT IAT has expired.
    seconds_since_issue = (datetime.datetime.utcnow() - self._jwt_iat).seconds
    if seconds_since_issue > 60 * self.jwt_exp_mins:
        print('Refreshing token after {}s'.format(seconds_since_issue))
        self._jwt_iat = datetime.datetime.utcnow()
        self.client = self.get_client()

    # Generate payload
    d, t = self._data[self._count]
    Publisher.rotate_message(self._msg, d, t)
    payload = json.dumps(self._msg).encode('utf-8')

    # Publish "payload" to the MQTT topic. qos=1 means at least once
    # delivery. Cloud IoT Core also supports qos=0 for at most once
    # delivery.
    self.client.publish(self._mqtt_topic, payload, qos=1)
    self._count += 1
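For context, Publisher.should_backoff and Publisher.minimum_backoff_time are the flags that drive the exponential-backoff logic above; in Cloud IoT Core's MQTT samples they are flipped by the Paho client's connection callbacks. The sketch below shows what such callbacks could look like on this class; the method names, the registration inside get_client(), and the 1-second reset value are assumptions for illustration, not code from the demo.

def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
    # Connected (or reconnected) successfully: stop backing off and
    # reset the backoff window to its minimum.
    print('on_connect: rc={}'.format(rc))
    Publisher.should_backoff = False
    Publisher.minimum_backoff_time = 1

def on_disconnect(self, unused_client, unused_userdata, rc):
    # Connection dropped: the next publish() call will wait and reconnect.
    print('on_disconnect: rc={}'.format(rc))
    Publisher.should_backoff = True

# Hypothetical registration, e.g. inside get_client():
#     client.on_connect = self.on_connect
#     client.on_disconnect = self.on_disconnect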
The demo's source is organized as two notebooks, EnergyDisaggregationDemo_Client.ipynb and EnergyDisaggregationDemo_View.ipynb, together with a README.
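On the training side, the input pipeline is built with the tf.data API: it reads the aggregate active-power signal and the per-appliance status flags from CSV and slices them into sliding windows of seq_len readings.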
def _mk_data(*argv):
    data = {'ActivePower_{}'.format(i+1): x
            for i, x in enumerate(tf.split(argv[0], seq_len))}
    flags = [tf.split(x, seq_len)[-1][0] for x in argv[1:]]
    return (data, tf.cast(tf.stack(flags), dtype=tf.uint8))

record_defaults = [tf.float64,] + [tf.int32] * (len(cols) - 1)
dataset = tf.contrib.data.CsvDataset(
    [data_file,], record_defaults, header=True, select_cols=cols)
dataset = dataset.apply(
    tf.contrib.data.sliding_window_batch(window_size=seq_len))
dataset = dataset.map(_mk_data, num_parallel_calls=os.cpu_count())
The whole pipeline stays inside tf.data (via tf.contrib.data in TensorFlow 1.x): each dataset element is a dict of seq_len ActivePower features plus a uint8 vector of per-appliance on/off flags taken from the last time step of the window.
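As a quick sanity check (TF 1.x graph mode), the dataset can be pulled through a one-shot iterator at this point; this snippet is purely illustrative and assumes data_file, cols, and seq_len are defined as above.

iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
with tf.Session() as sess:
    f, l = sess.run([features, labels])
    # f is a dict with one 'ActivePower_i' entry per time step in the window;
    # l is a uint8 vector with one on/off flag per appliance column.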
def _filter_data(data, labels):
    rand_num = tf.random_uniform([], 0, 1, dtype=tf.float64)
    thresh = tf.constant(filter_prob, dtype=tf.float64, shape=[])
    is_all_zero = tf.equal(tf.reduce_sum(labels), 0)
    return tf.logical_or(tf.logical_not(is_all_zero),
                         tf.less(rand_num, thresh))

if train_flag:
    dataset = dataset.filter(_filter_data)
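Most windows contain no appliance activity at all, so the filter keeps every window in which at least one appliance is on, and keeps an all-zero window only with probability filter_prob. This downsampling of the dominant negative class is applied to the training set only (train_flag).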
if shuffle:
    dataset = dataset.apply(
        tf.contrib.data.shuffle_and_repeat(
            buffer_size=batch_size * 10,
            count=num_epochs))
else:
    dataset = dataset.repeat(count=num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=None)
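Because a TF 1.x Estimator's input_fn may simply return a tf.data.Dataset (since TF 1.5), the pipeline above can be handed to an Estimator without an explicit iterator. In the sketch below, make_dataset is a hypothetical wrapper around the code shown above, not a function from the demo.

def train_input_fn():
    # make_dataset is assumed to wrap the tf.data pipeline built above.
    return make_dataset(data_file, batch_size=64, num_epochs=10,
                        shuffle=True, train_flag=True)

The estimator would then be trained with estimator.train(input_fn=train_input_fn).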
The model itself can be written either with the lower-level tf.nn and tf.layers APIs or with tf.keras; both variants are shown below.
# RNN network using multilayer LSTM
cells = [tf.nn.rnn_cell.DropoutWrapper(
             tf.nn.rnn_cell.LSTMCell(params['lstm_size']),
             input_keep_prob=1 - params['dropout_rate'])
         for _ in range(params['num_layers'])]
lstm = tf.nn.rnn_cell.MultiRNNCell(cells)

# Initialize the state of each LSTM cell to zero
state = lstm.zero_state(batch_size, dtype=tf.float32)
outputs, states = tf.nn.dynamic_rnn(cell=lstm,
                                    inputs=tf.expand_dims(seq_data, -1),
                                    initial_state=state,
                                    dtype=tf.float32)

# Flatten the 3D output to 2D
flatten_outputs = tf.layers.Flatten()(outputs)
logits = tf.layers.Dense(params['num_appliances'])(flatten_outputs)
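The same architecture can be expressed more compactly with tf.keras layers: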
# RNN network using multilayer LSTM with the help of Keras
model = keras.Sequential()
for _ in range(params['num_layers']):
    model.add(
        keras.layers.LSTM(params['lstm_size'],
                          dropout=params['dropout_rate'],
                          return_sequences=True)
    )
# Flatten the 3D output to 2D
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(params['num_appliances']))
logits = model(tf.expand_dims(seq_data, -1))
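Neither variant shows the loss. Since each label is an independent per-appliance on/off flag, a multi-label sigmoid cross-entropy head is a natural fit; the sketch below assumes that choice (and a made-up learning_rate entry in params) rather than reproducing the demo's exact training op.

labels_float = tf.cast(labels, tf.float32)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_float, logits=logits))
train_op = tf.train.AdamOptimizer(params.get('learning_rate', 1e-3)).minimize(
    loss, global_step=tf.train.get_or_create_global_step())
# Per-appliance on/off probabilities at inference time:
probabilities = tf.nn.sigmoid(logits)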