dgbpy.dgbkeras

Module Contents

Classes

ProgressBarCallback

ProgressNoBarCallback

BokehProgressCallback

Send progress messages to Bokeh

LogNrOfSamplesCallback

TransformCallback

Functions

hasKeras()

getMLPlatform()

getUIMLPlatform()

can_use_gpu()

get_cpu_preference()

get_keras_infos()

set_compute_device(prefercpu)

use_mixed_precision()

Use this function to set the global policy to mixed precision.

getParams(dodec=keras_dict[dgbkeys.decimkeystr], nbchunk=keras_dict['nbchunk'], epochs=keras_dict['epochs'], batch=keras_dict['batch'], patience=keras_dict['patience'], learnrate=keras_dict['learnrate'], epochdrop=keras_dict['epochdrop'], nntype=keras_dict['type'], prefercpu=keras_dict['prefercpu'], transform=keras_dict['transform'], validation_split=keras_dict['split'], nbfold=keras_dict['nbfold'], scale=keras_dict['scale'], withtensorboard=keras_dict['withtensorboard'], tofp16=keras_dict['tofp16'])

adaptive_schedule(initial_lrate=keras_dict['learnrate'], epochs_drop=keras_dict['epochdrop'])

get_data_format(model)

hasValidCubeletShape(cubeszs)

getCubeletShape(model)

get_model_shape(shape, nrattribs, attribfirst=True)

getModelDims(model_shape, data_format)

getModelsByType(learntype, classification, ndim)

getModelsByInfo(infos)

getDefaultModel(setup, type=keras_dict['type'], learnrate=keras_dict['learnrate'], data_format='channels_first')

hasFastprogress()

epoch0endCB(epoch, logs)

init_callbacks(monitor, params, logdir, silent, custom_config, cbfn=None)

train(model, training, params=keras_dict, trainfile=None, silent=False, cbfn=None, logdir=None, tempnm=None)

updateModelShape(infos, model, forinput)

save(model, outfnm)

load(modelfnm, fortrain, infos=None, pars=keras_dict)

transfer(model)

Transfer learning utility function for fine-tuning a Keras model.

apply(model, samples, isclassification, withpred, withprobs, withconfidence, doprobabilities, dictinpshape=None, scaler=None, batch_size=None)

adaptToModel(model, samples, dictinpshape=None, sample_data_format='channels_first')

adaptFromModel(model, samples, inp_shape, ret_data_format)

plot(model, outfnm, showshapes=True, withlaynames=False, vertical=True)

compute_capability_from_device_desc(device_desc)

getDevicesInfo(gpusonly=True)

is_gpu_ready()

is_mixed_precision_compatible(min_version=(7, 0))

need_channels_last()

get_validation_data(trainseq)

Attributes

withtensorboard

platform

cudacores

defbatchstr

keras_infos

default_transforms

keras_dict

dgbpy.dgbkeras.hasKeras()
dgbpy.dgbkeras.withtensorboard
dgbpy.dgbkeras.platform
dgbpy.dgbkeras.cudacores = ['1', '2', '4', '8', '16', '32', '48', '64', '96', '128', '144', '192', '256', '288', '384',...
dgbpy.dgbkeras.getMLPlatform()
dgbpy.dgbkeras.getUIMLPlatform()
dgbpy.dgbkeras.defbatchstr = defaultbatchsz
dgbpy.dgbkeras.keras_infos
dgbpy.dgbkeras.default_transforms = []
dgbpy.dgbkeras.keras_dict
dgbpy.dgbkeras.can_use_gpu()
dgbpy.dgbkeras.get_cpu_preference()
dgbpy.dgbkeras.get_keras_infos()
dgbpy.dgbkeras.set_compute_device(prefercpu)
dgbpy.dgbkeras.use_mixed_precision()

Use this function to set the global policy to mixed precision.

dgbpy.dgbkeras.getParams(dodec=keras_dict[dgbkeys.decimkeystr], nbchunk=keras_dict['nbchunk'], epochs=keras_dict['epochs'], batch=keras_dict['batch'], patience=keras_dict['patience'], learnrate=keras_dict['learnrate'], epochdrop=keras_dict['epochdrop'], nntype=keras_dict['type'], prefercpu=keras_dict['prefercpu'], transform=keras_dict['transform'], validation_split=keras_dict['split'], nbfold=keras_dict['nbfold'], scale=keras_dict['scale'], withtensorboard=keras_dict['withtensorboard'], tofp16=keras_dict['tofp16'])
dgbpy.dgbkeras.adaptive_schedule(initial_lrate=keras_dict['learnrate'], epochs_drop=keras_dict['epochdrop'])
dgbpy.dgbkeras.get_data_format(model)
dgbpy.dgbkeras.hasValidCubeletShape(cubeszs)
dgbpy.dgbkeras.getCubeletShape(model)
dgbpy.dgbkeras.get_model_shape(shape, nrattribs, attribfirst=True)
dgbpy.dgbkeras.getModelDims(model_shape, data_format)
dgbpy.dgbkeras.getModelsByType(learntype, classification, ndim)
dgbpy.dgbkeras.getModelsByInfo(infos)
dgbpy.dgbkeras.getDefaultModel(setup, type=keras_dict['type'], learnrate=keras_dict['learnrate'], data_format='channels_first')
dgbpy.dgbkeras.hasFastprogress()
class dgbpy.dgbkeras.ProgressBarCallback(config)

Bases: keras.callbacks.Callback

on_train_begin(self, logs=None)
on_epoch_begin(self, epoch, logs=None)
on_epoch_end(self, epoch, logs=None)
on_test_begin(self, logs=None)
on_train_batch_end(self, batch, logs=None)
on_test_batch_end(self, batch, logs=None)
on_train_end(self, logs=None)
class dgbpy.dgbkeras.ProgressNoBarCallback(config)

Bases: keras.callbacks.Callback

on_train_begin(self, logs=None)
on_epoch_begin(self, epoch, logs=None)
on_epoch_end(self, epoch, logs=None)
class dgbpy.dgbkeras.BokehProgressCallback(config)

Bases: keras.callbacks.Callback

Send progress messages to Bokeh

on_train_begin(self, logs=None)
on_epoch_begin(self, epoch, logs=None)
on_epoch_end(self, epoch, logs=None)
on_train_batch_begin(self, batch, logs=None)
on_test_batch_begin(self, batch, logs=None)
on_train_batch_end(self, batch, logs=None)
on_test_batch_end(self, batch, logs=None)
on_train_begin_chunk(self)
on_train_begin_fold(self)
class dgbpy.dgbkeras.LogNrOfSamplesCallback(config)

Bases: keras.callbacks.Callback

on_train_begin(self, logs=None)
on_train_begin_fold(self)
class dgbpy.dgbkeras.TransformCallback(config)

Bases: keras.callbacks.Callback

on_epoch_begin(self, epoch, logs=None)
dgbpy.dgbkeras.epoch0endCB(epoch, logs)
dgbpy.dgbkeras.init_callbacks(monitor, params, logdir, silent, custom_config, cbfn=None)
dgbpy.dgbkeras.train(model, training, params=keras_dict, trainfile=None, silent=False, cbfn=None, logdir=None, tempnm=None)
dgbpy.dgbkeras.updateModelShape(infos, model, forinput)
dgbpy.dgbkeras.save(model, outfnm)
dgbpy.dgbkeras.load(modelfnm, fortrain, infos=None, pars=keras_dict)
dgbpy.dgbkeras.transfer(model)

Transfer learning utility function for fine-tuning a Keras model.

This function takes a Keras model and prepares it for transfer learning by selectively setting layers to be trainable. The layers to be made trainable are determined as follows:

  1. All layers before the first Conv1D, Conv2D, or Conv3D layer (or a Sequential containing such layers) are set to trainable.

  2. All layers after the last Conv1D, Conv2D, Conv3D, or Dense layer (or a Sequential containing such layers) are set to trainable.

  3. All layers between the first and last Conv1D, Conv2D, Conv3D, or Dense layer (or a Sequential containing such layers) are set to non-trainable.

dgbpy.dgbkeras.apply(model, samples, isclassification, withpred, withprobs, withconfidence, doprobabilities, dictinpshape=None, scaler=None, batch_size=None)
dgbpy.dgbkeras.adaptToModel(model, samples, dictinpshape=None, sample_data_format='channels_first')
dgbpy.dgbkeras.adaptFromModel(model, samples, inp_shape, ret_data_format)
dgbpy.dgbkeras.plot(model, outfnm, showshapes=True, withlaynames=False, vertical=True)
dgbpy.dgbkeras.compute_capability_from_device_desc(device_desc)
dgbpy.dgbkeras.getDevicesInfo(gpusonly=True)
dgbpy.dgbkeras.is_gpu_ready()
dgbpy.dgbkeras.is_mixed_precision_compatible(min_version=(7, 0))
dgbpy.dgbkeras.need_channels_last()
dgbpy.dgbkeras.get_validation_data(trainseq)