diff --git a/examples/quantization/post_train_quant/resnet18_imagenet_post_train.yaml b/examples/quantization/post_train_quant/resnet18_imagenet_post_train.yaml
index ac74ef68b96463042207d766fbce05ef6f6bdb9e..cf91160e996db62dc0171db577d11a7da3519704 100644
--- a/examples/quantization/post_train_quant/resnet18_imagenet_post_train.yaml
+++ b/examples/quantization/post_train_quant/resnet18_imagenet_post_train.yaml
@@ -67,16 +67,18 @@ quantizers:
       conv1:
         # The input to the first layer in the model will never have quantization metadata
         input_overrides:
-          # Shorthand to take the quantization settings of the output (ignores any other settings)
-          from_outputs: True
+          0:
+            # Shorthand to take the quantization settings of the output (ignores any other settings)
+            from_output: True
       fc:
         # In ResNet, the FC layer has a view op before, which kills the quantization metadata. So we have to override
         # (or enable auto_fallback).
         input_overrides:
-          # Example of setting the actual value. Applicable only if 'from_outputs' isn't set.
-          # The following keys are supported: 'bits_activations', 'mode', 'clip_acts', 'clip_n_stds'
-          # Any key not explicitly set will default to the output setting
-          bits_activations: 6
+          0:
+            # Example of setting the actual value. Applicable only if 'from_output' isn't set.
+            # The following keys are supported: 'bits_activations', 'mode', 'clip_acts', 'clip_n_stds'
+            # Any key not explicitly set will default to the output setting
+            bits_activations: 6
 
 # Overrides section for run 3
 # overrides: