TensorFlow 2 / Probability: ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT)

user8270077

This is a mysterious error to me, and it keeps holding me up.

For a reproducible example, the Jupyter Notebook can be found here: https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter5_LossFunctions/Ch5_LossFunctions_TFP.ipynb (Chapter 5, Loss Functions).

Conveniently, in this example the data is artificial and is constructed on the fly.

The part of the code that causes the problem is the following (I am running TensorFlow 2):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Code for creating artificial "dummy" data
# This is a common strategy for testing our models
# before applying it to real-world data

num_data = 100
X_data = (0.025 * tfd.Normal(loc=0.,scale=1.).sample(sample_shape=num_data))
Y_data = (0.5 * X_data + 0.01 * tfd.Normal(loc=0.,scale=1.).sample(sample_shape=num_data))

tf_var_data = tf.nn.moments(X_data, axes=0)[1]
covar = tfp.stats.covariance(X_data,Y_data, sample_axis=0, event_axis=None)
ls_coef = covar / tf_var_data

[
    X_data_, Y_data_, ls_coef_,
] = [
    X_data.numpy(), Y_data.numpy(), ls_coef.numpy(),
]

ls_intercept_ = Y_data_.mean() - ls_coef_ * X_data_.mean()

obs_stdev = tf.sqrt(
        tf.reduce_mean(tf.math.squared_difference(Y_data_, tf.reduce_mean(Y_data_, axis=0)),
                      axis=0))

# Let's define the log probability of the bayesian regression function
def finance_posterior_log_prob(X_data_, Y_data_, alpha, beta, sigma):
    """
    Our posterior log probability, as a function of states

    Args:
      alpha: scalar, taken from state of the HMC
      beta: scalar, taken from state of the HMC
      sigma: scalar, the standard deviation of the observation noise, taken from state of the HMC
    Returns: 
      Scalar sum of log probabilities
    Closure over: Y_data, X_data
    """
    rv_std = tfd.Uniform(name="std", low=0., high=100.)
    rv_beta = tfd.Normal(name="beta", loc=0., scale=100.)
    rv_alpha = tfd.Normal(name="alpha", loc=0., scale=100.)

    mean = alpha + beta * X_data_
    rv_observed = tfd.Normal(name="obs", loc=mean, scale=sigma)

    return (
        rv_alpha.log_prob(alpha) 
        + rv_beta.log_prob(beta) 
        + rv_std.log_prob(sigma)
        + tf.reduce_sum(rv_observed.log_prob(Y_data_))
    )

number_of_steps = 30000
burnin = 5000

# Set the chain's start state.
initial_chain_state = [
    tf.cast(1.,dtype=tf.float32) * tf.ones([], name='init_alpha', dtype=tf.float32),
    tf.cast(0.01,dtype=tf.float32) * tf.ones([], name='init_beta', dtype=tf.float32),
    tf.cast(obs_stdev,dtype=tf.float32) * tf.ones([], name='init_sigma', dtype=tf.float32)
]

# Since HMC operates over unconstrained space, we need to transform the
# samples so they live in real-space.
# Beta and sigma are 100x and 10x of alpha, approximately, so apply Affine scalar bijector
# to multiply the unconstrained beta and sigma by 100x and 10x to get back to 
# the problem space
unconstraining_bijectors = [
    tfp.bijectors.Identity(), #alpha
    tfp.bijectors.Shift(100.), #beta
    tfp.bijectors.Scale(10.),  #sigma
]

# Define a closure over our joint_log_prob.
unnormalized_posterior_log_prob = lambda *args: finance_posterior_log_prob(X_data_, Y_data_, *args)

step_size = 0.5

# Defining the HMC
kernel=tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_posterior_log_prob,
        num_leapfrog_steps=2,
        step_size=step_size,
        state_gradients_are_stopped=True),        
    bijector=unconstraining_bijectors)

kernel = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=kernel, num_adaptation_steps=int(burnin * 0.8))

# Sampling from the chain.
[
    alpha, 
    beta, 
    sigma
], kernel_results = tfp.mcmc.sample_chain(
    num_results = number_of_steps,
    num_burnin_steps = burnin,
    current_state=initial_chain_state,
    kernel=kernel,
    name='HMC_sampling'
) 

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-63-b2e46a99062a> in <module>
     21     current_state=initial_chain_state,
     22     kernel=kernel,
---> 23     name='HMC_sampling'
     24 ) 
     25 

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in sample_chain(num_results, current_state, previous_kernel_results, kernel, num_burnin_steps, num_steps_between_results, trace_fn, return_final_kernel_results, parallel_iterations, name)
    357                                             trace_fn(*state_and_results)),
    358         # pylint: enable=g-long-lambda
--> 359         parallel_iterations=parallel_iterations)
    360 
    361     if return_final_kernel_results:

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in trace_scan(loop_fn, initial_state, elems, trace_fn, parallel_iterations, name)
    393         body=_body,
    394         loop_vars=(0, initial_state, trace_arrays),
--> 395         parallel_iterations=parallel_iterations)
    396 
    397     stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
    572                   func.__module__, arg_name, arg_value, 'in a future version'
    573                   if date is None else ('after %s' % date), instructions)
--> 574       return func(*args, **kwargs)
    575 
    576     doc = _add_deprecated_arg_value_notice_to_docstring(

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
   2489       name=name,
   2490       maximum_iterations=maximum_iterations,
-> 2491       return_same_structure=True)
   2492 
   2493 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
   2725                                               list(loop_vars))
   2726       while cond(*loop_vars):
-> 2727         loop_vars = body(*loop_vars)
   2728         if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
   2729           packed = True

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in _body(i, state, trace_arrays)
    382 
    383     def _body(i, state, trace_arrays):
--> 384       state = loop_fn(state, elems_array.read(i))
    385       trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [
    386           a.write(i, v) for a, v in zip(

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in _trace_scan_fn(state_and_results, num_steps)
    341           body_fn=kernel.one_step,
    342           initial_loop_vars=list(state_and_results),
--> 343           parallel_iterations=parallel_iterations)
    344       return next_state, current_kernel_results
    345 

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in smart_for_loop(loop_num_iter, body_fn, initial_loop_vars, parallel_iterations, name)
    315           body=lambda i, *args: [i + 1] + list(body_fn(*args)),
    316           loop_vars=[np.int32(0)] + initial_loop_vars,
--> 317           parallel_iterations=parallel_iterations
    318       )[1:]
    319     result = initial_loop_vars

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
    572                   func.__module__, arg_name, arg_value, 'in a future version'
    573                   if date is None else ('after %s' % date), instructions)
--> 574       return func(*args, **kwargs)
    575 
    576     doc = _add_deprecated_arg_value_notice_to_docstring(

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
   2489       name=name,
   2490       maximum_iterations=maximum_iterations,
-> 2491       return_same_structure=True)
   2492 
   2493 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
   2725                                               list(loop_vars))
   2726       while cond(*loop_vars):
-> 2727         loop_vars = body(*loop_vars)
   2728         if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
   2729           packed = True

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in <lambda>(i, *args)
    313       return tf.while_loop(
    314           cond=lambda i, *args: i < loop_num_iter,
--> 315           body=lambda i, *args: [i + 1] + list(body_fn(*args)),
    316           loop_vars=[np.int32(0)] + initial_loop_vars,
    317           parallel_iterations=parallel_iterations

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\simple_step_size_adaptation.py in one_step(self, current_state, previous_kernel_results)
    378         reduced_log_accept_prob = reduce_logmeanexp(
    379             log_accept_prob,
--> 380             axis=prefer_static.range(num_reduce_dims))
    381         # reduced_log_accept_prob must broadcast into step_size_part on the
    382         # left, so we do an additional reduction over dimensions where their

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\math\generic.py in reduce_logmeanexp(input_tensor, axis, keepdims, name)
    109     lse = tf.reduce_logsumexp(input_tensor, axis=axis, keepdims=keepdims)
    110     n = prefer_static.size(input_tensor) // prefer_static.size(lse)
--> 111     log_n = tf.math.log(tf.cast(n, lse.dtype))
    112     return lse - log_n
    113 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\math_ops.py in cast(x, dtype, name)
    746       # allows some conversions that cast() can't do, e.g. casting numbers to
    747       # strings.
--> 748       x = ops.convert_to_tensor(x, name="x")
    749       if x.dtype.base_dtype != base_type:
    750         x = gen_math_ops.cast(x, base_type, name=name)

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
   1348 
   1349     if ret is None:
-> 1350       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1351 
   1352     if ret is NotImplemented:

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
     50 def _default_conversion_function(value, dtype, name, as_ref):
     51   del as_ref  # Unused.
---> 52   return constant_op.constant(value, dtype, name=name)
     53 
     54 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in constant(value, dtype, shape, name)
    256   """
    257   return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 258                         allow_broadcast=True)
    259 
    260 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    264   ctx = context.context()
    265   if ctx.executing_eagerly():
--> 266     t = convert_to_eager_tensor(value, ctx, dtype)
    267     if shape is None:
    268       return t

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     94       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     95   ctx.ensure_initialized()
---> 96   return ops.EagerTensor(value, ctx.device_name, dtype)
     97 
     98 

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT).
Wendong

The problem seems to come from:

kernel = tfp.mcmc.SimpleStepSizeAdaptation(inner_kernel=kernel, num_adaptation_steps=int(burnin * 0.8))

I ran into the same error in another, similar example. If you skip this line, it works.
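A minimal sketch of that workaround, using only the code from the question: drop the SimpleStepSizeAdaptation wrapper and pass the TransformedTransitionKernel to sample_chain directly. Note that without the adaptation wrapper the step size stays fixed at whatever value you set (0.5 above), so it may need manual tuning.

# Workaround sketch: no SimpleStepSizeAdaptation wrapper.
kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_posterior_log_prob,
        num_leapfrog_steps=2,
        step_size=step_size,
        state_gradients_are_stopped=True),
    bijector=unconstraining_bijectors)

# Sample with the transformed kernel directly.
[alpha, beta, sigma], kernel_results = tfp.mcmc.sample_chain(
    num_results=number_of_steps,
    num_burnin_steps=burnin,
    current_state=initial_chain_state,
    kernel=kernel,
    name='HMC_sampling')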
