Need help debugging an XFS crash: xfs_iunlink_remove: xfs_inotobp() returned error 22
符永涛
yongtaofu at gmail.com
Fri Apr 12 00:23:05 CDT 2013
Listing the XFS tracepoints available via SystemTap; the first two greps
(xfs_iunlink, xfs_ifree) return no matches, and the full xfs tracepoint
listing follows:

sudo stap -L 'kernel.trace("*")'|grep xfs_iunlink
sudo stap -L 'kernel.trace("*")'|grep xfs_ifree
sudo stap -L 'kernel.trace("*")'|grep xfs
kernel.trace("xfs_agf") $mp:struct xfs_mount* $agf:struct xfs_agf*
$flags:int $caller_ip:long unsigned int
kernel.trace("xfs_alloc_busy") $mp:struct xfs_mount* $agno:xfs_agnumber_t
$agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_alloc_busy_clear") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_alloc_busy_enomem") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_alloc_busy_force") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_alloc_busy_reuse") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_alloc_busy_trim") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
$tbno:xfs_agblock_t $tlen:xfs_extlen_t
kernel.trace("xfs_alloc_exact_done") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_exact_error") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_exact_notfound") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_file_space") $ip:struct xfs_inode*
kernel.trace("xfs_alloc_near_busy") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_error") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_first") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_greater") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_lesser") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_noentry") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_near_nominleft") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_busy") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_done") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_error") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_neither") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_noentry") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_size_nominleft") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_small_done") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_small_error") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_small_freelist") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_small_notenough") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_vextent_allfailed") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_vextent_badargs") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_vextent_loopfailed") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_vextent_noagbp") $args:struct xfs_alloc_arg*
kernel.trace("xfs_alloc_vextent_nofix") $args:struct xfs_alloc_arg*
kernel.trace("xfs_attr_list_add") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_full") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_leaf") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_leaf_end") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_node_descend") $ctx:struct
xfs_attr_list_context* $btree:struct xfs_da_node_entry*
kernel.trace("xfs_attr_list_notfound") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_sf") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_sf_all") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_attr_list_wrong_blk") $ctx:struct xfs_attr_list_context*
kernel.trace("xfs_bdstrat_shut") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_bmap_post_update") $ip:struct xfs_inode*
$idx:xfs_extnum_t $state:int $caller_ip:long unsigned int
kernel.trace("xfs_bmap_pre_update") $ip:struct xfs_inode* $idx:xfs_extnum_t
$state:int $caller_ip:long unsigned int
kernel.trace("xfs_btree_corrupt") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_bawrite") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_bdwrite") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_cond_lock") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_delwri_dequeue") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_delwri_queue") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_delwri_split") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_error_relse") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_find") $bp:struct xfs_buf* $flags:unsigned int
$caller_ip:long unsigned int
kernel.trace("xfs_buf_free") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_get") $bp:struct xfs_buf* $flags:unsigned int
$caller_ip:long unsigned int
kernel.trace("xfs_buf_get_uncached") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_hold") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_init") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_iodone") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_ioerror") $bp:struct xfs_buf* $error:int
$caller_ip:long unsigned int
kernel.trace("xfs_buf_iorequest") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_iowait") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_iowait_done") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_item_committed") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_format") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_format_stale") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_iodone") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_item_iodone_async") $bp:struct xfs_buf*
$caller_ip:long unsigned int
kernel.trace("xfs_buf_item_pin") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_push") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_pushbuf") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_relse") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_item_size") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_size_stale") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_trylock") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_unlock") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_unlock_stale") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_unpin") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_item_unpin_stale") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_buf_lock") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_lock_done") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_buf_read") $bp:struct xfs_buf* $flags:unsigned int
$caller_ip:long unsigned int
kernel.trace("xfs_buf_rele") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_buf_unlock") $bp:struct xfs_buf* $caller_ip:long unsigned
int
kernel.trace("xfs_bunmap") $ip:struct xfs_inode* $bno:xfs_fileoff_t
$len:xfs_filblks_t $flags:int $caller_ip:long unsigned int
kernel.trace("xfs_check_acl") $ip:struct xfs_inode*
kernel.trace("xfs_clear_inode") $ip:struct xfs_inode*
kernel.trace("xfs_create") $dp:struct xfs_inode* $xfs_create:struct
xfs_name*
kernel.trace("xfs_da_btree_corrupt") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_delalloc_enospc") $ip:struct xfs_inode* $offset:xfs_off_t
$count:ssize_t
kernel.trace("xfs_destroy_inode") $ip:struct xfs_inode*
kernel.trace("xfs_dir2_block_addname") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_block_lookup") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_block_removename") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_block_replace") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_block_to_leaf") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_block_to_sf") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_grow_inode") $args:struct xfs_da_args* $idx:int
kernel.trace("xfs_dir2_leaf_addname") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leaf_lookup") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leaf_removename") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leaf_replace") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leaf_to_block") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leaf_to_node") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_leafn_add") $args:struct xfs_da_args* $idx:int
kernel.trace("xfs_dir2_leafn_moveents") $args:struct xfs_da_args*
$src_idx:int $dst_idx:int $count:int
kernel.trace("xfs_dir2_leafn_remove") $args:struct xfs_da_args* $idx:int
kernel.trace("xfs_dir2_node_addname") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_node_lookup") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_node_removename") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_node_replace") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_node_to_leaf") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_addname") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_create") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_lookup") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_removename") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_replace") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_to_block") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_toino4") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_sf_toino8") $args:struct xfs_da_args*
kernel.trace("xfs_dir2_shrink_inode") $args:struct xfs_da_args* $idx:int
kernel.trace("xfs_discard_busy") $mp:struct xfs_mount* $agno:xfs_agnumber_t
$agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_discard_exclude") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_discard_extent") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_discard_toosmall") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $agbno:xfs_agblock_t $len:xfs_extlen_t
kernel.trace("xfs_dqadjust") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqalloc") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqattach_found") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqattach_get") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqflush") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqflush_done") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqflush_force") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqget_hit") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqget_miss") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqinit") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqlookup_done") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqlookup_found") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqlookup_freelist") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqlookup_want") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqput") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqput_free") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqput_wait") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqread") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqread_fail") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqreclaim_dirty") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqreclaim_unlink") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqreclaim_want") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqrele") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqreuse") $dqp:struct xfs_dquot*
kernel.trace("xfs_dqtobp_read") $dqp:struct xfs_dquot*
kernel.trace("xfs_dquot_dqalloc") $ip:struct xfs_inode*
kernel.trace("xfs_dquot_dqdetach") $ip:struct xfs_inode*
kernel.trace("xfs_extlist") $ip:struct xfs_inode* $idx:xfs_extnum_t
$state:int $caller_ip:long unsigned int
kernel.trace("xfs_file_buffered_write") $ip:struct xfs_inode* $count:size_t
$offset:loff_t $flags:int
kernel.trace("xfs_file_compat_ioctl") $ip:struct xfs_inode*
kernel.trace("xfs_file_direct_write") $ip:struct xfs_inode* $count:size_t
$offset:loff_t $flags:int
kernel.trace("xfs_file_fsync") $ip:struct xfs_inode*
kernel.trace("xfs_file_ioctl") $ip:struct xfs_inode*
kernel.trace("xfs_file_read") $ip:struct xfs_inode* $count:size_t
$offset:loff_t $flags:int
kernel.trace("xfs_file_splice_read") $ip:struct xfs_inode* $count:size_t
$offset:loff_t $flags:int
kernel.trace("xfs_file_splice_write") $ip:struct xfs_inode* $count:size_t
$offset:loff_t $flags:int
kernel.trace("xfs_free_extent") $mp:struct xfs_mount* $agno:xfs_agnumber_t
$agbno:xfs_agblock_t $len:xfs_extlen_t $isfl:bool $haveleft:int
$haveright:int
kernel.trace("xfs_free_file_space") $ip:struct xfs_inode*
kernel.trace("xfs_get_blocks_alloc") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t $type:int $irec:struct xfs_bmbt_irec*
kernel.trace("xfs_get_blocks_found") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t $type:int $irec:struct xfs_bmbt_irec*
kernel.trace("xfs_get_blocks_notfound") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t
kernel.trace("xfs_getattr") $ip:struct xfs_inode*
kernel.trace("xfs_iext_insert") $ip:struct xfs_inode* $idx:xfs_extnum_t
$r:struct xfs_bmbt_irec* $state:int $caller_ip:long unsigned int
kernel.trace("xfs_iext_remove") $ip:struct xfs_inode* $idx:xfs_extnum_t
$state:int $caller_ip:long unsigned int
kernel.trace("xfs_iget_hit") $ip:struct xfs_inode*
kernel.trace("xfs_iget_miss") $ip:struct xfs_inode*
kernel.trace("xfs_iget_reclaim") $ip:struct xfs_inode*
kernel.trace("xfs_iget_reclaim_fail") $ip:struct xfs_inode*
kernel.trace("xfs_iget_skip") $ip:struct xfs_inode*
kernel.trace("xfs_ihold") $ip:struct xfs_inode* $caller_ip:long unsigned int
kernel.trace("xfs_ilock") $ip:struct xfs_inode* $lock_flags:unsigned int
$caller_ip:long unsigned int
kernel.trace("xfs_ilock_demote") $ip:struct xfs_inode* $lock_flags:unsigned
int $caller_ip:long unsigned int
kernel.trace("xfs_ilock_nowait") $ip:struct xfs_inode* $lock_flags:unsigned
int $caller_ip:long unsigned int
kernel.trace("xfs_inode_item_push") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_inode_pin") $ip:struct xfs_inode* $caller_ip:long
unsigned int
kernel.trace("xfs_inode_unpin") $ip:struct xfs_inode* $caller_ip:long
unsigned int
kernel.trace("xfs_inode_unpin_nowait") $ip:struct xfs_inode*
$caller_ip:long unsigned int
kernel.trace("xfs_invalidatepage") $inode:struct inode* $page:struct page*
$off:long unsigned int
kernel.trace("xfs_ioctl_setattr") $ip:struct xfs_inode*
kernel.trace("xfs_irele") $ip:struct xfs_inode* $caller_ip:long unsigned int
kernel.trace("xfs_itruncate_finish_end") $ip:struct xfs_inode*
$new_size:xfs_fsize_t
kernel.trace("xfs_itruncate_finish_start") $ip:struct xfs_inode*
$new_size:xfs_fsize_t
kernel.trace("xfs_itruncate_start") $ip:struct xfs_inode*
$new_size:xfs_fsize_t $flag:int $toss_start:xfs_off_t $toss_finish:xfs_off_t
kernel.trace("xfs_iunlock") $ip:struct xfs_inode* $lock_flags:unsigned int
$caller_ip:long unsigned int
kernel.trace("xfs_link") $dp:struct xfs_inode* $xfs_link:struct xfs_name*
kernel.trace("xfs_log_done_nonperm") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_done_perm") $log:struct log* $tic:struct xlog_ticket*
kernel.trace("xfs_log_grant_enter") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_error") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_exit") $log:struct log* $tic:struct xlog_ticket*
kernel.trace("xfs_log_grant_sleep1") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_sleep2") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_wake1") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_wake2") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_grant_wake_up") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_recover_buf_cancel") $log:struct log* $buf_f:struct
xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_cancel_add") $log:struct log*
$buf_f:struct xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_cancel_ref_inc") $log:struct log*
$buf_f:struct xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_dquot_buf") $log:struct log*
$buf_f:struct xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_inode_buf") $log:struct log*
$buf_f:struct xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_not_cancel") $log:struct log*
$buf_f:struct xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_recover") $log:struct log* $buf_f:struct
xfs_buf_log_format*
kernel.trace("xfs_log_recover_buf_reg_buf") $log:struct log* $buf_f:struct
xfs_buf_log_format*
kernel.trace("xfs_log_recover_inode_cancel") $log:struct log* $in_f:struct
xfs_inode_log_format*
kernel.trace("xfs_log_recover_inode_recover") $log:struct log* $in_f:struct
xfs_inode_log_format*
kernel.trace("xfs_log_recover_inode_skip") $log:struct log* $in_f:struct
xfs_inode_log_format*
kernel.trace("xfs_log_recover_item_add") $log:struct log* $trans:struct
xlog_recover* $item:struct xlog_recover_item* $pass:int
kernel.trace("xfs_log_recover_item_add_cont") $log:struct log*
$trans:struct xlog_recover* $item:struct xlog_recover_item* $pass:int
kernel.trace("xfs_log_recover_item_recover") $log:struct log* $trans:struct
xlog_recover* $item:struct xlog_recover_item* $pass:int
kernel.trace("xfs_log_recover_item_reorder_head") $log:struct log*
$trans:struct xlog_recover* $item:struct xlog_recover_item* $pass:int
kernel.trace("xfs_log_recover_item_reorder_tail") $log:struct log*
$trans:struct xlog_recover* $item:struct xlog_recover_item* $pass:int
kernel.trace("xfs_log_regrant_reserve_enter") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_reserve_exit") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_reserve_sub") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_enter") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_error") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_exit") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_sleep1") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_sleep2") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_wake1") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_wake2") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_regrant_write_wake_up") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_reserve") $log:struct log* $tic:struct xlog_ticket*
kernel.trace("xfs_log_umount_write") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_ungrant_enter") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_ungrant_exit") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_log_ungrant_sub") $log:struct log* $tic:struct
xlog_ticket*
kernel.trace("xfs_lookup") $dp:struct xfs_inode* $xfs_lookup:struct
xfs_name*
kernel.trace("xfs_map_blocks_alloc") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t $type:int $irec:struct xfs_bmbt_irec*
kernel.trace("xfs_map_blocks_found") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t $type:int $irec:struct xfs_bmbt_irec*
kernel.trace("xfs_pagecache_inval") $ip:struct xfs_inode* $start:xfs_off_t
$finish:xfs_off_t
kernel.trace("xfs_perag_clear_reclaim") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $refcount:int $caller_ip:long unsigned int
kernel.trace("xfs_perag_get") $mp:struct xfs_mount* $agno:xfs_agnumber_t
$refcount:int $caller_ip:long unsigned int
kernel.trace("xfs_perag_get_tag") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $refcount:int $caller_ip:long unsigned int
kernel.trace("xfs_perag_put") $mp:struct xfs_mount* $agno:xfs_agnumber_t
$refcount:int $caller_ip:long unsigned int
kernel.trace("xfs_perag_set_reclaim") $mp:struct xfs_mount*
$agno:xfs_agnumber_t $refcount:int $caller_ip:long unsigned int
kernel.trace("xfs_readdir") $ip:struct xfs_inode*
kernel.trace("xfs_readlink") $ip:struct xfs_inode*
kernel.trace("xfs_releasepage") $inode:struct inode* $page:struct page*
$off:long unsigned int
kernel.trace("xfs_remove") $dp:struct xfs_inode* $xfs_remove:struct
xfs_name*
kernel.trace("xfs_rename") $src_dp:struct xfs_inode* $target_dp:struct
xfs_inode* $src_name:struct xfs_name* $target_name:struct xfs_name*
kernel.trace("xfs_reset_dqcounts") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_setattr") $ip:struct xfs_inode*
kernel.trace("xfs_swap_extent_after") $ip:struct xfs_inode* $which:int
kernel.trace("xfs_swap_extent_before") $ip:struct xfs_inode* $which:int
kernel.trace("xfs_symlink") $dp:struct xfs_inode* $xfs_symlink:struct
xfs_name*
kernel.trace("xfs_trans_bhold") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_bhold_release") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_binval") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_bjoin") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_brelse") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_commit_lsn") $trans:struct xfs_trans*
kernel.trace("xfs_trans_get_buf") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_get_buf_recur") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_getsb") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_getsb_recur") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_log_buf") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_read_buf") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_read_buf_io") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_trans_read_buf_recur") $bip:struct xfs_buf_log_item*
kernel.trace("xfs_trans_read_buf_shut") $bp:struct xfs_buf* $caller_ip:long
unsigned int
kernel.trace("xfs_unwritten_convert") $ip:struct xfs_inode*
$offset:xfs_off_t $count:ssize_t
kernel.trace("xfs_vm_bmap") $ip:struct xfs_inode*
kernel.trace("xfs_write_inode") $ip:struct xfs_inode*
kernel.trace("xfs_writepage") $inode:struct inode* $page:struct page*
$off:long unsigned int
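
Since there is no tracepoint for xfs_iunlink or xfs_inotobp, and the
module("xfs") DWARF probes fail without xfs debuginfo (see the quoted
error below), a tracepoint-only probe is one possible fallback. The
following is only a rough sketch and is not the xfs.stp script Brian
attached (that script is not preserved in this archive); the probe names
and argument names ($bp, $error, $ip) come from the stap -L listing above,
and the output format is my own. Note that error 22 is EINVAL.

# xfs-trace-sketch.stp -- minimal tracepoint-based logging, no xfs DWARF needed
probe kernel.trace("xfs_buf_ioerror")
{
        # Log every buffer I/O error with its error code and a kernel
        # backtrace, to catch the path leading to the "returned error 22"
        # report.
        printf("%s xfs_buf_ioerror: error=%d bp=%p\n",
               ctime(gettimeofday_s()), $error, $bp)
        print_backtrace()
}

probe kernel.trace("xfs_iget_reclaim"), kernel.trace("xfs_inode_unpin")
{
        # Lightweight trace of inode reclaim/unpin activity around the failure.
        printf("%s %s: ip=%p\n", ctime(gettimeofday_s()), pn(), $ip)
}

This would be run the same way as Brian's script, e.g. sudo stap -v xfs-trace-sketch.stp
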
2013/4/12 符永涛 <yongtaofu at gmail.com>
> Hi Brian,
> Sorry but when I execute the script it says:
> WARNING: cannot find module xfs debuginfo: No DWARF information found
> semantic error: no match while resolving probe point
> module("xfs").function("xfs_iunlink")
>
> uname -a
> 2.6.32-279.el6.x86_64
> kernel debuginfo has been installed.
>
> Where can I find the correct xfs debuginfo?
>
>
> Thank you for your help.
>
>
> 2013/4/12 Brian Foster <bfoster at redhat.com>
>
>> On 04/11/2013 03:11 PM, 符永涛 wrote:
>> > It happened again tonight on one of our servers; how can we debug the
>> > root cause? Thank you.
>> >
>>
>> Hi,
>>
>> I've attached a SystemTap script (run with stap -v xfs.stp) that should
>> hopefully print out a bit more data should the issue happen again. Do
>> you have a small enough number of nodes (or predictable enough pattern)
>> that you could run this on the nodes that tend to fail and collect the
>> output?
>>
>> Also, could you collect an xfs_metadump of the filesystem in question
>> and make it available for download and analysis somewhere? I believe the
>> ideal approach is to mount/umount the filesystem first to replay the log
>> before collecting a metadump, but somebody could correct me on that (to
>> be safe, you could collect multiple dumps: pre-mount and post-mount).
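
For reference, the collection steps described above might look roughly
like this; /dev/sdb and /mnt/xfsd are taken from the storage details
quoted further down, and the dump file names are only placeholders:

umount /mnt/xfsd                                    # if still mounted
xfs_metadump /dev/sdb /tmp/sdb-pre-mount.metadump   # dump before log replay
mount /dev/sdb /mnt/xfsd                            # mounting replays the log
umount /mnt/xfsd
xfs_metadump /dev/sdb /tmp/sdb-post-mount.metadump  # dump after log replay
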
>>
>> Could you also describe your workload a little bit? Thanks.
>>
>> Brian
>>
>> > Apr 12 02:32:10 cqdx kernel: XFS (sdb): xfs_iunlink_remove:
>> > xfs_inotobp() returned error 22.
>> > Apr 12 02:32:10 cqdx kernel: XFS (sdb): xfs_inactive: xfs_ifree returned
>> > error 22
>> > Apr 12 02:32:10 cqdx kernel: XFS (sdb): xfs_do_force_shutdown(0x1)
>> > called from line 1184 of file fs/xfs/xfs_vnodeops.c. Return address =
>> > 0xffffffffa02ee20a
>> > Apr 12 02:32:10 cqdx kernel: XFS (sdb): I/O Error Detected. Shutting
>> > down filesystem
>> > Apr 12 02:32:10 cqdx kernel: XFS (sdb): Please umount the filesystem and
>> > rectify the problem(s)
>> > Apr 12 02:32:19 cqdx kernel: XFS (sdb): xfs_log_force: error 5 returned.
>> > Apr 12 02:32:49 cqdx kernel: XFS (sdb): xfs_log_force: error 5 returned.
>> > Apr 12 02:33:19 cqdx kernel: XFS (sdb): xfs_log_force: error 5 returned.
>> > Apr 12 02:33:49 cqdx kernel: XFS (sdb): xfs_log_force: error 5 returned.
>> >
>> > xfs_repair -n
>> >
>> >
>> > Phase 7 - verify link counts...
>> > would have reset inode 20021 nlinks from 0 to 1
>> > would have reset inode 20789 nlinks from 0 to 1
>> > would have reset inode 35125 nlinks from 0 to 1
>> > would have reset inode 35637 nlinks from 0 to 1
>> > would have reset inode 36149 nlinks from 0 to 1
>> > would have reset inode 38197 nlinks from 0 to 1
>> > would have reset inode 39477 nlinks from 0 to 1
>> > would have reset inode 54069 nlinks from 0 to 1
>> > would have reset inode 62261 nlinks from 0 to 1
>> > would have reset inode 63029 nlinks from 0 to 1
>> > would have reset inode 72501 nlinks from 0 to 1
>> > would have reset inode 79925 nlinks from 0 to 1
>> > would have reset inode 81205 nlinks from 0 to 1
>> > would have reset inode 84789 nlinks from 0 to 1
>> > would have reset inode 87861 nlinks from 0 to 1
>> > would have reset inode 90663 nlinks from 0 to 1
>> > would have reset inode 91189 nlinks from 0 to 1
>> > would have reset inode 95541 nlinks from 0 to 1
>> > would have reset inode 98101 nlinks from 0 to 1
>> > would have reset inode 101173 nlinks from 0 to 1
>> > would have reset inode 113205 nlinks from 0 to 1
>> > would have reset inode 114741 nlinks from 0 to 1
>> > would have reset inode 126261 nlinks from 0 to 1
>> > would have reset inode 140597 nlinks from 0 to 1
>> > would have reset inode 144693 nlinks from 0 to 1
>> > would have reset inode 147765 nlinks from 0 to 1
>> > would have reset inode 152885 nlinks from 0 to 1
>> > would have reset inode 161333 nlinks from 0 to 1
>> > would have reset inode 161845 nlinks from 0 to 1
>> > would have reset inode 167477 nlinks from 0 to 1
>> > would have reset inode 172341 nlinks from 0 to 1
>> > would have reset inode 191797 nlinks from 0 to 1
>> > would have reset inode 204853 nlinks from 0 to 1
>> > would have reset inode 205365 nlinks from 0 to 1
>> > would have reset inode 215349 nlinks from 0 to 1
>> > would have reset inode 215861 nlinks from 0 to 1
>> > would have reset inode 216373 nlinks from 0 to 1
>> > would have reset inode 217397 nlinks from 0 to 1
>> > would have reset inode 224309 nlinks from 0 to 1
>> > would have reset inode 225589 nlinks from 0 to 1
>> > would have reset inode 234549 nlinks from 0 to 1
>> > would have reset inode 234805 nlinks from 0 to 1
>> > would have reset inode 249653 nlinks from 0 to 1
>> > would have reset inode 250677 nlinks from 0 to 1
>> > would have reset inode 252469 nlinks from 0 to 1
>> > would have reset inode 261429 nlinks from 0 to 1
>> > would have reset inode 265013 nlinks from 0 to 1
>> > would have reset inode 266805 nlinks from 0 to 1
>> > would have reset inode 267317 nlinks from 0 to 1
>> > would have reset inode 268853 nlinks from 0 to 1
>> > would have reset inode 272437 nlinks from 0 to 1
>> > would have reset inode 273205 nlinks from 0 to 1
>> > would have reset inode 274229 nlinks from 0 to 1
>> > would have reset inode 278325 nlinks from 0 to 1
>> > would have reset inode 278837 nlinks from 0 to 1
>> > would have reset inode 281397 nlinks from 0 to 1
>> > would have reset inode 292661 nlinks from 0 to 1
>> > would have reset inode 300853 nlinks from 0 to 1
>> > would have reset inode 302901 nlinks from 0 to 1
>> > would have reset inode 305205 nlinks from 0 to 1
>> > would have reset inode 314165 nlinks from 0 to 1
>> > would have reset inode 315189 nlinks from 0 to 1
>> > would have reset inode 320309 nlinks from 0 to 1
>> > would have reset inode 324917 nlinks from 0 to 1
>> > would have reset inode 328245 nlinks from 0 to 1
>> > would have reset inode 335925 nlinks from 0 to 1
>> > would have reset inode 339253 nlinks from 0 to 1
>> > would have reset inode 339765 nlinks from 0 to 1
>> > would have reset inode 348213 nlinks from 0 to 1
>> > would have reset inode 360501 nlinks from 0 to 1
>> > would have reset inode 362037 nlinks from 0 to 1
>> > would have reset inode 366389 nlinks from 0 to 1
>> > would have reset inode 385845 nlinks from 0 to 1
>> > would have reset inode 390709 nlinks from 0 to 1
>> > would have reset inode 409141 nlinks from 0 to 1
>> > would have reset inode 413237 nlinks from 0 to 1
>> > would have reset inode 414773 nlinks from 0 to 1
>> > would have reset inode 417845 nlinks from 0 to 1
>> > would have reset inode 436021 nlinks from 0 to 1
>> > would have reset inode 439349 nlinks from 0 to 1
>> > would have reset inode 447029 nlinks from 0 to 1
>> > would have reset inode 491317 nlinks from 0 to 1
>> > would have reset inode 494133 nlinks from 0 to 1
>> > would have reset inode 495413 nlinks from 0 to 1
>> > would have reset inode 501301 nlinks from 0 to 1
>> > would have reset inode 506421 nlinks from 0 to 1
>> > would have reset inode 508469 nlinks from 0 to 1
>> > would have reset inode 508981 nlinks from 0 to 1
>> > would have reset inode 511797 nlinks from 0 to 1
>> > would have reset inode 513077 nlinks from 0 to 1
>> > would have reset inode 517941 nlinks from 0 to 1
>> > would have reset inode 521013 nlinks from 0 to 1
>> > would have reset inode 522805 nlinks from 0 to 1
>> > would have reset inode 523317 nlinks from 0 to 1
>> > would have reset inode 525621 nlinks from 0 to 1
>> > would have reset inode 527925 nlinks from 0 to 1
>> > would have reset inode 535605 nlinks from 0 to 1
>> > would have reset inode 541749 nlinks from 0 to 1
>> > would have reset inode 573493 nlinks from 0 to 1
>> > would have reset inode 578613 nlinks from 0 to 1
>> > would have reset inode 583029 nlinks from 0 to 1
>> > would have reset inode 585525 nlinks from 0 to 1
>> > would have reset inode 586293 nlinks from 0 to 1
>> > would have reset inode 586805 nlinks from 0 to 1
>> > would have reset inode 591413 nlinks from 0 to 1
>> > would have reset inode 594485 nlinks from 0 to 1
>> > would have reset inode 596277 nlinks from 0 to 1
>> > would have reset inode 603189 nlinks from 0 to 1
>> > would have reset inode 613429 nlinks from 0 to 1
>> > would have reset inode 617781 nlinks from 0 to 1
>> > would have reset inode 621877 nlinks from 0 to 1
>> > would have reset inode 623925 nlinks from 0 to 1
>> > would have reset inode 625205 nlinks from 0 to 1
>> > would have reset inode 626741 nlinks from 0 to 1
>> > would have reset inode 639541 nlinks from 0 to 1
>> > would have reset inode 640053 nlinks from 0 to 1
>> > would have reset inode 640565 nlinks from 0 to 1
>> > would have reset inode 645173 nlinks from 0 to 1
>> > would have reset inode 652853 nlinks from 0 to 1
>> > would have reset inode 656181 nlinks from 0 to 1
>> > would have reset inode 659253 nlinks from 0 to 1
>> > would have reset inode 663605 nlinks from 0 to 1
>> > would have reset inode 667445 nlinks from 0 to 1
>> > would have reset inode 680757 nlinks from 0 to 1
>> > would have reset inode 691253 nlinks from 0 to 1
>> > would have reset inode 691765 nlinks from 0 to 1
>> > would have reset inode 697653 nlinks from 0 to 1
>> > would have reset inode 700469 nlinks from 0 to 1
>> > would have reset inode 707893 nlinks from 0 to 1
>> > would have reset inode 716853 nlinks from 0 to 1
>> > would have reset inode 722229 nlinks from 0 to 1
>> > would have reset inode 722741 nlinks from 0 to 1
>> > would have reset inode 723765 nlinks from 0 to 1
>> > would have reset inode 731957 nlinks from 0 to 1
>> > would have reset inode 742965 nlinks from 0 to 1
>> > would have reset inode 743477 nlinks from 0 to 1
>> > would have reset inode 745781 nlinks from 0 to 1
>> > would have reset inode 746293 nlinks from 0 to 1
>> > would have reset inode 774453 nlinks from 0 to 1
>> > would have reset inode 778805 nlinks from 0 to 1
>> > would have reset inode 785013 nlinks from 0 to 1
>> > would have reset inode 785973 nlinks from 0 to 1
>> > would have reset inode 791349 nlinks from 0 to 1
>> > would have reset inode 796981 nlinks from 0 to 1
>> > would have reset inode 803381 nlinks from 0 to 1
>> > would have reset inode 806965 nlinks from 0 to 1
>> > would have reset inode 811798 nlinks from 0 to 1
>> > would have reset inode 812310 nlinks from 0 to 1
>> > would have reset inode 813078 nlinks from 0 to 1
>> > would have reset inode 813607 nlinks from 0 to 1
>> > would have reset inode 814183 nlinks from 0 to 1
>> > would have reset inode 822069 nlinks from 0 to 1
>> > would have reset inode 828469 nlinks from 0 to 1
>> > would have reset inode 830005 nlinks from 0 to 1
>> > would have reset inode 832053 nlinks from 0 to 1
>> > would have reset inode 832565 nlinks from 0 to 1
>> > would have reset inode 836661 nlinks from 0 to 1
>> > would have reset inode 841013 nlinks from 0 to 1
>> > would have reset inode 841525 nlinks from 0 to 1
>> > would have reset inode 845365 nlinks from 0 to 1
>> > would have reset inode 846133 nlinks from 0 to 1
>> > would have reset inode 847157 nlinks from 0 to 1
>> > would have reset inode 852533 nlinks from 0 to 1
>> > would have reset inode 857141 nlinks from 0 to 1
>> > would have reset inode 863271 nlinks from 0 to 1
>> > would have reset inode 866855 nlinks from 0 to 1
>> > would have reset inode 887861 nlinks from 0 to 1
>> > would have reset inode 891701 nlinks from 0 to 1
>> > would have reset inode 894773 nlinks from 0 to 1
>> > would have reset inode 900149 nlinks from 0 to 1
>> > would have reset inode 902197 nlinks from 0 to 1
>> > would have reset inode 906293 nlinks from 0 to 1
>> > would have reset inode 906805 nlinks from 0 to 1
>> > would have reset inode 909877 nlinks from 0 to 1
>> > would have reset inode 925493 nlinks from 0 to 1
>> > would have reset inode 949543 nlinks from 0 to 1
>> > would have reset inode 955175 nlinks from 0 to 1
>> > would have reset inode 963623 nlinks from 0 to 1
>> > would have reset inode 967733 nlinks from 0 to 1
>> > would have reset inode 968231 nlinks from 0 to 1
>> > would have reset inode 982069 nlinks from 0 to 1
>> > would have reset inode 1007413 nlinks from 0 to 1
>> > would have reset inode 1011509 nlinks from 0 to 1
>> > would have reset inode 1014069 nlinks from 0 to 1
>> > would have reset inode 1014581 nlinks from 0 to 1
>> > would have reset inode 1022005 nlinks from 0 to 1
>> > would have reset inode 1022517 nlinks from 0 to 1
>> > would have reset inode 1023029 nlinks from 0 to 1
>> > would have reset inode 1025333 nlinks from 0 to 1
>> > would have reset inode 1043765 nlinks from 0 to 1
>> > would have reset inode 1044789 nlinks from 0 to 1
>> > would have reset inode 1049397 nlinks from 0 to 1
>> > would have reset inode 1050933 nlinks from 0 to 1
>> > would have reset inode 1051445 nlinks from 0 to 1
>> > would have reset inode 1054261 nlinks from 0 to 1
>> > would have reset inode 1060917 nlinks from 0 to 1
>> > would have reset inode 1063477 nlinks from 0 to 1
>> > would have reset inode 1076021 nlinks from 0 to 1
>> > would have reset inode 1081141 nlinks from 0 to 1
>> > would have reset inode 1086261 nlinks from 0 to 1
>> > would have reset inode 1097269 nlinks from 0 to 1
>> > would have reset inode 1099829 nlinks from 0 to 1
>> > would have reset inode 1100853 nlinks from 0 to 1
>> > would have reset inode 1101877 nlinks from 0 to 1
>> > would have reset inode 1126709 nlinks from 0 to 1
>> > would have reset inode 1134389 nlinks from 0 to 1
>> > would have reset inode 1141045 nlinks from 0 to 1
>> > would have reset inode 1141557 nlinks from 0 to 1
>> > would have reset inode 1142581 nlinks from 0 to 1
>> > would have reset inode 1148469 nlinks from 0 to 1
>> > would have reset inode 1153333 nlinks from 0 to 1
>> > would have reset inode 1181749 nlinks from 0 to 1
>> > would have reset inode 1192245 nlinks from 0 to 1
>> > would have reset inode 1198133 nlinks from 0 to 1
>> > would have reset inode 1203765 nlinks from 0 to 1
>> > would have reset inode 1221429 nlinks from 0 to 1
>> > would have reset inode 1223989 nlinks from 0 to 1
>> > would have reset inode 1235509 nlinks from 0 to 1
>> > would have reset inode 1239349 nlinks from 0 to 1
>> > would have reset inode 1240885 nlinks from 0 to 1
>> > would have reset inode 1241397 nlinks from 0 to 1
>> > would have reset inode 1241909 nlinks from 0 to 1
>> > would have reset inode 1242421 nlinks from 0 to 1
>> > would have reset inode 1244981 nlinks from 0 to 1
>> > would have reset inode 1246517 nlinks from 0 to 1
>> > would have reset inode 1253429 nlinks from 0 to 1
>> > would have reset inode 1271861 nlinks from 0 to 1
>> > would have reset inode 1274677 nlinks from 0 to 1
>> > would have reset inode 1277749 nlinks from 0 to 1
>> > would have reset inode 1278773 nlinks from 0 to 1
>> > would have reset inode 1286709 nlinks from 0 to 1
>> > would have reset inode 1288245 nlinks from 0 to 1
>> > would have reset inode 1299765 nlinks from 0 to 1
>> > would have reset inode 1302325 nlinks from 0 to 1
>> > would have reset inode 1304885 nlinks from 0 to 1
>> > would have reset inode 1305397 nlinks from 0 to 1
>> > would have reset inode 1307509 nlinks from 0 to 1
>> > would have reset inode 1309493 nlinks from 0 to 1
>> > would have reset inode 1310517 nlinks from 0 to 1
>> > would have reset inode 1311029 nlinks from 0 to 1
>> > would have reset inode 1312053 nlinks from 0 to 1
>> > would have reset inode 1316917 nlinks from 0 to 1
>> > would have reset inode 1317941 nlinks from 0 to 1
>> > would have reset inode 1320821 nlinks from 0 to 1
>> > would have reset inode 1322805 nlinks from 0 to 1
>> > would have reset inode 1332789 nlinks from 0 to 1
>> > would have reset inode 1336373 nlinks from 0 to 1
>> > would have reset inode 1345653 nlinks from 0 to 1
>> > would have reset inode 1354549 nlinks from 0 to 1
>> > would have reset inode 1361973 nlinks from 0 to 1
>> > would have reset inode 1369909 nlinks from 0 to 1
>> > would have reset inode 1372981 nlinks from 0 to 1
>> > would have reset inode 1388853 nlinks from 0 to 1
>> > would have reset inode 1402933 nlinks from 0 to 1
>> > would have reset inode 1403445 nlinks from 0 to 1
>> > would have reset inode 1420085 nlinks from 0 to 1
>> > would have reset inode 1452853 nlinks from 0 to 1
>> > would have reset inode 1456437 nlinks from 0 to 1
>> > would have reset inode 1457973 nlinks from 0 to 1
>> > would have reset inode 1459253 nlinks from 0 to 1
>> > would have reset inode 1467957 nlinks from 0 to 1
>> > would have reset inode 1471541 nlinks from 0 to 1
>> > would have reset inode 1476661 nlinks from 0 to 1
>> > would have reset inode 1479733 nlinks from 0 to 1
>> > would have reset inode 1483061 nlinks from 0 to 1
>> > would have reset inode 1484085 nlinks from 0 to 1
>> > would have reset inode 1486133 nlinks from 0 to 1
>> > would have reset inode 1489461 nlinks from 0 to 1
>> > would have reset inode 1490037 nlinks from 0 to 1
>> > would have reset inode 1492021 nlinks from 0 to 1
>> > would have reset inode 1493557 nlinks from 0 to 1
>> > would have reset inode 1494069 nlinks from 0 to 1
>> > would have reset inode 1496885 nlinks from 0 to 1
>> > would have reset inode 1498421 nlinks from 0 to 1
>> > would have reset inode 1498933 nlinks from 0 to 1
>> > would have reset inode 1499957 nlinks from 0 to 1
>> > would have reset inode 1506101 nlinks from 0 to 1
>> > would have reset inode 1507637 nlinks from 0 to 1
>> > would have reset inode 1510453 nlinks from 0 to 1
>> > would have reset inode 1514293 nlinks from 0 to 1
>> > would have reset inode 1517365 nlinks from 0 to 1
>> > would have reset inode 1520693 nlinks from 0 to 1
>> > would have reset inode 1521973 nlinks from 0 to 1
>> > would have reset inode 1530421 nlinks from 0 to 1
>> > would have reset inode 1530933 nlinks from 0 to 1
>> > would have reset inode 1537333 nlinks from 0 to 1
>> > would have reset inode 1538357 nlinks from 0 to 1
>> > would have reset inode 1548853 nlinks from 0 to 1
>> > would have reset inode 1553973 nlinks from 0 to 1
>> > would have reset inode 1557301 nlinks from 0 to 1
>> > would have reset inode 1564213 nlinks from 0 to 1
>> > would have reset inode 1564725 nlinks from 0 to 1
>> > would have reset inode 1576501 nlinks from 0 to 1
>> > would have reset inode 1580597 nlinks from 0 to 1
>> > would have reset inode 1584693 nlinks from 0 to 1
>> > would have reset inode 1586485 nlinks from 0 to 1
>> > would have reset inode 1589301 nlinks from 0 to 1
>> > would have reset inode 1589813 nlinks from 0 to 1
>> > would have reset inode 1592629 nlinks from 0 to 1
>> > would have reset inode 1595701 nlinks from 0 to 1
>> > would have reset inode 1601077 nlinks from 0 to 1
>> > would have reset inode 1623861 nlinks from 0 to 1
>> > would have reset inode 1626677 nlinks from 0 to 1
>> > would have reset inode 1627701 nlinks from 0 to 1
>> > would have reset inode 1633333 nlinks from 0 to 1
>> > would have reset inode 1639221 nlinks from 0 to 1
>> > would have reset inode 1649205 nlinks from 0 to 1
>> > would have reset inode 1686325 nlinks from 0 to 1
>> > would have reset inode 1690677 nlinks from 0 to 1
>> > would have reset inode 1693749 nlinks from 0 to 1
>> > would have reset inode 1704757 nlinks from 0 to 1
>> > would have reset inode 1707061 nlinks from 0 to 1
>> > would have reset inode 1709109 nlinks from 0 to 1
>> > would have reset inode 1719349 nlinks from 0 to 1
>> > would have reset inode 1737013 nlinks from 0 to 1
>> > would have reset inode 1741365 nlinks from 0 to 1
>> > would have reset inode 1747509 nlinks from 0 to 1
>> > would have reset inode 1770805 nlinks from 0 to 1
>> > would have reset inode 1780789 nlinks from 0 to 1
>> > would have reset inode 1793589 nlinks from 0 to 1
>> > would have reset inode 1795125 nlinks from 0 to 1
>> > would have reset inode 1800757 nlinks from 0 to 1
>> > would have reset inode 1801269 nlinks from 0 to 1
>> > would have reset inode 1802549 nlinks from 0 to 1
>> > would have reset inode 1804085 nlinks from 0 to 1
>> > would have reset inode 1817141 nlinks from 0 to 1
>> > would have reset inode 1821749 nlinks from 0 to 1
>> > would have reset inode 1832757 nlinks from 0 to 1
>> > would have reset inode 1836341 nlinks from 0 to 1
>> > would have reset inode 1856309 nlinks from 0 to 1
>> > would have reset inode 1900597 nlinks from 0 to 1
>> > would have reset inode 1902901 nlinks from 0 to 1
>> > would have reset inode 1912373 nlinks from 0 to 1
>> > would have reset inode 1943093 nlinks from 0 to 1
>> > would have reset inode 1944373 nlinks from 0 to 1
>> > would have reset inode 1954101 nlinks from 0 to 1
>> > would have reset inode 1955893 nlinks from 0 to 1
>> > would have reset inode 1961781 nlinks from 0 to 1
>> > would have reset inode 1974325 nlinks from 0 to 1
>> > would have reset inode 1978677 nlinks from 0 to 1
>> > would have reset inode 1981237 nlinks from 0 to 1
>> > would have reset inode 1992245 nlinks from 0 to 1
>> > would have reset inode 2000949 nlinks from 0 to 1
>> > would have reset inode 2002229 nlinks from 0 to 1
>> > would have reset inode 2004789 nlinks from 0 to 1
>> > would have reset inode 2005301 nlinks from 0 to 1
>> > would have reset inode 2011189 nlinks from 0 to 1
>> > would have reset inode 2012981 nlinks from 0 to 1
>> > would have reset inode 2015285 nlinks from 0 to 1
>> > would have reset inode 2018869 nlinks from 0 to 1
>> > would have reset inode 2028341 nlinks from 0 to 1
>> > would have reset inode 2028853 nlinks from 0 to 1
>> > would have reset inode 2030901 nlinks from 0 to 1
>> > would have reset inode 2032181 nlinks from 0 to 1
>> > would have reset inode 2032693 nlinks from 0 to 1
>> > would have reset inode 2040117 nlinks from 0 to 1
>> > would have reset inode 2053685 nlinks from 0 to 1
>> > would have reset inode 2083893 nlinks from 0 to 1
>> > would have reset inode 2087221 nlinks from 0 to 1
>> > would have reset inode 2095925 nlinks from 0 to 1
>> > would have reset inode 2098741 nlinks from 0 to 1
>> > would have reset inode 2100533 nlinks from 0 to 1
>> > would have reset inode 2101301 nlinks from 0 to 1
>> > would have reset inode 2123573 nlinks from 0 to 1
>> > would have reset inode 2132789 nlinks from 0 to 1
>> > would have reset inode 2133813 nlinks from 0 to 1
>> >
>> >
>> >
>> >
>> >
>> > 2013/4/10 符永涛 <yongtaofu at gmail.com>
>> >
>> > The storage info is as following:
>> > RAID-6
>> > SATA HDD
>> > Controller: PERC H710P Mini (Embedded)
>> > Disk /dev/sdb: 30000.3 GB, 30000346562560 bytes
>> > 255 heads, 63 sectors/track, 3647334 cylinders
>> > Units = cylinders of 16065 * 512 = 8225280 bytes
>> > Sector size (logical/physical): 512 bytes / 512 bytes
>> > I/O size (minimum/optimal): 512 bytes / 512 bytes
>> > Disk identifier: 0x00000000
>> >
>> > sd 0:2:1:0: [sdb] 58594426880 512-byte logical blocks: (30.0 TB/27.2
>> > TiB)
>> > sd 0:2:1:0: [sdb] Write Protect is off
>> > sd 0:2:1:0: [sdb] Mode Sense: 1f 00 00 08
>> > sd 0:2:1:0: [sdb] Write cache: enabled, read cache: enabled, doesn't
>> > support DPO or FUA
>> > sd 0:2:1:0: [sdb] Attached SCSI disk
>> >
>> > *-storage
>> > description: RAID bus controller
>> > product: MegaRAID SAS 2208 [Thunderbolt]
>> > vendor: LSI Logic / Symbios Logic
>> > physical id: 0
>> > bus info: pci@0000:02:00.0
>> > logical name: scsi0
>> > version: 01
>> > width: 64 bits
>> > clock: 33MHz
>> > capabilities: storage pm pciexpress vpd msi msix bus_master
>> > cap_list rom
>> > configuration: driver=megaraid_sas latency=0
>> > resources: irq:42 ioport:fc00(size=256)
>> > memory:dd7fc000-dd7fffff memory:dd780000-dd7bffff
>> > memory:dc800000-dc81ffff(prefetchable)
>> > *-disk:0
>> > description: SCSI Disk
>> > product: PERC H710P
>> > vendor: DELL
>> > physical id: 2.0.0
>> > bus info: scsi@0:2.0.0
>> > logical name: /dev/sda
>> > version: 3.13
>> > serial: 0049d6ce1d9f2035180096fde490f648
>> > size: 558GiB (599GB)
>> > capabilities: partitioned partitioned:dos
>> > configuration: ansiversion=5 signature=000aa336
>> > *-disk:1
>> > description: SCSI Disk
>> > product: PERC H710P
>> > vendor: DELL
>> > physical id: 2.1.0
>> > bus info: scsi@0:2.1.0
>> > logical name: /dev/sdb
>> > logical name: /mnt/xfsd
>> > version: 3.13
>> > serial: 003366f71da22035180096fde490f648
>> > size: 27TiB (30TB)
>> > configuration: ansiversion=5 mount.fstype=xfs
>> > mount.options=rw,relatime,attr2,delaylog,logbsize=64k,sunit=128,swidth=1280,noquota
>> > state=mounted
>> >
>> > Thank you.
>> >
>> >
>> > 2013/4/10 Emmanuel Florac <eflorac at intellique.com>
>> >
>> > On Tue, 9 Apr 2013 23:10:03 +0800,
>> > 符永涛 <yongtaofu at gmail.com> wrote:
>> >
>> > > > Apr 9 11:01:30 cqdx kernel: XFS (sdb): I/O Error Detected.
>> > > > Shutting down filesystem
>> >
>> > This. I/O error detected. That means that at some point the
>> > underlying device (disk, RAID array, SAN volume) couldn't be reached.
>> > So this could very well be a case of a flakey drive, array, cable or
>> > SCSI driver.
>> >
>> > What's the storage setup here?
>> >
>> > --
>> >
>> > ------------------------------------------------------------------------
>> > Emmanuel Florac | Direction technique
>> >                 | Intellique
>> >                 | <eflorac at intellique.com>
>> >                 | +33 1 78 94 84 02
>> > ------------------------------------------------------------------------
>> >
>> >
>> >
>> >
>> > --
>> > 符永涛
>> >
>> >
>> >
>> >
>> > --
>> > 符永涛
>> >
>> >
>> >
>>
>>
>
>
> --
> 符永涛
>
--
符永涛