-import json
 import logging
 from types import SimpleNamespace
 import src.writer_kafka as wk
@@ -23,6 +22,44 @@ def produce(self, topic, key, value, callback):  # noqa: D401
         callback("ERR", None)


+class FakeProducerFlushSequence(FakeProducerSuccess):
+    def __init__(self, sequence):  # sequence of remaining counts per flush call
+        super().__init__()
+        self.sequence = sequence
+        self.flush_calls = 0
+
+    def flush(self, *a, **kw):
+        # Simulate a decreasing number of remaining messages
+        if self.flush_calls < len(self.sequence):
+            val = self.sequence[self.flush_calls]
+        else:
+            val = self.sequence[-1]
+        self.flush_calls += 1
+        return val
+
+
+class FakeProducerTimeout(FakeProducerSuccess):
+    def __init__(self, remaining_value):
+        super().__init__()
+        self.remaining_value = remaining_value
+        self.flush_calls = 0
+
+    def flush(self, *a, **kw):  # always returns the same remaining count > 0 to force the timeout warning
+        self.flush_calls += 1
+        return self.remaining_value
+
+
+class FakeProducerTypeError(FakeProducerSuccess):
+    def __init__(self):
+        super().__init__()
+        self.flush_calls = 0
+
+    # Intentionally omits the timeout parameter, so the first attempt inside flush_with_timeout raises TypeError.
+    def flush(self):  # noqa: D401
+        self.flush_calls += 1
+        return 0
+
+
 def test_write_skips_when_producer_none(monkeypatch):
     wk.STATE["logger"] = logging.getLogger("test")
     wk.STATE["producer"] = None
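
For orientation, below is a minimal sketch of the flush/retry behaviour these fake producers exercise. The names `_MAX_RETRIES` and `flush_with_timeout` are taken from the tests themselves; the function signature, the per-attempt timeout constant `_FLUSH_TIMEOUT`, and the exact warning wording are assumptions, not the actual code in `src/writer_kafka.py`.

_MAX_RETRIES = 5
_FLUSH_TIMEOUT = 1.0  # hypothetical per-attempt timeout in seconds


def flush_with_timeout(producer, logger):
    """Flush until the producer reports 0 queued messages or retries run out."""
    remaining = None
    for attempt in range(1, _MAX_RETRIES + 1):
        try:
            remaining = producer.flush(_FLUSH_TIMEOUT)
        except TypeError:
            # Fallback for producers whose flush() does not accept a timeout argument.
            remaining = producer.flush()
        if remaining == 0:
            return 0
        logger.warning("flush attempt %d: %d messages still queued", attempt, remaining)
    logger.warning("flush timeout: %d messages still queued after %d attempts", remaining, _MAX_RETRIES)
    return remaining

Under this sketch, a non-zero remainder after the final attempt only produces a warning and is not surfaced as an error, which is consistent with the timeout test below expecting `(True, None)` from `wk.write`.
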
@@ -60,3 +97,45 @@ def produce(self, *a, **kw):  # noqa: D401
     wk.STATE["producer"] = RaisingProducer()
     ok, err = wk.write("topic", {"d": 4})
     assert not ok and "boom" in err
+
+
+def test_write_flush_retries_until_success(monkeypatch, caplog):
+    wk.STATE["logger"] = logging.getLogger("test")
+    caplog.set_level(logging.WARNING)
+    # Force a smaller retry cap so the flush sequence length is deterministic
+    monkeypatch.setattr(wk, "_MAX_RETRIES", 5, raising=False)
+    producer = FakeProducerFlushSequence([5, 4, 3, 1, 0])
+    wk.STATE["producer"] = producer
+    ok, err = wk.write("topic", {"e": 5})
+    assert ok and err is None
+    # The retry loop should stop as soon as a flush call reports 0 remaining messages
+    assert producer.flush_calls == 5  # the sequence is consumed up to and including the 0
+    # Every attempt before the successful one logs a warning; the final attempt does not
+    warn_messages = [r.message for r in caplog.records if r.levelno == logging.WARNING]
+    assert any("attempt 1" in m or "attempt 2" in m for m in warn_messages)
+
+
+def test_write_timeout_warning_when_remaining_after_retries(monkeypatch, caplog):
+    wk.STATE["logger"] = logging.getLogger("test")
+    caplog.set_level(logging.WARNING)
+    monkeypatch.setattr(wk, "_MAX_RETRIES", 3, raising=False)
+    producer = FakeProducerTimeout(2)
+    wk.STATE["producer"] = producer
+    ok, err = wk.write("topic", {"f": 6})
+    timeout_warnings = [
+        r.message for r in caplog.records if "timeout" in r.message
+    ]  # the final warning should mention the timeout
+    assert ok and err is None  # write still reports success even when the timeout warning fires
+    assert timeout_warnings, "Expected a timeout warning to be logged"
+    assert producer.flush_calls == 3  # flush was retried the full _MAX_RETRIES (3) times
+
+
+def test_flush_with_timeout_typeerror_fallback(monkeypatch):
+    wk.STATE["logger"] = logging.getLogger("test")
+    monkeypatch.setattr(wk, "_MAX_RETRIES", 4, raising=False)
+    producer = FakeProducerTypeError()
+    wk.STATE["producer"] = producer
+    ok, err = wk.write("topic", {"g": 7})
+    assert ok and err is None
+    # Since flush() reports 0 remaining immediately, only one flush call should be needed
+    assert producer.flush_calls == 1
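
The `FakeProducerSuccess` base class that the new fakes extend sits outside this diff. A plausible shape, inferred only from how the subclasses and the surrounding tests use it, is sketched below; the attribute name `produced` and the delivery-callback payload are assumptions, not the real fixture.

class FakeProducerSuccess:
    def __init__(self):
        self.produced = []  # hypothetical bookkeeping of produced messages

    def produce(self, topic, key, value, callback):
        # Record the message and report a successful delivery (no error).
        self.produced.append((topic, key, value))
        callback(None, SimpleNamespace(topic=topic, key=key, value=value))

    def flush(self, *a, **kw):
        return 0  # nothing left in the internal queue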